// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// DO NOT EDIT
// This file was machine generated by github.com/tensorflow/tensorflow/tensorflow/go/genop/internal
//
// WARNING: These generated wrapper functions for TensorFlow ops are in an
// experimental state. The generated API can change without notice.

package op

import tf "github.com/tensorflow/tensorflow/tensorflow/go"

// optionalAttr is an intentionally un-exported type to hide
// details of how optional attributes to operations are implemented.
type optionalAttr map[string]interface{}

func makeOutputList(op *tf.Operation, start int, output string) ([]tf.Output, int, error) {
	size, err := op.OutputListSize(output)
	if err != nil {
		return nil, start, err
	}
	list := make([]tf.Output, size)
	for i := 0; i < size; i++ {
		list[i] = op.Output(start + i)
	}
	return list, start + size, nil
}
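
// exampleMakeOutputList is an illustrative sketch, not generated code: it
// shows the pattern the wrappers below follow for ops with variable-length
// output lists. The output name "outputs" and the single-list op are
// assumptions; ops with several lists thread the returned index through
// successive makeOutputList calls.
func exampleMakeOutputList(scope *Scope, op *tf.Operation) []tf.Output {
	// Resolve the list size by output name, then collect outputs from index 0.
	list, nextIdx, err := makeOutputList(op, 0, "outputs")
	if err != nil {
		scope.UpdateErr("exampleMakeOutputList", err)
		return nil
	}
	_ = nextIdx // index of the first output after this list
	return list
}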

// TPUPartitionedInputAttr is an optional argument to TPUPartitionedInput.
type TPUPartitionedInputAttr func(optionalAttr)

// TPUPartitionedInputPartitionDim sets the optional partition_dim attribute to value.
//
// value: An integer describing which dimension is partitioned. -1 means
// those inputs are replicated.
// If not specified, defaults to 0
func TPUPartitionedInputPartitionDim(value int64) TPUPartitionedInputAttr {
	return func(m optionalAttr) {
		m["partition_dim"] = value
	}
}

// An op that groups a list of partitioned inputs together.
//
// Arguments:
//	inputs: A list of partitioned inputs which must have the same shape.
//
// Returns A handle which represents the full shape of partitioned tensors.
func TPUPartitionedInput(scope *Scope, inputs []tf.Output, optional ...TPUPartitionedInputAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TPUPartitionedInput",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
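
// exampleTPUPartitionedInput is an illustrative sketch, not generated code:
// a typical call using the functional-options pattern above. The inputs a
// and b are assumed to be same-shaped tf.Output values built elsewhere on
// the graph; partitioning along dimension 0 is an arbitrary choice.
func exampleTPUPartitionedInput(s *Scope, a, b tf.Output) tf.Output {
	return TPUPartitionedInput(s, []tf.Output{a, b},
		TPUPartitionedInputPartitionDim(0))
}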

// Op that loads and executes a TPU program on a TPU device.
//
// For the internal use of the distributed TPU compiler.
func TPUExecute(scope *Scope, args []tf.Output, key tf.Output, Tresults []tf.DataType) (results []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"Tresults": Tresults}
	opspec := tf.OpSpec{
		Type: "TPUExecute",
		Input: []tf.Input{
			tf.OutputList(args), key,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if results, idx, err = makeOutputList(op, idx, "results"); err != nil {
		scope.UpdateErr("TPUExecute", err)
		return
	}
	return results
}

// Returns the result of a TPU compilation.
//
// This operation returns the result of a TPU compilation as a serialized
// CompilationResultProto, which holds a status and an error message if an error
// occurred during compilation.
func TPUCompilationResult(scope *Scope) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TPUCompilationResult",
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// A TPU core selector Op.
//
// This Op produces a set of TPU cores (for warm-up) or a single TPU core
// (for regular inference) to execute the TPU program on. The output is
// consumed by TPUPartitionedCall.
//
// Returns A vector of 1 or more TPU cores.
func TPUOrdinalSelector(scope *Scope) (device_ordinals tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TPUOrdinalSelector",
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Retrieves a single tensor from the computation outfeed. Device ordinal is a
// tensor allowing dynamic outfeed.
//
// This operation will block indefinitely until data is available.
//
// Arguments:
//	device_ordinal: An int scalar tensor, representing the TPU device to use. This should be -1 when
// the Op is running on a TPU device, and >= 0 when the Op is running on the CPU
// device.
//	dtype: The type of elements in the tensor.
//	shape: The shape of the tensor.
//
// Returns A tensor that will be read from the device outfeed.
func OutfeedDequeueV2(scope *Scope, device_ordinal tf.Output, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
	opspec := tf.OpSpec{
		Type: "OutfeedDequeueV2",
		Input: []tf.Input{
			device_ordinal,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
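
// exampleOutfeedDequeueV2 is an illustrative sketch, not generated code:
// dequeue a single float32 matrix from the outfeed, with the device ordinal
// supplied as a graph constant so it could instead come from any runtime
// computation. The shape and the use of the Const helper are assumptions.
func exampleOutfeedDequeueV2(s *Scope) tf.Output {
	// -1 means "the TPU device this op itself is placed on".
	ordinal := Const(s.SubScope("ordinal"), int32(-1))
	return OutfeedDequeueV2(s, ordinal, tf.Float, tf.MakeShape(128, 10))
}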

// OutfeedDequeueTupleAttr is an optional argument to OutfeedDequeueTuple.
type OutfeedDequeueTupleAttr func(optionalAttr)

// OutfeedDequeueTupleDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. This should be -1 when the Op
// is running on a TPU device, and >= 0 when the Op is running on the CPU
// device.
// If not specified, defaults to -1
func OutfeedDequeueTupleDeviceOrdinal(value int64) OutfeedDequeueTupleAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// Retrieve multiple values from the computation outfeed.
//
// This operation will block indefinitely until data is available. Output `i`
// corresponds to XLA tuple element `i`.
//
// Arguments:
//	dtypes: The element types of each element in `outputs`.
//	shapes: The shapes of each tensor in `outputs`.
//
// Returns A list of tensors that will be read from the outfeed.
func OutfeedDequeueTuple(scope *Scope, dtypes []tf.DataType, shapes []tf.Shape, optional ...OutfeedDequeueTupleAttr) (outputs []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes, "shapes": shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OutfeedDequeueTuple",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
		scope.UpdateErr("OutfeedDequeueTuple", err)
		return
	}
	return outputs
}
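
// exampleOutfeedDequeueTuple is an illustrative sketch, not generated code:
// dequeue a float32/int32 pair from the outfeed on the CPU, addressing TPU
// core 0 via the optional device_ordinal attribute. The element types and
// shapes are assumptions.
func exampleOutfeedDequeueTuple(s *Scope) []tf.Output {
	return OutfeedDequeueTuple(s,
		[]tf.DataType{tf.Float, tf.Int32},
		[]tf.Shape{tf.MakeShape(8, 128), tf.MakeShape(8)},
		OutfeedDequeueTupleDeviceOrdinal(0))
}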

// Enqueue a Tensor on the computation outfeed.
//
// Arguments:
//	input: A tensor that will be inserted into the outfeed queue.
//
// Returns the created operation.
func OutfeedEnqueue(scope *Scope, input tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "OutfeedEnqueue",
		Input: []tf.Input{
			input,
		},
	}
	return scope.AddOperation(opspec)
}

// InfeedEnqueuePrelinearizedBufferAttr is an optional argument to InfeedEnqueuePrelinearizedBuffer.
type InfeedEnqueuePrelinearizedBufferAttr func(optionalAttr)

// InfeedEnqueuePrelinearizedBufferDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. This should be -1 when the Op is running on a TPU device
// and >= 0 when the Op is running on the CPU device.
// If not specified, defaults to -1
func InfeedEnqueuePrelinearizedBufferDeviceOrdinal(value int64) InfeedEnqueuePrelinearizedBufferAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// An op which enqueues a prelinearized buffer into the TPU infeed.
//
// Arguments:
//	input: A variant tensor representing linearized output.
//
// Returns the created operation.
func InfeedEnqueuePrelinearizedBuffer(scope *Scope, input tf.Output, optional ...InfeedEnqueuePrelinearizedBufferAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "InfeedEnqueuePrelinearizedBuffer",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// PrelinearizeAttr is an optional argument to Prelinearize.
type PrelinearizeAttr func(optionalAttr)

// PrelinearizeShape sets the optional shape attribute to value.
//
// value: The shape of the tensor.
// If not specified, defaults to {}
func PrelinearizeShape(value tf.Shape) PrelinearizeAttr {
	return func(m optionalAttr) {
		m["shape"] = value
	}
}

// PrelinearizeLayout sets the optional layout attribute to value.
//
// value: A vector holding the requested layout in minor-to-major sequence. If a layout
// attribute is passed but its values are all -1 the layout will be computed by
// the infeed operation.
// If not specified, defaults to {}
func PrelinearizeLayout(value []int64) PrelinearizeAttr {
	return func(m optionalAttr) {
		m["layout"] = value
	}
}

// An op which linearizes one Tensor value to an opaque variant tensor.
//
// Arguments:
//	input: A tensor that will be linearized.
func Prelinearize(scope *Scope, input tf.Output, optional ...PrelinearizeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Prelinearize",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
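
// examplePrelinearizeEnqueue is an illustrative sketch, not generated code:
// linearize a host tensor into an opaque variant and hand it to the
// prelinearized-buffer infeed enqueue op above. The rank-1 input tensor is an
// assumption; a layout of all -1 asks the infeed to pick the layout itself.
func examplePrelinearizeEnqueue(s *Scope, input tf.Output) *tf.Operation {
	buf := Prelinearize(s.SubScope("prelinearize"), input,
		PrelinearizeLayout([]int64{-1}))
	return InfeedEnqueuePrelinearizedBuffer(s.SubScope("enqueue"), buf)
}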

// InfeedEnqueueTupleAttr is an optional argument to InfeedEnqueueTuple.
type InfeedEnqueueTupleAttr func(optionalAttr)

// InfeedEnqueueTupleLayouts sets the optional layouts attribute to value.
//
// value: A vector holding the requested layout in minor-to-major sequence for
// all the tuple shapes, in the order the shapes appear in the "shapes" input.
// The layout elements for a sub-shape can be set to -1, in which case the
// corresponding layout will be computed by the infeed operation.
// If not specified, defaults to {}
func InfeedEnqueueTupleLayouts(value []int64) InfeedEnqueueTupleAttr {
	return func(m optionalAttr) {
		m["layouts"] = value
	}
}

// InfeedEnqueueTupleDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. This should be -1 when the Op
// is running on a TPU device, and >= 0 when the Op is running on the CPU
// device.
// If not specified, defaults to -1
func InfeedEnqueueTupleDeviceOrdinal(value int64) InfeedEnqueueTupleAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// Feeds multiple Tensor values into the computation as an XLA tuple.
//
// Arguments:
//	inputs: A list of tensors that will be provided using the infeed mechanism.
//	shapes: The shapes of each tensor in `inputs`.
//
// Returns the created operation.
func InfeedEnqueueTuple(scope *Scope, inputs []tf.Output, shapes []tf.Shape, optional ...InfeedEnqueueTupleAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shapes": shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "InfeedEnqueueTuple",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
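
// exampleInfeedEnqueueTuple is an illustrative sketch, not generated code:
// feed a features/labels pair to the TPU as one XLA tuple. The two input
// tensors and their shapes are assumptions and must agree element-wise.
func exampleInfeedEnqueueTuple(s *Scope, features, labels tf.Output) *tf.Operation {
	return InfeedEnqueueTuple(s,
		[]tf.Output{features, labels},
		[]tf.Shape{tf.MakeShape(8, 128), tf.MakeShape(8)},
		InfeedEnqueueTupleDeviceOrdinal(-1))
}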

// ReadVariableXlaSplitNDAttr is an optional argument to ReadVariableXlaSplitND.
type ReadVariableXlaSplitNDAttr func(optionalAttr)

// ReadVariableXlaSplitNDPaddings sets the optional paddings attribute to value.
//
// value: Optional list of right paddings per dimension of input tensor to apply before
// splitting. This can be used to make a dimension evenly divisible.
// If not specified, defaults to {}
func ReadVariableXlaSplitNDPaddings(value []int64) ReadVariableXlaSplitNDAttr {
	return func(m optionalAttr) {
		m["paddings"] = value
	}
}

// Splits resource variable input tensor across all dimensions.
//
// An op which splits the resource variable input tensor based on the given
// num_splits attribute, pads slices optionally, and returns the slices. Slices
// are returned in row-major order.
//
// This op may be generated via the TPU bridge.
//
// For example, with `input` tensor:
// ```
// [[0, 1, 2],
//  [3, 4, 5],
//  [6, 7, 8]]
// ```
// `num_splits`:
// ```
// [2, 2]
// ```
// and `paddings`:
// ```
// [1, 1]
// ```
// the expected `outputs` is:
// ```
// [[0, 1],
//  [3, 4]]
// [[2, 0],
//  [5, 0]]
// [[6, 7],
//  [0, 0]]
// [[8, 0],
//  [0, 0]]
// ```
//
// Arguments:
//	resource: Resource variable of input tensor to split across all dimensions.
//	num_splits: Number of ways to split per dimension. Shape dimensions must be evenly
// divisible.
//
// Returns Output slices based on input and num_splits defined, in row-major order.
func ReadVariableXlaSplitND(scope *Scope, resource tf.Output, T tf.DataType, N int64, num_splits []int64, optional ...ReadVariableXlaSplitNDAttr) (outputs []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"T": T, "N": N, "num_splits": num_splits}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ReadVariableXlaSplitND",
		Input: []tf.Input{
			resource,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
		scope.UpdateErr("ReadVariableXlaSplitND", err)
		return
	}
	return outputs
}

// XlaConcatNDAttr is an optional argument to XlaConcatND.
type XlaConcatNDAttr func(optionalAttr)

// XlaConcatNDPaddings sets the optional paddings attribute to value.
//
// value: Optional list of right paddings per dimension to strip from the final merged
// tensor. These paddings must not exceed the dimension size of the merged result
// prior to stripping paddings.
// If not specified, defaults to {}
func XlaConcatNDPaddings(value []int64) XlaConcatNDAttr {
	return func(m optionalAttr) {
		m["paddings"] = value
	}
}

// Concats input tensor across all dimensions.
//
// An op which merges slices of the input tensor based on the given num_concats
// attribute, strips paddings optionally, and returns the merged tensor without
// paddings.
//
// This op may be generated via the TPU bridge.
//
// For example, with `input` tensor:
// ```
// [[0, 1],
//  [4, 5]]
// [[2, 3],
//  [6, 7]]
// [[8, 9],
//  [12, 13]]
// [[10, 11],
//  [14, 15]]
// ```
// `num_concats`:
// ```
// [2, 2]
// ```
// and `paddings`:
// ```
// [1, 1]
// ```
// the expected `outputs` is:
// ```
// [[0, 1, 2],
//  [4, 5, 6],
//  [8, 9, 10]]
// ```
//
// Arguments:
//	inputs: Input tensor slices in row-major order to merge across all dimensions. All
// inputs must have the same shape.
//	num_concats: Number of ways to merge per dimension.
//
// Returns Output tensor formed from merging input slices based on num_concats defined.
func XlaConcatND(scope *Scope, inputs []tf.Output, num_concats []int64, optional ...XlaConcatNDAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_concats": num_concats}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "XlaConcatND",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
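
// exampleXlaConcatND is an illustrative sketch, not generated code: it mirrors
// the worked example above, merging four 2x2 slices into a 3x3 tensor after
// stripping one row and one column of padding. The input slices are
// assumptions built elsewhere on the graph.
func exampleXlaConcatND(s *Scope, slices []tf.Output) tf.Output {
	return XlaConcatND(s, slices, []int64{2, 2},
		XlaConcatNDPaddings([]int64{1, 1}))
}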

// XlaSplitNDAttr is an optional argument to XlaSplitND.
type XlaSplitNDAttr func(optionalAttr)

// XlaSplitNDPaddings sets the optional paddings attribute to value.
//
// value: Optional list of right paddings per dimension of input tensor to apply before
// splitting. This can be used to make a dimension evenly divisible.
// If not specified, defaults to {}
func XlaSplitNDPaddings(value []int64) XlaSplitNDAttr {
	return func(m optionalAttr) {
		m["paddings"] = value
	}
}

// Splits input tensor across all dimensions.
//
// An op which slices the input tensor based on the given num_splits attribute,
// pads slices optionally, and returns the slices. Slices are returned in
// row-major order.
//
// This op may be generated via the TPU bridge.
//
// For example, with `input` tensor:
// ```
// [[0, 1, 2],
//  [3, 4, 5],
//  [6, 7, 8]]
// ```
// `num_splits`:
// ```
// [2, 2]
// ```
// and `paddings`:
// ```
// [1, 1]
// ```
// the expected `outputs` is:
// ```
// [[0, 1],
//  [3, 4]]
// [[2, 0],
//  [5, 0]]
// [[6, 7],
//  [0, 0]]
// [[8, 0],
//  [0, 0]]
// ```
//
// Arguments:
//	input: Input tensor to split across all dimensions.
//	num_splits: Number of ways to split per dimension. Shape dimensions must be evenly
// divisible.
//
// Returns Output slices based on input and num_splits defined, in row-major order.
func XlaSplitND(scope *Scope, input tf.Output, N int64, num_splits []int64, optional ...XlaSplitNDAttr) (outputs []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"N": N, "num_splits": num_splits}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "XlaSplitND",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
		scope.UpdateErr("XlaSplitND", err)
		return
	}
	return outputs
}
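
// exampleXlaSplitND is an illustrative sketch, not generated code: it mirrors
// the worked example above, splitting a 3x3 tensor into four padded 2x2
// slices. N is the total number of slices, the product of num_splits; the
// input tensor is an assumption.
func exampleXlaSplitND(s *Scope, input tf.Output) []tf.Output {
	return XlaSplitND(s, input, 4, []int64{2, 2},
		XlaSplitNDPaddings([]int64{1, 1}))
}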

// Worker heartbeat op.
//
// Heartbeats may be sent periodically to indicate the coordinator is still active,
// to retrieve the current worker status and to expedite shutdown when necessary.
//
// Arguments:
//	request: A string tensor containing a serialized WorkerHeartbeatRequest
//
// Returns A string tensor containing a serialized WorkerHeartbeatResponse
func WorkerHeartbeat(scope *Scope, request tf.Output) (response tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "WorkerHeartbeat",
		Input: []tf.Input{
			request,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr is an optional argument to RetrieveTPUEmbeddingFrequencyEstimatorParameters.
type RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingFrequencyEstimatorParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingFrequencyEstimatorParametersTableId(value int64) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingFrequencyEstimatorParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingFrequencyEstimatorParametersTableName(value string) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingFrequencyEstimatorParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingFrequencyEstimatorParametersConfig(value string) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve frequency estimator embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the frequency estimator optimization algorithm.
//	last_hit_step: Parameter last_hit_step updated by the frequency estimator optimization
// algorithm.
func RetrieveTPUEmbeddingFrequencyEstimatorParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr) (parameters tf.Output, last_hit_step tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingFrequencyEstimatorParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// LoadTPUEmbeddingAdadeltaParametersAttr is an optional argument to LoadTPUEmbeddingAdadeltaParameters.
type LoadTPUEmbeddingAdadeltaParametersAttr func(optionalAttr)

// LoadTPUEmbeddingAdadeltaParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingAdadeltaParametersTableId(value int64) LoadTPUEmbeddingAdadeltaParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingAdadeltaParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingAdadeltaParametersTableName(value string) LoadTPUEmbeddingAdadeltaParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingAdadeltaParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingAdadeltaParametersConfig(value string) LoadTPUEmbeddingAdadeltaParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load Adadelta embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the Adadelta optimization algorithm.
//	accumulators: Value of accumulators used in the Adadelta optimization algorithm.
//	updates: Value of updates used in the Adadelta optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingAdadeltaParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, updates tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdadeltaParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingAdadeltaParameters",
		Input: []tf.Input{
			parameters, accumulators, updates,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// LoadTPUEmbeddingMDLAdagradLightParametersAttr is an optional argument to LoadTPUEmbeddingMDLAdagradLightParameters.
type LoadTPUEmbeddingMDLAdagradLightParametersAttr func(optionalAttr)

// LoadTPUEmbeddingMDLAdagradLightParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingMDLAdagradLightParametersTableId(value int64) LoadTPUEmbeddingMDLAdagradLightParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingMDLAdagradLightParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingMDLAdagradLightParametersTableName(value string) LoadTPUEmbeddingMDLAdagradLightParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingMDLAdagradLightParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingMDLAdagradLightParametersConfig(value string) LoadTPUEmbeddingMDLAdagradLightParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load MDL Adagrad Light embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the MDL Adagrad Light optimization algorithm.
//	accumulators: Value of accumulators used in the MDL Adagrad Light optimization algorithm.
//	weights: Value of weights used in the MDL Adagrad Light optimization algorithm.
//	benefits: Value of benefits used in the MDL Adagrad Light optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingMDLAdagradLightParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, weights tf.Output, benefits tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingMDLAdagradLightParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingMDLAdagradLightParameters",
		Input: []tf.Input{
			parameters, accumulators, weights, benefits,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// LoadTPUEmbeddingRMSPropParametersAttr is an optional argument to LoadTPUEmbeddingRMSPropParameters.
type LoadTPUEmbeddingRMSPropParametersAttr func(optionalAttr)

// LoadTPUEmbeddingRMSPropParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingRMSPropParametersTableId(value int64) LoadTPUEmbeddingRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingRMSPropParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingRMSPropParametersTableName(value string) LoadTPUEmbeddingRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingRMSPropParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingRMSPropParametersConfig(value string) LoadTPUEmbeddingRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load RMSProp embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the RMSProp optimization algorithm.
//	ms: Value of ms used in the RMSProp optimization algorithm.
//	mom: Value of mom used in the RMSProp optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingRMSPropParameters(scope *Scope, parameters tf.Output, ms tf.Output, mom tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingRMSPropParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingRMSPropParameters",
		Input: []tf.Input{
			parameters, ms, mom,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// RetrieveTPUEmbeddingMomentumParametersAttr is an optional argument to RetrieveTPUEmbeddingMomentumParameters.
type RetrieveTPUEmbeddingMomentumParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingMomentumParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingMomentumParametersTableId(value int64) RetrieveTPUEmbeddingMomentumParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingMomentumParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingMomentumParametersTableName(value string) RetrieveTPUEmbeddingMomentumParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingMomentumParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingMomentumParametersConfig(value string) RetrieveTPUEmbeddingMomentumParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve Momentum embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the Momentum optimization algorithm.
//	momenta: Parameter momenta updated by the Momentum optimization algorithm.
func RetrieveTPUEmbeddingMomentumParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingMomentumParametersAttr) (parameters tf.Output, momenta tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingMomentumParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// LoadTPUEmbeddingFrequencyEstimatorParametersAttr is an optional argument to LoadTPUEmbeddingFrequencyEstimatorParameters.
type LoadTPUEmbeddingFrequencyEstimatorParametersAttr func(optionalAttr)

// LoadTPUEmbeddingFrequencyEstimatorParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingFrequencyEstimatorParametersTableId(value int64) LoadTPUEmbeddingFrequencyEstimatorParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingFrequencyEstimatorParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingFrequencyEstimatorParametersTableName(value string) LoadTPUEmbeddingFrequencyEstimatorParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingFrequencyEstimatorParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingFrequencyEstimatorParametersConfig(value string) LoadTPUEmbeddingFrequencyEstimatorParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load frequency estimator embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the frequency estimator optimization algorithm.
//	last_hit_step: Value of last_hit_step used in the frequency estimator optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingFrequencyEstimatorParameters(scope *Scope, parameters tf.Output, last_hit_step tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingFrequencyEstimatorParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingFrequencyEstimatorParameters",
		Input: []tf.Input{
			parameters, last_hit_step,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
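
// exampleFrequencyEstimatorRoundTrip is an illustrative sketch, not generated
// code, of the checkpoint pattern the comments above describe: install
// parameters before training, then read them back before saving. The
// single-shard num_shards/shard_id values, the table name, and the input
// tensors are assumptions.
func exampleFrequencyEstimatorRoundTrip(s *Scope, params, lastHit tf.Output) (tf.Output, tf.Output) {
	load := LoadTPUEmbeddingFrequencyEstimatorParameters(
		s.SubScope("load"), params, lastHit, 1, 0,
		LoadTPUEmbeddingFrequencyEstimatorParametersTableName("table0"))
	_ = load // run before the training loop
	return RetrieveTPUEmbeddingFrequencyEstimatorParameters(
		s.SubScope("retrieve"), 1, 0,
		RetrieveTPUEmbeddingFrequencyEstimatorParametersTableName("table0"))
}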

// LoadTPUEmbeddingADAMParametersAttr is an optional argument to LoadTPUEmbeddingADAMParameters.
type LoadTPUEmbeddingADAMParametersAttr func(optionalAttr)

// LoadTPUEmbeddingADAMParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingADAMParametersTableId(value int64) LoadTPUEmbeddingADAMParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingADAMParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingADAMParametersTableName(value string) LoadTPUEmbeddingADAMParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingADAMParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingADAMParametersConfig(value string) LoadTPUEmbeddingADAMParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load ADAM embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the ADAM optimization algorithm.
//	momenta: Value of momenta used in the ADAM optimization algorithm.
//	velocities: Value of velocities used in the ADAM optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingADAMParameters(scope *Scope, parameters tf.Output, momenta tf.Output, velocities tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingADAMParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingADAMParameters",
		Input: []tf.Input{
			parameters, momenta, velocities,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// LoadTPUEmbeddingFTRLParametersAttr is an optional argument to LoadTPUEmbeddingFTRLParameters.
type LoadTPUEmbeddingFTRLParametersAttr func(optionalAttr)

// LoadTPUEmbeddingFTRLParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingFTRLParametersTableId(value int64) LoadTPUEmbeddingFTRLParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingFTRLParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingFTRLParametersTableName(value string) LoadTPUEmbeddingFTRLParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingFTRLParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingFTRLParametersConfig(value string) LoadTPUEmbeddingFTRLParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load FTRL embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the FTRL optimization algorithm.
//	accumulators: Value of accumulators used in the FTRL optimization algorithm.
//	linears: Value of linears used in the FTRL optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingFTRLParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, linears tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingFTRLParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingFTRLParameters",
		Input: []tf.Input{
			parameters, accumulators, linears,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// LoadTPUEmbeddingStochasticGradientDescentParametersAttr is an optional argument to LoadTPUEmbeddingStochasticGradientDescentParameters.
type LoadTPUEmbeddingStochasticGradientDescentParametersAttr func(optionalAttr)

// LoadTPUEmbeddingStochasticGradientDescentParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingStochasticGradientDescentParametersTableId(value int64) LoadTPUEmbeddingStochasticGradientDescentParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingStochasticGradientDescentParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingStochasticGradientDescentParametersTableName(value string) LoadTPUEmbeddingStochasticGradientDescentParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingStochasticGradientDescentParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingStochasticGradientDescentParametersConfig(value string) LoadTPUEmbeddingStochasticGradientDescentParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load SGD embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the stochastic gradient descent optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingStochasticGradientDescentParameters(scope *Scope, parameters tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingStochasticGradientDescentParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingStochasticGradientDescentParameters",
		Input: []tf.Input{
			parameters,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// LoadTPUEmbeddingAdagradParametersAttr is an optional argument to LoadTPUEmbeddingAdagradParameters.
type LoadTPUEmbeddingAdagradParametersAttr func(optionalAttr)

// LoadTPUEmbeddingAdagradParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingAdagradParametersTableId(value int64) LoadTPUEmbeddingAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingAdagradParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingAdagradParametersTableName(value string) LoadTPUEmbeddingAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingAdagradParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingAdagradParametersConfig(value string) LoadTPUEmbeddingAdagradParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load Adagrad embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the Adagrad optimization algorithm.
//	accumulators: Value of accumulators used in the Adagrad optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingAdagradParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdagradParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingAdagradParameters",
		Input: []tf.Input{
			parameters, accumulators,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// EnqueueTPUEmbeddingRaggedTensorBatchAttr is an optional argument to EnqueueTPUEmbeddingRaggedTensorBatch.
type EnqueueTPUEmbeddingRaggedTensorBatchAttr func(optionalAttr)

// EnqueueTPUEmbeddingRaggedTensorBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. Should be >= 0 and less than the number
// of TPU cores in the task on which the node is placed.
// If not specified, defaults to -1
func EnqueueTPUEmbeddingRaggedTensorBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// EnqueueTPUEmbeddingRaggedTensorBatchCombiners sets the optional combiners attribute to value.
//
// value: A list of string scalars, one for each embedding table that specify
// how to normalize the embedding activations after weighted summation.
// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
// the sum of the weights be 0 for 'mean' or the sum of the squared weights be
// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
// all tables.
// If not specified, defaults to {}
func EnqueueTPUEmbeddingRaggedTensorBatchCombiners(value []string) EnqueueTPUEmbeddingRaggedTensorBatchAttr {
	return func(m optionalAttr) {
		m["combiners"] = value
	}
}

// EnqueueTPUEmbeddingRaggedTensorBatchMaxSequenceLengths sets the optional max_sequence_lengths attribute to value.
// If not specified, defaults to {}
func EnqueueTPUEmbeddingRaggedTensorBatchMaxSequenceLengths(value []int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr {
	return func(m optionalAttr) {
		m["max_sequence_lengths"] = value
	}
}

// EnqueueTPUEmbeddingRaggedTensorBatchNumFeatures sets the optional num_features attribute to value.
// If not specified, defaults to {}
func EnqueueTPUEmbeddingRaggedTensorBatchNumFeatures(value []int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr {
	return func(m optionalAttr) {
		m["num_features"] = value
	}
}

// Eases the porting of code that uses tf.nn.embedding_lookup().
//
// sample_splits[i], embedding_indices[i] and aggregation_weights[i] correspond
// to the ith feature. table_ids[i] indicates which embedding table to use for
// the ith feature.
//
// The tensors at corresponding positions in two of the input lists,
// embedding_indices and aggregation_weights, must have the same shape, i.e. rank 1
// with dim_size() equal to the total number of lookups into the table described by
// the corresponding feature.
//
// Arguments:
//	sample_splits: A list of rank 1 Tensors specifying the break points for splitting
// embedding_indices and aggregation_weights into rows.
// It corresponds to ids.row_splits in embedding_lookup(), when ids is a
// RaggedTensor.
//	embedding_indices: A list of rank 1 Tensors, indices into the embedding tables.
// It corresponds to ids.values in embedding_lookup(), when ids is a RaggedTensor.
//	aggregation_weights: A list of rank 1 Tensors containing per training example
// aggregation weights. It corresponds to the values field of a RaggedTensor
// with the same row_splits as ids in embedding_lookup(), when ids is a
// RaggedTensor.
//	mode_override: A string input that overrides the mode specified in the
// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
//	table_ids: A list of integers specifying the identifier of the embedding table
// (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the
// corresponding input. The ith input is looked up using table_ids[i]. The size
// of the table_ids list must be equal to that of sample_splits,
// embedding_indices and aggregation_weights.
//
// Returns the created operation.
func EnqueueTPUEmbeddingRaggedTensorBatch(scope *Scope, sample_splits []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, table_ids []int64, optional ...EnqueueTPUEmbeddingRaggedTensorBatchAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"table_ids": table_ids}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "EnqueueTPUEmbeddingRaggedTensorBatch",
		Input: []tf.Input{
			tf.OutputList(sample_splits), tf.OutputList(embedding_indices), tf.OutputList(aggregation_weights), mode_override,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
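
// exampleEnqueueRaggedBatch is an illustrative sketch, not generated code:
// enqueue one ragged feature into embedding table 0, mirroring the
// row_splits/values decomposition of a RaggedTensor described above. All
// input tensors and the 'sum' combiner are assumptions.
func exampleEnqueueRaggedBatch(s *Scope, splits, ids, weights, modeOverride tf.Output) *tf.Operation {
	return EnqueueTPUEmbeddingRaggedTensorBatch(s,
		[]tf.Output{splits}, []tf.Output{ids}, []tf.Output{weights},
		modeOverride, []int64{0},
		EnqueueTPUEmbeddingRaggedTensorBatchCombiners([]string{"sum"}))
}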

// EnqueueTPUEmbeddingSparseTensorBatchAttr is an optional argument to EnqueueTPUEmbeddingSparseTensorBatch.
type EnqueueTPUEmbeddingSparseTensorBatchAttr func(optionalAttr)

// EnqueueTPUEmbeddingSparseTensorBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. Should be >= 0 and less than the number
// of TPU cores in the task on which the node is placed.
// If not specified, defaults to -1
func EnqueueTPUEmbeddingSparseTensorBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingSparseTensorBatchAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// EnqueueTPUEmbeddingSparseTensorBatchCombiners sets the optional combiners attribute to value.
//
// value: A list of string scalars, one for each embedding table that specify
// how to normalize the embedding activations after weighted summation.
// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
// the sum of the weights be 0 for 'mean' or the sum of the squared weights be
// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
// all tables.
// If not specified, defaults to {}
func EnqueueTPUEmbeddingSparseTensorBatchCombiners(value []string) EnqueueTPUEmbeddingSparseTensorBatchAttr {
	return func(m optionalAttr) {
		m["combiners"] = value
	}
}

// EnqueueTPUEmbeddingSparseTensorBatchMaxSequenceLengths sets the optional max_sequence_lengths attribute to value.
// If not specified, defaults to {}
func EnqueueTPUEmbeddingSparseTensorBatchMaxSequenceLengths(value []int64) EnqueueTPUEmbeddingSparseTensorBatchAttr {
	return func(m optionalAttr) {
		m["max_sequence_lengths"] = value
	}
}

// EnqueueTPUEmbeddingSparseTensorBatchNumFeatures sets the optional num_features attribute to value.
// If not specified, defaults to {}
func EnqueueTPUEmbeddingSparseTensorBatchNumFeatures(value []int64) EnqueueTPUEmbeddingSparseTensorBatchAttr {
	return func(m optionalAttr) {
		m["num_features"] = value
	}
}

// Eases the porting of code that uses tf.nn.embedding_lookup_sparse().
//
// sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond
// to the ith feature. table_ids[i] indicates which embedding table to use for
// the ith feature.
//
// The tensors at corresponding positions in the three input lists (sample_indices,
// embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1
// with dim_size() equal to the total number of lookups into the table described by
// the corresponding feature.
//
// Arguments:
//	sample_indices: A list of rank 1 Tensors specifying the training example to
// which the corresponding embedding_indices and aggregation_weights values
// belong. It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse().
//	embedding_indices: A list of rank 1 Tensors, indices into the embedding tables.
// It corresponds to sp_ids.values in embedding_lookup_sparse().
//	aggregation_weights: A list of rank 1 Tensors containing per training example
// aggregation weights. It corresponds to sp_weights.values in
// embedding_lookup_sparse().
//	mode_override: A string input that overrides the mode specified in the
// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
//	table_ids: A list of integers specifying the identifier of the embedding table
// (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the
// corresponding input. The ith input is looked up using table_ids[i]. The size
// of the table_ids list must be equal to that of sample_indices,
// embedding_indices and aggregation_weights.
//
// Returns the created operation.
func EnqueueTPUEmbeddingSparseTensorBatch(scope *Scope, sample_indices []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, table_ids []int64, optional ...EnqueueTPUEmbeddingSparseTensorBatchAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"table_ids": table_ids}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "EnqueueTPUEmbeddingSparseTensorBatch",
		Input: []tf.Input{
			tf.OutputList(sample_indices), tf.OutputList(embedding_indices), tf.OutputList(aggregation_weights), mode_override,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// EnqueueTPUEmbeddingIntegerBatchAttr is an optional argument to EnqueueTPUEmbeddingIntegerBatch.
type EnqueueTPUEmbeddingIntegerBatchAttr func(optionalAttr)

// EnqueueTPUEmbeddingIntegerBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. Should be >= 0 and less than the number
// of TPU cores in the task on which the node is placed.
// If not specified, defaults to -1
func EnqueueTPUEmbeddingIntegerBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingIntegerBatchAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// An op that enqueues a list of input batch tensors to TPUEmbedding.
//
// Arguments:
//	batch: A list of 1D tensors, one for each embedding table, containing the
// indices into the tables.
//	mode_override: A string input that overrides the mode specified in the
// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
//
// Returns the created operation.
func EnqueueTPUEmbeddingIntegerBatch(scope *Scope, batch []tf.Output, mode_override tf.Output, optional ...EnqueueTPUEmbeddingIntegerBatchAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "EnqueueTPUEmbeddingIntegerBatch",
		Input: []tf.Input{
			tf.OutputList(batch), mode_override,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
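
// exampleEnqueueIntegerBatch is an illustrative sketch, not generated code:
// enqueue a single 1D index tensor with the enqueue mode forced to
// 'inference' via a graph constant. The batch tensor is an assumption.
func exampleEnqueueIntegerBatch(s *Scope, batch tf.Output) *tf.Operation {
	mode := Const(s.SubScope("mode"), "inference")
	return EnqueueTPUEmbeddingIntegerBatch(s, []tf.Output{batch}, mode)
}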
1479
1480// An op enabling differentiation of TPU Embeddings.
1481//
1482// This op simply returns its first input, which is assumed to have been sliced
1483// from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of
1484// this op, and its first argument being a trainable Variable, enables automatic
1485// differentiation of graphs containing embeddings via the TPU Embedding Python
1486// libraries.
1487//
1488// Arguments:
1489//	embedding_variable: A trainable variable, enabling optimizers to find this op.
1490//	sliced_activations: The embedding activations Tensor to return.
1491//	table_id: The id of the table in the embedding layer configuration from which
1492// these activations were computed.
1493//	lookup_id: Identifier of the set of embedding indices which produced these
1494// activations.
1495func TPUEmbeddingActivations(scope *Scope, embedding_variable tf.Output, sliced_activations tf.Output, table_id int64, lookup_id int64) (output tf.Output) {
1496	if scope.Err() != nil {
1497		return
1498	}
1499	attrs := map[string]interface{}{"table_id": table_id, "lookup_id": lookup_id}
1500	opspec := tf.OpSpec{
1501		Type: "TPUEmbeddingActivations",
1502		Input: []tf.Input{
1503			embedding_variable, sliced_activations,
1504		},
1505		Attrs: attrs,
1506	}
1507	op := scope.AddOperation(opspec)
1508	return op.Output(0)
1509}
1510
1511// An op that receives embedding activations on the TPU.
1512//
1513// The TPU system performs the embedding lookups and aggregations specified by
1514// the arguments to EnqueueTPUEmbedding(Integer/Sparse/SparseTensor)Batch. The
1515// results of these aggregations are visible to the TensorFlow graph as the
1516// outputs of a RecvTPUEmbeddingActivations op. This op returns a list containing
1517// one Tensor of activations per table specified in the model. There can be at
1518// most one RecvTPUEmbeddingActivations op in the TPU graph.
1519//
1520// Arguments:
1521//	num_outputs: The number of output activation tensors, equal to the number of
1522// embedding tables in the model.
1523//	config: Serialized TPUEmbeddingConfiguration proto.
1524//
1525// Returns A TensorList of embedding activations containing one Tensor per
1526// embedding table in the model.
1527func RecvTPUEmbeddingActivations(scope *Scope, num_outputs int64, config string) (outputs []tf.Output) {
1528	if scope.Err() != nil {
1529		return
1530	}
1531	attrs := map[string]interface{}{"num_outputs": num_outputs, "config": config}
1532	opspec := tf.OpSpec{
1533		Type: "RecvTPUEmbeddingActivations",
1534
1535		Attrs: attrs,
1536	}
1537	op := scope.AddOperation(opspec)
1538	if scope.Err() != nil {
1539		return
1540	}
1541	var idx int
1542	var err error
1543	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
1544		scope.UpdateErr("RecvTPUEmbeddingActivations", err)
1545		return
1546	}
1547	return outputs
1548}
1549
1550// Sets up TPUEmbedding in a distributed TPU system.
1551//
1552// Arguments:
1553//	config: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that
1554// describes the embedding lookups of the program.
1555//
1556// Returns the created operation.
1557func ConfigureTPUEmbedding(scope *Scope, config string) (o *tf.Operation) {
1558	if scope.Err() != nil {
1559		return
1560	}
1561	attrs := map[string]interface{}{"config": config}
1562	opspec := tf.OpSpec{
1563		Type: "ConfigureTPUEmbedding",
1564
1565		Attrs: attrs,
1566	}
1567	return scope.AddOperation(opspec)
1568}
1569
1570// Shuts down a running distributed TPU system.
1571//
1572// The op returns an error if no system is running.
1573//
1574// Returns the created operation.
1575func ShutdownDistributedTPU(scope *Scope) (o *tf.Operation) {
1576	if scope.Err() != nil {
1577		return
1578	}
1579	opspec := tf.OpSpec{
1580		Type: "ShutdownDistributedTPU",
1581	}
1582	return scope.AddOperation(opspec)
1583}
1584
1585// ConfigureDistributedTPUAttr is an optional argument to ConfigureDistributedTPU.
1586type ConfigureDistributedTPUAttr func(optionalAttr)
1587
1588// ConfigureDistributedTPUEmbeddingConfig sets the optional embedding_config attribute to value.
1589//
1590// value: Reserved. Do not use.
1591// If not specified, defaults to ""
1592func ConfigureDistributedTPUEmbeddingConfig(value string) ConfigureDistributedTPUAttr {
1593	return func(m optionalAttr) {
1594		m["embedding_config"] = value
1595	}
1596}
1597
1598// ConfigureDistributedTPUTpuEmbeddingConfig sets the optional tpu_embedding_config attribute to value.
1599//
1600// value: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that
1601// describes the embedding lookups of the program.
1602// If not specified, defaults to ""
1603func ConfigureDistributedTPUTpuEmbeddingConfig(value string) ConfigureDistributedTPUAttr {
1604	return func(m optionalAttr) {
1605		m["tpu_embedding_config"] = value
1606	}
1607}
1608
1609// ConfigureDistributedTPUIsGlobalInit sets the optional is_global_init attribute to value.
1610//
1611// value: Reserved. Do not use.
1612// If not specified, defaults to false
1613func ConfigureDistributedTPUIsGlobalInit(value bool) ConfigureDistributedTPUAttr {
1614	return func(m optionalAttr) {
1615		m["is_global_init"] = value
1616	}
1617}
1618
1619// ConfigureDistributedTPUEnableWholeMeshCompilations sets the optional enable_whole_mesh_compilations attribute to value.
1620// If not specified, defaults to false
1621func ConfigureDistributedTPUEnableWholeMeshCompilations(value bool) ConfigureDistributedTPUAttr {
1622	return func(m optionalAttr) {
1623		m["enable_whole_mesh_compilations"] = value
1624	}
1625}
1626
1627// ConfigureDistributedTPUCompilationFailureClosesChips sets the optional compilation_failure_closes_chips attribute to value.
1628// If not specified, defaults to true
1629func ConfigureDistributedTPUCompilationFailureClosesChips(value bool) ConfigureDistributedTPUAttr {
1630	return func(m optionalAttr) {
1631		m["compilation_failure_closes_chips"] = value
1632	}
1633}
1634
1635// Sets up the centralized structures for a distributed TPU system.
1636//
1637// Returns A serialized tensorflow.tpu.TopologyProto that describes the TPU
1638// topology.
1639func ConfigureDistributedTPU(scope *Scope, optional ...ConfigureDistributedTPUAttr) (topology tf.Output) {
1640	if scope.Err() != nil {
1641		return
1642	}
1643	attrs := map[string]interface{}{}
1644	for _, a := range optional {
1645		a(attrs)
1646	}
1647	opspec := tf.OpSpec{
1648		Type: "ConfigureDistributedTPU",
1649
1650		Attrs: attrs,
1651	}
1652	op := scope.AddOperation(opspec)
1653	return op.Output(0)
1654}
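
// Typical lifecycle sketch (illustrative; assumes a session connected to a
// TPU worker): ConfigureDistributedTPU runs once at startup, its output is
// the serialized TopologyProto used to plan device assignment, and
// ShutdownDistributedTPU runs at teardown.
//
//	s := NewScope()
//	topology := ConfigureDistributedTPU(s) // fetch this tf.Output once at startup
//	shutdown := ShutdownDistributedTPU(s)  // run this *tf.Operation at exit
//	_, _ = topology, shutdown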
1655
1656// ResourceApplyAddSignAttr is an optional argument to ResourceApplyAddSign.
1657type ResourceApplyAddSignAttr func(optionalAttr)
1658
1659// ResourceApplyAddSignUseLocking sets the optional use_locking attribute to value.
1660//
1661// value: If `True`, updating of the var and m tensors is
1662// protected by a lock; otherwise the behavior is undefined, but may exhibit less
1663// contention.
1664// If not specified, defaults to false
1665func ResourceApplyAddSignUseLocking(value bool) ResourceApplyAddSignAttr {
1666	return func(m optionalAttr) {
1667		m["use_locking"] = value
1668	}
1669}
1670
1671// Update '*var' according to the AddSign update.
1672//
1673// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
1674// update <- (alpha + sign_decay * sign(g) * sign(m)) * g
1675// variable <- variable - lr_t * update
1676//
1677// Arguments:
1678//	var_: Should be from a Variable().
1679//	m: Should be from a Variable().
1680//	lr: Scaling factor. Must be a scalar.
1681//	alpha: Must be a scalar.
1682//	sign_decay: Must be a scalar.
1683//	beta: Must be a scalar.
1684//	grad: The gradient.
1685//
1686// Returns the created operation.
1687func ResourceApplyAddSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, alpha tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyAddSignAttr) (o *tf.Operation) {
1688	if scope.Err() != nil {
1689		return
1690	}
1691	attrs := map[string]interface{}{}
1692	for _, a := range optional {
1693		a(attrs)
1694	}
1695	opspec := tf.OpSpec{
1696		Type: "ResourceApplyAddSign",
1697		Input: []tf.Input{
1698			var_, m, lr, alpha, sign_decay, beta, grad,
1699		},
1700		Attrs: attrs,
1701	}
1702	return scope.AddOperation(opspec)
1703}
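
// Minimal wiring sketch (illustrative only): var and m are resource handles,
// typically created with VarHandleOp and initialized with AssignVariableOp
// before this op runs; the scalar hyperparameters can be constants. All names
// below are local assumptions, not part of the generated API.
//
//	v := VarHandleOp(s.SubScope("var"), tf.Float, tf.MakeShape(3))
//	m := VarHandleOp(s.SubScope("m"), tf.Float, tf.MakeShape(3))
//	grad := Const(s.SubScope("grad"), []float32{0.1, -0.2, 0.3})
//	lr := Const(s.SubScope("lr"), float32(0.01))
//	alpha := Const(s.SubScope("alpha"), float32(1.0))
//	signDecay := Const(s.SubScope("sd"), float32(0.99))
//	beta := Const(s.SubScope("beta"), float32(0.9))
//	step := ResourceApplyAddSign(s, v, m, lr, alpha, signDecay, beta, grad)
//	_ = step // run as a session target after initializing v and m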
1704
1705// An op to send a tensor to the host.
1706//
1707// input: the tensor that will be sent to the host.
1708// Tinput: element type for input.
1709// key: A unique identifier for this region used to match up host transfers.
1710//
1711// Returns the created operation.
1712func XlaSendToHost(scope *Scope, input tf.Output, key string) (o *tf.Operation) {
1713	if scope.Err() != nil {
1714		return
1715	}
1716	attrs := map[string]interface{}{"key": key}
1717	opspec := tf.OpSpec{
1718		Type: "XlaSendToHost",
1719		Input: []tf.Input{
1720			input,
1721		},
1722		Attrs: attrs,
1723	}
1724	return scope.AddOperation(opspec)
1725}
1726
1727// ResourceSparseApplyRMSPropAttr is an optional argument to ResourceSparseApplyRMSProp.
1728type ResourceSparseApplyRMSPropAttr func(optionalAttr)
1729
1730// ResourceSparseApplyRMSPropUseLocking sets the optional use_locking attribute to value.
1731//
1732// value: If `True`, updating of the var, ms, and mom tensors is protected
1733// by a lock; otherwise the behavior is undefined, but may exhibit less
1734// contention.
1735// If not specified, defaults to false
1736func ResourceSparseApplyRMSPropUseLocking(value bool) ResourceSparseApplyRMSPropAttr {
1737	return func(m optionalAttr) {
1738		m["use_locking"] = value
1739	}
1740}
1741
1742// Update '*var' according to the RMSProp algorithm.
1743//
1744// Note that in dense implementation of this algorithm, ms and mom will
1745// update even if the grad is zero, but in this sparse implementation, ms
1746// and mom will not update in iterations during which the grad is zero.
1747//
1748// mean_square = decay * mean_square + (1-decay) * gradient ** 2
1749// Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
1750//
1751// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
1752// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
1753// var <- var - mom
1754//
1755// Arguments:
1756//	var_: Should be from a Variable().
1757//	ms: Should be from a Variable().
1758//	mom: Should be from a Variable().
1759//	lr: Scaling factor. Must be a scalar.
1760//	rho: Decay rate. Must be a scalar.
1761//	momentum: Momentum. Must be a scalar.
1762//	epsilon: Ridge term. Must be a scalar.
1763//	grad: The gradient.
1764//	indices: A vector of indices into the first dimension of var, ms and mom.
1765//
1766// Returns the created operation.
1767func ResourceSparseApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyRMSPropAttr) (o *tf.Operation) {
1768	if scope.Err() != nil {
1769		return
1770	}
1771	attrs := map[string]interface{}{}
1772	for _, a := range optional {
1773		a(attrs)
1774	}
1775	opspec := tf.OpSpec{
1776		Type: "ResourceSparseApplyRMSProp",
1777		Input: []tf.Input{
1778			var_, ms, mom, lr, rho, momentum, epsilon, grad, indices,
1779		},
1780		Attrs: attrs,
1781	}
1782	return scope.AddOperation(opspec)
1783}
1784
1785// ResourceApplyAdaMaxAttr is an optional argument to ResourceApplyAdaMax.
1786type ResourceApplyAdaMaxAttr func(optionalAttr)
1787
1788// ResourceApplyAdaMaxUseLocking sets the optional use_locking attribute to value.
1789//
1790// value: If `True`, updating of the var, m, and v tensors will be protected
1791// by a lock; otherwise the behavior is undefined, but may exhibit less
1792// contention.
1793// If not specified, defaults to false
1794func ResourceApplyAdaMaxUseLocking(value bool) ResourceApplyAdaMaxAttr {
1795	return func(m optionalAttr) {
1796		m["use_locking"] = value
1797	}
1798}
1799
1800// Update '*var' according to the AdaMax algorithm.
1801//
1802// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
1803// v_t <- max(beta2 * v_{t-1}, abs(g))
1804// variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
1805//
1806// Arguments:
1807//	var_: Should be from a Variable().
1808//	m: Should be from a Variable().
1809//	v: Should be from a Variable().
1810//	beta1_power: Must be a scalar.
1811//	lr: Scaling factor. Must be a scalar.
1812//	beta1: Momentum factor. Must be a scalar.
1813//	beta2: Momentum factor. Must be a scalar.
1814//	epsilon: Ridge term. Must be a scalar.
1815//	grad: The gradient.
1816//
1817// Returns the created operation.
1818func ResourceApplyAdaMax(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdaMaxAttr) (o *tf.Operation) {
1819	if scope.Err() != nil {
1820		return
1821	}
1822	attrs := map[string]interface{}{}
1823	for _, a := range optional {
1824		a(attrs)
1825	}
1826	opspec := tf.OpSpec{
1827		Type: "ResourceApplyAdaMax",
1828		Input: []tf.Input{
1829			var_, m, v, beta1_power, lr, beta1, beta2, epsilon, grad,
1830		},
1831		Attrs: attrs,
1832	}
1833	return scope.AddOperation(opspec)
1834}
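
// Worked single step (hand-computed from the update rule above, for
// intuition): with m = 0, v = 0, beta1 = 0.9, beta2 = 0.999, lr = 0.01,
// epsilon ~ 0, grad g = 1 and t = 1:
//
//	m_t  = 0.9*0 + 0.1*1         = 0.1
//	v_t  = max(0.999*0, |1|)     = 1
//	step = 0.01/(1-0.9) * 0.1/1  = 0.01
//
// so the variable moves by -0.01; the bias correction makes the effective
// first step equal to lr.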
1835
1836// ResourceApplyKerasMomentumAttr is an optional argument to ResourceApplyKerasMomentum.
1837type ResourceApplyKerasMomentumAttr func(optionalAttr)
1838
1839// ResourceApplyKerasMomentumUseLocking sets the optional use_locking attribute to value.
1840//
1841// value: If `True`, updating of the var and accum tensors will be protected
1842// by a lock; otherwise the behavior is undefined, but may exhibit less
1843// contention.
1844// If not specified, defaults to false
1845func ResourceApplyKerasMomentumUseLocking(value bool) ResourceApplyKerasMomentumAttr {
1846	return func(m optionalAttr) {
1847		m["use_locking"] = value
1848	}
1849}
1850
1851// ResourceApplyKerasMomentumUseNesterov sets the optional use_nesterov attribute to value.
1852//
1853// value: If `True`, the tensor passed to compute grad will be
1854// var + momentum * accum, so in the end, the var you get is actually
1855// var + momentum * accum.
1856// If not specified, defaults to false
1857func ResourceApplyKerasMomentumUseNesterov(value bool) ResourceApplyKerasMomentumAttr {
1858	return func(m optionalAttr) {
1859		m["use_nesterov"] = value
1860	}
1861}
1862
1863// Update '*var' according to the momentum scheme.
1864//
1865// Set use_nesterov = True if you want to use Nesterov momentum.
1866//
1867// accum = accum * momentum - lr * grad
1868// var += accum
1869//
1870// Arguments:
1871//	var_: Should be from a Variable().
1872//	accum: Should be from a Variable().
1873//	lr: Scaling factor. Must be a scalar.
1874//	grad: The gradient.
1875//	momentum: Momentum. Must be a scalar.
1876//
1877// Returns the created operation.
1878func ResourceApplyKerasMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyKerasMomentumAttr) (o *tf.Operation) {
1879	if scope.Err() != nil {
1880		return
1881	}
1882	attrs := map[string]interface{}{}
1883	for _, a := range optional {
1884		a(attrs)
1885	}
1886	opspec := tf.OpSpec{
1887		Type: "ResourceApplyKerasMomentum",
1888		Input: []tf.Input{
1889			var_, accum, lr, grad, momentum,
1890		},
1891		Attrs: attrs,
1892	}
1893	return scope.AddOperation(opspec)
1894}
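
// The optional attributes follow the functional-options pattern used by every
// wrapper in this package. A hedged usage sketch (scope `s`, resource handles
// and tensors assumed to exist):
//
//	step := ResourceApplyKerasMomentum(s, v, accum, lr, grad, momentum,
//		ResourceApplyKerasMomentumUseNesterov(true),
//		ResourceApplyKerasMomentumUseLocking(true))
//	_ = step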
1895
1896// ResourceSparseApplyMomentumAttr is an optional argument to ResourceSparseApplyMomentum.
1897type ResourceSparseApplyMomentumAttr func(optionalAttr)
1898
1899// ResourceSparseApplyMomentumUseLocking sets the optional use_locking attribute to value.
1900//
1901// value: If `True`, updating of the var and accum tensors will be protected
1902// by a lock; otherwise the behavior is undefined, but may exhibit less
1903// contention.
1904// If not specified, defaults to false
1905func ResourceSparseApplyMomentumUseLocking(value bool) ResourceSparseApplyMomentumAttr {
1906	return func(m optionalAttr) {
1907		m["use_locking"] = value
1908	}
1909}
1910
1911// ResourceSparseApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
1912//
1913// value: If `True`, the tensor passed to compute grad will be
1914// var - lr * momentum * accum, so in the end, the var you get is actually
1915// var - lr * momentum * accum.
1916// If not specified, defaults to false
1917func ResourceSparseApplyMomentumUseNesterov(value bool) ResourceSparseApplyMomentumAttr {
1918	return func(m optionalAttr) {
1919		m["use_nesterov"] = value
1920	}
1921}
1922
1923// Update relevant entries in '*var' and '*accum' according to the momentum scheme.
1924//
1925// Set use_nesterov = True if you want to use Nesterov momentum.
1926//
1927// That is for rows we have grad for, we update var and accum as follows:
1928//
1929// accum = accum * momentum + grad
1930// var -= lr * accum
1931//
1932// Arguments:
1933//	var_: Should be from a Variable().
1934//	accum: Should be from a Variable().
1935//	lr: Learning rate. Must be a scalar.
1936//	grad: The gradient.
1937//	indices: A vector of indices into the first dimension of var and accum.
1938//	momentum: Momentum. Must be a scalar.
1939//
1940// Returns the created operation.
1941func ResourceSparseApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyMomentumAttr) (o *tf.Operation) {
1942	if scope.Err() != nil {
1943		return
1944	}
1945	attrs := map[string]interface{}{}
1946	for _, a := range optional {
1947		a(attrs)
1948	}
1949	opspec := tf.OpSpec{
1950		Type: "ResourceSparseApplyMomentum",
1951		Input: []tf.Input{
1952			var_, accum, lr, grad, indices, momentum,
1953		},
1954		Attrs: attrs,
1955	}
1956	return scope.AddOperation(opspec)
1957}
1958
1959// ResourceApplyMomentumAttr is an optional argument to ResourceApplyMomentum.
1960type ResourceApplyMomentumAttr func(optionalAttr)
1961
1962// ResourceApplyMomentumUseLocking sets the optional use_locking attribute to value.
1963//
1964// value: If `True`, updating of the var and accum tensors will be protected
1965// by a lock; otherwise the behavior is undefined, but may exhibit less
1966// contention.
1967// If not specified, defaults to false
1968func ResourceApplyMomentumUseLocking(value bool) ResourceApplyMomentumAttr {
1969	return func(m optionalAttr) {
1970		m["use_locking"] = value
1971	}
1972}
1973
1974// ResourceApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
1975//
1976// value: If `True`, the tensor passed to compute grad will be
1977// var - lr * momentum * accum, so in the end, the var you get is actually
1978// var - lr * momentum * accum.
1979// If not specified, defaults to false
1980func ResourceApplyMomentumUseNesterov(value bool) ResourceApplyMomentumAttr {
1981	return func(m optionalAttr) {
1982		m["use_nesterov"] = value
1983	}
1984}
1985
1986// Update '*var' according to the momentum scheme.
1987//
1988// Set use_nesterov = True if you want to use Nesterov momentum.
1989//
1990// accum = accum * momentum + grad
1991// var -= lr * accum
1992//
1993// Arguments:
1994//	var_: Should be from a Variable().
1995//	accum: Should be from a Variable().
1996//	lr: Scaling factor. Must be a scalar.
1997//	grad: The gradient.
1998//	momentum: Momentum. Must be a scalar.
1999//
2000// Returns the created operation.
2001func ResourceApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyMomentumAttr) (o *tf.Operation) {
2002	if scope.Err() != nil {
2003		return
2004	}
2005	attrs := map[string]interface{}{}
2006	for _, a := range optional {
2007		a(attrs)
2008	}
2009	opspec := tf.OpSpec{
2010		Type: "ResourceApplyMomentum",
2011		Input: []tf.Input{
2012			var_, accum, lr, grad, momentum,
2013		},
2014		Attrs: attrs,
2015	}
2016	return scope.AddOperation(opspec)
2017}
2018
2019// ResourceSparseApplyFtrlV2Attr is an optional argument to ResourceSparseApplyFtrlV2.
2020type ResourceSparseApplyFtrlV2Attr func(optionalAttr)
2021
2022// ResourceSparseApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
2023//
2024// value: If `True`, updating of the var and accum tensors will be protected
2025// by a lock; otherwise the behavior is undefined, but may exhibit less
2026// contention.
2027// If not specified, defaults to false
2028func ResourceSparseApplyFtrlV2UseLocking(value bool) ResourceSparseApplyFtrlV2Attr {
2029	return func(m optionalAttr) {
2030		m["use_locking"] = value
2031	}
2032}
2033
2034// ResourceSparseApplyFtrlV2MultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value.
2035// If not specified, defaults to false
2036func ResourceSparseApplyFtrlV2MultiplyLinearByLr(value bool) ResourceSparseApplyFtrlV2Attr {
2037	return func(m optionalAttr) {
2038		m["multiply_linear_by_lr"] = value
2039	}
2040}
2041
2042// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
2043//
2044// That is for rows we have grad for, we update var, accum and linear as follows:
2045// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
2046// accum_new = accum + grad * grad
2047// linear += grad_with_shrinkage -
2048//     (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
2049// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
2050// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
2051// accum = accum_new
2052//
2053// Arguments:
2054//	var_: Should be from a Variable().
2055//	accum: Should be from a Variable().
2056//	linear: Should be from a Variable().
2057//	grad: The gradient.
2058//	indices: A vector of indices into the first dimension of var and accum.
2059//	lr: Scaling factor. Must be a scalar.
2060//	l1: L1 regularization. Must be a scalar.
2061//	l2: L2 regularization. Must be a scalar.
2062//	l2_shrinkage: L2 shrinkage regularization. Must be a scalar.
2063//	lr_power: Scaling factor. Must be a scalar.
2064//
2065// Returns the created operation.
2066func ResourceSparseApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlV2Attr) (o *tf.Operation) {
2067	if scope.Err() != nil {
2068		return
2069	}
2070	attrs := map[string]interface{}{}
2071	for _, a := range optional {
2072		a(attrs)
2073	}
2074	opspec := tf.OpSpec{
2075		Type: "ResourceSparseApplyFtrlV2",
2076		Input: []tf.Input{
2077			var_, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power,
2078		},
2079		Attrs: attrs,
2080	}
2081	return scope.AddOperation(opspec)
2082}
2083
2084// ResourceSparseApplyFtrlAttr is an optional argument to ResourceSparseApplyFtrl.
2085type ResourceSparseApplyFtrlAttr func(optionalAttr)
2086
2087// ResourceSparseApplyFtrlUseLocking sets the optional use_locking attribute to value.
2088//
2089// value: If `True`, updating of the var and accum tensors will be protected
2090// by a lock; otherwise the behavior is undefined, but may exhibit less
2091// contention.
2092// If not specified, defaults to false
2093func ResourceSparseApplyFtrlUseLocking(value bool) ResourceSparseApplyFtrlAttr {
2094	return func(m optionalAttr) {
2095		m["use_locking"] = value
2096	}
2097}
2098
2099// ResourceSparseApplyFtrlMultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value.
2100// If not specified, defaults to false
2101func ResourceSparseApplyFtrlMultiplyLinearByLr(value bool) ResourceSparseApplyFtrlAttr {
2102	return func(m optionalAttr) {
2103		m["multiply_linear_by_lr"] = value
2104	}
2105}
2106
2107// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
2108//
2109// That is for rows we have grad for, we update var, accum and linear as follows:
2110// accum_new = accum + grad * grad
2111// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
2112// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
2113// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
2114// accum = accum_new
2115//
2116// Arguments:
2117//	var_: Should be from a Variable().
2118//	accum: Should be from a Variable().
2119//	linear: Should be from a Variable().
2120//	grad: The gradient.
2121//	indices: A vector of indices into the first dimension of var and accum.
2122//	lr: Scaling factor. Must be a scalar.
2123//	l1: L1 regularization. Must be a scalar.
2124//	l2: L2 regularization. Must be a scalar.
2125//	lr_power: Scaling factor. Must be a scalar.
2126//
2127// Returns the created operation.
2128func ResourceSparseApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlAttr) (o *tf.Operation) {
2129	if scope.Err() != nil {
2130		return
2131	}
2132	attrs := map[string]interface{}{}
2133	for _, a := range optional {
2134		a(attrs)
2135	}
2136	opspec := tf.OpSpec{
2137		Type: "ResourceSparseApplyFtrl",
2138		Input: []tf.Input{
2139			var_, accum, linear, grad, indices, lr, l1, l2, lr_power,
2140		},
2141		Attrs: attrs,
2142	}
2143	return scope.AddOperation(opspec)
2144}
2145
2146// ResourceSparseApplyAdagradDAAttr is an optional argument to ResourceSparseApplyAdagradDA.
2147type ResourceSparseApplyAdagradDAAttr func(optionalAttr)
2148
2149// ResourceSparseApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
2150//
2151// value: If True, updating of the var and accum tensors will be protected by
2152// a lock; otherwise the behavior is undefined, but may exhibit less contention.
2153// If not specified, defaults to false
2154func ResourceSparseApplyAdagradDAUseLocking(value bool) ResourceSparseApplyAdagradDAAttr {
2155	return func(m optionalAttr) {
2156		m["use_locking"] = value
2157	}
2158}
2159
2160// Update entries in '*var' and '*accum' according to the proximal adagrad scheme.
2161//
2162// Arguments:
2163//	var_: Should be from a Variable().
2164//	gradient_accumulator: Should be from a Variable().
2165//	gradient_squared_accumulator: Should be from a Variable().
2166//	grad: The gradient.
2167//	indices: A vector of indices into the first dimension of var and accum.
2168//	lr: Learning rate. Must be a scalar.
2169//	l1: L1 regularization. Must be a scalar.
2170//	l2: L2 regularization. Must be a scalar.
2171//	global_step: Training step number. Must be a scalar.
2172//
2173// Returns the created operation.
2174func ResourceSparseApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceSparseApplyAdagradDAAttr) (o *tf.Operation) {
2175	if scope.Err() != nil {
2176		return
2177	}
2178	attrs := map[string]interface{}{}
2179	for _, a := range optional {
2180		a(attrs)
2181	}
2182	opspec := tf.OpSpec{
2183		Type: "ResourceSparseApplyAdagradDA",
2184		Input: []tf.Input{
2185			var_, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step,
2186		},
2187		Attrs: attrs,
2188	}
2189	return scope.AddOperation(opspec)
2190}
2191
2192// ResourceSparseApplyAdagradV2Attr is an optional argument to ResourceSparseApplyAdagradV2.
2193type ResourceSparseApplyAdagradV2Attr func(optionalAttr)
2194
2195// ResourceSparseApplyAdagradV2UseLocking sets the optional use_locking attribute to value.
2196//
2197// value: If `True`, updating of the var and accum tensors will be protected
2198// by a lock; otherwise the behavior is undefined, but may exhibit less
2199// contention.
2200// If not specified, defaults to false
2201func ResourceSparseApplyAdagradV2UseLocking(value bool) ResourceSparseApplyAdagradV2Attr {
2202	return func(m optionalAttr) {
2203		m["use_locking"] = value
2204	}
2205}
2206
2207// ResourceSparseApplyAdagradV2UpdateSlots sets the optional update_slots attribute to value.
2208// If not specified, defaults to true
2209func ResourceSparseApplyAdagradV2UpdateSlots(value bool) ResourceSparseApplyAdagradV2Attr {
2210	return func(m optionalAttr) {
2211		m["update_slots"] = value
2212	}
2213}
2214
2215// Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
2216//
2217// That is for rows we have grad for, we update var and accum as follows:
2218// accum += grad * grad
2219// var -= lr * grad * (1 / (sqrt(accum) + epsilon))
2220//
2221// Arguments:
2222//	var_: Should be from a Variable().
2223//	accum: Should be from a Variable().
2224//	lr: Learning rate. Must be a scalar.
2225//	epsilon: Constant factor. Must be a scalar.
2226//	grad: The gradient.
2227//	indices: A vector of indices into the first dimension of var and accum.
2228//
2229// Returns the created operation.
2230func ResourceSparseApplyAdagradV2(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdagradV2Attr) (o *tf.Operation) {
2231	if scope.Err() != nil {
2232		return
2233	}
2234	attrs := map[string]interface{}{}
2235	for _, a := range optional {
2236		a(attrs)
2237	}
2238	opspec := tf.OpSpec{
2239		Type: "ResourceSparseApplyAdagradV2",
2240		Input: []tf.Input{
2241			var_, accum, lr, epsilon, grad, indices,
2242		},
2243		Attrs: attrs,
2244	}
2245	return scope.AddOperation(opspec)
2246}
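
// Sparse-update sketch (illustrative): only the rows named by indices are
// touched, so grad carries exactly one row per index. The handles v and accum
// and the scalars lr and epsilon are assumptions local to this sketch.
//
//	grad := Const(s.SubScope("grad"), [][]float32{{0.1, 0.2}, {0.3, 0.4}})
//	indices := Const(s.SubScope("idx"), []int64{0, 5}) // update rows 0 and 5
//	step := ResourceSparseApplyAdagradV2(s, v, accum, lr, epsilon, grad, indices)
//	_ = step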
2247
2248// ResourceApplyAdagradV2Attr is an optional argument to ResourceApplyAdagradV2.
2249type ResourceApplyAdagradV2Attr func(optionalAttr)
2250
2251// ResourceApplyAdagradV2UseLocking sets the optional use_locking attribute to value.
2252//
2253// value: If `True`, updating of the var and accum tensors will be protected
2254// by a lock; otherwise the behavior is undefined, but may exhibit less
2255// contention.
2256// If not specified, defaults to false
2257func ResourceApplyAdagradV2UseLocking(value bool) ResourceApplyAdagradV2Attr {
2258	return func(m optionalAttr) {
2259		m["use_locking"] = value
2260	}
2261}
2262
2263// ResourceApplyAdagradV2UpdateSlots sets the optional update_slots attribute to value.
2264// If not specified, defaults to true
2265func ResourceApplyAdagradV2UpdateSlots(value bool) ResourceApplyAdagradV2Attr {
2266	return func(m optionalAttr) {
2267		m["update_slots"] = value
2268	}
2269}
2270
2271// Update '*var' according to the adagrad scheme.
2272//
2273// accum += grad * grad
2274// var -= lr * grad * (1 / (sqrt(accum) + epsilon))
2275//
2276// Arguments:
2277//	var_: Should be from a Variable().
2278//	accum: Should be from a Variable().
2279//	lr: Scaling factor. Must be a scalar.
2280//	epsilon: Constant factor. Must be a scalar.
2281//	grad: The gradient.
2282//
2283// Returns the created operation.
2284func ResourceApplyAdagradV2(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdagradV2Attr) (o *tf.Operation) {
2285	if scope.Err() != nil {
2286		return
2287	}
2288	attrs := map[string]interface{}{}
2289	for _, a := range optional {
2290		a(attrs)
2291	}
2292	opspec := tf.OpSpec{
2293		Type: "ResourceApplyAdagradV2",
2294		Input: []tf.Input{
2295			var_, accum, lr, epsilon, grad,
2296		},
2297		Attrs: attrs,
2298	}
2299	return scope.AddOperation(opspec)
2300}
2301
2302// ResourceApplyAdagradAttr is an optional argument to ResourceApplyAdagrad.
2303type ResourceApplyAdagradAttr func(optionalAttr)
2304
2305// ResourceApplyAdagradUseLocking sets the optional use_locking attribute to value.
2306//
2307// value: If `True`, updating of the var and accum tensors will be protected
2308// by a lock; otherwise the behavior is undefined, but may exhibit less
2309// contention.
2310// If not specified, defaults to false
2311func ResourceApplyAdagradUseLocking(value bool) ResourceApplyAdagradAttr {
2312	return func(m optionalAttr) {
2313		m["use_locking"] = value
2314	}
2315}
2316
2317// ResourceApplyAdagradUpdateSlots sets the optional update_slots attribute to value.
2318// If not specified, defaults to true
2319func ResourceApplyAdagradUpdateSlots(value bool) ResourceApplyAdagradAttr {
2320	return func(m optionalAttr) {
2321		m["update_slots"] = value
2322	}
2323}
2324
2325// Update '*var' according to the adagrad scheme.
2326//
2327// accum += grad * grad
2328// var -= lr * grad * (1 / sqrt(accum))
2329//
2330// Arguments:
2331//	var_: Should be from a Variable().
2332//	accum: Should be from a Variable().
2333//	lr: Scaling factor. Must be a scalar.
2334//	grad: The gradient.
2335//
2336// Returns the created operation.
2337func ResourceApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, optional ...ResourceApplyAdagradAttr) (o *tf.Operation) {
2338	if scope.Err() != nil {
2339		return
2340	}
2341	attrs := map[string]interface{}{}
2342	for _, a := range optional {
2343		a(attrs)
2344	}
2345	opspec := tf.OpSpec{
2346		Type: "ResourceApplyAdagrad",
2347		Input: []tf.Input{
2348			var_, accum, lr, grad,
2349		},
2350		Attrs: attrs,
2351	}
2352	return scope.AddOperation(opspec)
2353}
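
// End-to-end sketch from a client package (minimal and illustrative; error
// handling elided, variable initialization shown explicitly):
//
//	package main
//
//	import (
//		tf "github.com/tensorflow/tensorflow/tensorflow/go"
//		"github.com/tensorflow/tensorflow/tensorflow/go/op"
//	)
//
//	func main() {
//		s := op.NewScope()
//		v := op.VarHandleOp(s.SubScope("v"), tf.Float, tf.MakeShape(2))
//		acc := op.VarHandleOp(s.SubScope("acc"), tf.Float, tf.MakeShape(2))
//		initV := op.AssignVariableOp(s.SubScope("iv"), v, op.Const(s.SubScope("v0"), []float32{1, 2}))
//		initA := op.AssignVariableOp(s.SubScope("ia"), acc, op.Const(s.SubScope("a0"), []float32{0.1, 0.1}))
//		lr := op.Const(s.SubScope("lr"), float32(0.1))
//		grad := op.Const(s.SubScope("g"), []float32{0.5, -0.5})
//		step := op.ResourceApplyAdagrad(s, v, acc, lr, grad)
//		read := op.ReadVariableOp(s.SubScope("read"), v, tf.Float)
//		graph, _ := s.Finalize()
//		sess, _ := tf.NewSession(graph, nil)
//		sess.Run(nil, nil, []*tf.Operation{initV, initA}) // initialize variables
//		sess.Run(nil, nil, []*tf.Operation{step})         // one Adagrad update
//		out, _ := sess.Run(nil, []tf.Output{read}, nil)   // fetch the new value
//		_ = out
//	}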
2354
2355// ResourceSparseApplyProximalGradientDescentAttr is an optional argument to ResourceSparseApplyProximalGradientDescent.
2356type ResourceSparseApplyProximalGradientDescentAttr func(optionalAttr)
2357
2358// ResourceSparseApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
2359//
2360// value: If True, the subtraction will be protected by a lock;
2361// otherwise the behavior is undefined, but may exhibit less contention.
2362// If not specified, defaults to false
2363func ResourceSparseApplyProximalGradientDescentUseLocking(value bool) ResourceSparseApplyProximalGradientDescentAttr {
2364	return func(m optionalAttr) {
2365		m["use_locking"] = value
2366	}
2367}
2368
2369// Sparse update '*var' as FOBOS algorithm with fixed learning rate.
2370//
2371// That is for rows we have grad for, we update var as follows:
2372// prox_v = var - alpha * grad
2373// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
2374//
2375// Arguments:
2376//	var_: Should be from a Variable().
2377//	alpha: Scaling factor. Must be a scalar.
2378//	l1: L1 regularization. Must be a scalar.
2379//	l2: L2 regularization. Must be a scalar.
2380//	grad: The gradient.
2381//	indices: A vector of indices into the first dimension of var and accum.
2382//
2383// Returns the created operation.
2384func ResourceSparseApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalGradientDescentAttr) (o *tf.Operation) {
2385	if scope.Err() != nil {
2386		return
2387	}
2388	attrs := map[string]interface{}{}
2389	for _, a := range optional {
2390		a(attrs)
2391	}
2392	opspec := tf.OpSpec{
2393		Type: "ResourceSparseApplyProximalGradientDescent",
2394		Input: []tf.Input{
2395			var_, alpha, l1, l2, grad, indices,
2396		},
2397		Attrs: attrs,
2398	}
2399	return scope.AddOperation(opspec)
2400}
2401
2402// ResourceApplyProximalGradientDescentAttr is an optional argument to ResourceApplyProximalGradientDescent.
2403type ResourceApplyProximalGradientDescentAttr func(optionalAttr)
2404
2405// ResourceApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
2406//
2407// value: If True, the subtraction will be protected by a lock;
2408// otherwise the behavior is undefined, but may exhibit less contention.
2409// If not specified, defaults to false
2410func ResourceApplyProximalGradientDescentUseLocking(value bool) ResourceApplyProximalGradientDescentAttr {
2411	return func(m optionalAttr) {
2412		m["use_locking"] = value
2413	}
2414}
2415
2416// Update '*var' as FOBOS algorithm with fixed learning rate.
2417//
2418// prox_v = var - alpha * delta
2419// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
2420//
2421// Arguments:
2422//	var_: Should be from a Variable().
2423//	alpha: Scaling factor. Must be a scalar.
2424//	l1: L1 regularization. Must be a scalar.
2425//	l2: L2 regularization. Must be a scalar.
2426//	delta: The change.
2427//
2428// Returns the created operation.
2429func ResourceApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, delta tf.Output, optional ...ResourceApplyProximalGradientDescentAttr) (o *tf.Operation) {
2430	if scope.Err() != nil {
2431		return
2432	}
2433	attrs := map[string]interface{}{}
2434	for _, a := range optional {
2435		a(attrs)
2436	}
2437	opspec := tf.OpSpec{
2438		Type: "ResourceApplyProximalGradientDescent",
2439		Input: []tf.Input{
2440			var_, alpha, l1, l2, delta,
2441		},
2442		Attrs: attrs,
2443	}
2444	return scope.AddOperation(opspec)
2445}
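
// Worked example of one FOBOS step (hand-computed from the formulas above):
// with var = 0.5, alpha = 0.1, delta = 1.0, l1 = 0.2, l2 = 0:
//
//	prox_v = 0.5 - 0.1*1.0                           = 0.4
//	var    = sign(0.4)/(1+0) * max(0.4 - 0.1*0.2, 0) = 0.38
//
// The l1 term shrinks the plain gradient step toward zero, and would clip var
// to exactly zero if |prox_v| fell below alpha*l1.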
2446
2447// Creates ngrams from ragged string data.
2448//
2449// This op accepts a ragged tensor with 1 ragged dimension containing only
2450// strings and outputs a ragged tensor with 1 ragged dimension containing ngrams
2451// of that string, joined along the innermost axis.
2452//
2453// Arguments:
2454//	data: The values tensor of the ragged string tensor to make ngrams out of. Must be a
2455// 1D string tensor.
2456//	data_splits: The splits tensor of the ragged string tensor to make ngrams out of.
2457//	separator: The string to append between elements of the token. Use "" for no separator.
2458//	ngram_widths: The sizes of the ngrams to create.
2459//	left_pad: The string to use to pad the left side of the ngram sequence. Only used if
2460// pad_width != 0.
2461//	right_pad: The string to use to pad the right side of the ngram sequence. Only used if
2462// pad_width != 0.
2463//	pad_width: The number of padding elements to add to each side of each
2464// sequence. Note that padding will never be greater than 'ngram_widths'-1
2465// regardless of this value. If `pad_width=-1`, then add `max(ngram_widths)-1`
2466// elements.
2467//	preserve_short_sequences: If true, an input sequence too short to produce any
2468// ngram still yields a single ngram spanning the entire (padded) sequence.
2469// Returns:
2470//	ngrams: The values tensor of the output ngrams ragged tensor.
2471//	ngrams_splits: The splits tensor of the output ngrams ragged tensor.
2472func StringNGrams(scope *Scope, data tf.Output, data_splits tf.Output, separator string, ngram_widths []int64, left_pad string, right_pad string, pad_width int64, preserve_short_sequences bool) (ngrams tf.Output, ngrams_splits tf.Output) {
2473	if scope.Err() != nil {
2474		return
2475	}
2476	attrs := map[string]interface{}{"separator": separator, "ngram_widths": ngram_widths, "left_pad": left_pad, "right_pad": right_pad, "pad_width": pad_width, "preserve_short_sequences": preserve_short_sequences}
2477	opspec := tf.OpSpec{
2478		Type: "StringNGrams",
2479		Input: []tf.Input{
2480			data, data_splits,
2481		},
2482		Attrs: attrs,
2483	}
2484	op := scope.AddOperation(opspec)
2485	return op.Output(0), op.Output(1)
2486}
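
// Worked sketch (hand-derived from the semantics above, not generated docs):
// bigrams over one ragged row of four tokens, with no padding.
//
//	data := Const(s.SubScope("d"), []string{"this", "is", "a", "test"})
//	splits := Const(s.SubScope("sp"), []int64{0, 4}) // one row spanning all 4 values
//	ngrams, ngramsSplits := StringNGrams(s, data, splits, " ", []int64{2}, "", "", 0, false)
//	// expected: ngrams = ["this is", "is a", "a test"], ngrams_splits = [0, 3]
//	_, _ = ngrams, ngramsSplits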
2487
2488// UnicodeDecodeWithOffsetsAttr is an optional argument to UnicodeDecodeWithOffsets.
2489type UnicodeDecodeWithOffsetsAttr func(optionalAttr)
2490
2491// UnicodeDecodeWithOffsetsErrors sets the optional errors attribute to value.
2492//
2493// value: Error handling policy when there is invalid formatting found in the input.
2494// The value of 'strict' will cause the operation to produce an InvalidArgument
2495// error on any invalid input formatting. A value of 'replace' (the default) will
2496// cause the operation to replace any invalid formatting in the input with the
2497// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
2498// skip any invalid formatting in the input and produce no corresponding output
2499// character.
2500// If not specified, defaults to "replace"
2501func UnicodeDecodeWithOffsetsErrors(value string) UnicodeDecodeWithOffsetsAttr {
2502	return func(m optionalAttr) {
2503		m["errors"] = value
2504	}
2505}
2506
2507// UnicodeDecodeWithOffsetsReplacementChar sets the optional replacement_char attribute to value.
2508//
2509// value: The replacement character codepoint to be used in place of any invalid
2510// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
2511// be used. The default value is the default Unicode replacement character,
2512// 0xFFFD (decimal 65533).
2513// If not specified, defaults to 65533
2514func UnicodeDecodeWithOffsetsReplacementChar(value int64) UnicodeDecodeWithOffsetsAttr {
2515	return func(m optionalAttr) {
2516		m["replacement_char"] = value
2517	}
2518}
2519
2520// UnicodeDecodeWithOffsetsReplaceControlCharacters sets the optional replace_control_characters attribute to value.
2521//
2522// value: Whether to replace the C0 control characters (00-1F) with the
2523// `replacement_char`. Default is false.
2524// If not specified, defaults to false
2525func UnicodeDecodeWithOffsetsReplaceControlCharacters(value bool) UnicodeDecodeWithOffsetsAttr {
2526	return func(m optionalAttr) {
2527		m["replace_control_characters"] = value
2528	}
2529}
2530
2531// UnicodeDecodeWithOffsetsTsplits sets the optional Tsplits attribute to value.
2532// If not specified, defaults to DT_INT64
2533func UnicodeDecodeWithOffsetsTsplits(value tf.DataType) UnicodeDecodeWithOffsetsAttr {
2534	return func(m optionalAttr) {
2535		m["Tsplits"] = value
2536	}
2537}
2538
2539// Decodes each string in `input` into a sequence of Unicode code points.
2540//
2541// The character codepoints for all strings are returned using a single vector
2542// `char_values`, with strings expanded to characters in row-major order.
2543// Similarly, the character start byte offsets are returned using a single vector
2544// `char_to_byte_starts`, with strings expanded in row-major order.
2545//
2546// The `row_splits` tensor indicates where the codepoints and start offsets for
2547// each input string begin and end within the `char_values` and
2548// `char_to_byte_starts` tensors.  In particular, the values for the `i`th
2549// string (in row-major order) are stored in the slice
2550// `[row_splits[i]:row_splits[i+1]]`. Thus:
2551//
2552// * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th
2553//   character in the `i`th string (in row-major order).
2554// * `char_to_byte_starts[row_splits[i]+j]` is the start byte offset for the `j`th
2555//   character in the `i`th string (in row-major order).
2556// * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th
2557//   string (in row-major order).
2558//
2559// Arguments:
2560//	input: The text to be decoded. Can have any shape. Note that the output is flattened
2561// to a vector of char values.
2562//	input_encoding: Text encoding of the input strings. This is any of the encodings supported
2563// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
2564//
2565// Returns:
2566//	row_splits: A 1D tensor (of the `Tsplits` type, int64 by default) containing the row splits.
2567//	char_values: A 1D int32 Tensor containing the decoded codepoints.
2568//	char_to_byte_starts: A 1D int64 Tensor containing the byte index in the input string where each
2569// character in `char_values` starts.
2570func UnicodeDecodeWithOffsets(scope *Scope, input tf.Output, input_encoding string, optional ...UnicodeDecodeWithOffsetsAttr) (row_splits tf.Output, char_values tf.Output, char_to_byte_starts tf.Output) {
2571	if scope.Err() != nil {
2572		return
2573	}
2574	attrs := map[string]interface{}{"input_encoding": input_encoding}
2575	for _, a := range optional {
2576		a(attrs)
2577	}
2578	opspec := tf.OpSpec{
2579		Type: "UnicodeDecodeWithOffsets",
2580		Input: []tf.Input{
2581			input,
2582		},
2583		Attrs: attrs,
2584	}
2585	op := scope.AddOperation(opspec)
2586	return op.Output(0), op.Output(1), op.Output(2)
2587}
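
// Worked example (illustrative): decoding one UTF-8 string that contains a
// two-byte character. "héllo" is h (1 byte), é (2 bytes), l, l, o.
//
//	input := Const(s.SubScope("in"), []string{"héllo"})
//	rowSplits, charValues, starts := UnicodeDecodeWithOffsets(s, input, "UTF-8")
//	// expected: row_splits          = [0, 5]
//	//           char_values         = [104, 233, 108, 108, 111]
//	//           char_to_byte_starts = [0, 1, 3, 4, 5]
//	_, _, _ = rowSplits, charValues, starts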
2588
2589// UnicodeTranscodeAttr is an optional argument to UnicodeTranscode.
2590type UnicodeTranscodeAttr func(optionalAttr)
2591
2592// UnicodeTranscodeErrors sets the optional errors attribute to value.
2593//
2594// value: Error handling policy when there is invalid formatting found in the input.
2595// The value of 'strict' will cause the operation to produce an InvalidArgument
2596// error on any invalid input formatting. A value of 'replace' (the default) will
2597// cause the operation to replace any invalid formatting in the input with the
2598// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
2599// skip any invalid formatting in the input and produce no corresponding output
2600// character.
2601// If not specified, defaults to "replace"
2602func UnicodeTranscodeErrors(value string) UnicodeTranscodeAttr {
2603	return func(m optionalAttr) {
2604		m["errors"] = value
2605	}
2606}
2607
2608// UnicodeTranscodeReplacementChar sets the optional replacement_char attribute to value.
2609//
2610// value: The replacement character codepoint to be used in place of any invalid
2611// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
2612// be used. The default value is the default Unicode replacement character,
2613// 0xFFFD (decimal 65533).
2614//
2615// Note that for UTF-8, passing a replacement character expressible in 1 byte, such
2616// as ' ', will preserve string alignment to the source since invalid bytes will be
2617// replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1- or 2-byte
2618// replacement character will preserve byte alignment to the source.
2619// If not specified, defaults to 65533
2620func UnicodeTranscodeReplacementChar(value int64) UnicodeTranscodeAttr {
2621	return func(m optionalAttr) {
2622		m["replacement_char"] = value
2623	}
2624}
2625
2626// UnicodeTranscodeReplaceControlCharacters sets the optional replace_control_characters attribute to value.
2627//
2628// value: Whether to replace the C0 control characters (00-1F) with the
2629// `replacement_char`. Default is false.
2630// If not specified, defaults to false
2631func UnicodeTranscodeReplaceControlCharacters(value bool) UnicodeTranscodeAttr {
2632	return func(m optionalAttr) {
2633		m["replace_control_characters"] = value
2634	}
2635}
2636
2637// Transcode the input text from a source encoding to a destination encoding.
2638//
2639// The input is a string tensor of any shape. The output is a string tensor of
2640// the same shape containing the transcoded strings. Output strings are always
2641// valid unicode. If the input contains invalid encoding positions, the
2642// `errors` attribute sets the policy for how to deal with them. If the default
2643// error-handling policy is used, invalid formatting will be substituted in the
2644// output by the `replacement_char`. If the errors policy is to `ignore`, any
2645// invalid encoding positions in the input are skipped and not included in the
2646// output. If it is set to `strict`, then any invalid formatting will result in an
2647// InvalidArgument error.
2648//
2649// This operation can be used with `output_encoding = input_encoding` to enforce
2650// correct formatting for inputs even if they are already in the desired encoding.
2651//
2652// If the input is prefixed by a Byte Order Mark needed to determine encoding
2653// (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that
2654// BOM will be consumed and not emitted into the output. If the input encoding
2655// is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is
2656// interpreted as a non-breaking space and is preserved in the output (this is
2657// always the case for UTF-8).
2658//
2659// The end result is that if the input is marked as an explicit endianness the
2660// transcoding is faithful to all codepoints in the source. If it is not marked
2661// with an explicit endianness, the BOM is not considered part of the string itself
2662// but as metadata, and so is not preserved in the output.
2663//
2664// Examples:
2665//
2666// >>> tf.strings.unicode_transcode(["Hello", "TensorFlow", "2.x"], "UTF-8", "UTF-16-BE")
2667// <tf.Tensor: shape=(3,), dtype=string, numpy=
2668// array([b'\x00H\x00e\x00l\x00l\x00o',
2669//        b'\x00T\x00e\x00n\x00s\x00o\x00r\x00F\x00l\x00o\x00w',
2670//        b'\x002\x00.\x00x'], dtype=object)>
2671// >>> tf.strings.unicode_transcode(["A", "B", "C"], "US ASCII", "UTF-8").numpy()
2672// array([b'A', b'B', b'C'], dtype=object)
2673//
2674// Arguments:
2675//	input: The text to be processed. Can have any shape.
2676//	input_encoding: Text encoding of the input strings. This is any of the encodings supported
2677// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
2678//	output_encoding: The unicode encoding to use in the output. Must be one of
2679// `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. Multi-byte encodings will be big-endian.
2680//
2681// Returns A string tensor containing unicode text encoded using `output_encoding`.
2682func UnicodeTranscode(scope *Scope, input tf.Output, input_encoding string, output_encoding string, optional ...UnicodeTranscodeAttr) (output tf.Output) {
2683	if scope.Err() != nil {
2684		return
2685	}
2686	attrs := map[string]interface{}{"input_encoding": input_encoding, "output_encoding": output_encoding}
2687	for _, a := range optional {
2688		a(attrs)
2689	}
2690	opspec := tf.OpSpec{
2691		Type: "UnicodeTranscode",
2692		Input: []tf.Input{
2693			input,
2694		},
2695		Attrs: attrs,
2696	}
2697	op := scope.AddOperation(opspec)
2698	return op.Output(0)
2699}
2700
2701// SubstrAttr is an optional argument to Substr.
2702type SubstrAttr func(optionalAttr)
2703
2704// SubstrUnit sets the optional unit attribute to value.
2705//
2706// value: The unit that is used to create the substring.  One of: `"BYTE"` (for
2707// defining position and length by bytes) or `"UTF8_CHAR"` (for the UTF-8
2708// encoded Unicode code points).  The default is `"BYTE"`. Results are undefined if
2709// `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid
2710// UTF-8.
2711// If not specified, defaults to "BYTE"
2712func SubstrUnit(value string) SubstrAttr {
2713	return func(m optionalAttr) {
2714		m["unit"] = value
2715	}
2716}
2717
2718// Return substrings from `Tensor` of strings.
2719//
2720// For each string in the input `Tensor`, creates a substring starting at index
2721// `pos` with a total length of `len`.
2722//
2723// If `len` defines a substring that would extend beyond the length of the input
2724// string, or if `len` is negative, then as many characters as possible are used.
2725//
2726// A negative `pos` indicates distance within the string backwards from the end.
2727//
2728// If `pos` specifies an index which is out of range for any of the input strings,
2729// then an `InvalidArgumentError` is thrown.
2730//
2731// `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on
2732// Op creation.
2733//
2734// *NOTE*: `Substr` supports broadcasting up to two dimensions. More about
2735// broadcasting
2736// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
2737//
2738// ---
2739//
2740// Examples
2741//
2742// Using scalar `pos` and `len`:
2743//
2744// ```python
2745// input = [b'Hello', b'World']
2746// position = 1
2747// length = 3
2748//
2749// output = [b'ell', b'orl']
2750// ```
2751//
2752// Using `pos` and `len` with same shape as `input`:
2753//
2754// ```python
2755// input = [[b'ten', b'eleven', b'twelve'],
2756//          [b'thirteen', b'fourteen', b'fifteen'],
2757//          [b'sixteen', b'seventeen', b'eighteen']]
2758// position = [[1, 2, 3],
2759//             [1, 2, 3],
2760//             [1, 2, 3]]
2761// length =   [[2, 3, 4],
2762//             [4, 3, 2],
2763//             [5, 5, 5]]
2764//
2765// output = [[b'en', b'eve', b'lve'],
2766//           [b'hirt', b'urt', b'te'],
2767//           [b'ixtee', b'vente', b'hteen']]
2768// ```
2769//
2770// Broadcasting `pos` and `len` onto `input`:
2771//
2772// ```python
2773// input = [[b'ten', b'eleven', b'twelve'],
2774//          [b'thirteen', b'fourteen', b'fifteen'],
2775//          [b'sixteen', b'seventeen', b'eighteen'],
2776//          [b'nineteen', b'twenty', b'twentyone']]
2777// position = [1, 2, 3]
2778// length =   [1, 2, 3]
2779//
2780// output = [[b'e', b'ev', b'lve'],
2781//           [b'h', b'ur', b'tee'],
2782//           [b'i', b've', b'hte'],
2783//           [b'i', b'en', b'nty']]
2784// ```
2785//
2786// Broadcasting `input` onto `pos` and `len`:
2787//
2788// ```python
2789// input = b'thirteen'
2790// position = [1, 5, 7]
2791// length =   [3, 2, 1]
2792//
2793// output = [b'hir', b'ee', b'n']
2794// ```
2795//
2796// Raises:
2797//
2798//   * `ValueError`: If the first argument cannot be converted to a
2799//      Tensor of `dtype string`.
2800//   * `InvalidArgumentError`: If indices are out of range.
2801//   * `ValueError`: If `pos` and `len` are not the same shape.
2802//
2803//
2804// Arguments:
2805//	input: Tensor of strings
2806//	pos: Scalar defining the position of first character in each substring
2807//	len: Scalar defining the number of characters to include in each substring
2808//
2809// Returns Tensor of substrings
2810func Substr(scope *Scope, input tf.Output, pos tf.Output, len tf.Output, optional ...SubstrAttr) (output tf.Output) {
2811	if scope.Err() != nil {
2812		return
2813	}
2814	attrs := map[string]interface{}{}
2815	for _, a := range optional {
2816		a(attrs)
2817	}
2818	opspec := tf.OpSpec{
2819		Type: "Substr",
2820		Input: []tf.Input{
2821			input, pos, len,
2822		},
2823		Attrs: attrs,
2824	}
2825	op := scope.AddOperation(opspec)
2826	return op.Output(0)
2827}
2828
2829// Decode web-safe base64-encoded strings.
2830//
2831// Input may or may not have padding at the end. See EncodeBase64 for padding.
2832// Web-safe means that input must use - and _ instead of + and /.
2833//
2834// Arguments:
2835//	input: Base64 strings to decode.
2836//
2837// Returns Decoded strings.
2838func DecodeBase64(scope *Scope, input tf.Output) (output tf.Output) {
2839	if scope.Err() != nil {
2840		return
2841	}
2842	opspec := tf.OpSpec{
2843		Type: "DecodeBase64",
2844		Input: []tf.Input{
2845			input,
2846		},
2847	}
2848	op := scope.AddOperation(opspec)
2849	return op.Output(0)
2850}
2851
2852// EncodeBase64Attr is an optional argument to EncodeBase64.
2853type EncodeBase64Attr func(optionalAttr)
2854
2855// EncodeBase64Pad sets the optional pad attribute to value.
2856//
2857// value: Bool whether padding is applied at the ends.
2858// If not specified, defaults to false
2859func EncodeBase64Pad(value bool) EncodeBase64Attr {
2860	return func(m optionalAttr) {
2861		m["pad"] = value
2862	}
2863}
2864
2865// Encode strings into web-safe base64 format.
2866//
2867// Refer to the following article for more information on base64 format:
2868// en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the
2869// end so that the encoded string's length is a multiple of 4. See the Padding section of the
2870// link above.
2871//
2872// Web-safe means that the encoder uses - and _ instead of + and /.
2873//
2874// Arguments:
2875//	input: Strings to be encoded.
2876//
2877// Returns Input strings encoded in base64.
2878func EncodeBase64(scope *Scope, input tf.Output, optional ...EncodeBase64Attr) (output tf.Output) {
2879	if scope.Err() != nil {
2880		return
2881	}
2882	attrs := map[string]interface{}{}
2883	for _, a := range optional {
2884		a(attrs)
2885	}
2886	opspec := tf.OpSpec{
2887		Type: "EncodeBase64",
2888		Input: []tf.Input{
2889			input,
2890		},
2891		Attrs: attrs,
2892	}
2893	op := scope.AddOperation(opspec)
2894	return op.Output(0)
2895}
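
// Round-trip sketch (illustrative; scope `s` assumed): the web-safe alphabet
// means the encoded strings use '-' and '_' and can be embedded in URLs
// without further escaping.
//
//	msg := Const(s.SubScope("msg"), []string{"hello?>"})
//	enc := EncodeBase64(s, msg, EncodeBase64Pad(true))
//	dec := DecodeBase64(s, enc) // recovers the original strings
//	_ = dec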
2896
2897// StringUpperAttr is an optional argument to StringUpper.
2898type StringUpperAttr func(optionalAttr)
2899
2900// StringUpperEncoding sets the optional encoding attribute to value.
2901//
2902// value: Character encoding of `input`. Allowed values are '' and 'utf-8'.
2903// Value '' is interpreted as ASCII.
2904// If not specified, defaults to ""
2905func StringUpperEncoding(value string) StringUpperAttr {
2906	return func(m optionalAttr) {
2907		m["encoding"] = value
2908	}
2909}
2910
2911// Converts all lowercase characters into their respective uppercase replacements.
2912//
2913// Example:
2914//
2915// >>> tf.strings.upper("CamelCase string and ALL CAPS")
2916// <tf.Tensor: shape=(), dtype=string, numpy=b'CAMELCASE STRING AND ALL CAPS'>
2917//
2918//
2919// Arguments:
2920//	input: The input to be upper-cased.
2921func StringUpper(scope *Scope, input tf.Output, optional ...StringUpperAttr) (output tf.Output) {
2922	if scope.Err() != nil {
2923		return
2924	}
2925	attrs := map[string]interface{}{}
2926	for _, a := range optional {
2927		a(attrs)
2928	}
2929	opspec := tf.OpSpec{
2930		Type: "StringUpper",
2931		Input: []tf.Input{
2932			input,
2933		},
2934		Attrs: attrs,
2935	}
2936	op := scope.AddOperation(opspec)
2937	return op.Output(0)
2938}
2939
2940// StringSplitAttr is an optional argument to StringSplit.
2941type StringSplitAttr func(optionalAttr)
2942
2943// StringSplitSkipEmpty sets the optional skip_empty attribute to value.
2944//
2945// value: A `bool`. If `True`, skip the empty strings from the result.
2946// If not specified, defaults to true
2947func StringSplitSkipEmpty(value bool) StringSplitAttr {
2948	return func(m optionalAttr) {
2949		m["skip_empty"] = value
2950	}
2951}
2952
2953// Split elements of `input` based on `delimiter` into a `SparseTensor`.
2954//
2955// Let N be the size of the source (typically N will be the batch size). Split each
2956// element of `input` based on `delimiter` and return a `SparseTensor`
2957// containing the split tokens. Empty tokens are ignored.
//
// `delimiter` can be empty, or a string of split characters. If `delimiter` is an
//  empty string, each element of `input` is split into individual single-byte
//  character strings, including splitting of UTF-8 multibyte sequences. Otherwise
//  every character of `delimiter` is a potential split point.
//
// For example:
//   N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
//   will be
//
//   indices = [0, 0;
//              0, 1;
//              1, 0;
//              1, 1;
//              1, 2]
//   shape = [2, 3]
//   values = ['hello', 'world', 'a', 'b', 'c']
//
// Arguments:
//	input: 1-D. Strings to split.
//	delimiter: 0-D. Delimiter characters (bytes), or empty string.
//
// Returns:
//	indices: A dense matrix of int64 representing the indices of the sparse tensor.
//	values: A vector of strings corresponding to the split values.
//	shape: a length-2 vector of int64 representing the shape of the sparse
// tensor, where the first value is N and the second value is the maximum number
// of tokens in a single input entry.
func StringSplit(scope *Scope, input tf.Output, delimiter tf.Output, optional ...StringSplitAttr) (indices tf.Output, values tf.Output, shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StringSplit",
		Input: []tf.Input{
			input, delimiter,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
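
// exampleStringSplit is a hand-written, illustrative sketch (not machine
// generated) of the StringSplit wrapper above. It only builds the graph; the
// three outputs would be fetched together in a single Session.Run call, as in
// exampleEncodeBase64.
func exampleStringSplit(s *Scope) (indices, values, shape tf.Output) {
	// Mirrors the documented example: two input strings split on spaces.
	input := Const(s, []string{"hello world", "a b c"})
	delimiter := Const(s, " ")
	// skip_empty defaults to true; shown explicitly for illustration.
	return StringSplit(s, input, delimiter, StringSplitSkipEmpty(true))
}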

// StringJoinAttr is an optional argument to StringJoin.
type StringJoinAttr func(optionalAttr)

// StringJoinSeparator sets the optional separator attribute to value.
//
// value: string, an optional join separator.
// If not specified, defaults to ""
func StringJoinSeparator(value string) StringJoinAttr {
	return func(m optionalAttr) {
		m["separator"] = value
	}
}

// Joins the strings in the given list of string tensors into one tensor,
//
// using the given separator (default is an empty separator).
//
// Examples:
//
// >>> s = ["hello", "world", "tensorflow"]
// >>> tf.strings.join(s, " ")
// <tf.Tensor: shape=(), dtype=string, numpy=b'hello world tensorflow'>
//
// Arguments:
//	inputs: A list of string tensors.  The tensors must all have the same shape,
// or be scalars.  Scalars may be mixed in; these will be broadcast to the shape
// of non-scalar inputs.
func StringJoin(scope *Scope, inputs []tf.Output, optional ...StringJoinAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StringJoin",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
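
// exampleStringJoin is a hand-written, illustrative sketch (not machine
// generated) of the StringJoin wrapper above, joining two string tensors
// element-wise with a space separator.
func exampleStringJoin(s *Scope) tf.Output {
	a := Const(s, []string{"hello", "sparse"})
	b := Const(s, []string{"world", "tensors"})
	// Yields ["hello world", "sparse tensors"].
	return StringJoin(s, []tf.Output{a, b}, StringJoinSeparator(" "))
}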

// RetrieveTPUEmbeddingProximalAdagradParametersAttr is an optional argument to RetrieveTPUEmbeddingProximalAdagradParameters.
type RetrieveTPUEmbeddingProximalAdagradParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingProximalAdagradParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingProximalAdagradParametersTableId(value int64) RetrieveTPUEmbeddingProximalAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingProximalAdagradParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingProximalAdagradParametersTableName(value string) RetrieveTPUEmbeddingProximalAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingProximalAdagradParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingProximalAdagradParametersConfig(value string) RetrieveTPUEmbeddingProximalAdagradParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve proximal Adagrad embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: The parameters updated by the proximal Adagrad optimization algorithm.
//	accumulators: The accumulators updated by the proximal Adagrad optimization algorithm.
func RetrieveTPUEmbeddingProximalAdagradParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingProximalAdagradParametersAttr) (parameters tf.Output, accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingProximalAdagradParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// ReduceJoinAttr is an optional argument to ReduceJoin.
type ReduceJoinAttr func(optionalAttr)

// ReduceJoinKeepDims sets the optional keep_dims attribute to value.
//
// value: If `True`, retain reduced dimensions with length `1`.
// If not specified, defaults to false
func ReduceJoinKeepDims(value bool) ReduceJoinAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// ReduceJoinSeparator sets the optional separator attribute to value.
//
// value: The separator to use when joining.
// If not specified, defaults to ""
func ReduceJoinSeparator(value string) ReduceJoinAttr {
	return func(m optionalAttr) {
		m["separator"] = value
	}
}

// Joins a string Tensor across the given dimensions.
//
// Computes the string join across dimensions in the given string Tensor of shape
// `[\\(d_0, d_1, ..., d_{n-1}\\)]`.  Returns a new Tensor created by joining the input
// strings with the given separator (default: empty string).  Negative indices are
// counted backwards from the end, with `-1` being equivalent to `n - 1`.  If
// indices are not specified, joins across all dimensions beginning from `n - 1`
// through `0`.
//
// For example:
//
// ```python
// # tensor `a` is [["a", "b"], ["c", "d"]]
// tf.reduce_join(a, 0) ==> ["ac", "bd"]
// tf.reduce_join(a, 1) ==> ["ab", "cd"]
// tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
// tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
// tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
// tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
// tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
// tf.reduce_join(a, [0, 1]) ==> "acbd"
// tf.reduce_join(a, [1, 0]) ==> "abcd"
// tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]]
// tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd"
// ```
//
// Arguments:
//	inputs: The input to be joined.  All reduced indices must have non-zero size.
//	reduction_indices: The dimensions to reduce over.  Dimensions are reduced in the
// order specified.  Omitting `reduction_indices` is equivalent to passing
// `[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.
//
// Returns Has shape equal to that of the input with reduced dimensions removed or
// set to `1` depending on `keep_dims`.
func ReduceJoin(scope *Scope, inputs tf.Output, reduction_indices tf.Output, optional ...ReduceJoinAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ReduceJoin",
		Input: []tf.Input{
			inputs, reduction_indices,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Converts each string in the input Tensor to its hash modulo a number of buckets.
//
// The hash function is deterministic on the content of the string within the
// process and will never change. However, it is not suitable for cryptography.
// This function may be used when CPU time is scarce and inputs are trusted or
// unimportant. There is a risk of adversaries constructing inputs that all hash
// to the same bucket. To prevent this problem, use a strong hash function with
// `tf.string_to_hash_bucket_strong`.
//
// Examples:
//
// >>> tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], 3).numpy()
// array([0, 2, 2])
//
// Arguments:
//	input: The strings to assign a hash bucket.
//	num_buckets: The number of buckets.
//
// Returns A Tensor of the same shape as the input `string_tensor`.
func StringToHashBucketFast(scope *Scope, input tf.Output, num_buckets int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_buckets": num_buckets}
	opspec := tf.OpSpec{
		Type: "StringToHashBucketFast",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StaticRegexReplaceAttr is an optional argument to StaticRegexReplace.
type StaticRegexReplaceAttr func(optionalAttr)

// StaticRegexReplaceReplaceGlobal sets the optional replace_global attribute to value.
//
// value: If True, the replacement is global, otherwise the replacement
// is done only on the first match.
// If not specified, defaults to true
func StaticRegexReplaceReplaceGlobal(value bool) StaticRegexReplaceAttr {
	return func(m optionalAttr) {
		m["replace_global"] = value
	}
}

// Replaces the match of pattern in input with rewrite.
//
// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
//
// Arguments:
//	input: The text to be processed.
//	pattern: The regular expression to match the input.
//	rewrite: The rewrite to be applied to the matched expression.
//
// Returns The text after applying pattern and rewrite.
func StaticRegexReplace(scope *Scope, input tf.Output, pattern string, rewrite string, optional ...StaticRegexReplaceAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"pattern": pattern, "rewrite": rewrite}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StaticRegexReplace",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
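
// exampleStaticRegexReplace is a hand-written, illustrative sketch (not
// machine generated) of the StaticRegexReplace wrapper above. The pattern and
// rewrite are compile-time Go strings (attrs), unlike RegexReplace below,
// which takes them as tensors.
func exampleStaticRegexReplace(s *Scope) tf.Output {
	in := Const(s, []string{"one fish two fish"})
	// Replace only the first run of lowercase letters: "*** fish two fish".
	return StaticRegexReplace(s, in, "[a-z]+", "***",
		StaticRegexReplaceReplaceGlobal(false))
}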

// RegexReplaceAttr is an optional argument to RegexReplace.
type RegexReplaceAttr func(optionalAttr)

// RegexReplaceReplaceGlobal sets the optional replace_global attribute to value.
//
// value: If True, the replacement is global (that is, all matches of the `pattern` regular
// expression in each input string are rewritten), otherwise the `rewrite`
// substitution is only made for the first `pattern` match.
// If not specified, defaults to true
func RegexReplaceReplaceGlobal(value bool) RegexReplaceAttr {
	return func(m optionalAttr) {
		m["replace_global"] = value
	}
}

// Replaces matches of the `pattern` regular expression in `input` with the
// replacement string provided in `rewrite`.
//
// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
//
// Arguments:
//	input: The text to be processed.
//	pattern: The regular expression to be matched in the `input` strings.
//	rewrite: The rewrite string to be substituted for the `pattern` expression where it is
// matched in the `input` strings.
//
// Returns The text after applying pattern match and rewrite substitution.
func RegexReplace(scope *Scope, input tf.Output, pattern tf.Output, rewrite tf.Output, optional ...RegexReplaceAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RegexReplace",
		Input: []tf.Input{
			input, pattern, rewrite,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
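
// exampleRegexReplace is a hand-written, illustrative sketch (not machine
// generated) of the RegexReplace wrapper above; here pattern and rewrite are
// graph tensors, so they could also be fed at run time.
func exampleRegexReplace(s *Scope) tf.Output {
	in := Const(s, "one fish two fish")
	pattern := Const(s, "fish")
	rewrite := Const(s, "cat")
	// replace_global defaults to true, so both matches are rewritten.
	return RegexReplace(s, in, pattern, rewrite)
}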

// Outputs deterministic pseudorandom integers from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[minval, maxval)`.
//
// The outputs are a deterministic function of `shape`, `key`, `counter`, `alg`, `minval` and `maxval`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
//	alg: The RNG algorithm (shape int32[]).
//	minval: Minimum value (inclusive, scalar).
//	maxval: Maximum value (exclusive, scalar).
//
// Returns Random values with specified shape.
func StatelessRandomUniformIntV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, minval tf.Output, maxval tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniformIntV2",
		Input: []tf.Input{
			shape, key, counter, alg, minval, maxval,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
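
// exampleStatelessRandomUniformIntV2 is a hand-written, illustrative sketch
// (not machine generated) of the counter-based stateless RNG ops. The alg
// value 1 is assumed to select the Philox algorithm, matching current
// TensorFlow builds; treat it as an assumption, not a stable contract.
func exampleStatelessRandomUniformIntV2(s *Scope) tf.Output {
	shape := Const(s, []int32{4})       // four random values
	key := Const(s, []uint64{42})       // RNG key, shape uint64[1]
	counter := Const(s, []uint64{0, 0}) // Philox counter, shape uint64[2]
	alg := Const(s, int32(1))           // assumption: 1 == Philox
	minval := Const(s, int64(0))        // inclusive lower bound
	maxval := Const(s, int64(10))       // exclusive upper bound
	return StatelessRandomUniformIntV2(s, shape, key, counter, alg, minval, maxval)
}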

// StatelessTruncatedNormalV2Attr is an optional argument to StatelessTruncatedNormalV2.
type StatelessTruncatedNormalV2Attr func(optionalAttr)

// StatelessTruncatedNormalV2Dtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessTruncatedNormalV2Dtype(value tf.DataType) StatelessTruncatedNormalV2Attr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a truncated normal distribution.
//
// The generated values follow a normal distribution with mean 0 and standard
// deviation 1, except that values whose magnitude is more than 2 standard
// deviations from the mean are dropped and re-picked.
//
// The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
//	alg: The RNG algorithm (shape int32[]).
//
// Returns Random values with specified shape.
func StatelessTruncatedNormalV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessTruncatedNormalV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessTruncatedNormalV2",
		Input: []tf.Input{
			shape, key, counter, alg,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatelessRandomNormalV2Attr is an optional argument to StatelessRandomNormalV2.
type StatelessRandomNormalV2Attr func(optionalAttr)

// StatelessRandomNormalV2Dtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessRandomNormalV2Dtype(value tf.DataType) StatelessRandomNormalV2Attr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a normal distribution.
//
// The generated values will have mean 0 and standard deviation 1.
//
// The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
//	alg: The RNG algorithm (shape int32[]).
//
// Returns Random values with specified shape.
func StatelessRandomNormalV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessRandomNormalV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomNormalV2",
		Input: []tf.Input{
			shape, key, counter, alg,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatelessRandomUniformV2Attr is an optional argument to StatelessRandomUniformV2.
type StatelessRandomUniformV2Attr func(optionalAttr)

// StatelessRandomUniformV2Dtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessRandomUniformV2Dtype(value tf.DataType) StatelessRandomUniformV2Attr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[0, 1)`. The
// lower bound 0 is included in the range, while the upper bound 1 is excluded.
//
// The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
//	alg: The RNG algorithm (shape int32[]).
//
// Returns Random values with specified shape.
func StatelessRandomUniformV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessRandomUniformV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniformV2",
		Input: []tf.Input{
			shape, key, counter, alg,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
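
// exampleStatelessV2 is a hand-written, illustrative sketch (not machine
// generated) showing the float counter-based stateless ops above sharing one
// key/counter/alg triple; the dtype attr widens the uniform output to
// float64. As with the integer variant, alg == 1 is assumed to mean Philox.
func exampleStatelessV2(s *Scope) (normal, uniform tf.Output) {
	shape := Const(s, []int32{2, 3})
	key := Const(s, []uint64{42})
	counter := Const(s, []uint64{0, 0})
	alg := Const(s, int32(1)) // assumption: 1 == Philox
	normal = StatelessRandomNormalV2(s, shape, key, counter, alg)
	uniform = StatelessRandomUniformV2(s, shape, key, counter, alg,
		StatelessRandomUniformV2Dtype(tf.Double))
	return normal, uniform
}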

// Outputs deterministic pseudorandom numbers from a gamma distribution.
//
// The outputs are a deterministic function of `shape`, `seed`, and `alpha`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//	alpha: The concentration of the gamma distribution. Shape must match the rightmost
// dimensions of `shape`.
//
// Returns Random values with specified shape.
func StatelessRandomGammaV2(scope *Scope, shape tf.Output, seed tf.Output, alpha tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomGammaV2",
		Input: []tf.Input{
			shape, seed, alpha,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
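
// exampleStatelessRandomGammaV2 is a hand-written, illustrative sketch (not
// machine generated) of the seed-based gamma sampler above. alpha matches the
// rightmost dimension of shape, giving one concentration per column.
func exampleStatelessRandomGammaV2(s *Scope) tf.Output {
	shape := Const(s, []int32{2, 3})
	seed := Const(s, []int64{7, 17}) // 2 seeds (shape [2])
	alpha := Const(s, []float32{0.5, 1.0, 2.0})
	return StatelessRandomGammaV2(s, shape, seed, alpha)
}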

// StatelessMultinomialAttr is an optional argument to StatelessMultinomial.
type StatelessMultinomialAttr func(optionalAttr)

// StatelessMultinomialOutputDtype sets the optional output_dtype attribute to value.
// If not specified, defaults to DT_INT64
func StatelessMultinomialOutputDtype(value tf.DataType) StatelessMultinomialAttr {
	return func(m optionalAttr) {
		m["output_dtype"] = value
	}
}

// Draws samples from a multinomial distribution.
//
// Arguments:
//	logits: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
// represents the unnormalized log probabilities for all classes.
//	num_samples: 0-D.  Number of independent samples to draw for each row slice.
//	seed: 2 seeds (shape [2]).
//
// Returns 2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
// contains the drawn class labels with range `[0, num_classes)`.
func StatelessMultinomial(scope *Scope, logits tf.Output, num_samples tf.Output, seed tf.Output, optional ...StatelessMultinomialAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessMultinomial",
		Input: []tf.Input{
			logits, num_samples, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatelessRandomNormalAttr is an optional argument to StatelessRandomNormal.
type StatelessRandomNormalAttr func(optionalAttr)

// StatelessRandomNormalDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessRandomNormalDtype(value tf.DataType) StatelessRandomNormalAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a normal distribution.
//
// The generated values will have mean 0 and standard deviation 1.
//
// The outputs are a deterministic function of `shape` and `seed`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//
// Returns Random values with specified shape.
func StatelessRandomNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomNormalAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomNormal",
		Input: []tf.Input{
			shape, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatelessRandomUniformAttr is an optional argument to StatelessRandomUniform.
type StatelessRandomUniformAttr func(optionalAttr)

// StatelessRandomUniformDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessRandomUniformDtype(value tf.DataType) StatelessRandomUniformAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[0, 1)`. The
// lower bound 0 is included in the range, while the upper bound 1 is excluded.
//
// The outputs are a deterministic function of `shape` and `seed`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//
// Returns Random values with specified shape.
func StatelessRandomUniform(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomUniformAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniform",
		Input: []tf.Input{
			shape, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
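
// exampleStatelessSeeded is a hand-written, illustrative sketch (not machine
// generated) of the seed-based stateless samplers above: the same shape/seed
// pair always yields the same values, and the two ops draw independent
// streams from it.
func exampleStatelessSeeded(s *Scope) (normal, uniform tf.Output) {
	shape := Const(s, []int32{2, 2})
	seed := Const(s, []int64{1, 2}) // 2 seeds (shape [2])
	normal = StatelessRandomNormal(s, shape, seed)
	uniform = StatelessRandomUniform(s, shape, seed, StatelessRandomUniformDtype(tf.Float))
	return normal, uniform
}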

// EnqueueTPUEmbeddingSparseBatchAttr is an optional argument to EnqueueTPUEmbeddingSparseBatch.
type EnqueueTPUEmbeddingSparseBatchAttr func(optionalAttr)

// EnqueueTPUEmbeddingSparseBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. Should be >= 0 and less than the number
// of TPU cores in the task on which the node is placed.
// If not specified, defaults to -1
func EnqueueTPUEmbeddingSparseBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingSparseBatchAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// EnqueueTPUEmbeddingSparseBatchCombiners sets the optional combiners attribute to value.
//
// value: A list of string scalars, one for each embedding table that specify
// how to normalize the embedding activations after weighted summation.
// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
// the sum of the weights be 0 for 'mean' or the sum of the squared weights be
// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
// all tables.
// If not specified, defaults to {}
func EnqueueTPUEmbeddingSparseBatchCombiners(value []string) EnqueueTPUEmbeddingSparseBatchAttr {
	return func(m optionalAttr) {
		m["combiners"] = value
	}
}

// An op that enqueues TPUEmbedding input indices from a SparseTensor.
//
// This Op eases the porting of code that uses embedding_lookup_sparse(),
// although some Python preprocessing of the SparseTensor arguments to
// embedding_lookup_sparse() is required to produce the arguments to this Op,
// since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training
// step.
//
// The tensors at corresponding positions in the three input lists
// must have the same shape, i.e. rank 1 with dim_size() equal to the total
// number of lookups into the table described by the corresponding table_id.
//
// Arguments:
//	sample_indices: A list of rank 1 Tensors specifying the training example and
// feature to which the corresponding embedding_indices and aggregation_weights
// values belong. sample_indices[i] must equal b * nf + f, where nf is the
// number of features from the corresponding table, f is in [0, nf), and
// b is in [0, batch size).
//	embedding_indices: A list of rank 1 Tensors, indices into the embedding tables.
//	aggregation_weights: A list of rank 1 Tensors containing per sample -- i.e. per
// (training example, feature) -- aggregation weights.
//	mode_override: A string input that overrides the mode specified in the
// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
//
// Returns the created operation.
func EnqueueTPUEmbeddingSparseBatch(scope *Scope, sample_indices []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, optional ...EnqueueTPUEmbeddingSparseBatchAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "EnqueueTPUEmbeddingSparseBatch",
		Input: []tf.Input{
			tf.OutputList(sample_indices), tf.OutputList(embedding_indices), tf.OutputList(aggregation_weights), mode_override,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// ResourceScatterNdUpdateAttr is an optional argument to ResourceScatterNdUpdate.
type ResourceScatterNdUpdateAttr func(optionalAttr)

// ResourceScatterNdUpdateUseLocking sets the optional use_locking attribute to value.
//
// value: An optional bool. Defaults to True. If True, the assignment will
// be protected by a lock; otherwise the behavior is undefined,
// but may exhibit less contention.
// If not specified, defaults to true
func ResourceScatterNdUpdateUseLocking(value bool) ResourceScatterNdUpdateAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Applies sparse `updates` to individual values or slices within a given
//
// variable according to `indices`.
//
// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
//
// `indices` must be an integer tensor, containing indices into `ref`.
// It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
//
// The innermost dimension of `indices` (with length `K`) corresponds to
// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
// dimension of `ref`.
//
// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
//
// ```
// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
// ```
//
// For example, say we want to update 4 scattered elements of a rank-1 tensor
// containing 8 elements. In Python, that update would look like this:
//
// ```python
//     ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
//     indices = tf.constant([[4], [3], [1], [7]])
//     updates = tf.constant([9, 10, 11, 12])
//     update = tf.scatter_nd_update(ref, indices, updates)
//     with tf.Session() as sess:
//       print(sess.run(update))
// ```
//
// The resulting update to ref would look like this:
//
//     [1, 11, 3, 10, 9, 6, 7, 12]
//
// See `tf.scatter_nd` for more details about how to make updates to
// slices.
//
// Arguments:
//	ref: A resource handle. Must be from a VarHandleOp.
//	indices: A Tensor. Must be one of the following types: int32, int64.
// A tensor of indices into ref.
//	updates: A Tensor. Must have the same type as ref. A tensor of updated
// values to add to ref.
//
// Returns the created operation.
func ResourceScatterNdUpdate(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdUpdateAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceScatterNdUpdate",
		Input: []tf.Input{
			ref, indices, updates,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
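
// exampleResourceScatterNdUpdate is a hand-written, illustrative sketch (not
// machine generated) mirroring the Python example above with resource
// variables. VarHandleOp, AssignVariableOp, and ReadVariableOp are other
// wrappers from this generated package; the three Session.Run calls execute
// the initialization, the scatter update, and the read in order.
func exampleResourceScatterNdUpdate() ([]int32, error) {
	s := NewScope()
	ref := VarHandleOp(s, tf.Int32, tf.MakeShape(8))
	init := AssignVariableOp(s, ref, Const(s, []int32{1, 2, 3, 4, 5, 6, 7, 8}))
	indices := Const(s, [][]int32{{4}, {3}, {1}, {7}})
	updates := Const(s, []int32{9, 10, 11, 12})
	update := ResourceScatterNdUpdate(s, ref, indices, updates)
	read := ReadVariableOp(s, ref, tf.Int32)
	graph, err := s.Finalize()
	if err != nil {
		return nil, err
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, err
	}
	defer sess.Close()
	if _, err := sess.Run(nil, nil, []*tf.Operation{init}); err != nil {
		return nil, err
	}
	if _, err := sess.Run(nil, nil, []*tf.Operation{update}); err != nil {
		return nil, err
	}
	out, err := sess.Run(nil, []tf.Output{read}, nil)
	if err != nil {
		return nil, err
	}
	// Expected result: [1, 11, 3, 10, 9, 6, 7, 12].
	return out[0].Value().([]int32), nil
}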

// An Op to permute tensors across replicated TPU instances.
//
// Each instance supplies its own input.
//
// For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing
// source_target_pairs=`[[0,1],[1,2],[2,3],[3,0]]` gets the outputs:
// `[D, A, B, C]`.
//
// Arguments:
//	input: The local input to be permuted. Currently only supports float and
// bfloat16.
//	source_target_pairs: A tensor with shape [num_pairs, 2].
//
// Returns The permuted input.
func CollectivePermute(scope *Scope, input tf.Output, source_target_pairs tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "CollectivePermute",
		Input: []tf.Input{
			input, source_target_pairs,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// IRFFT3DAttr is an optional argument to IRFFT3D.
type IRFFT3DAttr func(optionalAttr)

// IRFFT3DTreal sets the optional Treal attribute to value.
// If not specified, defaults to DT_FLOAT
func IRFFT3DTreal(value tf.DataType) IRFFT3DAttr {
	return func(m optionalAttr) {
		m["Treal"] = value
	}
}

// Inverse 3D real-valued fast Fourier transform.
//
// Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
// signal over the inner-most 3 dimensions of `input`.
//
// The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
// The inner-most dimension contains the `fft_length / 2 + 1` unique components of
// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
// from the size of the inner-most 3 dimensions of `input`. If the FFT length used
// to compute `input` is odd, it should be provided since it cannot be inferred
// properly.
//
// Along each axis `IRFFT3D` is computed on, if `fft_length` (or
// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
// corresponding dimension of `input`, the dimension is cropped. If it is larger,
// the dimension is padded with zeros.
//
// Arguments:
//	input: A complex tensor.
//	fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
//
// Returns A float32 tensor of the same rank as `input`. The inner-most 3
//   dimensions of `input` are replaced with the `fft_length` samples of their
//   inverse 3D real Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.irfftn with 3 dimensions.
// @end_compatibility
func IRFFT3D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFT3DAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "IRFFT3D",
		Input: []tf.Input{
			input, fft_length,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// IRFFT2DAttr is an optional argument to IRFFT2D.
type IRFFT2DAttr func(optionalAttr)

// IRFFT2DTreal sets the optional Treal attribute to value.
// If not specified, defaults to DT_FLOAT
func IRFFT2DTreal(value tf.DataType) IRFFT2DAttr {
	return func(m optionalAttr) {
		m["Treal"] = value
	}
}

// Inverse 2D real-valued fast Fourier transform.
//
// Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
// signal over the inner-most 2 dimensions of `input`.
//
// The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
// The inner-most dimension contains the `fft_length / 2 + 1` unique components of
// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
// from the size of the inner-most 2 dimensions of `input`. If the FFT length used
// to compute `input` is odd, it should be provided since it cannot be inferred
// properly.
//
// Along each axis `IRFFT2D` is computed on, if `fft_length` (or
// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
// corresponding dimension of `input`, the dimension is cropped. If it is larger,
// the dimension is padded with zeros.
//
// Arguments:
//	input: A complex tensor.
//	fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
//
// Returns A float32 tensor of the same rank as `input`. The inner-most 2
//   dimensions of `input` are replaced with the `fft_length` samples of their
//   inverse 2D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.irfft2
// @end_compatibility
func IRFFT2D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFT2DAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "IRFFT2D",
		Input: []tf.Input{
			input, fft_length,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RFFT2DAttr is an optional argument to RFFT2D.
type RFFT2DAttr func(optionalAttr)

// RFFT2DTcomplex sets the optional Tcomplex attribute to value.
// If not specified, defaults to DT_COMPLEX64
func RFFT2DTcomplex(value tf.DataType) RFFT2DAttr {
	return func(m optionalAttr) {
		m["Tcomplex"] = value
	}
}

// 2D real-valued fast Fourier transform.
//
// Computes the 2-dimensional discrete Fourier transform of a real-valued signal
// over the inner-most 2 dimensions of `input`.
//
// Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
// of `output`: the zero-frequency term, followed by the `fft_length / 2`
// positive-frequency terms.
//
// Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
// corresponding dimension of `input`, the dimension is cropped. If it is larger,
// the dimension is padded with zeros.
//
// Arguments:
//	input: A float32 tensor.
//	fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
//
// Returns A complex64 tensor of the same rank as `input`. The inner-most 2
//   dimensions of `input` are replaced with their 2D Fourier transform. The
//   inner-most dimension contains `fft_length / 2 + 1` unique frequency
//   components.
//
// @compatibility(numpy)
// Equivalent to np.fft.rfft2
// @end_compatibility
func RFFT2D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFT2DAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RFFT2D",
		Input: []tf.Input{
			input, fft_length,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// IRFFTAttr is an optional argument to IRFFT.
type IRFFTAttr func(optionalAttr)

// IRFFTTreal sets the optional Treal attribute to value.
// If not specified, defaults to DT_FLOAT
func IRFFTTreal(value tf.DataType) IRFFTAttr {
	return func(m optionalAttr) {
		m["Treal"] = value
	}
}

// Inverse real-valued fast Fourier transform.
//
// Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
// signal over the inner-most dimension of `input`.
//
// The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
// `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
// `fft_length` is not provided, it is computed from the size of the inner-most
// dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
// compute `input` is odd, it should be provided since it cannot be inferred
// properly.
//
// Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
// than the corresponding dimension of `input`, the dimension is cropped. If it is
// larger, the dimension is padded with zeros.
//
// Arguments:
//	input: A complex tensor.
//	fft_length: An int32 tensor of shape [1]. The FFT length.
//
// Returns A float32 tensor of the same rank as `input`. The inner-most
//   dimension of `input` is replaced with the `fft_length` samples of its inverse
//   1D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.irfft
// @end_compatibility
func IRFFT(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFTAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "IRFFT",
		Input: []tf.Input{
			input, fft_length,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RFFTAttr is an optional argument to RFFT.
type RFFTAttr func(optionalAttr)

// RFFTTcomplex sets the optional Tcomplex attribute to value.
// If not specified, defaults to DT_COMPLEX64
func RFFTTcomplex(value tf.DataType) RFFTAttr {
	return func(m optionalAttr) {
		m["Tcomplex"] = value
	}
}

// Real-valued fast Fourier transform.
//
// Computes the 1-dimensional discrete Fourier transform of a real-valued signal
// over the inner-most dimension of `input`.
//
// Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
// `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
// followed by the `fft_length / 2` positive-frequency terms.
//
// Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
// corresponding dimension of `input`, the dimension is cropped. If it is larger,
// the dimension is padded with zeros.
//
// Arguments:
//	input: A float32 tensor.
//	fft_length: An int32 tensor of shape [1]. The FFT length.
//
// Returns A complex64 tensor of the same rank as `input`. The inner-most
//   dimension of `input` is replaced with the `fft_length / 2 + 1` unique
//   frequency components of its 1D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.rfft
// @end_compatibility
func RFFT(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFTAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RFFT",
		Input: []tf.Input{
			input, fft_length,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
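
// exampleRFFT is a hand-written, illustrative sketch (not machine generated)
// of the 1D real FFT wrapper above: a length-4 float32 signal yields
// fft_length/2 + 1 == 3 unique complex64 frequency components.
func exampleRFFT(s *Scope) tf.Output {
	signal := Const(s, []float32{0, 1, 2, 3})
	fftLength := Const(s, []int32{4})
	return RFFT(s, signal, fftLength)
}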

// 3D fast Fourier transform.
//
// Computes the 3-dimensional discrete Fourier transform over the inner-most 3
// dimensions of `input`.
//
// Arguments:
//	input: A complex tensor.
//
// Returns A complex tensor of the same shape as `input`. The inner-most 3
//   dimensions of `input` are replaced with their 3D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.fftn with 3 dimensions.
// @end_compatibility
func FFT3D(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "FFT3D",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Inverse 2D fast Fourier transform.
//
// Computes the inverse 2-dimensional discrete Fourier transform over the
// inner-most 2 dimensions of `input`.
//
// Arguments:
//	input: A complex tensor.
//
// Returns A complex tensor of the same shape as `input`. The inner-most 2
//   dimensions of `input` are replaced with their inverse 2D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.ifft2
// @end_compatibility
func IFFT2D(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IFFT2D",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// 2D fast Fourier transform.
//
// Computes the 2-dimensional discrete Fourier transform over the inner-most
// 2 dimensions of `input`.
//
// Arguments:
//	input: A complex tensor.
//
// Returns A complex tensor of the same shape as `input`. The inner-most 2
//   dimensions of `input` are replaced with their 2D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.fft2
// @end_compatibility
func FFT2D(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "FFT2D",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Inverse fast Fourier transform.
//
// Computes the inverse 1-dimensional discrete Fourier transform over the
// inner-most dimension of `input`.
//
// Arguments:
//	input: A complex tensor.
//
// Returns A complex tensor of the same shape as `input`. The inner-most
//   dimension of `input` is replaced with its inverse 1D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.ifft
// @end_compatibility
func IFFT(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IFFT",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Fast Fourier transform.
//
// Computes the 1-dimensional discrete Fourier transform over the inner-most
// dimension of `input`.
//
// Arguments:
//	input: A complex tensor.
//
// Returns A complex tensor of the same shape as `input`. The inner-most
//   dimension of `input` is replaced with its 1D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.fft
// @end_compatibility
func FFT(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "FFT",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
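
// exampleFFTRoundTrip is a hand-written, illustrative sketch (not machine
// generated) of the complex FFT wrappers above: applying IFFT to the FFT of a
// signal recovers the original values up to floating-point error.
func exampleFFTRoundTrip(s *Scope) tf.Output {
	signal := Const(s, []complex128{1, 2, 3, 4})
	return IFFT(s, FFT(s, signal))
}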

// Writes a scalar summary.
//
// Writes scalar `value` at `step` with `tag` using summary `writer`.
//
// Returns the created operation.
func WriteScalarSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, value tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "WriteScalarSummary",
		Input: []tf.Input{
			writer, step, tag, value,
		},
	}
	return scope.AddOperation(opspec)
}

// The gradient of SparseFillEmptyRows.
//
// Takes vectors reverse_index_map, shaped `[N]`, and grad_values,
// shaped `[N_full]`, where `N_full >= N` and copies data into either
// `d_values` or `d_default_value`.  Here `d_values` is shaped `[N]` and
// `d_default_value` is a scalar.
//
//   d_values[j] = grad_values[reverse_index_map[j]]
//   d_default_value = sum_{k : 0 .. N_full - 1} (
//      grad_values[k] * 1{k not in reverse_index_map})
//
// Arguments:
//	reverse_index_map: 1-D.  The reverse index map from SparseFillEmptyRows.
//	grad_values: 1-D.  The gradients from backprop.
//
// Returns:
//	d_values: 1-D.  The backprop into values.
//	d_default_value: 0-D.  The backprop into default_value.
func SparseFillEmptyRowsGrad(scope *Scope, reverse_index_map tf.Output, grad_values tf.Output) (d_values tf.Output, d_default_value tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseFillEmptyRowsGrad",
		Input: []tf.Input{
			reverse_index_map, grad_values,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Fills empty rows in the input 2-D `SparseTensor` with a default value.
//
// The input `SparseTensor` is represented via the tuple of inputs
// (`indices`, `values`, `dense_shape`).  The output `SparseTensor` has the
// same `dense_shape` but with indices `output_indices` and values
// `output_values`.
//
// This op inserts a single entry for every row that doesn't have any values.
// The index is created as `[row, 0, ..., 0]` and the inserted value
// is `default_value`.
//
// For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
//
//     [0, 1]: a
//     [0, 3]: b
//     [2, 0]: c
//     [3, 1]: d
//
// Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
//
//     [0, 1]: a
//     [0, 3]: b
//     [1, 0]: default_value
//     [2, 0]: c
//     [3, 1]: d
//     [4, 0]: default_value
//
// The output `SparseTensor` will be in row-major order and will have the
// same shape as the input.
//
// This op also returns an indicator vector shaped `[dense_shape[0]]` such that
//
//     empty_row_indicator[i] = True iff row i was an empty row.
//
// And a reverse index map vector shaped `[indices.shape[0]]` that is used during
// backpropagation,
//
//     reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
//
// Arguments:
//	indices: 2-D. the indices of the sparse tensor.
//	values: 1-D. the values of the sparse tensor.
//	dense_shape: 1-D. the shape of the sparse tensor.
//	default_value: 0-D. default value to insert into location `[row, 0, ..., 0]`
//   for rows missing from the input sparse tensor.
//
// Returns:
//	output_indices: 2-D. the indices of the filled sparse tensor.
//	output_values: 1-D. the values of the filled sparse tensor.
//	empty_row_indicator: 1-D. whether the dense row was missing in the
// input sparse tensor.
//	reverse_index_map: 1-D. a map from the input indices to the output indices.
func SparseFillEmptyRows(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, default_value tf.Output) (output_indices tf.Output, output_values tf.Output, empty_row_indicator tf.Output, reverse_index_map tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseFillEmptyRows",
		Input: []tf.Input{
			indices, values, dense_shape, default_value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}
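
// exampleSparseFillEmptyRows is a hand-written, illustrative sketch (not
// machine generated) reproducing the documented `[5, 6]` example above; rows
// 1 and 4 are empty and receive the default value.
func exampleSparseFillEmptyRows(s *Scope) (outputIndices, outputValues, emptyRowIndicator, reverseIndexMap tf.Output) {
	indices := Const(s, [][]int64{{0, 1}, {0, 3}, {2, 0}, {3, 1}})
	values := Const(s, []string{"a", "b", "c", "d"})
	denseShape := Const(s, []int64{5, 6})
	defaultValue := Const(s, "default")
	return SparseFillEmptyRows(s, indices, values, denseShape, defaultValue)
}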

// Returns the element-wise min of two SparseTensors.
//
// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
//
// Arguments:
//	a_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
// SparseTensor, in the canonical lexicographic ordering.
//	a_values: 1-D.  `N` non-empty values corresponding to `a_indices`.
//	a_shape: 1-D.  Shape of the input SparseTensor.
//	b_indices: counterpart to `a_indices` for the other operand.
//	b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
//	b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
//
// Returns:
//	output_indices: 2-D.  The indices of the output SparseTensor.
//	output_values: 1-D.  The values of the output SparseTensor.
func SparseSparseMinimum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSparseMinimum",
		Input: []tf.Input{
			a_indices, a_values, a_shape, b_indices, b_values, b_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Returns the element-wise max of two SparseTensors.
//
// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
//
// Arguments:
//	a_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
// SparseTensor, in the canonical lexicographic ordering.
//	a_values: 1-D.  `N` non-empty values corresponding to `a_indices`.
//	a_shape: 1-D.  Shape of the input SparseTensor.
//	b_indices: counterpart to `a_indices` for the other operand.
//	b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
//	b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
//
// Returns:
//	output_indices: 2-D.  The indices of the output SparseTensor.
//	output_values: 1-D.  The values of the output SparseTensor.
func SparseSparseMaximum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSparseMaximum",
		Input: []tf.Input{
			a_indices, a_values, a_shape, b_indices, b_values, b_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Applies softmax to a batched N-D `SparseTensor`.
//
// The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
// (where `N >= 2`), and with indices sorted in the canonical lexicographic order.
//
// This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
// logical submatrix with shape `[B, C]`, but with the catch that *the implicitly
// zero elements do not participate*.  Specifically, the algorithm is equivalent
// to the following:
//
//   (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
//       with shape `[B, C]`, along the size-C dimension;
//   (2) Masks out the original implicitly-zero locations;
//   (3) Renormalizes the remaining elements.
//
// Hence, the `SparseTensor` result has exactly the same non-zero indices and
// shape.
//
// Arguments:
//	sp_indices: 2-D.  `NNZ x R` matrix with the indices of non-empty values in a
// SparseTensor, in canonical ordering.
//	sp_values: 1-D.  `NNZ` non-empty values corresponding to `sp_indices`.
//	sp_shape: 1-D.  Shape of the input SparseTensor.
//
// Returns 1-D.  The `NNZ` values for the result `SparseTensor`.
func SparseSoftmax(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSoftmax",
		Input: []tf.Input{
			sp_indices, sp_values, sp_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
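
// exampleSparseSoftmax is a hand-written, illustrative sketch (not machine
// generated) of the SparseSoftmax wrapper above on a 2x2 SparseTensor with
// three non-zero entries; only those entries participate in the softmax.
func exampleSparseSoftmax(s *Scope) tf.Output {
	spIndices := Const(s, [][]int64{{0, 0}, {0, 1}, {1, 1}})
	spValues := Const(s, []float32{1, 2, 3})
	spShape := Const(s, []int64{2, 2})
	return SparseSoftmax(s, spIndices, spValues, spShape)
}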
4445
4446// Component-wise divides a SparseTensor by a dense Tensor.
4447//
4448// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
4449// the other direction.
4450//
4451// Arguments:
4452//	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
4453// SparseTensor, possibly not in canonical ordering.
4454//	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
4455//	sp_shape: 1-D.  Shape of the input SparseTensor.
4456//	dense: `R`-D.  The dense Tensor operand.
4457//
4458// Returns 1-D.  The `N` values that are operated on.
4459func SparseDenseCwiseDiv(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
4460	if scope.Err() != nil {
4461		return
4462	}
4463	opspec := tf.OpSpec{
4464		Type: "SparseDenseCwiseDiv",
4465		Input: []tf.Input{
4466			sp_indices, sp_values, sp_shape, dense,
4467		},
4468	}
4469	op := scope.AddOperation(opspec)
4470	return op.Output(0)
4471}
4472
4473// SparseReduceMaxAttr is an optional argument to SparseReduceMax.
4474type SparseReduceMaxAttr func(optionalAttr)
4475
4476// SparseReduceMaxKeepDims sets the optional keep_dims attribute to value.
4477//
4478// value: If true, retain reduced dimensions with length 1.
4479// If not specified, defaults to false
4480func SparseReduceMaxKeepDims(value bool) SparseReduceMaxAttr {
4481	return func(m optionalAttr) {
4482		m["keep_dims"] = value
4483	}
4484}
4485
4486// Computes the max of elements across dimensions of a SparseTensor.
4487//
4488// This Op takes a SparseTensor and is the sparse counterpart to
4489// `tf.reduce_max()`.  In particular, this Op also returns a dense `Tensor`
4490// instead of a sparse one.
4491//
4492// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
4493// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
4494// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
4495// with length 1.
4496//
4497// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
// with a single element is returned.  Additionally, the axes can be negative,
// in which case they are interpreted according to Python indexing rules.
4500//
4501// Arguments:
4502//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
4503// SparseTensor, possibly not in canonical ordering.
4504//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
4505//	input_shape: 1-D.  Shape of the input SparseTensor.
4506//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
4507//
4508// Returns `R-K`-D.  The reduced Tensor.
4509func SparseReduceMax(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxAttr) (output tf.Output) {
4510	if scope.Err() != nil {
4511		return
4512	}
4513	attrs := map[string]interface{}{}
4514	for _, a := range optional {
4515		a(attrs)
4516	}
4517	opspec := tf.OpSpec{
4518		Type: "SparseReduceMax",
4519		Input: []tf.Input{
4520			input_indices, input_values, input_shape, reduction_axes,
4521		},
4522		Attrs: attrs,
4523	}
4524	op := scope.AddOperation(opspec)
4525	return op.Output(0)
4526}
4527
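// A brief usage sketch for SparseReduceMax, also showing the optional-attr
// pattern used throughout this package (illustrative only; assumes the
// NewScope and Const helpers):
//
//	s := NewScope()
//	indices := Const(s, [][]int64{{0, 0}, {0, 2}, {1, 1}})
//	values := Const(s, []float32{1, 5, 3})
//	shape := Const(s, []int64{2, 3})
//	axes := Const(s, []int32{1})
//	// Reduce along axis 1; keep_dims retains the reduced axis with length 1,
//	// so the dense result has shape [2, 1] with values [[5], [3]].
//	out := SparseReduceMax(s, indices, values, shape, axes,
//		SparseReduceMaxKeepDims(true))
//	_ = out
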
4528// Reshapes a SparseTensor to represent values in a new dense shape.
4529//
4530// This operation has the same semantics as reshape on the represented dense
4531// tensor.  The `input_indices` are recomputed based on the requested `new_shape`.
4532//
4533// If one component of `new_shape` is the special value -1, the size of that
4534// dimension is computed so that the total dense size remains constant.  At
4535// most one component of `new_shape` can be -1.  The number of dense elements
4536// implied by `new_shape` must be the same as the number of dense elements
4537// originally implied by `input_shape`.
4538//
4539// Reshaping does not affect the order of values in the SparseTensor.
4540//
4541// If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
4542// has length `R_out`, then `input_indices` has shape `[N, R_in]`,
4543// `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
4544// `output_shape` has length `R_out`.
4545//
4546// Arguments:
4547//	input_indices: 2-D.  `N x R_in` matrix with the indices of non-empty values in a
4548// SparseTensor.
4549//	input_shape: 1-D.  `R_in` vector with the input SparseTensor's dense shape.
4550//	new_shape: 1-D.  `R_out` vector with the requested new dense shape.
4551//
4552// Returns:
4553//	output_indices: 2-D.  `N x R_out` matrix with the updated indices of non-empty
4554// values in the output SparseTensor.
4555//	output_shape: 1-D.  `R_out` vector with the full dense shape of the output
4556// SparseTensor.  This is the same as `new_shape` but with any -1 dimensions
4557// filled in.
4558func SparseReshape(scope *Scope, input_indices tf.Output, input_shape tf.Output, new_shape tf.Output) (output_indices tf.Output, output_shape tf.Output) {
4559	if scope.Err() != nil {
4560		return
4561	}
4562	opspec := tf.OpSpec{
4563		Type: "SparseReshape",
4564		Input: []tf.Input{
4565			input_indices, input_shape, new_shape,
4566		},
4567	}
4568	op := scope.AddOperation(opspec)
4569	return op.Output(0), op.Output(1)
4570}
4571
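// A brief usage sketch for SparseReshape, illustrating the -1 wildcard
// (illustrative only; assumes the NewScope and Const helpers):
//
//	s := NewScope()
//	// Non-empty values at dense positions 0 and 5 of a [2, 3] shape.
//	indices := Const(s, [][]int64{{0, 0}, {1, 2}})
//	shape := Const(s, []int64{2, 3})
//	// -1 is inferred as 2, since the dense size (6 elements) must not change.
//	newShape := Const(s, []int64{3, -1})
//	outIndices, outShape := SparseReshape(s, indices, shape, newShape)
//	_, _ = outIndices, outShape // indices become [[0, 0], [2, 1]], shape [3, 2]
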
4572// The gradient operator for the SparseSlice op.
4573//
4574// This op takes in the upstream gradient w.r.t. non-empty values of
4575// the sliced `SparseTensor`, and outputs the gradients w.r.t.
4576// the non-empty values of input `SparseTensor`.
4577//
4578// Arguments:
4579//	backprop_val_grad: 1-D. The gradient with respect to
4580// the non-empty values of the sliced `SparseTensor`.
4581//	input_indices: 2-D.  The `indices` of the input `SparseTensor`.
//	input_start: 1-D tensor representing the start of the slice.
4583//	output_indices: 2-D.  The `indices` of the sliced `SparseTensor`.
4584//
4585// Returns 1-D. The gradient with respect to the non-empty values of input `SparseTensor`.
4586func SparseSliceGrad(scope *Scope, backprop_val_grad tf.Output, input_indices tf.Output, input_start tf.Output, output_indices tf.Output) (val_grad tf.Output) {
4587	if scope.Err() != nil {
4588		return
4589	}
4590	opspec := tf.OpSpec{
4591		Type: "SparseSliceGrad",
4592		Input: []tf.Input{
4593			backprop_val_grad, input_indices, input_start, output_indices,
4594		},
4595	}
4596	op := scope.AddOperation(opspec)
4597	return op.Output(0)
4598}
4599
4600// Generates sparse cross from a list of sparse and dense tensors.
4601//
4602// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
4603// representing features of one feature column. It outputs a 2D `SparseTensor` with
4604// the batchwise crosses of these features.
4605//
4606// For example, if the inputs are
4607//
4608//     inputs[0]: SparseTensor with shape = [2, 2]
4609//     [0, 0]: "a"
4610//     [1, 0]: "b"
4611//     [1, 1]: "c"
4612//
4613//     inputs[1]: SparseTensor with shape = [2, 1]
4614//     [0, 0]: "d"
4615//     [1, 0]: "e"
4616//
4617//     inputs[2]: Tensor [["f"], ["g"]]
4618//
4619// then the output will be
4620//
4621//     shape = [2, 2]
4622//     [0, 0]: "a_X_d_X_f"
4623//     [1, 0]: "b_X_e_X_g"
4624//     [1, 1]: "c_X_e_X_g"
4625//
4626// if hashed_output=true then the output will be
4627//
4628//     shape = [2, 2]
4629//     [0, 0]: FingerprintCat64(
4630//                 Fingerprint64("f"), FingerprintCat64(
4631//                     Fingerprint64("d"), Fingerprint64("a")))
4632//     [1, 0]: FingerprintCat64(
4633//                 Fingerprint64("g"), FingerprintCat64(
4634//                     Fingerprint64("e"), Fingerprint64("b")))
4635//     [1, 1]: FingerprintCat64(
4636//                 Fingerprint64("g"), FingerprintCat64(
4637//                     Fingerprint64("e"), Fingerprint64("c")))
4638//
4639// Arguments:
4640//	indices: 2-D.  Indices of each input `SparseTensor`.
//	values: 1-D.  Values of each `SparseTensor`.
//	shapes: 1-D.  Shapes of each `SparseTensor`.
//	dense_inputs: 2-D.  Columns represented by dense `Tensor`.
//	num_buckets: output = hashed_value%num_buckets if num_buckets > 0 else
// hashed_value.
4646//	strong_hash: boolean, if true, siphash with salt will be used instead of farmhash.
4647//	salt: Specify the salt that will be used by the siphash function.
4648//
4649// Returns:
4650//	output_indices: 2-D.  Indices of the concatenated `SparseTensor`.
4651//	output_values: 1-D.  Non-empty values of the concatenated or hashed
4652// `SparseTensor`.
4653//	output_shape: 1-D.  Shape of the concatenated `SparseTensor`.
4654func SparseCrossHashed(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, num_buckets tf.Output, strong_hash tf.Output, salt tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
4655	if scope.Err() != nil {
4656		return
4657	}
4658	opspec := tf.OpSpec{
4659		Type: "SparseCrossHashed",
4660		Input: []tf.Input{
4661			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes), tf.OutputList(dense_inputs), num_buckets, strong_hash, salt,
4662		},
4663	}
4664	op := scope.AddOperation(opspec)
4665	return op.Output(0), op.Output(1), op.Output(2)
4666}
4667
4668// Generates sparse cross from a list of sparse and dense tensors.
4669//
4670// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
4671// representing features of one feature column. It outputs a 2D `SparseTensor` with
4672// the batchwise crosses of these features.
4673//
4674// For example, if the inputs are
4675//
4676//     inputs[0]: SparseTensor with shape = [2, 2]
4677//     [0, 0]: "a"
4678//     [1, 0]: "b"
4679//     [1, 1]: "c"
4680//
4681//     inputs[1]: SparseTensor with shape = [2, 1]
4682//     [0, 0]: "d"
4683//     [1, 0]: "e"
4684//
4685//     inputs[2]: Tensor [["f"], ["g"]]
4686//
4687// then the output will be
4688//
4689//     shape = [2, 2]
4690//     [0, 0]: "a_X_d_X_f"
4691//     [1, 0]: "b_X_e_X_g"
4692//     [1, 1]: "c_X_e_X_g"
4693//
4694// if hashed_output=true then the output will be
4695//
4696//     shape = [2, 2]
4697//     [0, 0]: FingerprintCat64(
4698//                 Fingerprint64("f"), FingerprintCat64(
4699//                     Fingerprint64("d"), Fingerprint64("a")))
4700//     [1, 0]: FingerprintCat64(
4701//                 Fingerprint64("g"), FingerprintCat64(
4702//                     Fingerprint64("e"), Fingerprint64("b")))
4703//     [1, 1]: FingerprintCat64(
4704//                 Fingerprint64("g"), FingerprintCat64(
4705//                     Fingerprint64("e"), Fingerprint64("c")))
4706//
4707// Arguments:
4708//	indices: 2-D.  Indices of each input `SparseTensor`.
//	values: 1-D.  Values of each `SparseTensor`.
//	shapes: 1-D.  Shapes of each `SparseTensor`.
//	dense_inputs: 2-D.  Columns represented by dense `Tensor`.
//	hashed_output: If true, returns the hash of the cross instead of the string.
// This avoids string manipulation.
4714//	num_buckets: It is used if hashed_output is true.
4715// output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
4716//	hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
4717// function to combine the crosses fingerprints.
4718//
4719//
4720//
4721// Returns:
4722//	output_indices: 2-D.  Indices of the concatenated `SparseTensor`.
4723//	output_values: 1-D.  Non-empty values of the concatenated or hashed
4724// `SparseTensor`.
4725//	output_shape: 1-D.  Shape of the concatenated `SparseTensor`.
4726func SparseCross(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, hashed_output bool, num_buckets int64, hash_key int64, out_type tf.DataType, internal_type tf.DataType) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
4727	if scope.Err() != nil {
4728		return
4729	}
4730	attrs := map[string]interface{}{"hashed_output": hashed_output, "num_buckets": num_buckets, "hash_key": hash_key, "out_type": out_type, "internal_type": internal_type}
4731	opspec := tf.OpSpec{
4732		Type: "SparseCross",
4733		Input: []tf.Input{
4734			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes), tf.OutputList(dense_inputs),
4735		},
4736		Attrs: attrs,
4737	}
4738	op := scope.AddOperation(opspec)
4739	return op.Output(0), op.Output(1), op.Output(2)
4740}
4741
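// A brief usage sketch for SparseCross with string output (illustrative only;
// assumes the NewScope and Const helpers, and that string crossing uses
// out_type = internal_type = tf.String):
//
//	s := NewScope()
//	// One sparse column holding "a" and "b", and one dense column ["c"], ["d"].
//	spIdx := Const(s, [][]int64{{0, 0}, {1, 0}})
//	spVal := Const(s, []string{"a", "b"})
//	spShape := Const(s, []int64{2, 1})
//	dense := Const(s, [][]string{{"c"}, {"d"}})
//	oi, ov, os := SparseCross(s,
//		[]tf.Output{spIdx}, []tf.Output{spVal}, []tf.Output{spShape},
//		[]tf.Output{dense}, false, 0, 0, tf.String, tf.String)
//	_, _, _ = oi, ov, os // values are ["a_X_c", "b_X_d"]
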
4742// Concatenates a list of `SparseTensor` along the specified dimension.
4743//
4744// Concatenation is with respect to the dense versions of these sparse tensors.
4745// It is assumed that each input is a `SparseTensor` whose elements are ordered
4746// along increasing dimension number.
4747//
4748// All inputs' shapes must match, except for the concat dimension.  The
4749// `indices`, `values`, and `shapes` lists must have the same length.
4750//
4751// The output shape is identical to the inputs', except along the concat
4752// dimension, where it is the sum of the inputs' sizes along that dimension.
4753//
4754// The output elements will be resorted to preserve the sort order along
4755// increasing dimension number.
4756//
4757// This op runs in `O(M log M)` time, where `M` is the total number of non-empty
4758// values across all inputs. This is due to the need for an internal sort in
4759// order to concatenate efficiently across an arbitrary dimension.
4760//
4761// For example, if `concat_dim = 1` and the inputs are
4762//
4763//     sp_inputs[0]: shape = [2, 3]
4764//     [0, 2]: "a"
4765//     [1, 0]: "b"
4766//     [1, 1]: "c"
4767//
4768//     sp_inputs[1]: shape = [2, 4]
4769//     [0, 1]: "d"
4770//     [0, 2]: "e"
4771//
4772// then the output will be
4773//
4774//     shape = [2, 7]
4775//     [0, 2]: "a"
4776//     [0, 4]: "d"
4777//     [0, 5]: "e"
4778//     [1, 0]: "b"
4779//     [1, 1]: "c"
4780//
4781// Graphically this is equivalent to doing
4782//
4783//     [    a] concat [  d e  ] = [    a   d e  ]
4784//     [b c  ]        [       ]   [b c          ]
4785//
4786// Arguments:
4787//	indices: 2-D.  Indices of each input `SparseTensor`.
4788//	values: 1-D.  Non-empty values of each `SparseTensor`.
4789//	shapes: 1-D.  Shapes of each `SparseTensor`.
4790//	concat_dim: Dimension to concatenate along. Must be in range [-rank, rank),
4791// where rank is the number of dimensions in each input `SparseTensor`.
4792//
4793// Returns:
4794//	output_indices: 2-D.  Indices of the concatenated `SparseTensor`.
4795//	output_values: 1-D.  Non-empty values of the concatenated `SparseTensor`.
4796//	output_shape: 1-D.  Shape of the concatenated `SparseTensor`.
4797func SparseConcat(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, concat_dim int64) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
4798	if scope.Err() != nil {
4799		return
4800	}
4801	attrs := map[string]interface{}{"concat_dim": concat_dim}
4802	opspec := tf.OpSpec{
4803		Type: "SparseConcat",
4804		Input: []tf.Input{
4805			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes),
4806		},
4807		Attrs: attrs,
4808	}
4809	op := scope.AddOperation(opspec)
4810	return op.Output(0), op.Output(1), op.Output(2)
4811}
4812
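// A brief usage sketch for SparseConcat, mirroring the example above
// (illustrative only; assumes the NewScope and Const helpers):
//
//	s := NewScope()
//	aIdx := Const(s, [][]int64{{0, 2}, {1, 0}, {1, 1}})
//	aVal := Const(s, []string{"a", "b", "c"})
//	aShape := Const(s, []int64{2, 3})
//	bIdx := Const(s, [][]int64{{0, 1}, {0, 2}})
//	bVal := Const(s, []string{"d", "e"})
//	bShape := Const(s, []int64{2, 4})
//	// Concatenate along dimension 1; the result has shape [2, 7].
//	oi, ov, os := SparseConcat(s,
//		[]tf.Output{aIdx, bIdx}, []tf.Output{aVal, bVal},
//		[]tf.Output{aShape, bShape}, 1)
//	_, _, _ = oi, ov, os
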
4813// SerializeManySparseAttr is an optional argument to SerializeManySparse.
4814type SerializeManySparseAttr func(optionalAttr)
4815
4816// SerializeManySparseOutType sets the optional out_type attribute to value.
4817//
4818// value: The `dtype` to use for serialization; the supported types are `string`
4819// (default) and `variant`.
4820// If not specified, defaults to DT_STRING
4821func SerializeManySparseOutType(value tf.DataType) SerializeManySparseAttr {
4822	return func(m optionalAttr) {
4823		m["out_type"] = value
4824	}
4825}
4826
4827// Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object.
4828//
4829// The `SparseTensor` must have rank `R` greater than 1, and the first dimension
4830// is treated as the minibatch dimension.  Elements of the `SparseTensor`
4831// must be sorted in increasing order of this first dimension.  The serialized
4832// `SparseTensor` objects going into each row of `serialized_sparse` will have
4833// rank `R-1`.
4834//
4835// The minibatch size `N` is extracted from `sparse_shape[0]`.
4836//
4837// Arguments:
4838//	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
4839//	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
4840//	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
4841func SerializeManySparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeManySparseAttr) (serialized_sparse tf.Output) {
4842	if scope.Err() != nil {
4843		return
4844	}
4845	attrs := map[string]interface{}{}
4846	for _, a := range optional {
4847		a(attrs)
4848	}
4849	opspec := tf.OpSpec{
4850		Type: "SerializeManySparse",
4851		Input: []tf.Input{
4852			sparse_indices, sparse_values, sparse_shape,
4853		},
4854		Attrs: attrs,
4855	}
4856	op := scope.AddOperation(opspec)
4857	return op.Output(0)
4858}
4859
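// A brief usage sketch for SerializeManySparse (illustrative only; assumes
// the NewScope and Const helpers):
//
//	s := NewScope()
//	// A minibatch of N=2 rank-1 SparseTensors, stored as one rank-2 tensor.
//	indices := Const(s, [][]int64{{0, 0}, {1, 1}})
//	values := Const(s, []float32{1, 2})
//	shape := Const(s, []int64{2, 3})
//	// Each of the 2 rows of the [2, 3] string output serializes one
//	// rank-1 SparseTensor.
//	serialized := SerializeManySparse(s, indices, values, shape)
//	_ = serialized
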
4860// SerializeSparseAttr is an optional argument to SerializeSparse.
4861type SerializeSparseAttr func(optionalAttr)
4862
4863// SerializeSparseOutType sets the optional out_type attribute to value.
4864//
4865// value: The `dtype` to use for serialization; the supported types are `string`
4866// (default) and `variant`.
4867// If not specified, defaults to DT_STRING
4868func SerializeSparseOutType(value tf.DataType) SerializeSparseAttr {
4869	return func(m optionalAttr) {
4870		m["out_type"] = value
4871	}
4872}
4873
4874// Serialize a `SparseTensor` into a `[3]` `Tensor` object.
4875//
4876// Arguments:
4877//	sparse_indices: 2-D.  The `indices` of the `SparseTensor`.
4878//	sparse_values: 1-D.  The `values` of the `SparseTensor`.
4879//	sparse_shape: 1-D.  The `shape` of the `SparseTensor`.
4880func SerializeSparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeSparseAttr) (serialized_sparse tf.Output) {
4881	if scope.Err() != nil {
4882		return
4883	}
4884	attrs := map[string]interface{}{}
4885	for _, a := range optional {
4886		a(attrs)
4887	}
4888	opspec := tf.OpSpec{
4889		Type: "SerializeSparse",
4890		Input: []tf.Input{
4891			sparse_indices, sparse_values, sparse_shape,
4892		},
4893		Attrs: attrs,
4894	}
4895	op := scope.AddOperation(opspec)
4896	return op.Output(0)
4897}
4898
4899// The gradient operator for the SparseAdd op.
4900//
4901// The SparseAdd op calculates A + B, where A, B, and the sum are all represented
4902// as `SparseTensor` objects.  This op takes in the upstream gradient w.r.t.
4903// non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
4904// values of A and B.
4905//
4906// Arguments:
4907//	backprop_val_grad: 1-D with shape `[nnz(sum)]`.  The gradient with respect to
4908// the non-empty values of the sum.
4909//	a_indices: 2-D.  The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.
4910//	b_indices: 2-D.  The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.
4911//	sum_indices: 2-D.  The `indices` of the sum `SparseTensor`, size
4912// `[nnz(sum), ndims]`.
4913//
4914// Returns:
4915//	a_val_grad: 1-D with shape `[nnz(A)]`. The gradient with respect to the
4916// non-empty values of A.
4917//	b_val_grad: 1-D with shape `[nnz(B)]`. The gradient with respect to the
4918// non-empty values of B.
4919func SparseAddGrad(scope *Scope, backprop_val_grad tf.Output, a_indices tf.Output, b_indices tf.Output, sum_indices tf.Output) (a_val_grad tf.Output, b_val_grad tf.Output) {
4920	if scope.Err() != nil {
4921		return
4922	}
4923	opspec := tf.OpSpec{
4924		Type: "SparseAddGrad",
4925		Input: []tf.Input{
4926			backprop_val_grad, a_indices, b_indices, sum_indices,
4927		},
4928	}
4929	op := scope.AddOperation(opspec)
4930	return op.Output(0), op.Output(1)
4931}
4932
4933// Computes the sparse Cholesky decomposition of `input`.
4934//
4935// Computes the Sparse Cholesky decomposition of a sparse matrix, with the given
4936// fill-in reducing permutation.
4937//
4938// The input sparse matrix and the fill-in reducing permutation `permutation` must
// have compatible shapes. If the sparse matrix has rank 3, with batch
// dimension `B`, then `permutation` must have rank 2, with the same batch
// dimension `B`. There is no support for broadcasting.
4942//
4943// Furthermore, each component vector of `permutation` must be of length `N`,
4944// containing each of the integers {0, 1, ..., N - 1} exactly once, where `N` is
4945// the number of rows of each component of the sparse matrix.
4946//
4947// Each component of the input sparse matrix must represent a symmetric positive
// definite (SPD) matrix, although only the lower triangular part of the matrix is
// read. If any individual component is not SPD, an InvalidArgument error is
// raised.
4951//
4952// The returned sparse matrix has the same dense shape as the input sparse matrix.
4953// For each component `A` of the input sparse matrix, the corresponding output
4954// sparse matrix represents `L`, the lower triangular Cholesky factor satisfying
4955// the following identity:
4956//
4957// ```
4958//   A = L * Lt
4959// ```
4960//
4961// where Lt denotes the transpose of L (or its conjugate transpose, if `type` is
4962// `complex64` or `complex128`).
4963//
4964// The `type` parameter denotes the type of the matrix elements. The supported
4965// types are: `float32`, `float64`, `complex64` and `complex128`.
4966//
4967// Usage example:
4968//
4969// ```python
4970//     from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
4971//
4972//     a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])
4973//     a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)
4974//     a_dense_shape = [4, 4]
4975//
4976//     with tf.Session() as sess:
4977//       # Define (COO format) SparseTensor over Numpy array.
4978//       a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
4979//
4980//       # Convert SparseTensors to CSR SparseMatrix.
4981//       a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
4982//           a_st.indices, a_st.values, a_st.dense_shape)
4983//
4984//       # Obtain the Sparse Cholesky factor using AMD Ordering for reducing zero
4985//       # fill-in (number of structural non-zeros in the sparse Cholesky factor).
//       ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(a_sm)
//       cholesky_sparse_matrices = (
//           sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
//               a_sm, ordering_amd, type=tf.float32))
4990//
4991//       # Convert the CSRSparseMatrix Cholesky factor to a dense Tensor
4992//       dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
4993//           cholesky_sparse_matrices, tf.float32)
4994//
4995//       # Evaluate the dense Tensor value.
4996//       dense_cholesky_value = sess.run(dense_cholesky)
4997// ```
4998//
4999// `dense_cholesky_value` stores the dense Cholesky factor:
5000//
5001// ```
5002//     [[  1.  0.    0.    0.]
5003//      [  0.  1.41  0.    0.]
5004//      [  0.  0.70  1.58  0.]
5005//      [  0.  0.    0.    2.]]
5006// ```
//
5013// Arguments:
5014//	input: A `CSRSparseMatrix`.
5015//	permutation: A fill-in reducing permutation matrix.
//	type: The type of `input`.
//
// Returns The sparse Cholesky decomposition of `input`.
5019func SparseMatrixSparseCholesky(scope *Scope, input tf.Output, permutation tf.Output, type_ tf.DataType) (output tf.Output) {
5020	if scope.Err() != nil {
5021		return
5022	}
5023	attrs := map[string]interface{}{"type": type_}
5024	opspec := tf.OpSpec{
5025		Type: "SparseMatrixSparseCholesky",
5026		Input: []tf.Input{
5027			input, permutation,
5028		},
5029		Attrs: attrs,
5030	}
5031	op := scope.AddOperation(opspec)
5032	return op.Output(0)
5033}
5034
5035// SparseMatrixTransposeAttr is an optional argument to SparseMatrixTranspose.
5036type SparseMatrixTransposeAttr func(optionalAttr)
5037
5038// SparseMatrixTransposeConjugate sets the optional conjugate attribute to value.
5039//
5040// value: Indicates whether `input` should be conjugated.
5041// If not specified, defaults to false
5042func SparseMatrixTransposeConjugate(value bool) SparseMatrixTransposeAttr {
5043	return func(m optionalAttr) {
5044		m["conjugate"] = value
5045	}
5046}
5047
5048// Transposes the inner (matrix) dimensions of a CSRSparseMatrix.
5049//
5050// Transposes the inner (matrix) dimensions of a SparseMatrix and optionally
5051// conjugates its values.
5052//
5053// Arguments:
5054//	input: A CSRSparseMatrix.
5055//
5056//
5057// Returns A CSRSparseMatrix.
5058func SparseMatrixTranspose(scope *Scope, input tf.Output, type_ tf.DataType, optional ...SparseMatrixTransposeAttr) (output tf.Output) {
5059	if scope.Err() != nil {
5060		return
5061	}
5062	attrs := map[string]interface{}{"type": type_}
5063	for _, a := range optional {
5064		a(attrs)
5065	}
5066	opspec := tf.OpSpec{
5067		Type: "SparseMatrixTranspose",
5068		Input: []tf.Input{
5069			input,
5070		},
5071		Attrs: attrs,
5072	}
5073	op := scope.AddOperation(opspec)
5074	return op.Output(0)
5075}
5076
5077// Slice a `SparseTensor` based on the `start` and `size`.
5078//
5079// For example, if the input is
5080//
5081//     input_tensor = shape = [2, 7]
5082//     [    a   d e  ]
5083//     [b c          ]
5084//
5085// Graphically the output tensors are:
5086//
5087//     sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
5088//     [    a  ]
5089//     [b c    ]
5090//
5091//     sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
5092//     [ d e  ]
5093//     [      ]
5094//
5095// Arguments:
//	indices: 2-D tensor representing the indices of the sparse tensor.
//	values: 1-D tensor representing the values of the sparse tensor.
//	shape: 1-D tensor representing the shape of the sparse tensor.
//	start: 1-D tensor representing the start of the slice.
//	size: 1-D tensor representing the size of the slice.
//
// Returns:
//	output_indices: 2-D tensor representing the indices of the output sparse tensor.
//	output_values: 1-D tensor representing the values of the output sparse tensor.
//	output_shape: 1-D tensor representing the shape of the output sparse tensor.
5110func SparseSlice(scope *Scope, indices tf.Output, values tf.Output, shape tf.Output, start tf.Output, size tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
5111	if scope.Err() != nil {
5112		return
5113	}
5114	opspec := tf.OpSpec{
5115		Type: "SparseSlice",
5116		Input: []tf.Input{
5117			indices, values, shape, start, size,
5118		},
5119	}
5120	op := scope.AddOperation(opspec)
5121	return op.Output(0), op.Output(1), op.Output(2)
5122}
5123
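// A brief usage sketch for SparseSlice, matching the example above
// (illustrative only; assumes the NewScope and Const helpers):
//
//	s := NewScope()
//	idx := Const(s, [][]int64{{0, 2}, {0, 4}, {0, 5}, {1, 0}, {1, 1}})
//	vals := Const(s, []string{"a", "d", "e", "b", "c"})
//	shape := Const(s, []int64{2, 7})
//	start := Const(s, []int64{0, 0})
//	size := Const(s, []int64{2, 4})
//	// Keeps the entries whose column index falls in [0, 4): "a", "b", "c".
//	oi, ov, os := SparseSlice(s, idx, vals, shape, start, size)
//	_, _, _ = oi, ov, os
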
5124// SparseMatrixSparseMatMulAttr is an optional argument to SparseMatrixSparseMatMul.
5125type SparseMatrixSparseMatMulAttr func(optionalAttr)
5126
5127// SparseMatrixSparseMatMulTransposeA sets the optional transpose_a attribute to value.
5128//
5129// value: Indicates whether `a` should be transposed.
5130// If not specified, defaults to false
5131func SparseMatrixSparseMatMulTransposeA(value bool) SparseMatrixSparseMatMulAttr {
5132	return func(m optionalAttr) {
5133		m["transpose_a"] = value
5134	}
5135}
5136
5137// SparseMatrixSparseMatMulTransposeB sets the optional transpose_b attribute to value.
5138//
5139// value: Indicates whether `b` should be transposed.
5140// If not specified, defaults to false
5141func SparseMatrixSparseMatMulTransposeB(value bool) SparseMatrixSparseMatMulAttr {
5142	return func(m optionalAttr) {
5143		m["transpose_b"] = value
5144	}
5145}
5146
5147// SparseMatrixSparseMatMulAdjointA sets the optional adjoint_a attribute to value.
5148//
5149// value: Indicates whether `a` should be conjugate-transposed.
5150// If not specified, defaults to false
5151func SparseMatrixSparseMatMulAdjointA(value bool) SparseMatrixSparseMatMulAttr {
5152	return func(m optionalAttr) {
5153		m["adjoint_a"] = value
5154	}
5155}
5156
5157// SparseMatrixSparseMatMulAdjointB sets the optional adjoint_b attribute to value.
5158//
5159// value: Indicates whether `b` should be conjugate-transposed.
5160// If not specified, defaults to false
5161func SparseMatrixSparseMatMulAdjointB(value bool) SparseMatrixSparseMatMulAttr {
5162	return func(m optionalAttr) {
5163		m["adjoint_b"] = value
5164	}
5165}
5166
5167// Sparse-matrix-multiplies two CSR matrices `a` and `b`.
5168//
5169// Performs a matrix multiplication of a sparse matrix `a` with a sparse matrix
5170// `b`; returns a sparse matrix `a * b`, unless either `a` or `b` is transposed or
5171// adjointed.
5172//
5173// Each matrix may be transposed or adjointed (conjugated and transposed)
5174// according to the Boolean parameters `transpose_a`, `adjoint_a`, `transpose_b`
5175// and `adjoint_b`. At most one of `transpose_a` or `adjoint_a` may be True.
5176// Similarly, at most one of `transpose_b` or `adjoint_b` may be True.
5177//
5178// The inputs must have compatible shapes. That is, the inner dimension of `a`
5179// must be equal to the outer dimension of `b`. This requirement is adjusted
5180// according to whether either `a` or `b` is transposed or adjointed.
5181//
5182// The `type` parameter denotes the type of the matrix elements. Both `a` and `b`
5183// must have the same type. The supported types are: `float32`, `float64`,
5184// `complex64` and `complex128`.
5185//
5186// Both `a` and `b` must have the same rank. Broadcasting is not supported. If they
5187// have rank 3, each batch of 2D CSRSparseMatrices within `a` and `b` must have the
5188// same dense shape.
5189//
5190// The sparse matrix product may have numeric (non-structural) zeros.
5191// TODO(anudhyan): Consider adding a boolean attribute to control whether to prune
5192// zeros.
5193//
5194// Usage example:
5195//
5196// ```python
5197//     from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
5198//
5199//     a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]])
5200//     a_values = np.array([1.0, 5.0, -1.0, -2.0], np.float32)
5201//     a_dense_shape = [4, 5]
5202//
5203//     b_indices = np.array([[0, 0], [3, 0], [3, 1]])
5204//     b_values = np.array([2.0, 7.0, 8.0], np.float32)
5205//     b_dense_shape = [5, 3]
5206//
5207//     with tf.Session() as sess:
5208//       # Define (COO format) Sparse Tensors over Numpy arrays
5209//       a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
5210//       b_st = tf.sparse.SparseTensor(b_indices, b_values, b_dense_shape)
5211//
5212//       # Convert SparseTensors to CSR SparseMatrix
5213//       a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
5214//           a_st.indices, a_st.values, a_st.dense_shape)
5215//       b_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
5216//           b_st.indices, b_st.values, b_st.dense_shape)
5217//
5218//       # Compute the CSR SparseMatrix matrix multiplication
5219//       c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
5220//           a=a_sm, b=b_sm, type=tf.float32)
5221//
5222//       # Convert the CSR SparseMatrix product to a dense Tensor
5223//       c_sm_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
5224//           c_sm, tf.float32)
5225//       # Evaluate the dense Tensor value
5226//       c_sm_dense_value = sess.run(c_sm_dense)
5227// ```
5228//
5229// `c_sm_dense_value` stores the dense matrix product:
5230//
5231// ```
5232//     [[  2.   0.   0.]
5233//      [  0.   0.   0.]
5234//      [ 35.  40.   0.]
5235//      [ -4.   0.   0.]]
5236// ```
5237//
5238// a: A `CSRSparseMatrix`.
5239// b: A `CSRSparseMatrix` with the same type and rank as `a`.
5240// type: The type of both `a` and `b`.
// transpose_a: If True, `a` is transposed before multiplication.
// transpose_b: If True, `b` is transposed before multiplication.
// adjoint_a: If True, `a` is adjointed before multiplication.
// adjoint_b: If True, `b` is adjointed before multiplication.
5245//
5246// Arguments:
5247//	a: A CSRSparseMatrix.
5248//	b: A CSRSparseMatrix.
5249//
5250//
5251// Returns A CSRSparseMatrix.
5252func SparseMatrixSparseMatMul(scope *Scope, a tf.Output, b tf.Output, type_ tf.DataType, optional ...SparseMatrixSparseMatMulAttr) (c tf.Output) {
5253	if scope.Err() != nil {
5254		return
5255	}
5256	attrs := map[string]interface{}{"type": type_}
5257	for _, a := range optional {
5258		a(attrs)
5259	}
5260	opspec := tf.OpSpec{
5261		Type: "SparseMatrixSparseMatMul",
5262		Input: []tf.Input{
5263			a, b,
5264		},
5265		Attrs: attrs,
5266	}
5267	op := scope.AddOperation(opspec)
5268	return op.Output(0)
5269}
5270
5271// Element-wise multiplication of a sparse matrix with a dense tensor.
5272//
5273// Returns a sparse matrix.
5274//
// The dense tensor `b` must either be a scalar, or `a` must be a rank-3
// `SparseMatrix`; in the latter case `b` must be shaped `[batch_size, 1, 1]` and
// the multiply operation broadcasts.
5278//
5279// **NOTE** even if `b` is zero, the sparsity structure of the output does not
5280// change.
5281//
5282// Arguments:
5283//	a: A CSRSparseMatrix.
5284//	b: A dense tensor.
5285//
// Returns A CSRSparseMatrix.
5287func SparseMatrixMul(scope *Scope, a tf.Output, b tf.Output) (output tf.Output) {
5288	if scope.Err() != nil {
5289		return
5290	}
5291	opspec := tf.OpSpec{
5292		Type: "SparseMatrixMul",
5293		Input: []tf.Input{
5294			a, b,
5295		},
5296	}
5297	op := scope.AddOperation(opspec)
5298	return op.Output(0)
5299}
5300
5301// SparseMatrixMatMulAttr is an optional argument to SparseMatrixMatMul.
5302type SparseMatrixMatMulAttr func(optionalAttr)
5303
5304// SparseMatrixMatMulTransposeA sets the optional transpose_a attribute to value.
5305//
5306// value: Indicates whether `a` should be transposed.
5307// If not specified, defaults to false
5308func SparseMatrixMatMulTransposeA(value bool) SparseMatrixMatMulAttr {
5309	return func(m optionalAttr) {
5310		m["transpose_a"] = value
5311	}
5312}
5313
5314// SparseMatrixMatMulTransposeB sets the optional transpose_b attribute to value.
5315//
5316// value: Indicates whether `b` should be transposed.
5317// If not specified, defaults to false
5318func SparseMatrixMatMulTransposeB(value bool) SparseMatrixMatMulAttr {
5319	return func(m optionalAttr) {
5320		m["transpose_b"] = value
5321	}
5322}
5323
5324// SparseMatrixMatMulAdjointA sets the optional adjoint_a attribute to value.
5325//
5326// value: Indicates whether `a` should be conjugate-transposed.
5327// If not specified, defaults to false
5328func SparseMatrixMatMulAdjointA(value bool) SparseMatrixMatMulAttr {
5329	return func(m optionalAttr) {
5330		m["adjoint_a"] = value
5331	}
5332}
5333
5334// SparseMatrixMatMulAdjointB sets the optional adjoint_b attribute to value.
5335//
5336// value: Indicates whether `b` should be conjugate-transposed.
5337// If not specified, defaults to false
5338func SparseMatrixMatMulAdjointB(value bool) SparseMatrixMatMulAttr {
5339	return func(m optionalAttr) {
5340		m["adjoint_b"] = value
5341	}
5342}
5343
5344// SparseMatrixMatMulTransposeOutput sets the optional transpose_output attribute to value.
5345//
5346// value: Transposes the product of `a` and `b`.
5347// If not specified, defaults to false
5348func SparseMatrixMatMulTransposeOutput(value bool) SparseMatrixMatMulAttr {
5349	return func(m optionalAttr) {
5350		m["transpose_output"] = value
5351	}
5352}
5353
5354// SparseMatrixMatMulConjugateOutput sets the optional conjugate_output attribute to value.
5355//
5356// value: Conjugates the product of `a` and `b`.
5357// If not specified, defaults to false
5358func SparseMatrixMatMulConjugateOutput(value bool) SparseMatrixMatMulAttr {
5359	return func(m optionalAttr) {
5360		m["conjugate_output"] = value
5361	}
5362}
5363
5364// Matrix-multiplies a sparse matrix with a dense matrix.
5365//
5366// Returns a dense matrix.
// For inputs A and B, where A is CSR and B is dense, this op returns a dense C.
5368//
5369// If transpose_output is false, returns:
5370// ```
5371//   C = A . B
5372// ```
5373//
5374// If transpose_output is `true`, returns:
5375// ```
5376//   C = transpose(A . B) = transpose(B) . transpose(A)
5377// ```
5378// where the transposition is performed along the two innermost (matrix)
5379// dimensions.
5380//
5381// If conjugate_output is `true`, returns:
5382// ```
5383//   C = conjugate(A . B) = conjugate(A) . conjugate(B)
5384// ```
5385//
5386// If both conjugate_output and transpose_output are `true`, returns:
5387// ```
5388//   C = conjugate(transpose(A . B)) = conjugate(transpose(B)) .
5389//                                     conjugate(transpose(A))
5390// ```
5391//
5392// Arguments:
5393//	a: A CSRSparseMatrix.
5394//	b: A dense tensor.
5395//
5396// Returns A dense output tensor.
5397func SparseMatrixMatMul(scope *Scope, a tf.Output, b tf.Output, optional ...SparseMatrixMatMulAttr) (output tf.Output) {
5398	if scope.Err() != nil {
5399		return
5400	}
5401	attrs := map[string]interface{}{}
5402	for _, a := range optional {
5403		a(attrs)
5404	}
5405	opspec := tf.OpSpec{
5406		Type: "SparseMatrixMatMul",
5407		Input: []tf.Input{
5408			a, b,
5409		},
5410		Attrs: attrs,
5411	}
5412	op := scope.AddOperation(opspec)
5413	return op.Output(0)
5414}
5415
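// A brief usage sketch for SparseMatrixMatMul (illustrative only; assumes the
// NewScope and Const helpers, and uses SparseTensorToCSRSparseMatrix, defined
// later in this file, to build the CSR operand):
//
//	s := NewScope()
//	// A 2x3 CSR sparse matrix built from COO components.
//	a := SparseTensorToCSRSparseMatrix(s,
//		Const(s, [][]int64{{0, 0}, {1, 2}}),
//		Const(s, []float32{1, 5}),
//		Const(s, []int64{2, 3}))
//	b := Const(s, [][]float32{{1, 0}, {0, 1}, {2, 0}})
//	// Dense [2, 2] product; transpose_output transposes the result in place.
//	c := SparseMatrixMatMul(s, a, b, SparseMatrixMatMulTransposeOutput(true))
//	_ = c
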
5416// Reads out the CSR components at batch `index`.
5417//
5418// This op is meant only for debugging / testing, and its interface is not expected
5419// to be stable.
5420//
5421// Arguments:
5422//	csr_sparse_matrix: A batched CSRSparseMatrix.
5423//	index: The index in `csr_sparse_matrix`'s batch.
5424//
5425//
5426// Returns:
5427//	row_ptrs: An array containing CSR matrix row pointers.
5428//	col_inds: An array containing CSR matrix column indices.
5429//	values: An array containing CSR matrix nonzero values.
5430func CSRSparseMatrixComponents(scope *Scope, csr_sparse_matrix tf.Output, index tf.Output, type_ tf.DataType) (row_ptrs tf.Output, col_inds tf.Output, values tf.Output) {
5431	if scope.Err() != nil {
5432		return
5433	}
5434	attrs := map[string]interface{}{"type": type_}
5435	opspec := tf.OpSpec{
5436		Type: "CSRSparseMatrixComponents",
5437		Input: []tf.Input{
5438			csr_sparse_matrix, index,
5439		},
5440		Attrs: attrs,
5441	}
5442	op := scope.AddOperation(opspec)
5443	return op.Output(0), op.Output(1), op.Output(2)
5444}
5445
5446// Convert a (possibly batched) CSRSparseMatrix to dense.
5447//
5448// Arguments:
5449//	sparse_input: A batched CSRSparseMatrix.
5450//
5451//
5452// Returns A dense tensor.
5453func CSRSparseMatrixToDense(scope *Scope, sparse_input tf.Output, type_ tf.DataType) (dense_output tf.Output) {
5454	if scope.Err() != nil {
5455		return
5456	}
5457	attrs := map[string]interface{}{"type": type_}
5458	opspec := tf.OpSpec{
5459		Type: "CSRSparseMatrixToDense",
5460		Input: []tf.Input{
5461			sparse_input,
5462		},
5463		Attrs: attrs,
5464	}
5465	op := scope.AddOperation(opspec)
5466	return op.Output(0)
5467}
5468
5469// Converts a SparseTensor to a (possibly batched) CSRSparseMatrix.
5470//
5471// Arguments:
5472//	indices: SparseTensor indices.
5473//	values: SparseTensor values.
5474//	dense_shape: SparseTensor dense shape.
5475//
5476// Returns A (possibly batched) CSRSparseMatrix.
5477func SparseTensorToCSRSparseMatrix(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output) (sparse_matrix tf.Output) {
5478	if scope.Err() != nil {
5479		return
5480	}
5481	opspec := tf.OpSpec{
5482		Type: "SparseTensorToCSRSparseMatrix",
5483		Input: []tf.Input{
5484			indices, values, dense_shape,
5485		},
5486	}
5487	op := scope.AddOperation(opspec)
5488	return op.Output(0)
5489}
5490
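// A brief round-trip sketch combining SparseTensorToCSRSparseMatrix with
// CSRSparseMatrixToDense from above (illustrative only; assumes the NewScope
// and Const helpers):
//
//	s := NewScope()
//	indices := Const(s, [][]int64{{0, 0}, {2, 1}})
//	values := Const(s, []float32{1, 3})
//	denseShape := Const(s, []int64{3, 2})
//	sm := SparseTensorToCSRSparseMatrix(s, indices, values, denseShape)
//	// Back to a dense [3, 2] tensor; type must match the values' dtype.
//	dense := CSRSparseMatrixToDense(s, sm, tf.Float)
//	_ = dense
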
5491// SparseToSparseSetOperationAttr is an optional argument to SparseToSparseSetOperation.
5492type SparseToSparseSetOperationAttr func(optionalAttr)
5493
5494// SparseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value.
5495// If not specified, defaults to true
5496func SparseToSparseSetOperationValidateIndices(value bool) SparseToSparseSetOperationAttr {
5497	return func(m optionalAttr) {
5498		m["validate_indices"] = value
5499	}
5500}
5501
5502// Applies set operation along last dimension of 2 `SparseTensor` inputs.
5503//
5504// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
5505//
5506// If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the
5507// order and range of `set1` and `set2` indices.
5508//
5509// Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,
5510// and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same
5511// as `set2`. Dimension `n` contains values in a set, duplicates are allowed but
5512// ignored.
5513//
5514// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
5515// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
5516// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
5517// ignored.
//
5522// Output `result` is a `SparseTensor` represented by `result_indices`,
5523// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
5524// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
5525// dimension contains the result of `set_operation` applied to the corresponding
5526// `[0...n-1]` dimension of `set`.
5527//
5528// Arguments:
5529//	set1_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
5530// order.
5531//	set1_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
5532// order.
5533//	set1_shape: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must
5534// be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the
5535// max set size across `0...n-1` dimensions.
5536//	set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
5537// order.
5538//	set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
5539// order.
5540//	set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
5541// be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the
5542// max set size across `0...n-1` dimensions.
5543//
5544//
5545// Returns:
5546//	result_indices: 2D indices of a `SparseTensor`.
5547//	result_values: 1D values of a `SparseTensor`.
5548//	result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
5549// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
5550// is the max result set size across all `0...n-1` dimensions.
5551func SparseToSparseSetOperation(scope *Scope, set1_indices tf.Output, set1_values tf.Output, set1_shape tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...SparseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
5552	if scope.Err() != nil {
5553		return
5554	}
5555	attrs := map[string]interface{}{"set_operation": set_operation}
5556	for _, a := range optional {
5557		a(attrs)
5558	}
5559	opspec := tf.OpSpec{
5560		Type: "SparseToSparseSetOperation",
5561		Input: []tf.Input{
5562			set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape,
5563		},
5564		Attrs: attrs,
5565	}
5566	op := scope.AddOperation(opspec)
5567	return op.Output(0), op.Output(1), op.Output(2)
5568}
5569
5570// DenseToDenseSetOperationAttr is an optional argument to DenseToDenseSetOperation.
5571type DenseToDenseSetOperationAttr func(optionalAttr)
5572
5573// DenseToDenseSetOperationValidateIndices sets the optional validate_indices attribute to value.
5574// If not specified, defaults to true
5575func DenseToDenseSetOperationValidateIndices(value bool) DenseToDenseSetOperationAttr {
5576	return func(m optionalAttr) {
5577		m["validate_indices"] = value
5578	}
5579}
5580
5581// Applies set operation along last dimension of 2 `Tensor` inputs.
5582//
5583// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
5584//
5585// Output `result` is a `SparseTensor` represented by `result_indices`,
5586// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
5587// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
5588// dimension contains the result of `set_operation` applied to the corresponding
5589// `[0...n-1]` dimension of `set`.
5590//
5591// Arguments:
5592//	set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
5593// Dimension `n` contains values in a set, duplicates are allowed but ignored.
5594//	set2: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.
5595// Dimension `n` contains values in a set, duplicates are allowed but ignored.
5596//
5597//
5598// Returns:
5599//	result_indices: 2D indices of a `SparseTensor`.
5600//	result_values: 1D values of a `SparseTensor`.
5601//	result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
5602// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
5603// is the max result set size across all `0...n-1` dimensions.
5604func DenseToDenseSetOperation(scope *Scope, set1 tf.Output, set2 tf.Output, set_operation string, optional ...DenseToDenseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
5605	if scope.Err() != nil {
5606		return
5607	}
5608	attrs := map[string]interface{}{"set_operation": set_operation}
5609	for _, a := range optional {
5610		a(attrs)
5611	}
5612	opspec := tf.OpSpec{
5613		Type: "DenseToDenseSetOperation",
5614		Input: []tf.Input{
5615			set1, set2,
5616		},
5617		Attrs: attrs,
5618	}
5619	op := scope.AddOperation(opspec)
5620	return op.Output(0), op.Output(1), op.Output(2)
5621}
5622
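// A brief usage sketch for DenseToDenseSetOperation (illustrative only;
// assumes the NewScope and Const helpers; the set_operation names "a-b",
// "b-a", "intersection" and "union" are assumed from the TensorFlow set-ops
// documentation):
//
//	s := NewScope()
//	set1 := Const(s, [][]int64{{1, 2}, {3, 4}})
//	set2 := Const(s, [][]int64{{2, 9}, {3, 3}})
//	// Row-wise intersection: {2} for row 0 and {3} for row 1, returned as a
//	// SparseTensor triple.
//	ri, rv, rs := DenseToDenseSetOperation(s, set1, set2, "intersection")
//	_, _, _ = ri, rv, rs
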
5623// RecvAttr is an optional argument to Recv.
5624type RecvAttr func(optionalAttr)
5625
5626// RecvClientTerminated sets the optional client_terminated attribute to value.
5627//
5628// value: If set to true, this indicates that the node was added
5629// to the graph as a result of a client-side feed or fetch of Tensor data,
5630// in which case the corresponding send or recv is expected to be managed
5631// locally by the caller.
5632// If not specified, defaults to false
5633func RecvClientTerminated(value bool) RecvAttr {
5634	return func(m optionalAttr) {
5635		m["client_terminated"] = value
5636	}
5637}
5638
5639// Receives the named tensor from send_device on recv_device.
5640//
5641// Arguments:
5642//
5643//	tensor_name: The name of the tensor to receive.
5644//	send_device: The name of the device sending the tensor.
5645//	send_device_incarnation: The current incarnation of send_device.
5646//	recv_device: The name of the device receiving the tensor.
5647//
5648// Returns The tensor to receive.
5649func Recv(scope *Scope, tensor_type tf.DataType, tensor_name string, send_device string, send_device_incarnation int64, recv_device string, optional ...RecvAttr) (tensor tf.Output) {
5650	if scope.Err() != nil {
5651		return
5652	}
5653	attrs := map[string]interface{}{"tensor_type": tensor_type, "tensor_name": tensor_name, "send_device": send_device, "send_device_incarnation": send_device_incarnation, "recv_device": recv_device}
5654	for _, a := range optional {
5655		a(attrs)
5656	}
5657	opspec := tf.OpSpec{
5658		Type: "Recv",
5659
5660		Attrs: attrs,
5661	}
5662	op := scope.AddOperation(opspec)
5663	return op.Output(0)
5664}
5665
5666// Computes fingerprints of the input strings.
5667//
5668// Arguments:
5669//	input: vector of strings to compute fingerprints on.
5670//
// Returns an (N, 2)-shaped matrix where N is the number of elements in the input
5672// vector. Each row contains the low and high parts of the fingerprint.
5673func SdcaFprint(scope *Scope, input tf.Output) (output tf.Output) {
5674	if scope.Err() != nil {
5675		return
5676	}
5677	opspec := tf.OpSpec{
5678		Type: "SdcaFprint",
5679		Input: []tf.Input{
5680			input,
5681		},
5682	}
5683	op := scope.AddOperation(opspec)
5684	return op.Output(0)
5685}
5686
5687// SdcaOptimizerV2Attr is an optional argument to SdcaOptimizerV2.
5688type SdcaOptimizerV2Attr func(optionalAttr)
5689
5690// SdcaOptimizerV2Adaptive sets the optional adaptive attribute to value.
5691//
5692// value: Whether to use Adaptive SDCA for the inner loop.
5693// If not specified, defaults to true
5694func SdcaOptimizerV2Adaptive(value bool) SdcaOptimizerV2Attr {
5695	return func(m optionalAttr) {
5696		m["adaptive"] = value
5697	}
5698}
5699
5700// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
5701//
// linear models with L1 + L2 regularization. As the global optimization objective
// is strongly convex, the optimizer optimizes the dual objective at each step. The
// optimizer applies each update one example at a time. Examples are sampled
// uniformly, and the optimizer is learning-rate free and enjoys a linear
// convergence rate.
5707//
5708// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
5709// Shai Shalev-Shwartz, Tong Zhang. 2012
5710//
5711// $$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$
5712//
5713// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
5714// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
5715// Peter Richtarik, Martin Takac. 2015
5716//
5717// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
5718// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
5719//
5720// Arguments:
5721//	sparse_example_indices: a list of vectors which contain example indices.
5722//	sparse_feature_indices: a list of vectors which contain feature indices.
//	sparse_feature_values: a list of vectors which contain the feature values
// associated with each feature group.
//	dense_features: a list of matrices which contain the dense feature values.
//	example_weights: a vector which contains the weight associated with each
// example.
//	example_labels: a vector which contains the label/target associated with each
// example.
//	sparse_indices: a list of vectors where each value is the indices which have
// corresponding weights in sparse_weights. This field may be omitted for the
// dense approach.
5733//	sparse_weights: a list of vectors where each value is the weight associated with
5734// a sparse feature group.
5735//	dense_weights: a list of vectors where the values are the weights associated
5736// with a dense feature group.
5737//	example_state_data: a list of vectors containing the example state data.
5738//	loss_type: Type of the primal loss. Currently SdcaSolver supports logistic,
5739// squared and hinge losses.
5740//	l1: Symmetric l1 regularization strength.
5741//	l2: Symmetric l2 regularization strength.
5742//	num_loss_partitions: Number of partitions of the global loss function.
5743//	num_inner_iterations: Number of iterations per mini-batch.
5744//
5745// Returns:
5746//	out_example_state_data: a list of vectors containing the updated example state
5747// data.
5748//	out_delta_sparse_weights: a list of vectors where each value is the delta
5749// weights associated with a sparse feature group.
5750//	out_delta_dense_weights: a list of vectors where the values are the delta
5751// weights associated with a dense feature group.
5752func SdcaOptimizerV2(scope *Scope, sparse_example_indices []tf.Output, sparse_feature_indices []tf.Output, sparse_feature_values []tf.Output, dense_features []tf.Output, example_weights tf.Output, example_labels tf.Output, sparse_indices []tf.Output, sparse_weights []tf.Output, dense_weights []tf.Output, example_state_data tf.Output, loss_type string, l1 float32, l2 float32, num_loss_partitions int64, num_inner_iterations int64, optional ...SdcaOptimizerV2Attr) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, out_delta_dense_weights []tf.Output) {
5753	if scope.Err() != nil {
5754		return
5755	}
5756	attrs := map[string]interface{}{"loss_type": loss_type, "l1": l1, "l2": l2, "num_loss_partitions": num_loss_partitions, "num_inner_iterations": num_inner_iterations}
5757	for _, a := range optional {
5758		a(attrs)
5759	}
5760	opspec := tf.OpSpec{
5761		Type: "SdcaOptimizerV2",
5762		Input: []tf.Input{
5763			tf.OutputList(sparse_example_indices), tf.OutputList(sparse_feature_indices), tf.OutputList(sparse_feature_values), tf.OutputList(dense_features), example_weights, example_labels, tf.OutputList(sparse_indices), tf.OutputList(sparse_weights), tf.OutputList(dense_weights), example_state_data,
5764		},
5765		Attrs: attrs,
5766	}
5767	op := scope.AddOperation(opspec)
5768	if scope.Err() != nil {
5769		return
5770	}
5771	var idx int
5772	var err error
5773	out_example_state_data = op.Output(idx)
5774	if out_delta_sparse_weights, idx, err = makeOutputList(op, idx, "out_delta_sparse_weights"); err != nil {
5775		scope.UpdateErr("SdcaOptimizerV2", err)
5776		return
5777	}
5778	if out_delta_dense_weights, idx, err = makeOutputList(op, idx, "out_delta_dense_weights"); err != nil {
5779		scope.UpdateErr("SdcaOptimizerV2", err)
5780		return
5781	}
5782	return out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights
5783}
5784
5785// SdcaOptimizerAttr is an optional argument to SdcaOptimizer.
5786type SdcaOptimizerAttr func(optionalAttr)
5787
5788// SdcaOptimizerAdaptative sets the optional adaptative attribute to value.
5789//
5790// value: Whether to use Adaptive SDCA for the inner loop.
5791// If not specified, defaults to true
5792func SdcaOptimizerAdaptative(value bool) SdcaOptimizerAttr {
5793	return func(m optionalAttr) {
5794		m["adaptative"] = value
5795	}
5796}
5797
5798// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
5799//
// linear models with L1 + L2 regularization. As the global optimization objective
// is strongly convex, the optimizer optimizes the dual objective at each step. The
// optimizer applies each update one example at a time. Examples are sampled
// uniformly, and the optimizer is learning-rate free and enjoys a linear
// convergence rate.
5805//
5806// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
5807// Shai Shalev-Shwartz, Tong Zhang. 2012
5808//
5809// $$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$
5810//
5811// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
5812// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
5813// Peter Richtarik, Martin Takac. 2015
5814//
5815// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
5816// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
5817//
5818// Arguments:
5819//	sparse_example_indices: a list of vectors which contain example indices.
5820//	sparse_feature_indices: a list of vectors which contain feature indices.
//	sparse_feature_values: a list of vectors which contain the feature values
// associated with each feature group.
//	dense_features: a list of matrices which contain the dense feature values.
//	example_weights: a vector which contains the weight associated with each
// example.
//	example_labels: a vector which contains the label/target associated with each
// example.
//	sparse_indices: a list of vectors where each value is the indices which have
// corresponding weights in sparse_weights. This field may be omitted for the
// dense approach.
5831//	sparse_weights: a list of vectors where each value is the weight associated with
5832// a sparse feature group.
5833//	dense_weights: a list of vectors where the values are the weights associated
5834// with a dense feature group.
5835//	example_state_data: a list of vectors containing the example state data.
5836//	loss_type: Type of the primal loss. Currently SdcaSolver supports logistic,
5837// squared and hinge losses.
5838//	l1: Symmetric l1 regularization strength.
5839//	l2: Symmetric l2 regularization strength.
5840//	num_loss_partitions: Number of partitions of the global loss function.
5841//	num_inner_iterations: Number of iterations per mini-batch.
5842//
5843// Returns:
5844//	out_example_state_data: a list of vectors containing the updated example state
5845// data.
5846//	out_delta_sparse_weights: a list of vectors where each value is the delta
5847// weights associated with a sparse feature group.
5848//	out_delta_dense_weights: a list of vectors where the values are the delta
5849// weights associated with a dense feature group.
5850func SdcaOptimizer(scope *Scope, sparse_example_indices []tf.Output, sparse_feature_indices []tf.Output, sparse_feature_values []tf.Output, dense_features []tf.Output, example_weights tf.Output, example_labels tf.Output, sparse_indices []tf.Output, sparse_weights []tf.Output, dense_weights []tf.Output, example_state_data tf.Output, loss_type string, l1 float32, l2 float32, num_loss_partitions int64, num_inner_iterations int64, optional ...SdcaOptimizerAttr) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, out_delta_dense_weights []tf.Output) {
5851	if scope.Err() != nil {
5852		return
5853	}
5854	attrs := map[string]interface{}{"loss_type": loss_type, "l1": l1, "l2": l2, "num_loss_partitions": num_loss_partitions, "num_inner_iterations": num_inner_iterations}
5855	for _, a := range optional {
5856		a(attrs)
5857	}
5858	opspec := tf.OpSpec{
5859		Type: "SdcaOptimizer",
5860		Input: []tf.Input{
5861			tf.OutputList(sparse_example_indices), tf.OutputList(sparse_feature_indices), tf.OutputList(sparse_feature_values), tf.OutputList(dense_features), example_weights, example_labels, tf.OutputList(sparse_indices), tf.OutputList(sparse_weights), tf.OutputList(dense_weights), example_state_data,
5862		},
5863		Attrs: attrs,
5864	}
5865	op := scope.AddOperation(opspec)
5866	if scope.Err() != nil {
5867		return
5868	}
5869	var idx int
5870	var err error
5871	out_example_state_data = op.Output(idx)
5872	if out_delta_sparse_weights, idx, err = makeOutputList(op, idx, "out_delta_sparse_weights"); err != nil {
5873		scope.UpdateErr("SdcaOptimizer", err)
5874		return
5875	}
5876	if out_delta_dense_weights, idx, err = makeOutputList(op, idx, "out_delta_dense_weights"); err != nil {
5877		scope.UpdateErr("SdcaOptimizer", err)
5878		return
5879	}
5880	return out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights
5881}
5882
5883// This op consumes a lock created by `MutexLock`.
5884//
5885// This op exists to consume a tensor created by `MutexLock` (other than
// direct control dependencies).  It should be the only op that consumes the tensor,
5887// and will raise an error if it is not.  Its only purpose is to keep the
5888// mutex lock tensor alive until it is consumed by this op.
5889//
5890// **NOTE**: This operation must run on the same device as its input.  This may
5891// be enforced via the `colocate_with` mechanism.
5892//
5893// Arguments:
5894//	mutex_lock: A tensor returned by `MutexLock`.
5895//
5896// Returns the created operation.
5897func ConsumeMutexLock(scope *Scope, mutex_lock tf.Output) (o *tf.Operation) {
5898	if scope.Err() != nil {
5899		return
5900	}
5901	opspec := tf.OpSpec{
5902		Type: "ConsumeMutexLock",
5903		Input: []tf.Input{
5904			mutex_lock,
5905		},
5906	}
5907	return scope.AddOperation(opspec)
5908}
5909
5910// MutexV2Attr is an optional argument to MutexV2.
5911type MutexV2Attr func(optionalAttr)
5912
5913// MutexV2Container sets the optional container attribute to value.
5914//
// value: If non-empty, this mutex is placed in the given container.
5916// Otherwise, a default container is used.
5917// If not specified, defaults to ""
5918func MutexV2Container(value string) MutexV2Attr {
5919	return func(m optionalAttr) {
5920		m["container"] = value
5921	}
5922}
5923
5924// MutexV2SharedName sets the optional shared_name attribute to value.
5925//
// value: If non-empty, this mutex is named in the given bucket
// with this shared_name. Otherwise, the node name is used instead.
5928// If not specified, defaults to ""
5929func MutexV2SharedName(value string) MutexV2Attr {
5930	return func(m optionalAttr) {
5931		m["shared_name"] = value
5932	}
5933}
5934
5935// Creates a Mutex resource that can be locked by `MutexLock`.
5936//
5937// Returns The mutex resource.
5938func MutexV2(scope *Scope, optional ...MutexV2Attr) (resource tf.Output) {
5939	if scope.Err() != nil {
5940		return
5941	}
5942	attrs := map[string]interface{}{}
5943	for _, a := range optional {
5944		a(attrs)
5945	}
5946	opspec := tf.OpSpec{
5947		Type: "MutexV2",
5948
5949		Attrs: attrs,
5950	}
5951	op := scope.AddOperation(opspec)
5952	return op.Output(0)
5953}
5954
5955// Assigns sparse updates to the variable referenced by `resource`.
5956//
5957// This operation computes
5958//
5959//     # Scalar indices
5960//     ref[indices, ...] = updates[...]
5961//
5962//     # Vector indices (for each i)
5963//     ref[indices[i], ...] = updates[i, ...]
5964//
5965//     # High rank indices (for each i, ..., j)
5966//     ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
5967//
5968// Arguments:
5969//	resource: Should be from a `Variable` node.
5970//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of updated values to assign to `ref`.
5972//
5973// Returns the created operation.
5974func ResourceScatterUpdate(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
5975	if scope.Err() != nil {
5976		return
5977	}
5978	opspec := tf.OpSpec{
5979		Type: "ResourceScatterUpdate",
5980		Input: []tf.Input{
5981			resource, indices, updates,
5982		},
5983	}
5984	return scope.AddOperation(opspec)
5985}
5986
5987// Reduces sparse updates into the variable referenced by `resource` using the `min` operation.
5988//
5989// This operation computes
5990//
5991//     # Scalar indices
5992//     ref[indices, ...] = min(ref[indices, ...], updates[...])
5993//
5994//     # Vector indices (for each i)
5995//     ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])
5996//
5997//     # High rank indices (for each i, ..., j)
5998//     ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
5999//
// Duplicate entries are handled correctly: if multiple `indices` reference
// the same location, their contributions are combined by taking the minimum.
6002//
6003// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
6004//
// An illustration of scatter updates: https://www.tensorflow.org/images/ScatterAdd.png
6008//
6009// Arguments:
6010//	resource: Should be from a `Variable` node.
6011//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of values to take the element-wise minimum with `ref`.
6013//
6014// Returns the created operation.
6015func ResourceScatterMin(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
6016	if scope.Err() != nil {
6017		return
6018	}
6019	opspec := tf.OpSpec{
6020		Type: "ResourceScatterMin",
6021		Input: []tf.Input{
6022			resource, indices, updates,
6023		},
6024	}
6025	return scope.AddOperation(opspec)
6026}
6027
6028// Subtracts sparse updates from the variable referenced by `resource`.
6029//
6030// This operation computes
6031//
6032//     # Scalar indices
6033//     ref[indices, ...] -= updates[...]
6034//
6035//     # Vector indices (for each i)
6036//     ref[indices[i], ...] -= updates[i, ...]
6037//
6038//     # High rank indices (for each i, ..., j)
6039//     ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
6040//
// Duplicate entries are handled correctly: if multiple `indices` reference
// the same location, their subtractions accumulate.
6043//
6044// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
6045//
// An illustration of scatter updates: https://www.tensorflow.org/images/ScatterAdd.png
6049//
6050// Arguments:
6051//	resource: Should be from a `Variable` node.
6052//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of values to subtract from `ref`.
6054//
6055// Returns the created operation.
6056func ResourceScatterSub(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
6057	if scope.Err() != nil {
6058		return
6059	}
6060	opspec := tf.OpSpec{
6061		Type: "ResourceScatterSub",
6062		Input: []tf.Input{
6063			resource, indices, updates,
6064		},
6065	}
6066	return scope.AddOperation(opspec)
6067}
6068
6069// Adds sparse updates to the variable referenced by `resource`.
6070//
6071// This operation computes
6072//
6073//     # Scalar indices
6074//     ref[indices, ...] += updates[...]
6075//
6076//     # Vector indices (for each i)
6077//     ref[indices[i], ...] += updates[i, ...]
6078//
6079//     # High rank indices (for each i, ..., j)
6080//     ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
6081//
6082// Duplicate entries are handled correctly: if multiple `indices` reference
6083// the same location, their contributions add.
6084//
6085// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
6086//
// An illustration of scatter updates: https://www.tensorflow.org/images/ScatterAdd.png
6090//
6091// Arguments:
6092//	resource: Should be from a `Variable` node.
6093//	indices: A tensor of indices into the first dimension of `ref`.
6094//	updates: A tensor of updated values to add to `ref`.
6095//
6096// Returns the created operation.
6097func ResourceScatterAdd(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
6098	if scope.Err() != nil {
6099		return
6100	}
6101	opspec := tf.OpSpec{
6102		Type: "ResourceScatterAdd",
6103		Input: []tf.Input{
6104			resource, indices, updates,
6105		},
6106	}
6107	return scope.AddOperation(opspec)
6108}
6109
6110// AssignVariableXlaConcatNDAttr is an optional argument to AssignVariableXlaConcatND.
6111type AssignVariableXlaConcatNDAttr func(optionalAttr)
6112
6113// AssignVariableXlaConcatNDPaddings sets the optional paddings attribute to value.
6114//
6115// value: Optional list of right paddings per dimension to strip from the final merged
6116// tensor. These paddings must not exceed the dimension size of the merged result
6117// prior to stripping paddings.
6118// If not specified, defaults to {}
6119func AssignVariableXlaConcatNDPaddings(value []int64) AssignVariableXlaConcatNDAttr {
6120	return func(m optionalAttr) {
6121		m["paddings"] = value
6122	}
6123}
6124
6125// Concats input tensor across all dimensions.
6126//
// An op which merges slices of the input tensor based on the given `num_concats`
// attribute, optionally strips paddings, and writes the merged tensor without
// paddings to the resource variable.
6130//
6131// This op may be generated via the TPU bridge.
6132//
6133// For example, with `input` tensor:
6134// ```
6135// [[0, 1],
6136//  [4, 5]]
6137// [[2, 3],
6138//  [6, 7]]
6139// [[8, 9],
6140//  [12, 13]]
6141// [[10, 11],
6142//  [14, 15]]
6143// ```
// `num_concats`:
6145// ```
6146// [2, 2]
6147// ```
6148// and `paddings`:
6149// ```
6150// [1, 1]
6151// ```
6152// the expected `outputs` is:
6153// ```
6154// [[0, 1, 2],
6155//  [4, 5, 6],
6156//  [8, 9, 10]]
6157// ```
6158//
6159// Arguments:
6160//	resource: Resource variable for concatenated input tensors across all dimensions.
6161//   }
6162//   in_arg {
6163//     name: "inputs"
6164//     description: <<END
6165// Input tensor slices in row-major order to merge across all dimensions. All
6166// inputs must have the same shape.
6167//   }
6168//   out_arg {
6169//     name: "output"
6170//     description: <<END
6171// Output tensor formed from merging input slices based on num_concats defined.
6172//
6173//	num_concats: Number of ways to merge per dimension.
6174//
6175// Returns the created operation.
6176func AssignVariableXlaConcatND(scope *Scope, resource tf.Output, inputs []tf.Output, num_concats []int64, optional ...AssignVariableXlaConcatNDAttr) (o *tf.Operation) {
6177	if scope.Err() != nil {
6178		return
6179	}
6180	attrs := map[string]interface{}{"num_concats": num_concats}
6181	for _, a := range optional {
6182		a(attrs)
6183	}
6184	opspec := tf.OpSpec{
6185		Type: "AssignVariableXlaConcatND",
6186		Input: []tf.Input{
6187			resource, tf.OutputList(inputs),
6188		},
6189		Attrs: attrs,
6190	}
6191	return scope.AddOperation(opspec)
6192}
6193
6194// VariableShapeAttr is an optional argument to VariableShape.
6195type VariableShapeAttr func(optionalAttr)
6196
6197// VariableShapeOutType sets the optional out_type attribute to value.
6198// If not specified, defaults to DT_INT32
6199func VariableShapeOutType(value tf.DataType) VariableShapeAttr {
6200	return func(m optionalAttr) {
6201		m["out_type"] = value
6202	}
6203}
6204
6205// Returns the shape of the variable pointed to by `resource`.
6206//
6207// This operation returns a 1-D integer tensor representing the shape of `input`.
6208//
6209// For example:
6210//
6211// ```
6212// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
6213// shape(t) ==> [2, 2, 3]
6214// ```
6215func VariableShape(scope *Scope, input tf.Output, optional ...VariableShapeAttr) (output tf.Output) {
6216	if scope.Err() != nil {
6217		return
6218	}
6219	attrs := map[string]interface{}{}
6220	for _, a := range optional {
6221		a(attrs)
6222	}
6223	opspec := tf.OpSpec{
6224		Type: "VariableShape",
6225		Input: []tf.Input{
6226			input,
6227		},
6228		Attrs: attrs,
6229	}
6230	op := scope.AddOperation(opspec)
6231	return op.Output(0)
6232}
6233
6234// Advance the counter of a counter-based RNG.
6235//
6236// The state of the RNG after
6237// `rng_read_and_skip(n)` will be the same as that after `uniform([n])`
6238// (or any other distribution). The actual increment added to the
6239// counter is an unspecified implementation choice.
6240//
6241// Arguments:
6242//	resource: The handle of the resource variable that stores the state of the RNG.
6243//	alg: The RNG algorithm.
6244//	delta: The amount of advancement.
6245//
6246// Returns The old value of the resource variable, before incrementing. Since state size is algorithm-dependent, this output will be right-padded with zeros to reach shape int64[3] (the current maximal state size among algorithms).
6247func RngReadAndSkip(scope *Scope, resource tf.Output, alg tf.Output, delta tf.Output) (value tf.Output) {
6248	if scope.Err() != nil {
6249		return
6250	}
6251	opspec := tf.OpSpec{
6252		Type: "RngReadAndSkip",
6253		Input: []tf.Input{
6254			resource, alg, delta,
6255		},
6256	}
6257	op := scope.AddOperation(opspec)
6258	return op.Output(0)
6259}
6260
6261// Advance the counter of a counter-based RNG.
6262//
6263// The state of the RNG after
6264// `rng_skip(n)` will be the same as that after `stateful_uniform([n])`
6265// (or any other distribution). The actual increment added to the
6266// counter is an unspecified implementation detail.
6267//
6268// Arguments:
6269//	resource: The handle of the resource variable that stores the state of the RNG.
6270//	algorithm: The RNG algorithm.
6271//	delta: The amount of advancement.
6272//
6273// Returns the created operation.
6274func RngSkip(scope *Scope, resource tf.Output, algorithm tf.Output, delta tf.Output) (o *tf.Operation) {
6275	if scope.Err() != nil {
6276		return
6277	}
6278	opspec := tf.OpSpec{
6279		Type: "RngSkip",
6280		Input: []tf.Input{
6281			resource, algorithm, delta,
6282		},
6283	}
6284	return scope.AddOperation(opspec)
6285}
6286
6287// StatefulTruncatedNormalAttr is an optional argument to StatefulTruncatedNormal.
6288type StatefulTruncatedNormalAttr func(optionalAttr)
6289
6290// StatefulTruncatedNormalDtype sets the optional dtype attribute to value.
6291//
6292// value: The type of the output.
6293// If not specified, defaults to DT_FLOAT
6294func StatefulTruncatedNormalDtype(value tf.DataType) StatefulTruncatedNormalAttr {
6295	return func(m optionalAttr) {
6296		m["dtype"] = value
6297	}
6298}
6299
6300// Outputs random values from a truncated normal distribution.
6301//
6302// The generated values follow a normal distribution with mean 0 and standard
6303// deviation 1, except that values whose magnitude is more than 2 standard
6304// deviations from the mean are dropped and re-picked.
6305//
6306// Arguments:
6307//	resource: The handle of the resource variable that stores the state of the RNG.
6308//	algorithm: The RNG algorithm.
6309//	shape: The shape of the output tensor.
6310//
6311// Returns Random values with specified shape.
6312func StatefulTruncatedNormal(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulTruncatedNormalAttr) (output tf.Output) {
6313	if scope.Err() != nil {
6314		return
6315	}
6316	attrs := map[string]interface{}{}
6317	for _, a := range optional {
6318		a(attrs)
6319	}
6320	opspec := tf.OpSpec{
6321		Type: "StatefulTruncatedNormal",
6322		Input: []tf.Input{
6323			resource, algorithm, shape,
6324		},
6325		Attrs: attrs,
6326	}
6327	op := scope.AddOperation(opspec)
6328	return op.Output(0)
6329}
6330
6331// StatefulStandardNormalV2Attr is an optional argument to StatefulStandardNormalV2.
6332type StatefulStandardNormalV2Attr func(optionalAttr)
6333
6334// StatefulStandardNormalV2Dtype sets the optional dtype attribute to value.
6335//
6336// value: The type of the output.
6337// If not specified, defaults to DT_FLOAT
6338func StatefulStandardNormalV2Dtype(value tf.DataType) StatefulStandardNormalV2Attr {
6339	return func(m optionalAttr) {
6340		m["dtype"] = value
6341	}
6342}
6343
6344// Outputs random values from a normal distribution.
6345//
6346// The generated values will have mean 0 and standard deviation 1.
6347//
6348// Arguments:
6349//	resource: The handle of the resource variable that stores the state of the RNG.
6350//	algorithm: The RNG algorithm.
6351//	shape: The shape of the output tensor.
6352//
6353// Returns A tensor of the specified shape filled with random normal values.
6354func StatefulStandardNormalV2(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulStandardNormalV2Attr) (output tf.Output) {
6355	if scope.Err() != nil {
6356		return
6357	}
6358	attrs := map[string]interface{}{}
6359	for _, a := range optional {
6360		a(attrs)
6361	}
6362	opspec := tf.OpSpec{
6363		Type: "StatefulStandardNormalV2",
6364		Input: []tf.Input{
6365			resource, algorithm, shape,
6366		},
6367		Attrs: attrs,
6368	}
6369	op := scope.AddOperation(opspec)
6370	return op.Output(0)
6371}
6372
6373// StatefulUniformFullIntAttr is an optional argument to StatefulUniformFullInt.
6374type StatefulUniformFullIntAttr func(optionalAttr)
6375
6376// StatefulUniformFullIntDtype sets the optional dtype attribute to value.
6377//
6378// value: The type of the output.
6379// If not specified, defaults to DT_UINT64
6380func StatefulUniformFullIntDtype(value tf.DataType) StatefulUniformFullIntAttr {
6381	return func(m optionalAttr) {
6382		m["dtype"] = value
6383	}
6384}
6385
6386// Outputs random integers from a uniform distribution.
6387//
6388// The generated values are uniform integers covering the whole range of `dtype`.
6389//
6390// Arguments:
6391//	resource: The handle of the resource variable that stores the state of the RNG.
6392//	algorithm: The RNG algorithm.
6393//	shape: The shape of the output tensor.
6394//
6395// Returns Random values with specified shape.
6396func StatefulUniformFullInt(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulUniformFullIntAttr) (output tf.Output) {
6397	if scope.Err() != nil {
6398		return
6399	}
6400	attrs := map[string]interface{}{}
6401	for _, a := range optional {
6402		a(attrs)
6403	}
6404	opspec := tf.OpSpec{
6405		Type: "StatefulUniformFullInt",
6406		Input: []tf.Input{
6407			resource, algorithm, shape,
6408		},
6409		Attrs: attrs,
6410	}
6411	op := scope.AddOperation(opspec)
6412	return op.Output(0)
6413}
6414
6415// Computes the LSTM cell backward propagation for the entire time sequence.
6416//
// This implementation is to be used in conjunction with BlockLSTMV2.
6418//
6419// Arguments:
6420//	seq_len_max: Maximum time length actually used by this input. Outputs are padded
6421// with zeros beyond this length.
6422//	x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
6423//	cs_prev: Value of the initial cell state.
6424//	h_prev: Initial output of cell (to be used for peephole).
6425//	w: The weight matrix.
6426//	wci: The weight matrix for input gate peephole connection.
6427//	wcf: The weight matrix for forget gate peephole connection.
6428//	wco: The weight matrix for output gate peephole connection.
6429//	b: The bias vector.
6430//	i: The input gate over the whole time sequence.
6431//	cs: The cell state before the tanh over the whole time sequence.
6432//	f: The forget gate over the whole time sequence.
6433//	o: The output gate over the whole time sequence.
6434//	ci: The cell input over the whole time sequence.
6435//	co: The cell after the tanh over the whole time sequence.
6436//	h: The output h vector over the whole time sequence.
6437//	cs_grad: The current gradient of cs.
6438//	h_grad: The gradient of h vector.
6439//	use_peephole: Whether to use peephole weights.
6440//
6441// Returns:
6442//	x_grad: The gradient of x to be back-propped.
6443//	cs_prev_grad: The gradient of cs_prev to be back-propped.
6444//	h_prev_grad: The gradient of h_prev to be back-propped.
6445//	w_grad: The gradient for w to be back-propped.
6446//	wci_grad: The gradient for wci to be back-propped.
6447//	wcf_grad: The gradient for wcf to be back-propped.
6448//	wco_grad: The gradient for wco to be back-propped.
//	b_grad: The gradient for b to be back-propped.
6450func BlockLSTMGradV2(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output, cs_grad tf.Output, h_grad tf.Output, use_peephole bool) (x_grad tf.Output, cs_prev_grad tf.Output, h_prev_grad tf.Output, w_grad tf.Output, wci_grad tf.Output, wcf_grad tf.Output, wco_grad tf.Output, b_grad tf.Output) {
6451	if scope.Err() != nil {
6452		return
6453	}
6454	attrs := map[string]interface{}{"use_peephole": use_peephole}
6455	opspec := tf.OpSpec{
6456		Type: "BlockLSTMGradV2",
6457		Input: []tf.Input{
6458			seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad,
6459		},
6460		Attrs: attrs,
6461	}
6462	op := scope.AddOperation(opspec)
6463	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6), op.Output(7)
6464}
6465
6466// Computes the LSTM cell backward propagation for the entire time sequence.
6467//
// This implementation is to be used in conjunction with BlockLSTM.
6469//
6470// Arguments:
6471//	seq_len_max: Maximum time length actually used by this input. Outputs are padded
6472// with zeros beyond this length.
6473//	x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
6474//	cs_prev: Value of the initial cell state.
6475//	h_prev: Initial output of cell (to be used for peephole).
6476//	w: The weight matrix.
6477//	wci: The weight matrix for input gate peephole connection.
6478//	wcf: The weight matrix for forget gate peephole connection.
6479//	wco: The weight matrix for output gate peephole connection.
6480//	b: The bias vector.
6481//	i: The input gate over the whole time sequence.
6482//	cs: The cell state before the tanh over the whole time sequence.
6483//	f: The forget gate over the whole time sequence.
6484//	o: The output gate over the whole time sequence.
6485//	ci: The cell input over the whole time sequence.
6486//	co: The cell after the tanh over the whole time sequence.
6487//	h: The output h vector over the whole time sequence.
6488//	cs_grad: The current gradient of cs.
6489//	h_grad: The gradient of h vector.
6490//	use_peephole: Whether to use peephole weights.
6491//
6492// Returns:
6493//	x_grad: The gradient of x to be back-propped.
6494//	cs_prev_grad: The gradient of cs_prev to be back-propped.
6495//	h_prev_grad: The gradient of h_prev to be back-propped.
6496//	w_grad: The gradient for w to be back-propped.
6497//	wci_grad: The gradient for wci to be back-propped.
6498//	wcf_grad: The gradient for wcf to be back-propped.
6499//	wco_grad: The gradient for wco to be back-propped.
//	b_grad: The gradient for b to be back-propped.
6501func BlockLSTMGrad(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output, cs_grad tf.Output, h_grad tf.Output, use_peephole bool) (x_grad tf.Output, cs_prev_grad tf.Output, h_prev_grad tf.Output, w_grad tf.Output, wci_grad tf.Output, wcf_grad tf.Output, wco_grad tf.Output, b_grad tf.Output) {
6502	if scope.Err() != nil {
6503		return
6504	}
6505	attrs := map[string]interface{}{"use_peephole": use_peephole}
6506	opspec := tf.OpSpec{
6507		Type: "BlockLSTMGrad",
6508		Input: []tf.Input{
6509			seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad,
6510		},
6511		Attrs: attrs,
6512	}
6513	op := scope.AddOperation(opspec)
6514	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6), op.Output(7)
6515}
6516
6517// LSTMBlockCellAttr is an optional argument to LSTMBlockCell.
6518type LSTMBlockCellAttr func(optionalAttr)
6519
6520// LSTMBlockCellForgetBias sets the optional forget_bias attribute to value.
6521//
6522// value: The forget gate bias.
6523// If not specified, defaults to 1
6524func LSTMBlockCellForgetBias(value float32) LSTMBlockCellAttr {
6525	return func(m optionalAttr) {
6526		m["forget_bias"] = value
6527	}
6528}
6529
6530// LSTMBlockCellCellClip sets the optional cell_clip attribute to value.
6531//
6532// value: Value to clip the 'cs' value to.
6533// If not specified, defaults to 3
6534func LSTMBlockCellCellClip(value float32) LSTMBlockCellAttr {
6535	return func(m optionalAttr) {
6536		m["cell_clip"] = value
6537	}
6538}
6539
6540// LSTMBlockCellUsePeephole sets the optional use_peephole attribute to value.
6541//
6542// value: Whether to use peephole weights.
6543// If not specified, defaults to false
6544func LSTMBlockCellUsePeephole(value bool) LSTMBlockCellAttr {
6545	return func(m optionalAttr) {
6546		m["use_peephole"] = value
6547	}
6548}
6549
6550// Computes the LSTM cell forward propagation for 1 time step.
6551//
6552// This implementation uses 1 weight matrix and 1 bias vector, and there's an
6553// optional peephole connection.
6554//
6555// This kernel op implements the following mathematical equations:
6556//
6557// ```python
6558// xh = [x, h_prev]
6559// [i, f, ci, o] = xh * w + b
6560// f = f + forget_bias
6561//
6562// if not use_peephole:
6563//   wci = wcf = wco = 0
6564//
6565// i = sigmoid(cs_prev * wci + i)
6566// f = sigmoid(cs_prev * wcf + f)
6567// ci = tanh(ci)
6568//
6569// cs = ci .* i + cs_prev .* f
6570// cs = clip(cs, cell_clip)
6571//
6572// o = sigmoid(cs * wco + o)
6573// co = tanh(cs)
6574// h = co .* o
6575// ```
6576//
6577// Arguments:
6578//	x: The input to the LSTM cell, shape (batch_size, num_inputs).
6579//	cs_prev: Value of the cell state at previous time step.
6580//	h_prev: Output of the previous cell at previous time step.
6581//	w: The weight matrix.
6582//	wci: The weight matrix for input gate peephole connection.
6583//	wcf: The weight matrix for forget gate peephole connection.
6584//	wco: The weight matrix for output gate peephole connection.
6585//	b: The bias vector.
6586//
6587// Returns:
6588//	i: The input gate.
6589//	cs: The cell state before the tanh.
6590//	f: The forget gate.
6591//	o: The output gate.
6592//	ci: The cell input.
6593//	co: The cell after the tanh.
6594//	h: The output h vector.
6595func LSTMBlockCell(scope *Scope, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, optional ...LSTMBlockCellAttr) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output) {
6596	if scope.Err() != nil {
6597		return
6598	}
6599	attrs := map[string]interface{}{}
6600	for _, a := range optional {
6601		a(attrs)
6602	}
6603	opspec := tf.OpSpec{
6604		Type: "LSTMBlockCell",
6605		Input: []tf.Input{
6606			x, cs_prev, h_prev, w, wci, wcf, wco, b,
6607		},
6608		Attrs: attrs,
6609	}
6610	op := scope.AddOperation(opspec)
6611	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
6612}
6613
6614// BlockLSTMAttr is an optional argument to BlockLSTM.
6615type BlockLSTMAttr func(optionalAttr)
6616
6617// BlockLSTMForgetBias sets the optional forget_bias attribute to value.
6618//
6619// value: The forget gate bias.
6620// If not specified, defaults to 1
6621func BlockLSTMForgetBias(value float32) BlockLSTMAttr {
6622	return func(m optionalAttr) {
6623		m["forget_bias"] = value
6624	}
6625}
6626
6627// BlockLSTMCellClip sets the optional cell_clip attribute to value.
6628//
6629// value: Value to clip the 'cs' value to.
6630// If not specified, defaults to 3
6631func BlockLSTMCellClip(value float32) BlockLSTMAttr {
6632	return func(m optionalAttr) {
6633		m["cell_clip"] = value
6634	}
6635}
6636
6637// BlockLSTMUsePeephole sets the optional use_peephole attribute to value.
6638//
6639// value: Whether to use peephole weights.
6640// If not specified, defaults to false
6641func BlockLSTMUsePeephole(value bool) BlockLSTMAttr {
6642	return func(m optionalAttr) {
6643		m["use_peephole"] = value
6644	}
6645}
6646
6647// Computes the LSTM cell forward propagation for all the time steps.
6648//
6649// This is equivalent to applying LSTMBlockCell in a loop, like so:
6650//
6651// ```python
6652// for x1 in unpack(x):
//   i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlockCell(
6654//     x1, cs_prev, h_prev, w, wci, wcf, wco, b)
6655//   cs_prev = cs1
6656//   h_prev = h1
6657//   i.append(i1)
6658//   cs.append(cs1)
6659//   f.append(f1)
6660//   o.append(o1)
6661//   ci.append(ci1)
6662//   co.append(co1)
6663//   h.append(h1)
// return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
6665// ```
6666//
6667// Arguments:
6668//	seq_len_max: Maximum time length actually used by this input. Outputs are padded
6669// with zeros beyond this length.
6670//	x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
6671//	cs_prev: Value of the initial cell state.
6672//	h_prev: Initial output of cell (to be used for peephole).
6673//	w: The weight matrix.
6674//	wci: The weight matrix for input gate peephole connection.
6675//	wcf: The weight matrix for forget gate peephole connection.
6676//	wco: The weight matrix for output gate peephole connection.
6677//	b: The bias vector.
6678//
6679// Returns:
6680//	i: The input gate over the whole time sequence.
6681//	cs: The cell state before the tanh over the whole time sequence.
6682//	f: The forget gate over the whole time sequence.
6683//	o: The output gate over the whole time sequence.
6684//	ci: The cell input over the whole time sequence.
6685//	co: The cell after the tanh over the whole time sequence.
6686//	h: The output h vector over the whole time sequence.
6687func BlockLSTM(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, optional ...BlockLSTMAttr) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output) {
6688	if scope.Err() != nil {
6689		return
6690	}
6691	attrs := map[string]interface{}{}
6692	for _, a := range optional {
6693		a(attrs)
6694	}
6695	opspec := tf.OpSpec{
6696		Type: "BlockLSTM",
6697		Input: []tf.Input{
6698			seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b,
6699		},
6700		Attrs: attrs,
6701	}
6702	op := scope.AddOperation(opspec)
6703	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
6704}
6705
6706// Computes the GRU cell forward propagation for 1 time step.
6707//
6708// Args
6709//     x: Input to the GRU cell.
6710//     h_prev: State input from the previous GRU cell.
6711//     w_ru: Weight matrix for the reset and update gate.
6712//     w_c: Weight matrix for the cell connection gate.
6713//     b_ru: Bias vector for the reset and update gate.
6714//     b_c: Bias vector for the cell connection gate.
6715//
6716// Returns
6717//     r: Output of the reset gate.
6718//     u: Output of the update gate.
6719//     c: Output of the cell connection gate.
6720//     h: Current state of the GRU cell.
6721//
6722// Note on notation of the variables:
6723//
6724// Concatenation of a and b is represented by a_b
// Element-wise (Hadamard) product of a and b is represented by ab
// The same product is also written a \circ b
6727// Matrix multiplication is represented by *
6728//
// Biases are initialized with:
6730// `b_ru` - constant_initializer(1.0)
6731// `b_c` - constant_initializer(0.0)
6732//
6733// This kernel op implements the following mathematical equations:
6734//
6735// ```
6736// x_h_prev = [x, h_prev]
6737//
6738// [r_bar u_bar] = x_h_prev * w_ru + b_ru
6739//
6740// r = sigmoid(r_bar)
6741// u = sigmoid(u_bar)
6742//
6743// h_prevr = h_prev \circ r
6744//
6745// x_h_prevr = [x h_prevr]
6746//
6747// c_bar = x_h_prevr * w_c + b_c
6748// c = tanh(c_bar)
6749//
6750// h = (1-u) \circ c + u \circ h_prev
6751// ```
6752func GRUBlockCell(scope *Scope, x tf.Output, h_prev tf.Output, w_ru tf.Output, w_c tf.Output, b_ru tf.Output, b_c tf.Output) (r tf.Output, u tf.Output, c tf.Output, h tf.Output) {
6753	if scope.Err() != nil {
6754		return
6755	}
6756	opspec := tf.OpSpec{
6757		Type: "GRUBlockCell",
6758		Input: []tf.Input{
6759			x, h_prev, w_ru, w_c, b_ru, b_c,
6760		},
6761	}
6762	op := scope.AddOperation(opspec)
6763	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
6764}
6765
6766// Deserialize and concatenate `SparseTensors` from a serialized minibatch.
6767//
6768// The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
6769// `N` is the minibatch size and the rows correspond to packed outputs of
6770// `SerializeSparse`.  The ranks of the original `SparseTensor` objects
6771// must all match.  When the final `SparseTensor` is created, it has rank one
6772// higher than the ranks of the incoming `SparseTensor` objects
6773// (they have been concatenated along a new row dimension).
6774//
6775// The output `SparseTensor` object's shape values for all dimensions but the
6776// first are the max across the input `SparseTensor` objects' shape values
6777// for the corresponding dimensions.  Its first shape value is `N`, the minibatch
6778// size.
6779//
6780// The input `SparseTensor` objects' indices are assumed ordered in
6781// standard lexicographic order.  If this is not the case, after this
6782// step run `SparseReorder` to restore index ordering.
6783//
6784// For example, if the serialized input is a `[2 x 3]` matrix representing two
6785// original `SparseTensor` objects:
6786//
6787//     index = [ 0]
6788//             [10]
6789//             [20]
6790//     values = [1, 2, 3]
6791//     shape = [50]
6792//
6793// and
6794//
6795//     index = [ 2]
6796//             [10]
6797//     values = [4, 5]
6798//     shape = [30]
6799//
6800// then the final deserialized `SparseTensor` will be:
6801//
6802//     index = [0  0]
6803//             [0 10]
6804//             [0 20]
6805//             [1  2]
6806//             [1 10]
6807//     values = [1, 2, 3, 4, 5]
6808//     shape = [2 50]
6809//
6810// Arguments:
6811//	serialized_sparse: 2-D, The `N` serialized `SparseTensor` objects.
6812// Must have 3 columns.
6813//	dtype: The `dtype` of the serialized `SparseTensor` objects.
6814func DeserializeManySparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
6815	if scope.Err() != nil {
6816		return
6817	}
6818	attrs := map[string]interface{}{"dtype": dtype}
6819	opspec := tf.OpSpec{
6820		Type: "DeserializeManySparse",
6821		Input: []tf.Input{
6822			serialized_sparse,
6823		},
6824		Attrs: attrs,
6825	}
6826	op := scope.AddOperation(opspec)
6827	return op.Output(0), op.Output(1), op.Output(2)
6828}
6829
6830// PrelinearizeTupleAttr is an optional argument to PrelinearizeTuple.
6831type PrelinearizeTupleAttr func(optionalAttr)
6832
6833// PrelinearizeTupleLayouts sets the optional layouts attribute to value.
6834//
6835// value: A vector holding the requested layout in minor-to-major sequence for all the
6836// tuple shapes in the order the shapes appear in the "shapes" input. The layout
6837// elements for a sub-shape can be set to -1 in which case the corresponding layout
6838// will be computed by the infeed operation.
6839// If not specified, defaults to {}
6840func PrelinearizeTupleLayouts(value []int64) PrelinearizeTupleAttr {
6841	return func(m optionalAttr) {
6842		m["layouts"] = value
6843	}
6844}
6845
6846// An op which linearizes multiple Tensor values to an opaque variant tensor.
6847//
6848// Arguments:
6849//	inputs: A list of tensors that will be provided using the infeed mechanism.
6850//	shapes: The shapes of each tensor in `inputs`.
6851func PrelinearizeTuple(scope *Scope, inputs []tf.Output, shapes []tf.Shape, optional ...PrelinearizeTupleAttr) (output tf.Output) {
6852	if scope.Err() != nil {
6853		return
6854	}
6855	attrs := map[string]interface{}{"shapes": shapes}
6856	for _, a := range optional {
6857		a(attrs)
6858	}
6859	opspec := tf.OpSpec{
6860		Type: "PrelinearizeTuple",
6861		Input: []tf.Input{
6862			tf.OutputList(inputs),
6863		},
6864		Attrs: attrs,
6865	}
6866	op := scope.AddOperation(opspec)
6867	return op.Output(0)
6868}
6869
6870// Op that executes a program with optional in-place variable updates.
6871//
6872// It (optionally) reads device variables, loads and executes a TPU program on a
6873// TPU device, and then (optionally) in-place updates variables using the program
6874// outputs, as specified in attributes device_var_reads_indices (program input
6875// indices from directly reading variables) and device_var_updates_indices (program
6876// output indices used to update variables, -1 means no-update/read-only). Such
// program outputs consumed by these variables will not appear in the op
6878// output. For the internal use of the distributed TPU compiler.
6879func TPUExecuteAndUpdateVariables(scope *Scope, args []tf.Output, key tf.Output, Tresults []tf.DataType, device_var_reads_indices []int64, device_var_updates_indices []int64) (results []tf.Output) {
6880	if scope.Err() != nil {
6881		return
6882	}
6883	attrs := map[string]interface{}{"Tresults": Tresults, "device_var_reads_indices": device_var_reads_indices, "device_var_updates_indices": device_var_updates_indices}
6884	opspec := tf.OpSpec{
6885		Type: "TPUExecuteAndUpdateVariables",
6886		Input: []tf.Input{
6887			tf.OutputList(args), key,
6888		},
6889		Attrs: attrs,
6890	}
6891	op := scope.AddOperation(opspec)
6892	if scope.Err() != nil {
6893		return
6894	}
6895	var idx int
6896	var err error
6897	if results, idx, err = makeOutputList(op, idx, "results"); err != nil {
6898		scope.UpdateErr("TPUExecuteAndUpdateVariables", err)
6899		return
6900	}
6901	return results
6902}
6903
6904// StatelessRandomUniformFullIntAttr is an optional argument to StatelessRandomUniformFullInt.
6905type StatelessRandomUniformFullIntAttr func(optionalAttr)
6906
6907// StatelessRandomUniformFullIntDtype sets the optional dtype attribute to value.
6908//
6909// value: The type of the output.
6910// If not specified, defaults to DT_UINT64
6911func StatelessRandomUniformFullIntDtype(value tf.DataType) StatelessRandomUniformFullIntAttr {
6912	return func(m optionalAttr) {
6913		m["dtype"] = value
6914	}
6915}
6916
6917// Outputs deterministic pseudorandom random integers from a uniform distribution.
6918//
6919// The generated values are uniform integers covering the whole range of `dtype`.
6920//
6921// The outputs are a deterministic function of `shape` and `seed`.
6922//
6923// Arguments:
6924//	shape: The shape of the output tensor.
6925//	seed: 2 seeds (shape [2]).
6926//
6927// Returns Random values with specified shape.
6928func StatelessRandomUniformFullInt(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomUniformFullIntAttr) (output tf.Output) {
6929	if scope.Err() != nil {
6930		return
6931	}
6932	attrs := map[string]interface{}{}
6933	for _, a := range optional {
6934		a(attrs)
6935	}
6936	opspec := tf.OpSpec{
6937		Type: "StatelessRandomUniformFullInt",
6938		Input: []tf.Input{
6939			shape, seed,
6940		},
6941		Attrs: attrs,
6942	}
6943	op := scope.AddOperation(opspec)
6944	return op.Output(0)
6945}
6946
6947// Calculates the gradient of the SparseMatrixSoftmax op.
6948//
6949// Arguments:
6950//	softmax: A CSRSparseMatrix.
6951//	grad_softmax: The gradient of `softmax`.
6952//
6953//
6954// Returns The output gradient.
6955func SparseMatrixSoftmaxGrad(scope *Scope, softmax tf.Output, grad_softmax tf.Output, type_ tf.DataType) (gradient tf.Output) {
6956	if scope.Err() != nil {
6957		return
6958	}
6959	attrs := map[string]interface{}{"type": type_}
6960	opspec := tf.OpSpec{
6961		Type: "SparseMatrixSoftmaxGrad",
6962		Input: []tf.Input{
6963			softmax, grad_softmax,
6964		},
6965		Attrs: attrs,
6966	}
6967	op := scope.AddOperation(opspec)
6968	return op.Output(0)
6969}
6970
6971// Computes the GRU cell back-propagation for 1 time step.
6972//
6973// Args
6974//     x: Input to the GRU cell.
6975//     h_prev: State input from the previous GRU cell.
6976//     w_ru: Weight matrix for the reset and update gate.
6977//     w_c: Weight matrix for the cell connection gate.
6978//     b_ru: Bias vector for the reset and update gate.
6979//     b_c: Bias vector for the cell connection gate.
6980//     r: Output of the reset gate.
6981//     u: Output of the update gate.
6982//     c: Output of the cell connection gate.
//     d_h: Gradients of h_new with respect to the objective function.
//
// Returns
//     d_x: Gradients of x with respect to the objective function.
//     d_h_prev: Gradients of h_prev with respect to the objective function.
//     d_c_bar: Gradients of c_bar with respect to the objective function.
//     d_r_bar_u_bar: Gradients of r_bar & u_bar with respect to the objective function.
6990//
6991// This kernel op implements the following mathematical equations:
6992//
6993// Note on notation of the variables:
6994//
6995// Concatenation of a and b is represented by a_b
// Element-wise (Hadamard) product of a and b is represented by ab
// The same product is also written a \circ b
6998// Matrix multiplication is represented by *
6999//
7000// Additional notes for clarity:
7001//
7002// `w_ru` can be segmented into 4 different matrices.
7003// ```
7004// w_ru = [w_r_x w_u_x
7005//         w_r_h_prev w_u_h_prev]
7006// ```
7007// Similarly, `w_c` can be segmented into 2 different matrices.
7008// ```
7009// w_c = [w_c_x w_c_h_prevr]
7010// ```
7011// Same goes for biases.
7012// ```
7013// b_ru = [b_ru_x b_ru_h]
7014// b_c = [b_c_x b_c_h]
7015// ```
7016// Another note on notation:
7017// ```
7018// d_x = d_x_component_1 + d_x_component_2
7019//
// where d_x_component_1 = d_r_bar * w_r_x^T + d_u_bar * w_u_x^T
7021// and d_x_component_2 = d_c_bar * w_c_x^T
7022//
7023// d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
// where d_h_prev_component_1 = d_r_bar * w_r_h_prev^T + d_u_bar * w_u_h_prev^T
7025// ```
7026//
7027// Mathematics behind the Gradients below:
7028// ```
7029// d_c_bar = d_h \circ (1-u) \circ (1-c \circ c)
7030// d_u_bar = d_h \circ (h-c) \circ u \circ (1-u)
7031//
7032// d_r_bar_u_bar = [d_r_bar d_u_bar]
7033//
7034// [d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T
7035//
7036// [d_x_component_2 d_h_prevr] = d_c_bar * w_c^T
7037//
7038// d_x = d_x_component_1 + d_x_component_2
7039//
// d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
7041// ```
// The calculation below is performed in the Python wrapper for the gradients
// (not in the gradient kernel):
7044// ```
// d_w_ru = x_h_prev^T * d_r_bar_u_bar
//
// d_w_c = x_h_prevr^T * d_c_bar
7048//
7049// d_b_ru = sum of d_r_bar_u_bar along axis = 0
7050//
7051// d_b_c = sum of d_c_bar along axis = 0
7052// ```
7053func GRUBlockCellGrad(scope *Scope, x tf.Output, h_prev tf.Output, w_ru tf.Output, w_c tf.Output, b_ru tf.Output, b_c tf.Output, r tf.Output, u tf.Output, c tf.Output, d_h tf.Output) (d_x tf.Output, d_h_prev tf.Output, d_c_bar tf.Output, d_r_bar_u_bar tf.Output) {
7054	if scope.Err() != nil {
7055		return
7056	}
7057	opspec := tf.OpSpec{
7058		Type: "GRUBlockCellGrad",
7059		Input: []tf.Input{
7060			x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h,
7061		},
7062	}
7063	op := scope.AddOperation(opspec)
7064	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
7065}
7066
7067// RandomPoissonV2Attr is an optional argument to RandomPoissonV2.
7068type RandomPoissonV2Attr func(optionalAttr)
7069
7070// RandomPoissonV2Seed sets the optional seed attribute to value.
7071//
7072// value: If either `seed` or `seed2` are set to be non-zero, the random number
7073// generator is seeded by the given seed.  Otherwise, it is seeded by a
7074// random seed.
7075// If not specified, defaults to 0
7076func RandomPoissonV2Seed(value int64) RandomPoissonV2Attr {
7077	return func(m optionalAttr) {
7078		m["seed"] = value
7079	}
7080}
7081
7082// RandomPoissonV2Seed2 sets the optional seed2 attribute to value.
7083//
7084// value: A second seed to avoid seed collision.
7085// If not specified, defaults to 0
7086func RandomPoissonV2Seed2(value int64) RandomPoissonV2Attr {
7087	return func(m optionalAttr) {
7088		m["seed2"] = value
7089	}
7090}
7091
7092// RandomPoissonV2Dtype sets the optional dtype attribute to value.
7093// If not specified, defaults to DT_INT64
7094func RandomPoissonV2Dtype(value tf.DataType) RandomPoissonV2Attr {
7095	return func(m optionalAttr) {
7096		m["dtype"] = value
7097	}
7098}
7099
7100// Outputs random values from the Poisson distribution(s) described by rate.
7101//
7102// This op uses two algorithms, depending on rate. If rate >= 10, then
7103// the algorithm by Hormann is used to acquire samples via
7104// transformation-rejection.
7105// See http://www.sciencedirect.com/science/article/pii/0167668793909974.
7106//
7107// Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
7108// random variables.
7109// See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
7110// Programming, Volume 2. Addison Wesley
7111//
7112// Arguments:
7113//	shape: 1-D integer tensor. Shape of independent samples to draw from each
7114// distribution described by the shape parameters given in rate.
7115//	rate: A tensor in which each scalar is a "rate" parameter describing the
// associated Poisson distribution.
7117//
7118// Returns A tensor with shape `shape + shape(rate)`. Each slice
7119// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
7120// `rate[i0, i1, ...iN]`.
7121func RandomPoissonV2(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonV2Attr) (output tf.Output) {
7122	if scope.Err() != nil {
7123		return
7124	}
7125	attrs := map[string]interface{}{}
7126	for _, a := range optional {
7127		a(attrs)
7128	}
7129	opspec := tf.OpSpec{
7130		Type: "RandomPoissonV2",
7131		Input: []tf.Input{
7132			shape, rate,
7133		},
7134		Attrs: attrs,
7135	}
7136	op := scope.AddOperation(opspec)
7137	return op.Output(0)
7138}
7139
7140// LoadTPUEmbeddingCenteredRMSPropParametersAttr is an optional argument to LoadTPUEmbeddingCenteredRMSPropParameters.
7141type LoadTPUEmbeddingCenteredRMSPropParametersAttr func(optionalAttr)
7142
7143// LoadTPUEmbeddingCenteredRMSPropParametersTableId sets the optional table_id attribute to value.
7144// If not specified, defaults to -1
7145func LoadTPUEmbeddingCenteredRMSPropParametersTableId(value int64) LoadTPUEmbeddingCenteredRMSPropParametersAttr {
7146	return func(m optionalAttr) {
7147		m["table_id"] = value
7148	}
7149}
7150
7151// LoadTPUEmbeddingCenteredRMSPropParametersTableName sets the optional table_name attribute to value.
7152// If not specified, defaults to ""
7153func LoadTPUEmbeddingCenteredRMSPropParametersTableName(value string) LoadTPUEmbeddingCenteredRMSPropParametersAttr {
7154	return func(m optionalAttr) {
7155		m["table_name"] = value
7156	}
7157}
7158
7159// LoadTPUEmbeddingCenteredRMSPropParametersConfig sets the optional config attribute to value.
7160// If not specified, defaults to ""
7161func LoadTPUEmbeddingCenteredRMSPropParametersConfig(value string) LoadTPUEmbeddingCenteredRMSPropParametersAttr {
7162	return func(m optionalAttr) {
7163		m["config"] = value
7164	}
7165}
7166
7167// Load centered RMSProp embedding parameters.
7168//
7169// An op that loads optimization parameters into HBM for embedding. Must be
7170// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
7171// embedding table configuration. For example, this op is used to install
7172// parameters that are loaded from a checkpoint before a training loop is
7173// executed.
7174//
7175// Arguments:
7176//	parameters: Value of parameters used in the centered RMSProp optimization algorithm.
7177//	ms: Value of ms used in the centered RMSProp optimization algorithm.
7178//	mom: Value of mom used in the centered RMSProp optimization algorithm.
7179//	mg: Value of mg used in the centered RMSProp optimization algorithm.
7180//
7181//
7182//
7183// Returns the created operation.
7184func LoadTPUEmbeddingCenteredRMSPropParameters(scope *Scope, parameters tf.Output, ms tf.Output, mom tf.Output, mg tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingCenteredRMSPropParametersAttr) (o *tf.Operation) {
7185	if scope.Err() != nil {
7186		return
7187	}
7188	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
7189	for _, a := range optional {
7190		a(attrs)
7191	}
7192	opspec := tf.OpSpec{
7193		Type: "LoadTPUEmbeddingCenteredRMSPropParameters",
7194		Input: []tf.Input{
7195			parameters, ms, mom, mg,
7196		},
7197		Attrs: attrs,
7198	}
7199	return scope.AddOperation(opspec)
7200}
7201
7202// RandomPoissonAttr is an optional argument to RandomPoisson.
7203type RandomPoissonAttr func(optionalAttr)
7204
7205// RandomPoissonSeed sets the optional seed attribute to value.
7206// If not specified, defaults to 0
7207func RandomPoissonSeed(value int64) RandomPoissonAttr {
7208	return func(m optionalAttr) {
7209		m["seed"] = value
7210	}
7211}
7212
7213// RandomPoissonSeed2 sets the optional seed2 attribute to value.
7214// If not specified, defaults to 0
7215func RandomPoissonSeed2(value int64) RandomPoissonAttr {
7216	return func(m optionalAttr) {
7217		m["seed2"] = value
7218	}
7219}
7220
7221// Use RandomPoissonV2 instead.
7222//
7223// DEPRECATED at GraphDef version 25: Replaced by RandomPoissonV2
7224func RandomPoisson(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonAttr) (output tf.Output) {
7225	if scope.Err() != nil {
7226		return
7227	}
7228	attrs := map[string]interface{}{}
7229	for _, a := range optional {
7230		a(attrs)
7231	}
7232	opspec := tf.OpSpec{
7233		Type: "RandomPoisson",
7234		Input: []tf.Input{
7235			shape, rate,
7236		},
7237		Attrs: attrs,
7238	}
7239	op := scope.AddOperation(opspec)
7240	return op.Output(0)
7241}
7242
7243// Computes the derivative of a Gamma random sample w.r.t. `alpha`.
7244func RandomGammaGrad(scope *Scope, alpha tf.Output, sample tf.Output) (output tf.Output) {
7245	if scope.Err() != nil {
7246		return
7247	}
7248	opspec := tf.OpSpec{
7249		Type: "RandomGammaGrad",
7250		Input: []tf.Input{
7251			alpha, sample,
7252		},
7253	}
7254	op := scope.AddOperation(opspec)
7255	return op.Output(0)
7256}
7257
7258// NonDeterministicIntsAttr is an optional argument to NonDeterministicInts.
7259type NonDeterministicIntsAttr func(optionalAttr)
7260
7261// NonDeterministicIntsDtype sets the optional dtype attribute to value.
7262//
7263// value: The type of the output.
7264// If not specified, defaults to DT_INT64
7265func NonDeterministicIntsDtype(value tf.DataType) NonDeterministicIntsAttr {
7266	return func(m optionalAttr) {
7267		m["dtype"] = value
7268	}
7269}
7270
7271// Non-deterministically generates some integers.
7272//
7273// This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results.
7274//
7275// Arguments:
7276//	shape: The shape of the output tensor.
7277//
7278// Returns Non-deterministic integer values with specified shape.
7279func NonDeterministicInts(scope *Scope, shape tf.Output, optional ...NonDeterministicIntsAttr) (output tf.Output) {
7280	if scope.Err() != nil {
7281		return
7282	}
7283	attrs := map[string]interface{}{}
7284	for _, a := range optional {
7285		a(attrs)
7286	}
7287	opspec := tf.OpSpec{
7288		Type: "NonDeterministicInts",
7289		Input: []tf.Input{
7290			shape,
7291		},
7292		Attrs: attrs,
7293	}
7294	op := scope.AddOperation(opspec)
7295	return op.Output(0)
7296}
7297
7298// MultinomialAttr is an optional argument to Multinomial.
7299type MultinomialAttr func(optionalAttr)
7300
7301// MultinomialSeed sets the optional seed attribute to value.
7302//
7303// value: If either seed or seed2 is set to be non-zero, the internal random number
7304// generator is seeded by the given seed.  Otherwise, a random seed is used.
7305// If not specified, defaults to 0
7306func MultinomialSeed(value int64) MultinomialAttr {
7307	return func(m optionalAttr) {
7308		m["seed"] = value
7309	}
7310}
7311
7312// MultinomialSeed2 sets the optional seed2 attribute to value.
7313//
7314// value: A second seed to avoid seed collision.
7315// If not specified, defaults to 0
7316func MultinomialSeed2(value int64) MultinomialAttr {
7317	return func(m optionalAttr) {
7318		m["seed2"] = value
7319	}
7320}
7321
7322// MultinomialOutputDtype sets the optional output_dtype attribute to value.
7323// If not specified, defaults to DT_INT64
7324func MultinomialOutputDtype(value tf.DataType) MultinomialAttr {
7325	return func(m optionalAttr) {
7326		m["output_dtype"] = value
7327	}
7328}
7329
7330// Draws samples from a multinomial distribution.
7331//
7332// Arguments:
7333//	logits: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
7334// represents the unnormalized log probabilities for all classes.
7335//	num_samples: 0-D.  Number of independent samples to draw for each row slice.
7336//
7337// Returns 2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
7338// contains the drawn class labels with range `[0, num_classes)`.
7339func Multinomial(scope *Scope, logits tf.Output, num_samples tf.Output, optional ...MultinomialAttr) (output tf.Output) {
7340	if scope.Err() != nil {
7341		return
7342	}
7343	attrs := map[string]interface{}{}
7344	for _, a := range optional {
7345		a(attrs)
7346	}
7347	opspec := tf.OpSpec{
7348		Type: "Multinomial",
7349		Input: []tf.Input{
7350			logits, num_samples,
7351		},
7352		Attrs: attrs,
7353	}
7354	op := scope.AddOperation(opspec)
7355	return op.Output(0)
7356}
7357
7358// RandomShuffleAttr is an optional argument to RandomShuffle.
7359type RandomShuffleAttr func(optionalAttr)
7360
7361// RandomShuffleSeed sets the optional seed attribute to value.
7362//
7363// value: If either `seed` or `seed2` are set to be non-zero, the random number
7364// generator is seeded by the given seed.  Otherwise, it is seeded by a
7365// random seed.
7366// If not specified, defaults to 0
7367func RandomShuffleSeed(value int64) RandomShuffleAttr {
7368	return func(m optionalAttr) {
7369		m["seed"] = value
7370	}
7371}
7372
7373// RandomShuffleSeed2 sets the optional seed2 attribute to value.
7374//
7375// value: A second seed to avoid seed collision.
7376// If not specified, defaults to 0
7377func RandomShuffleSeed2(value int64) RandomShuffleAttr {
7378	return func(m optionalAttr) {
7379		m["seed2"] = value
7380	}
7381}
7382
7383// Randomly shuffles a tensor along its first dimension.
7384//
7385//   The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
7386//   to one and only one `output[i]`. For example, a mapping that might occur for a
7387//   3x2 tensor is:
7388//
7389// ```
7390// [[1, 2],       [[5, 6],
7391//  [3, 4],  ==>   [1, 2],
7392//  [5, 6]]        [3, 4]]
7393// ```
7394//
7395// Arguments:
7396//	value: The tensor to be shuffled.
7397//
7398// Returns A tensor of same shape and type as `value`, shuffled along its first
7399// dimension.
7400func RandomShuffle(scope *Scope, value tf.Output, optional ...RandomShuffleAttr) (output tf.Output) {
7401	if scope.Err() != nil {
7402		return
7403	}
7404	attrs := map[string]interface{}{}
7405	for _, a := range optional {
7406		a(attrs)
7407	}
7408	opspec := tf.OpSpec{
7409		Type: "RandomShuffle",
7410		Input: []tf.Input{
7411			value,
7412		},
7413		Attrs: attrs,
7414	}
7415	op := scope.AddOperation(opspec)
7416	return op.Output(0)
7417}
7418
7419// Returns max(x, y) element-wise.
7420//
// *NOTE*: `RiscMax` does not support broadcasting.
7422//
7423// Given two input tensors, the `tf.risc_max` operation computes the maximum for every element in the tensor.
7424//
7425func RiscMax(scope *Scope, x tf.Output, y tf.Output) (max tf.Output) {
7426	if scope.Err() != nil {
7427		return
7428	}
7429	opspec := tf.OpSpec{
7430		Type: "RiscMax",
7431		Input: []tf.Input{
7432			x, y,
7433		},
7434	}
7435	op := scope.AddOperation(opspec)
7436	return op.Output(0)
7437}

// RandomUniformIntAttr is an optional argument to RandomUniformInt.
type RandomUniformIntAttr func(optionalAttr)

// RandomUniformIntSeed sets the optional seed attribute to value.
//
// value: If either `seed` or `seed2` are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func RandomUniformIntSeed(value int64) RandomUniformIntAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// RandomUniformIntSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func RandomUniformIntSeed2(value int64) RandomUniformIntAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Outputs random integers from a uniform distribution.
//
// The generated values are uniform integers in the range `[minval, maxval)`.
// The lower bound `minval` is included in the range, while the upper bound
// `maxval` is excluded.
//
// The random integers are slightly biased unless `maxval - minval` is an exact
// power of two.  The bias is small for values of `maxval - minval` significantly
// smaller than the range of the output (either `2^32` or `2^64`).
//
// Arguments:
//	shape: The shape of the output tensor.
//	minval: 0-D.  Inclusive lower bound on the generated integers.
//	maxval: 0-D.  Exclusive upper bound on the generated integers.
//
// Returns A tensor of the specified shape filled with uniform random integers.
func RandomUniformInt(scope *Scope, shape tf.Output, minval tf.Output, maxval tf.Output, optional ...RandomUniformIntAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RandomUniformInt",
		Input: []tf.Input{
			shape, minval, maxval,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
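
// Example (an illustrative sketch only; values are hypothetical):
//
//	s := NewScope()
//	shape := Const(s, []int32{2, 3})
//	minval := Const(s, int64(0))
//	maxval := Const(s, int64(10))
//	r := RandomUniformInt(s, shape, minval, maxval, RandomUniformIntSeed(1))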

// Create a dense tensor from a ragged tensor, possibly altering its shape.
//
// The `ragged_to_dense` op creates a dense tensor from a list of row partition
// tensors, a value vector, and default values. If the shape is unspecified, the
// minimal shape required to contain all the elements in the ragged tensor (the
// natural shape) will be used. If some dimensions are left unspecified, then the
// size of the natural shape is used in that dimension.
//
// The default_value will be broadcast to the output shape. After that, the values
// from the ragged tensor overwrite the default values. Note that the default_value
// must have fewer dimensions than the value tensor.
//
// The row partition tensors are in the order of the dimensions.
// At present, the types can be:
// * "ROW_SPLITS": the row_splits tensor from the ragged tensor.
// * "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
// * "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it
//   is preceded by "FIRST_DIM_SIZE".
//
// Arguments:
//	shape: The desired shape of the output tensor. If left unspecified (empty),
// the minimal shape required to contain all the elements in the ragged tensor
// (the natural shape) will be used. If some dimensions are left unspecified, then
// the size of the natural shape is used in that dimension.
//
// Note that dense dimensions cannot be modified by the shape argument. Trying to
// change the size of a dense dimension will cause the op to fail.
// Examples:
// natural shape: [4, 5, 6]
// shape: -1
// output shape: [4, 5, 6]
//
// natural shape: [4, 5, 6]
// shape: [3, -1, 2]
// output shape: [3, 5, 2]
//
// natural shape: [4, 5, 6]
// shape: [3, 7, 2]
// output shape: [3, 7, 2]
//
//	values: A 1D tensor representing the values of the ragged tensor.
//	default_value: The default_value when the shape is larger than the ragged tensor. The
// default_value is broadcast until it is the shape of the output tensor, and
// then overwritten by values in the ragged tensor. The default value must be
// compatible with this broadcast operation, and must have fewer dimensions than
// the value tensor.
//
//	row_partition_types: The types of the row partition tensors. At present, these can be:
// * "ROW_SPLITS": the row_splits tensor from the ragged tensor.
// * "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
// * "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it
//   is preceded by "FIRST_DIM_SIZE".
// The tensors are in the order of the dimensions.
//
// Returns The resulting dense tensor.
func RaggedTensorToTensor(scope *Scope, shape tf.Output, values tf.Output, default_value tf.Output, row_partition_tensors []tf.Output, row_partition_types []string) (result tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"row_partition_types": row_partition_types}
	opspec := tf.OpSpec{
		Type: "RaggedTensorToTensor",
		Input: []tf.Input{
			shape, values, default_value, tf.OutputList(row_partition_tensors),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
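
// Example (an illustrative sketch only; it assumes the ragged tensor
// [[1, 2], [], [3]] encoded as a row_splits tensor plus a flat values vector):
//
//	s := NewScope()
//	shape := Const(s, []int64{3, 2})
//	values := Const(s, []int64{1, 2, 3})
//	defaultValue := Const(s, int64(0))
//	rowPartitions := []tf.Output{Const(s, []int64{0, 2, 2, 3})}
//	dense := RaggedTensorToTensor(s, shape, values, defaultValue,
//		rowPartitions, []string{"ROW_SPLITS"})
//	// dense is [[1, 2], [0, 0], [3, 0]]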

// Helper used to compute the gradient for `RaggedTensorToVariant`.
//
// Computes the gradient for the dense_values input to the RaggedTensorToVariant
// op, given the variant-encoded ragged gradients of the outputs, along with
// the outer row-splits and the shape of the dense-values that were provided as
// inputs to the RaggedTensorToVariant op.
//
// Arguments:
//	encoded_ragged_grad: A `variant` Tensor containing encoded `RaggedTensor` gradients.
//	row_splits: Outermost row-splits that were used as input to the RaggedTensorToVariant op.
//	dense_values_shape: Shape of the dense_values that was used as an input to the
// RaggedTensorToVariant op.
//
//
// Returns Gradient for the dense_values of the RaggedTensorToVariant op.
func RaggedTensorToVariantGradient(scope *Scope, encoded_ragged_grad tf.Output, row_splits tf.Output, dense_values_shape tf.Output, Tvalues tf.DataType) (dense_values_grad tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"Tvalues": Tvalues}
	opspec := tf.OpSpec{
		Type: "RaggedTensorToVariantGradient",
		Input: []tf.Input{
			encoded_ragged_grad, row_splits, dense_values_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Gather ragged slices from `params` axis `0` according to `indices`.
//
// Outputs a `RaggedTensor` output composed from `output_dense_values` and
// `output_nested_splits`, such that:
//
// ```python
// output.shape = indices.shape + params.shape[1:]
// output.ragged_rank = indices.shape.ndims + params.ragged_rank
// output[i...j, d0...dn] = params[indices[i...j], d0...dn]
// ```
//
// where
//
// * `params =
//    ragged.from_nested_row_splits(params_dense_values, params_nested_splits)`
//    provides the values that should be gathered.
// * `indices` is a dense tensor with dtype `int32` or `int64`, indicating which
//    values should be gathered.
// * `output =
//    ragged.from_nested_row_splits(output_dense_values, output_nested_splits)`
//    is the output tensor.
//
// (Note: This C++ op is used to implement the higher-level python
// `tf.ragged.gather` op, which also supports ragged indices.)
//
//
// Arguments:
//	params_nested_splits: The `nested_row_splits` tensors that define the row-partitioning for the
// `params` RaggedTensor input.
//	params_dense_values: The `flat_values` for the `params` RaggedTensor. There was a terminology change
// at the python level from dense_values to flat_values, so dense_values is the
// deprecated name.
//	indices: Indices in the outermost dimension of `params` of the values that should be
// gathered.
//	OUTPUT_RAGGED_RANK: The ragged rank of the output RaggedTensor. `output_nested_splits` will contain
// this number of `row_splits` tensors. This value should equal
// `indices.shape.ndims + params.ragged_rank - 1`.
//
// Returns:
//	output_nested_splits: The `nested_row_splits` tensors that define the row-partitioning for the
// returned RaggedTensor.
//	output_dense_values: The `flat_values` for the returned RaggedTensor.
func RaggedGather(scope *Scope, params_nested_splits []tf.Output, params_dense_values tf.Output, indices tf.Output, OUTPUT_RAGGED_RANK int64) (output_nested_splits []tf.Output, output_dense_values tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"OUTPUT_RAGGED_RANK": OUTPUT_RAGGED_RANK}
	opspec := tf.OpSpec{
		Type: "RaggedGather",
		Input: []tf.Input{
			tf.OutputList(params_nested_splits), params_dense_values, indices,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output_nested_splits, idx, err = makeOutputList(op, idx, "output_nested_splits"); err != nil {
		scope.UpdateErr("RaggedGather", err)
		return
	}
	output_dense_values = op.Output(idx)
	return output_nested_splits, output_dense_values
}
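
// Example (an illustrative sketch only; it gathers rows 2 and 0 of the ragged
// tensor [[1, 2], [], [3, 4, 5]]):
//
//	s := NewScope()
//	splits := Const(s, []int64{0, 2, 2, 5})
//	values := Const(s, []float32{1, 2, 3, 4, 5})
//	indices := Const(s, []int32{2, 0})
//	// OUTPUT_RAGGED_RANK = indices.shape.ndims + params.ragged_rank - 1 = 1
//	outSplits, outValues := RaggedGather(s, []tf.Output{splits}, values, indices, 1)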

// StringToNumberAttr is an optional argument to StringToNumber.
type StringToNumberAttr func(optionalAttr)

// StringToNumberOutType sets the optional out_type attribute to value.
//
// value: The numeric type to interpret each string in `string_tensor` as.
// If not specified, defaults to DT_FLOAT
func StringToNumberOutType(value tf.DataType) StringToNumberAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Converts each string in the input Tensor to the specified numeric type.
//
// (Note that int32 overflow results in an error while float overflow
// results in a rounded value.)
//
// Example:
//
// >>> strings = ["5.0", "3.0", "7.0"]
// >>> tf.strings.to_number(strings)
// <tf.Tensor: shape=(3,), dtype=float32, numpy=array([5., 3., 7.], dtype=float32)>
//
//
// Returns A Tensor of the same shape as the input `string_tensor`.
func StringToNumber(scope *Scope, string_tensor tf.Output, optional ...StringToNumberAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StringToNumber",
		Input: []tf.Input{
			string_tensor,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
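
// Example in Go (an illustrative sketch only, mirroring the python doctest
// above):
//
//	s := NewScope()
//	strs := Const(s, []string{"5.0", "3.0", "7.0"})
//	nums := StringToNumber(s, strs, StringToNumberOutType(tf.Float))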

// Convert JSON-encoded Example records to binary protocol buffer strings.
//
//
// Note: This is **not** a general purpose JSON parsing op.
//
// This op converts JSON-serialized
// `tf.train.Example` (created with `json_format.MessageToJson`, following the
// [standard JSON mapping](https://developers.google.com/protocol-buffers/docs/proto3#json))
// to a binary-serialized `tf.train.Example` (equivalent to
// `Example.SerializeToString()`) suitable for conversion to tensors with
// `tf.io.parse_example`.
//
// Arguments:
//	json_examples: Each string is a JSON object serialized according to the JSON
// mapping of the Example proto.
//
// Returns Each string is a binary Example protocol buffer corresponding
// to the respective element of `json_examples`.
func DecodeJSONExample(scope *Scope, json_examples tf.Output) (binary_examples tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DecodeJSONExample",
		Input: []tf.Input{
			json_examples,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
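
// Example (an illustrative sketch only; the JSON literal assumes the proto3
// JSON mapping of a tf.train.Example with one float feature "x"):
//
//	s := NewScope()
//	jsonExamples := Const(s, []string{
//		`{"features": {"feature": {"x": {"floatList": {"value": [1.5]}}}}}`,
//	})
//	binExamples := DecodeJSONExample(s, jsonExamples)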

// Transforms a Tensor into a serialized TensorProto proto.
//
// Arguments:
//	tensor: A Tensor of type `T`.
//
// Returns A serialized TensorProto proto of the input tensor.
func SerializeTensor(scope *Scope, tensor tf.Output) (serialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SerializeTensor",
		Input: []tf.Input{
			tensor,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Transforms a serialized tensorflow.TensorProto proto into a Tensor.
//
// Arguments:
//	serialized: A scalar string containing a serialized TensorProto proto.
//	out_type: The type of the serialized tensor.  The provided type must match the
// type of the serialized tensor and no implicit conversion will take place.
//
// Returns A Tensor of type `out_type`.
func ParseTensor(scope *Scope, serialized tf.Output, out_type tf.DataType) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type}
	opspec := tf.OpSpec{
		Type: "ParseTensor",
		Input: []tf.Input{
			serialized,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
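
// Example (an illustrative round-trip sketch only; SerializeTensor and
// ParseTensor are inverses when out_type matches the serialized dtype):
//
//	s := NewScope()
//	t := Const(s, [][]float32{{1, 2}, {3, 4}})
//	serialized := SerializeTensor(s, t)
//	restored := ParseTensor(s, serialized, tf.Float) // same values and shape as t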

// ParseSequenceExampleV2Attr is an optional argument to ParseSequenceExampleV2.
type ParseSequenceExampleV2Attr func(optionalAttr)

// ParseSequenceExampleV2NcontextSparse sets the optional Ncontext_sparse attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleV2NcontextSparse(value int64) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["Ncontext_sparse"] = value
	}
}

// ParseSequenceExampleV2ContextSparseTypes sets the optional context_sparse_types attribute to value.
//
// value: A list of Ncontext_sparse types; the data types of data in
// each context Feature given in context_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2ContextSparseTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["context_sparse_types"] = value
	}
}

// ParseSequenceExampleV2ContextRaggedValueTypes sets the optional context_ragged_value_types attribute to value.
//
// value: RaggedTensor.value dtypes for the ragged context features.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2ContextRaggedValueTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["context_ragged_value_types"] = value
	}
}

// ParseSequenceExampleV2ContextRaggedSplitTypes sets the optional context_ragged_split_types attribute to value.
//
// value: RaggedTensor.row_split dtypes for the ragged context features.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2ContextRaggedSplitTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["context_ragged_split_types"] = value
	}
}

// ParseSequenceExampleV2ContextDenseShapes sets the optional context_dense_shapes attribute to value.
//
// value: A list of Ncontext_dense shapes; the shapes of data in
// each context Feature given in context_dense_keys.
// The number of elements in the Feature corresponding to context_dense_key[j]
// must always equal context_dense_shapes[j].NumEntries().
// The shape of context_dense_values[j] will match context_dense_shapes[j].
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2ContextDenseShapes(value []tf.Shape) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["context_dense_shapes"] = value
	}
}

// ParseSequenceExampleV2NfeatureListSparse sets the optional Nfeature_list_sparse attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleV2NfeatureListSparse(value int64) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["Nfeature_list_sparse"] = value
	}
}

// ParseSequenceExampleV2NfeatureListDense sets the optional Nfeature_list_dense attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleV2NfeatureListDense(value int64) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["Nfeature_list_dense"] = value
	}
}

// ParseSequenceExampleV2FeatureListDenseTypes sets the optional feature_list_dense_types attribute to value.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListDenseTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["feature_list_dense_types"] = value
	}
}

// ParseSequenceExampleV2FeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
//
// value: A list of Nfeature_list_sparse types; the data types
// of data in each FeatureList given in feature_list_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListSparseTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["feature_list_sparse_types"] = value
	}
}

// ParseSequenceExampleV2FeatureListRaggedValueTypes sets the optional feature_list_ragged_value_types attribute to value.
//
// value: RaggedTensor.value dtypes for the ragged FeatureList features.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListRaggedValueTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["feature_list_ragged_value_types"] = value
	}
}

// ParseSequenceExampleV2FeatureListRaggedSplitTypes sets the optional feature_list_ragged_split_types attribute to value.
//
// value: RaggedTensor.row_split dtypes for the ragged FeatureList features.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListRaggedSplitTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["feature_list_ragged_split_types"] = value
	}
}

// ParseSequenceExampleV2FeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
//
// value: A list of Nfeature_list_dense shapes; the shapes of
// data in each FeatureList given in feature_list_dense_keys.
// The shape of each Feature in the FeatureList corresponding to
// feature_list_dense_key[j] must always equal
// feature_list_dense_shapes[j].NumEntries().
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListDenseShapes(value []tf.Shape) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["feature_list_dense_shapes"] = value
	}
}

// Transforms a vector of tf.io.SequenceExample protos (as strings) into
// typed tensors.
//
// Arguments:
//	serialized: A scalar or vector containing binary serialized SequenceExample protos.
//	debug_name: A scalar or vector containing the names of the serialized protos.
// May contain, for example, table key (descriptive) name for the
// corresponding serialized proto.  This is purely useful for debugging
// purposes, and the presence of values here has no effect on the output.
// May also be an empty vector if no name is available.
//	context_sparse_keys: The keys expected in the Examples' features associated with context_sparse
// values.
//	context_dense_keys: The keys expected in the SequenceExamples' context features associated with
// dense values.
//	context_ragged_keys: The keys expected in the Examples' features associated with context_ragged
// values.
//	feature_list_sparse_keys: The keys expected in the FeatureLists associated with sparse values.
//	feature_list_dense_keys: The keys expected in the SequenceExamples' feature_lists associated
// with lists of dense values.
//	feature_list_ragged_keys: The keys expected in the FeatureLists associated with ragged values.
//	feature_list_dense_missing_assumed_empty: A vector corresponding 1:1 with feature_list_dense_keys, indicating which
// features may be missing from the SequenceExamples.  If the associated
// FeatureList is missing, it is treated as empty.
//	context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
// context_dense_defaults[j] provides default values
// when the SequenceExample's context map lacks context_dense_key[j].
// If an empty Tensor is provided for context_dense_defaults[j],
// then the Feature context_dense_keys[j] is required.
// The input type is inferred from context_dense_defaults[j], even when it's
// empty.  If context_dense_defaults[j] is not empty, its shape must match
// context_dense_shapes[j].
func ParseSequenceExampleV2(scope *Scope, serialized tf.Output, debug_name tf.Output, context_sparse_keys tf.Output, context_dense_keys tf.Output, context_ragged_keys tf.Output, feature_list_sparse_keys tf.Output, feature_list_dense_keys tf.Output, feature_list_ragged_keys tf.Output, feature_list_dense_missing_assumed_empty tf.Output, context_dense_defaults []tf.Output, optional ...ParseSequenceExampleV2Attr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, context_ragged_values []tf.Output, context_ragged_row_splits []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output, feature_list_dense_lengths []tf.Output, feature_list_ragged_values []tf.Output, feature_list_ragged_outer_splits []tf.Output, feature_list_ragged_inner_splits []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ParseSequenceExampleV2",
		Input: []tf.Input{
			serialized, debug_name, context_sparse_keys, context_dense_keys, context_ragged_keys, feature_list_sparse_keys, feature_list_dense_keys, feature_list_ragged_keys, feature_list_dense_missing_assumed_empty, tf.OutputList(context_dense_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if context_sparse_indices, idx, err = makeOutputList(op, idx, "context_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_sparse_values, idx, err = makeOutputList(op, idx, "context_sparse_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_sparse_shapes, idx, err = makeOutputList(op, idx, "context_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_dense_values, idx, err = makeOutputList(op, idx, "context_dense_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_ragged_values, idx, err = makeOutputList(op, idx, "context_ragged_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_ragged_row_splits, idx, err = makeOutputList(op, idx, "context_ragged_row_splits"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_sparse_indices, idx, err = makeOutputList(op, idx, "feature_list_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_sparse_values, idx, err = makeOutputList(op, idx, "feature_list_sparse_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_sparse_shapes, idx, err = makeOutputList(op, idx, "feature_list_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_dense_values, idx, err = makeOutputList(op, idx, "feature_list_dense_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_dense_lengths, idx, err = makeOutputList(op, idx, "feature_list_dense_lengths"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_ragged_values, idx, err = makeOutputList(op, idx, "feature_list_ragged_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_ragged_outer_splits, idx, err = makeOutputList(op, idx, "feature_list_ragged_outer_splits"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_ragged_inner_splits, idx, err = makeOutputList(op, idx, "feature_list_ragged_inner_splits"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	return context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, context_ragged_values, context_ragged_row_splits, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values, feature_list_dense_lengths, feature_list_ragged_values, feature_list_ragged_outer_splits, feature_list_ragged_inner_splits
}

// Transforms a vector of brain.Example protos (as strings) into typed tensors.
//
// Arguments:
//	serialized: A vector containing a batch of binary serialized Example protos.
//	names: A vector containing the names of the serialized protos.
// May contain, for example, table key (descriptive) names for the
// corresponding serialized protos.  These are purely useful for debugging
// purposes, and the presence of values here has no effect on the output.
// May also be an empty vector if no names are available.
// If non-empty, this vector must be the same length as "serialized".
//	sparse_keys: A list of Nsparse string Tensors (scalars).
// The keys expected in the Examples' features associated with sparse values.
//	dense_keys: A list of Ndense string Tensors (scalars).
// The keys expected in the Examples' features associated with dense values.
//	dense_defaults: A list of Ndense Tensors (some may be empty).
// dense_defaults[j] provides default values
// when the example's feature_map lacks dense_key[j].  If an empty Tensor is
// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
// The input type is inferred from dense_defaults[j], even when it's empty.
// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
// then the shape of dense_defaults[j] must match that of dense_shapes[j].
// If dense_shapes[j] has an undefined major dimension (variable strides dense
// feature), dense_defaults[j] must contain a single element:
// the padding element.
//	sparse_types: A list of Nsparse types; the data types of data in each Feature
// given in sparse_keys.
// Currently the ParseExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
//	dense_shapes: A list of Ndense shapes; the shapes of data in each Feature
// given in dense_keys.
// The number of elements in the Feature corresponding to dense_key[j]
// must always equal dense_shapes[j].NumEntries().
// If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output
// Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):
// The dense outputs are just the inputs row-stacked by batch.
// This works for dense_shapes[j] = (-1, D1, ..., DN).  In this case
// the shape of the output Tensor dense_values[j] will be
// (|serialized|, M, D1, .., DN), where M is the maximum number of blocks
// of elements of length D1 * .... * DN, across all minibatch entries
// in the input.  Any minibatch entry with less than M blocks of elements of
// length D1 * ... * DN will be padded with the corresponding default_value
// scalar element along the second dimension.
func ParseExample(scope *Scope, serialized tf.Output, names tf.Output, sparse_keys []tf.Output, dense_keys []tf.Output, dense_defaults []tf.Output, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"sparse_types": sparse_types, "dense_shapes": dense_shapes}
	opspec := tf.OpSpec{
		Type: "ParseExample",
		Input: []tf.Input{
			serialized, names, tf.OutputList(sparse_keys), tf.OutputList(dense_keys), tf.OutputList(dense_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
		scope.UpdateErr("ParseExample", err)
		return
	}
	if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
		scope.UpdateErr("ParseExample", err)
		return
	}
	if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
		scope.UpdateErr("ParseExample", err)
		return
	}
	if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
		scope.UpdateErr("ParseExample", err)
		return
	}
	return sparse_indices, sparse_values, sparse_shapes, dense_values
}
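
// Example (an illustrative sketch only; it parses a single scalar int64
// feature "age" with no sparse features, and assumes the serialized Example
// protos are fed at run time through the placeholder):
//
//	s := NewScope()
//	serialized := Placeholder(s, tf.String)
//	names := Const(s, []string{}) // no debug names
//	denseKeys := []tf.Output{Const(s, "age")}
//	denseDefaults := []tf.Output{Const(s, int64(-1))}
//	_, _, _, denseValues := ParseExample(s, serialized, names,
//		nil, denseKeys, denseDefaults,
//		nil, []tf.Shape{tf.ScalarShape()})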

// QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr is an optional argument to QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.
type QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr func(optionalAttr)

// QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutType sets the optional out_type attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_QUINT8
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutType(value tf.DataType) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeDilations sets the optional dilations attribute to value.
//
// value: List of dilation values.
// If not specified, defaults to {i:1 i:1 i:1 i:1}
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeDilations(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizePaddingList sets the optional padding_list attribute to value.
// If not specified, defaults to {}
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizePaddingList(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr {
	return func(m optionalAttr) {
		m["padding_list"] = value
	}
}

// Computes quantized depthwise Conv2D with Bias, Relu and Requantize.
//
// Arguments:
//	input: The original input tensor.
//	filter: The original filter tensor.
//	bias: The original bias tensor.
//	min_input: The float value that the minimum quantized input value represents.
//	max_input: The float value that the maximum quantized input value represents.
//	min_filter: The float value that the minimum quantized filter value represents.
//	max_filter: The float value that the maximum quantized filter value represents.
//	min_freezed_output: The minimum float value of the output tensor.
//	max_freezed_output: The maximum float value of the output tensor.
//	strides: List of stride values.
//
//
// Returns:
//	output: The output tensor.
//	min_output: The float value that the minimum quantized output value represents.
//	max_output: The float value that the maximum quantized output value represents.
func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, min_freezed_output tf.Output, max_freezed_output tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize",
		Input: []tf.Input{
			input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// QuantizedDepthwiseConv2DWithBiasAttr is an optional argument to QuantizedDepthwiseConv2DWithBias.
type QuantizedDepthwiseConv2DWithBiasAttr func(optionalAttr)

// QuantizedDepthwiseConv2DWithBiasOutType sets the optional out_type attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_QINT32
func QuantizedDepthwiseConv2DWithBiasOutType(value tf.DataType) QuantizedDepthwiseConv2DWithBiasAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// QuantizedDepthwiseConv2DWithBiasDilations sets the optional dilations attribute to value.
//
// value: List of dilation values.
// If not specified, defaults to {i:1 i:1 i:1 i:1}
func QuantizedDepthwiseConv2DWithBiasDilations(value []int64) QuantizedDepthwiseConv2DWithBiasAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes quantized depthwise Conv2D with Bias.
//
// Arguments:
//	input: The original input tensor.
//	filter: The original filter tensor.
//	bias: The original bias tensor.
//	min_input: The float value that the minimum quantized input value represents.
//	max_input: The float value that the maximum quantized input value represents.
//	min_filter: The float value that the minimum quantized filter value represents.
//	max_filter: The float value that the maximum quantized filter value represents.
//	strides: List of stride values.
//
//
// Returns:
//	output: The output tensor.
//	min_output: The float value that the minimum quantized output value represents.
//	max_output: The float value that the maximum quantized output value represents.
func QuantizedDepthwiseConv2DWithBias(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DWithBiasAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedDepthwiseConv2DWithBias",
		Input: []tf.Input{
			input, filter, bias, min_input, max_input, min_filter, max_filter,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// QuantizedDepthwiseConv2DAttr is an optional argument to QuantizedDepthwiseConv2D.
type QuantizedDepthwiseConv2DAttr func(optionalAttr)

// QuantizedDepthwiseConv2DOutType sets the optional out_type attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_QINT32
func QuantizedDepthwiseConv2DOutType(value tf.DataType) QuantizedDepthwiseConv2DAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// QuantizedDepthwiseConv2DDilations sets the optional dilations attribute to value.
//
// value: List of dilation values.
// If not specified, defaults to {i:1 i:1 i:1 i:1}
func QuantizedDepthwiseConv2DDilations(value []int64) QuantizedDepthwiseConv2DAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes quantized depthwise Conv2D.
//
// Arguments:
//	input: The original input tensor.
//	filter: The original filter tensor.
//	min_input: The float value that the minimum quantized input value represents.
//	max_input: The float value that the maximum quantized input value represents.
//	min_filter: The float value that the minimum quantized filter value represents.
//	max_filter: The float value that the maximum quantized filter value represents.
//	strides: List of stride values.
//
//
// Returns:
//	output: The output tensor.
//	min_output: The float value that the minimum quantized output value represents.
//	max_output: The float value that the maximum quantized output value represents.
func QuantizedDepthwiseConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedDepthwiseConv2D",
		Input: []tf.Input{
			input, filter, min_input, max_input, min_filter, max_filter,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// QuantizedConv2DPerChannelAttr is an optional argument to QuantizedConv2DPerChannel.
type QuantizedConv2DPerChannelAttr func(optionalAttr)

// QuantizedConv2DPerChannelOutType sets the optional out_type attribute to value.
//
// value: The quantized type of output tensor that needs to be converted.
// If not specified, defaults to DT_QINT32
func QuantizedConv2DPerChannelOutType(value tf.DataType) QuantizedConv2DPerChannelAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// QuantizedConv2DPerChannelDilations sets the optional dilations attribute to value.
//
// value: list of dilation values.
// If not specified, defaults to {i:1 i:1 i:1 i:1}
func QuantizedConv2DPerChannelDilations(value []int64) QuantizedConv2DPerChannelAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes QuantizedConv2D per channel.
//
// Arguments:
//	input: The original input tensor.
//	filter: The original filter tensor.
//	min_input: The minimum value of the input tensor.
//	max_input: The maximum value of the input tensor.
//	min_filter: The minimum value of the filter tensor.
//	max_filter: The maximum value of the filter tensor.
//	strides: list of stride values.
//
//
// Returns:
//	output: The output tensor.
//	min_output: The minimum value of the final output tensor.
//	max_output: The maximum value of the final output tensor.
func QuantizedConv2DPerChannel(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedConv2DPerChannelAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedConv2DPerChannel",
		Input: []tf.Input{
			input, filter, min_input, max_input, min_filter, max_filter,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// RetrieveTPUEmbeddingRMSPropParametersAttr is an optional argument to RetrieveTPUEmbeddingRMSPropParameters.
type RetrieveTPUEmbeddingRMSPropParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingRMSPropParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingRMSPropParametersTableId(value int64) RetrieveTPUEmbeddingRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingRMSPropParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingRMSPropParametersTableName(value string) RetrieveTPUEmbeddingRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingRMSPropParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingRMSPropParametersConfig(value string) RetrieveTPUEmbeddingRMSPropParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve RMSProp embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the RMSProp optimization algorithm.
//	ms: Parameter ms updated by the RMSProp optimization algorithm.
//	mom: Parameter mom updated by the RMSProp optimization algorithm.
func RetrieveTPUEmbeddingRMSPropParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingRMSPropParametersAttr) (parameters tf.Output, ms tf.Output, mom tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingRMSPropParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// QuantizedMatMulWithBiasAndReluAttr is an optional argument to QuantizedMatMulWithBiasAndRelu.
type QuantizedMatMulWithBiasAndReluAttr func(optionalAttr)

// QuantizedMatMulWithBiasAndReluToutput sets the optional Toutput attribute to value.
// If not specified, defaults to DT_QINT32
func QuantizedMatMulWithBiasAndReluToutput(value tf.DataType) QuantizedMatMulWithBiasAndReluAttr {
	return func(m optionalAttr) {
		m["Toutput"] = value
	}
}

// QuantizedMatMulWithBiasAndReluTransposeA sets the optional transpose_a attribute to value.
//
// value: If true, `a` is transposed before multiplication.
// If not specified, defaults to false
func QuantizedMatMulWithBiasAndReluTransposeA(value bool) QuantizedMatMulWithBiasAndReluAttr {
	return func(m optionalAttr) {
		m["transpose_a"] = value
	}
}

// QuantizedMatMulWithBiasAndReluTransposeB sets the optional transpose_b attribute to value.
//
// value: If true, `b` is transposed before multiplication.
// If not specified, defaults to false
func QuantizedMatMulWithBiasAndReluTransposeB(value bool) QuantizedMatMulWithBiasAndReluAttr {
	return func(m optionalAttr) {
		m["transpose_b"] = value
	}
}

// QuantizedMatMulWithBiasAndReluInputQuantMode sets the optional input_quant_mode attribute to value.
//
// value: Input data quantization mode. Either MIN_FIRST (default) or SCALED.
// If not specified, defaults to "MIN_FIRST"
func QuantizedMatMulWithBiasAndReluInputQuantMode(value string) QuantizedMatMulWithBiasAndReluAttr {
	return func(m optionalAttr) {
		m["input_quant_mode"] = value
	}
}

// Performs a quantized matrix multiplication of `a` by the matrix `b` with bias
// add and relu fusion.
//
// The inputs must be two-dimensional matrices and a 1D bias vector. The inner
// dimension of `a` (after being transposed if `transpose_a` is non-zero) must
// match the outer dimension of `b` (after being transposed if `transpose_b` is
// non-zero). A broadcast add of the bias values is then applied to the matrix
// multiplication result; the bias size must match the inner dimension of `b`.
// Finally, a relu activation is applied to produce a non-negative result.
//
// Arguments:
//	a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
//	b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
//	bias: A 1D bias tensor with size matching the inner dimension of `b` (after being
// transposed if `transpose_b` is non-zero).
//	min_a: The float value that the lowest quantized `a` value represents.
//	max_a: The float value that the highest quantized `a` value represents.
//	min_b: The float value that the lowest quantized `b` value represents.
//	max_b: The float value that the highest quantized `b` value represents.
//
// Returns:
//	out
//	min_out: The float value that the lowest quantized output value represents.
//	max_out: The float value that the highest quantized output value represents.
func QuantizedMatMulWithBiasAndRelu(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulWithBiasAndReluAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedMatMulWithBiasAndRelu",
		Input: []tf.Input{
			a, b, bias, min_a, max_a, min_b, max_b,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// QuantizedMatMulWithBiasAttr is an optional argument to QuantizedMatMulWithBias.
type QuantizedMatMulWithBiasAttr func(optionalAttr)

// QuantizedMatMulWithBiasToutput sets the optional Toutput attribute to value.
// If not specified, defaults to DT_QINT32
func QuantizedMatMulWithBiasToutput(value tf.DataType) QuantizedMatMulWithBiasAttr {
	return func(m optionalAttr) {
		m["Toutput"] = value
	}
}

// QuantizedMatMulWithBiasTransposeA sets the optional transpose_a attribute to value.
//
// value: If true, `a` is transposed before multiplication.
// If not specified, defaults to false
func QuantizedMatMulWithBiasTransposeA(value bool) QuantizedMatMulWithBiasAttr {
	return func(m optionalAttr) {
		m["transpose_a"] = value
	}
}

// QuantizedMatMulWithBiasTransposeB sets the optional transpose_b attribute to value.
//
// value: If true, `b` is transposed before multiplication.
// If not specified, defaults to false
func QuantizedMatMulWithBiasTransposeB(value bool) QuantizedMatMulWithBiasAttr {
	return func(m optionalAttr) {
		m["transpose_b"] = value
	}
}

// QuantizedMatMulWithBiasInputQuantMode sets the optional input_quant_mode attribute to value.
//
// value: Input data quantization mode. Either MIN_FIRST (default) or SCALED.
// If not specified, defaults to "MIN_FIRST"
func QuantizedMatMulWithBiasInputQuantMode(value string) QuantizedMatMulWithBiasAttr {
	return func(m optionalAttr) {
		m["input_quant_mode"] = value
	}
}

// Performs a quantized matrix multiplication of `a` by the matrix `b` with bias
// add.
//
// The inputs must be two-dimensional matrices and a 1D bias vector. The inner
// dimension of `a` (after being transposed if `transpose_a` is non-zero) must
// match the outer dimension of `b` (after being transposed if `transpose_b` is
// non-zero). A broadcast add of the bias values is then applied to the matrix
// multiplication result; the bias size must match the inner dimension of `b`.
//
// Arguments:
//	a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
//	b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
//	bias: A 1D bias tensor with size matching the inner dimension of `b` (after being
// transposed if `transpose_b` is non-zero).
//	min_a: The float value that the lowest quantized `a` value represents.
//	max_a: The float value that the highest quantized `a` value represents.
//	min_b: The float value that the lowest quantized `b` value represents.
//	max_b: The float value that the highest quantized `b` value represents.
//
// Returns:
//	out
//	min_out: The float value that the lowest quantized output value represents.
//	max_out: The float value that the highest quantized output value represents.
func QuantizedMatMulWithBias(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulWithBiasAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedMatMulWithBias",
		Input: []tf.Input{
			a, b, bias, min_a, max_a, min_b, max_b,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Quantized Batch normalization.
//
// This op is deprecated and will be removed in the future. Prefer
// `tf.nn.batch_normalization`.
//
// Arguments:
//	t: A 4D input Tensor.
//	t_min: The value represented by the lowest quantized input.
//	t_max: The value represented by the highest quantized input.
//	m: A 1D mean Tensor with size matching the last dimension of t.
// This is the first output from tf.nn.moments,
// or a saved moving average thereof.
//	m_min: The value represented by the lowest quantized mean.
//	m_max: The value represented by the highest quantized mean.
//	v: A 1D variance Tensor with size matching the last dimension of t.
// This is the second output from tf.nn.moments,
// or a saved moving average thereof.
//	v_min: The value represented by the lowest quantized variance.
//	v_max: The value represented by the highest quantized variance.
//	beta: A 1D beta Tensor with size matching the last dimension of t.
// An offset to be added to the normalized tensor.
//	beta_min: The value represented by the lowest quantized offset.
//	beta_max: The value represented by the highest quantized offset.
//	gamma: A 1D gamma Tensor with size matching the last dimension of t.
// If "scale_after_normalization" is true, this tensor will be multiplied
// with the normalized tensor.
//	gamma_min: The value represented by the lowest quantized gamma.
//	gamma_max: The value represented by the highest quantized gamma.
//
//	variance_epsilon: A small float number to avoid dividing by 0.
//	scale_after_normalization: A bool indicating whether the resulting tensor
// needs to be multiplied with gamma.
func QuantizedBatchNormWithGlobalNormalization(scope *Scope, t tf.Output, t_min tf.Output, t_max tf.Output, m tf.Output, m_min tf.Output, m_max tf.Output, v tf.Output, v_min tf.Output, v_max tf.Output, beta tf.Output, beta_min tf.Output, beta_max tf.Output, gamma tf.Output, gamma_min tf.Output, gamma_max tf.Output, out_type tf.DataType, variance_epsilon float32, scale_after_normalization bool) (result tf.Output, result_min tf.Output, result_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type, "variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
	opspec := tf.OpSpec{
		Type: "QuantizedBatchNormWithGlobalNormalization",
		Input: []tf.Input{
			t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// QuantizedReluXAttr is an optional argument to QuantizedReluX.
type QuantizedReluXAttr func(optionalAttr)

// QuantizedReluXOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_QUINT8
func QuantizedReluXOutType(value tf.DataType) QuantizedReluXAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`
//
// Arguments:
//
//
//	min_features: The float value that the lowest quantized value represents.
//	max_features: The float value that the highest quantized value represents.
//
// Returns:
//	activations: Has the same output shape as "features".
//	min_activations: The float value that the lowest quantized value represents.
//	max_activations: The float value that the highest quantized value represents.
func QuantizedReluX(scope *Scope, features tf.Output, max_value tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluXAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedReluX",
		Input: []tf.Input{
			features, max_value, min_features, max_features,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
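
// Example (an illustrative sketch only; it quantizes a float tensor with
// QuantizeV2 first, since quantized tensors are rarely constructed directly):
//
//	s := NewScope()
//	x := Const(s, []float32{-1, 2, 8})
//	q, qMin, qMax := QuantizeV2(s, x, Const(s, float32(-1)), Const(s, float32(8)), tf.Quint8)
//	act, actMin, actMax := QuantizedReluX(s, q, Const(s, float32(6)), qMin, qMax)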

// Produces the average pool of the input tensor for quantized types.
//
// Arguments:
//	input: 4-D with shape `[batch, height, width, channels]`.
//	min_input: The float value that the lowest quantized input value represents.
//	max_input: The float value that the highest quantized input value represents.
//	ksize: The size of the window for each dimension of the input tensor.
// The length must be 4 to match the number of dimensions of the input.
//	strides: The stride of the sliding window for each dimension of the input
// tensor.  The length must be 4 to match the number of dimensions of the input.
//	padding: The type of padding algorithm to use.
//
// Returns:
//	output
//	min_output: The float value that the lowest quantized output value represents.
//	max_output: The float value that the highest quantized output value represents.
func QuantizedAvgPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	opspec := tf.OpSpec{
		Type: "QuantizedAvgPool",
		Input: []tf.Input{
			input, min_input, max_input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
8720
8721// FractionalAvgPoolGradAttr is an optional argument to FractionalAvgPoolGrad.
8722type FractionalAvgPoolGradAttr func(optionalAttr)
8723
8724// FractionalAvgPoolGradOverlapping sets the optional overlapping attribute to value.
8725//
8726// value: When set to True, the values at the boundary of adjacent pooling
8727// cells are used by both cells when pooling. For example:
8728//
8729// `index  0  1  2  3  4`
8730//
8731// `value  20 5  16 3  7`
8732//
8733// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
8734// The result would be [41/3, 26/3] for fractional avg pooling.
8735// If not specified, defaults to false
8736func FractionalAvgPoolGradOverlapping(value bool) FractionalAvgPoolGradAttr {
8737	return func(m optionalAttr) {
8738		m["overlapping"] = value
8739	}
8740}
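
// A small hand-written check of the example above (and of the matching
// FractionalMaxPool example further below): with pooling sequence [0, 2, 4]
// and overlapping enabled, the cells are [0, 2] and [2, 4], so index 2
// belongs to both. overlappingPoolExample is illustrative, not generated.
func overlappingPoolExample() (avg, max []float64) {
	value := []float64{20, 5, 16, 3, 7}
	seq := []int{0, 2, 4} // pooling sequence
	for i := 0; i+1 < len(seq); i++ {
		lo, hi := seq[i], seq[i+1] // hi is shared with the next cell
		sum, m := 0.0, value[lo]
		for j := lo; j <= hi; j++ {
			sum += value[j]
			if value[j] > m {
				m = value[j]
			}
		}
		avg = append(avg, sum/float64(hi-lo+1))
		max = append(max, m)
	}
	return // avg == [41/3, 26/3], max == [20, 16]
}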
8741
8742// Computes gradient of the FractionalAvgPool function.
8743//
8744// Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
8745// FractionalAvgPoolGrad; we just need to evenly back-propagate each element of
8746// out_backprop to the indices that form the same pooling cell. Therefore, we
8747// only need to know the shape of the original input tensor, instead of the
8748// whole tensor.
8749//
8750// Arguments:
8751//	orig_input_tensor_shape: Original input tensor shape for `fractional_avg_pool`.
8752//	out_backprop: 4-D with shape `[batch, height, width, channels]`.  Gradients
8753// w.r.t. the output of `fractional_avg_pool`.
8754//	row_pooling_sequence: Row pooling sequence, forming the pooling regions
8755// together with col_pooling_sequence.
8756//	col_pooling_sequence: Column pooling sequence, forming the pooling regions
8757// together with row_pooling_sequence.
8758//
8759// Returns 4-D.  Gradients w.r.t. the input of `fractional_avg_pool`.
8760func FractionalAvgPoolGrad(scope *Scope, orig_input_tensor_shape tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalAvgPoolGradAttr) (output tf.Output) {
8761	if scope.Err() != nil {
8762		return
8763	}
8764	attrs := map[string]interface{}{}
8765	for _, a := range optional {
8766		a(attrs)
8767	}
8768	opspec := tf.OpSpec{
8769		Type: "FractionalAvgPoolGrad",
8770		Input: []tf.Input{
8771			orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence,
8772		},
8773		Attrs: attrs,
8774	}
8775	op := scope.AddOperation(opspec)
8776	return op.Output(0)
8777}
8778
8779// FractionalMaxPoolGradAttr is an optional argument to FractionalMaxPoolGrad.
8780type FractionalMaxPoolGradAttr func(optionalAttr)
8781
8782// FractionalMaxPoolGradOverlapping sets the optional overlapping attribute to value.
8783//
8784// value: When set to True, the values at the boundary of adjacent pooling
8785// cells are used by both cells when pooling. For example:
8786//
8787// `index  0  1  2  3  4`
8788//
8789// `value  20 5  16 3  7`
8790//
8791// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
8792// The result would be [20, 16] for fractional max pooling.
8793// If not specified, defaults to false
8794func FractionalMaxPoolGradOverlapping(value bool) FractionalMaxPoolGradAttr {
8795	return func(m optionalAttr) {
8796		m["overlapping"] = value
8797	}
8798}
8799
8800// Computes gradient of the FractionalMaxPool function.
8801//
8802// Arguments:
8803//	orig_input: Original input for `fractional_max_pool`
8804//	orig_output: Original output for `fractional_max_pool`
8805//	out_backprop: 4-D with shape `[batch, height, width, channels]`.  Gradients
8806// w.r.t. the output of `fractional_max_pool`.
8807//	row_pooling_sequence: Row pooling sequence, forming the pooling regions
8808// together with col_pooling_sequence.
8809//	col_pooling_sequence: Column pooling sequence, forming the pooling regions
8810// together with row_pooling_sequence.
8811//
8812// Returns 4-D.  Gradients w.r.t. the input of `fractional_max_pool`.
8813func FractionalMaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalMaxPoolGradAttr) (output tf.Output) {
8814	if scope.Err() != nil {
8815		return
8816	}
8817	attrs := map[string]interface{}{}
8818	for _, a := range optional {
8819		a(attrs)
8820	}
8821	opspec := tf.OpSpec{
8822		Type: "FractionalMaxPoolGrad",
8823		Input: []tf.Input{
8824			orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence,
8825		},
8826		Attrs: attrs,
8827	}
8828	op := scope.AddOperation(opspec)
8829	return op.Output(0)
8830}
8831
8832// TopKV2Attr is an optional argument to TopKV2.
8833type TopKV2Attr func(optionalAttr)
8834
8835// TopKV2Sorted sets the optional sorted attribute to value.
8836//
8837// value: If true, the resulting `k` elements will be sorted by the values in
8838// descending order.
8839// If not specified, defaults to true
8840func TopKV2Sorted(value bool) TopKV2Attr {
8841	return func(m optionalAttr) {
8842		m["sorted"] = value
8843	}
8844}
8845
8846// Finds values and indices of the `k` largest elements for the last dimension.
8847//
8848// If the input is a vector (rank-1), finds the `k` largest entries in the vector
8849// and outputs their values and indices as vectors.  Thus `values[j]` is the
8850// `j`-th largest entry in `input`, and its index is `indices[j]`.
8851//
8852// For matrices (resp. higher rank input), computes the top `k` entries in each
8853// row (resp. vector along the last dimension).  Thus,
8854//
8855//     values.shape = indices.shape = input.shape[:-1] + [k]
8856//
8857// If two elements are equal, the lower-index element appears first.
8858//
8859// Arguments:
8860//	input: 1-D or higher with last dimension at least `k`.
8861//	k: 0-D.  Number of top elements to look for along the last dimension (along each
8862// row for matrices).
8863//
8864// Returns:
8865//	values: The `k` largest elements along each last dimensional slice.
8866//	indices: The indices of `values` within the last dimension of `input`.
8867func TopKV2(scope *Scope, input tf.Output, k tf.Output, optional ...TopKV2Attr) (values tf.Output, indices tf.Output) {
8868	if scope.Err() != nil {
8869		return
8870	}
8871	attrs := map[string]interface{}{}
8872	for _, a := range optional {
8873		a(attrs)
8874	}
8875	opspec := tf.OpSpec{
8876		Type: "TopKV2",
8877		Input: []tf.Input{
8878			input, k,
8879		},
8880		Attrs: attrs,
8881	}
8882	op := scope.AddOperation(opspec)
8883	return op.Output(0), op.Output(1)
8884}
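
// A minimal end-to-end sketch (hand-written, not generated) of wiring TopKV2
// into a graph and running it. NewScope and Const come from this package;
// the Session plumbing comes from the core tf bindings.
func exampleTopKV2() ([]float32, []int32, error) {
	s := NewScope()
	input := Const(s, []float32{3, 1, 4, 1, 5})
	k := Const(s, int32(3))
	values, indices := TopKV2(s, input, k, TopKV2Sorted(true))
	graph, err := s.Finalize()
	if err != nil {
		return nil, nil, err
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, nil, err
	}
	defer sess.Close()
	fetched, err := sess.Run(nil, []tf.Output{values, indices}, nil)
	if err != nil {
		return nil, nil, err
	}
	// Expect values [5 4 3] and indices [4 2 0].
	return fetched[0].Value().([]float32), fetched[1].Value().([]int32), nil
}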
8885
8886// TopKAttr is an optional argument to TopK.
8887type TopKAttr func(optionalAttr)
8888
8889// TopKSorted sets the optional sorted attribute to value.
8890//
8891// value: If true, the resulting `k` elements will be sorted by the values in
8892// descending order.
8893// If not specified, defaults to true
8894func TopKSorted(value bool) TopKAttr {
8895	return func(m optionalAttr) {
8896		m["sorted"] = value
8897	}
8898}
8899
8900// Finds values and indices of the `k` largest elements for the last dimension.
8901//
8902// DEPRECATED at GraphDef version 7: Use TopKV2 instead
8903//
8904// If the input is a vector (rank-1), finds the `k` largest entries in the vector
8905// and outputs their values and indices as vectors.  Thus `values[j]` is the
8906// `j`-th largest entry in `input`, and its index is `indices[j]`.
8907//
8908// For matrices (resp. higher rank input), computes the top `k` entries in each
8909// row (resp. vector along the last dimension).  Thus,
8910//
8911//     values.shape = indices.shape = input.shape[:-1] + [k]
8912//
8913// If two elements are equal, the lower-index element appears first.
8914//
8915// If `k` varies dynamically, use `TopKV2` instead.
8916//
8917// Arguments:
8918//	input: 1-D or higher with last dimension at least `k`.
8919//	k: Number of top elements to look for along the last dimension (along each
8920// row for matrices).
8921//
8922// Returns:
8923//	values: The `k` largest elements along each last dimensional slice.
8924//	indices: The indices of `values` within the last dimension of `input`.
8925func TopK(scope *Scope, input tf.Output, k int64, optional ...TopKAttr) (values tf.Output, indices tf.Output) {
8926	if scope.Err() != nil {
8927		return
8928	}
8929	attrs := map[string]interface{}{"k": k}
8930	for _, a := range optional {
8931		a(attrs)
8932	}
8933	opspec := tf.OpSpec{
8934		Type: "TopK",
8935		Input: []tf.Input{
8936			input,
8937		},
8938		Attrs: attrs,
8939	}
8940	op := scope.AddOperation(opspec)
8941	return op.Output(0), op.Output(1)
8942}
8943
8944// Says whether the targets are in the top `K` predictions.
8945//
8946// This outputs a `batch_size` bool array; an entry `out[i]` is `true` if the
8947// prediction for the target class is among the top `k` predictions across
8948// all predictions for example `i`. Note that the behavior of `InTopK` differs
8949// from the `TopK` op in its handling of ties; if multiple classes have the
8950// same prediction value and straddle the top-`k` boundary, all of those
8951// classes are considered to be in the top `k`.
8952//
8953// More formally, let
8954//
8955//   \\(predictions_i\\) be the predictions for all classes for example `i`,
8956//   \\(targets_i\\) be the target class for example `i`,
8957//   \\(out_i\\) be the output for example `i`,
8958//
8959// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
8960//
8961// Arguments:
8962//	predictions: A `batch_size` x `classes` tensor.
8963//	targets: A `batch_size` vector of class ids.
8964//	k: Number of top elements to look at for computing precision.
8965//
8966// Returns Computed precision at `k` as a `bool Tensor`.
8967func InTopKV2(scope *Scope, predictions tf.Output, targets tf.Output, k tf.Output) (precision tf.Output) {
8968	if scope.Err() != nil {
8969		return
8970	}
8971	opspec := tf.OpSpec{
8972		Type: "InTopKV2",
8973		Input: []tf.Input{
8974			predictions, targets, k,
8975		},
8976	}
8977	op := scope.AddOperation(opspec)
8978	return op.Output(0)
8979}
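
// The tie handling described above, as a hand-written scalar reference
// (inTopKRef is illustrative, not generated): a target is in the top `k`
// exactly when fewer than `k` classes have a strictly higher prediction, so
// ties that straddle the top-`k` boundary all count as in.
func inTopKRef(predictions []float32, target, k int) bool {
	higher := 0
	for _, p := range predictions {
		if p > predictions[target] {
			higher++
		}
	}
	return higher < k
}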
8980
8981// Computes softmax cross entropy cost and gradients to backpropagate.
8982//
8983// Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
8984// a matrix of label probabilities, but rather a single label per row
8985// of features.  This label is considered to have probability 1.0 for the
8986// given row.
8987//
8988// Inputs are the logits, not probabilities.
8989//
8990// Arguments:
8991//	features: batch_size x num_classes matrix
8992//	labels: batch_size vector with values in [0, num_classes).
8993// This is the label for the given minibatch entry.
8994//
8995// Returns:
8996//	loss: Per example loss (batch_size vector).
8997//	backprop: backpropagated gradients (batch_size x num_classes matrix).
8998func SparseSoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
8999	if scope.Err() != nil {
9000		return
9001	}
9002	opspec := tf.OpSpec{
9003		Type: "SparseSoftmaxCrossEntropyWithLogits",
9004		Input: []tf.Input{
9005			features, labels,
9006		},
9007	}
9008	op := scope.AddOperation(opspec)
9009	return op.Output(0), op.Output(1)
9010}
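
// A hand-written reference for a single row (sparseSoftmaxCERef is
// illustrative, not generated; exp and log are injected, e.g. math.Exp and
// math.Log): the loss is -log(softmax(features)[label]), and the backprop is
// softmax(features) with 1 subtracted at the label position.
func sparseSoftmaxCERef(features []float64, label int, exp, log func(float64) float64) (loss float64, backprop []float64) {
	sum := 0.0
	for _, f := range features {
		sum += exp(f)
	}
	backprop = make([]float64, len(features))
	for i, f := range features {
		backprop[i] = exp(f) / sum // softmax probability
	}
	loss = -log(backprop[label])
	backprop[label] -= 1
	return
}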
9011
9012// Computes log softmax activations.
9013//
9014// For each batch `i` and class `j` we have
9015//
9016//     logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
9017//
9018// Arguments:
9019//	logits: 2-D with shape `[batch_size, num_classes]`.
9020//
9021// Returns Same shape as `logits`.
9022func LogSoftmax(scope *Scope, logits tf.Output) (logsoftmax tf.Output) {
9023	if scope.Err() != nil {
9024		return
9025	}
9026	opspec := tf.OpSpec{
9027		Type: "LogSoftmax",
9028		Input: []tf.Input{
9029			logits,
9030		},
9031	}
9032	op := scope.AddOperation(opspec)
9033	return op.Output(0)
9034}
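
// The formula above, as a hand-written reference for one row (logSoftmaxRef
// is illustrative, not generated; exp and log are injected, e.g. math.Exp and
// math.Log). A production version would subtract max(logits) before
// exponentiating for numerical stability.
func logSoftmaxRef(logits []float64, exp, log func(float64) float64) []float64 {
	sum := 0.0
	for _, l := range logits {
		sum += exp(l)
	}
	logSum := log(sum)
	out := make([]float64, len(logits))
	for i, l := range logits {
		out[i] = l - logSum
	}
	return out
}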
9035
9036// Computes softmax activations.
9037//
9038// For each batch `i` and class `j` we have
9039//
9040//     $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
9041//
9042// Arguments:
9043//	logits: 2-D with shape `[batch_size, num_classes]`.
9044//
9045// Returns Same shape as `logits`.
9046func Softmax(scope *Scope, logits tf.Output) (softmax tf.Output) {
9047	if scope.Err() != nil {
9048		return
9049	}
9050	opspec := tf.OpSpec{
9051		Type: "Softmax",
9052		Input: []tf.Input{
9053			logits,
9054		},
9055	}
9056	op := scope.AddOperation(opspec)
9057	return op.Output(0)
9058}
9059
9060// Computes softsign gradients for a softsign operation.
9061//
9062// Arguments:
9063//	gradients: The backpropagated gradients to the corresponding softsign operation.
9064//	features: The features passed as input to the corresponding softsign operation.
9065//
9066// Returns The gradients: `gradients / (1 + abs(features)) ** 2`.
9067func SoftsignGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
9068	if scope.Err() != nil {
9069		return
9070	}
9071	opspec := tf.OpSpec{
9072		Type: "SoftsignGrad",
9073		Input: []tf.Input{
9074			gradients, features,
9075		},
9076	}
9077	op := scope.AddOperation(opspec)
9078	return op.Output(0)
9079}
9080
9081// Computes softplus gradients for a softplus operation.
9082//
9083// Arguments:
9084//	gradients: The backpropagated gradients to the corresponding softplus operation.
9085//	features: The features passed as input to the corresponding softplus operation.
9086//
9087// Returns The gradients: `gradients / (1 + exp(-features))`.
9088func SoftplusGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
9089	if scope.Err() != nil {
9090		return
9091	}
9092	opspec := tf.OpSpec{
9093		Type: "SoftplusGrad",
9094		Input: []tf.Input{
9095			gradients, features,
9096		},
9097	}
9098	op := scope.AddOperation(opspec)
9099	return op.Output(0)
9100}
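
// Hand-written scalar references for the SoftsignGrad and SoftplusGrad
// formulas above (illustrative, not generated; exp is injected, e.g.
// math.Exp).
func softsignGradRef(gradient, feature float64) float64 {
	a := feature
	if a < 0 {
		a = -a // abs(feature)
	}
	return gradient / ((1 + a) * (1 + a))
}

func softplusGradRef(gradient, feature float64, exp func(float64) float64) float64 {
	return gradient / (1 + exp(-feature)) // == gradient * sigmoid(feature)
}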
9101
9102// Computes the LSTM cell backward propagation for 1 timestep.
9103//
9104// This implementation is to be used in conjunction with LSTMBlockCell.
9105//
9106// Arguments:
9107//	x: The input to the LSTM cell, shape (batch_size, num_inputs).
9108//	cs_prev: The previous cell state.
9109//	h_prev: The previous h state.
9110//	w: The weight matrix.
9111//	wci: The weight matrix for input gate peephole connection.
9112//	wcf: The weight matrix for forget gate peephole connection.
9113//	wco: The weight matrix for output gate peephole connection.
9114//	b: The bias vector.
9115//	i: The input gate.
9116//	cs: The cell state before the tanh.
9117//	f: The forget gate.
9118//	o: The output gate.
9119//	ci: The cell input.
9120//	co: The cell after the tanh.
9121//	cs_grad: The current gradient of cs.
9122//	h_grad: The gradient of h vector.
9123//	use_peephole: Whether the cell uses peephole connections.
9124//
9125// Returns:
9126//	cs_prev_grad: The gradient of cs to be back-propped.
9127//	dicfo: The derivative w.r.t. [i, cs, f, o].
9128//	wci_grad: The gradient for wci to be back-propped.
9129//	wcf_grad: The gradient for wcf to be back-propped.
9130//	wco_grad: The gradient for wco to be back-propped.
9131func LSTMBlockCellGrad(scope *Scope, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, cs_grad tf.Output, h_grad tf.Output, use_peephole bool) (cs_prev_grad tf.Output, dicfo tf.Output, wci_grad tf.Output, wcf_grad tf.Output, wco_grad tf.Output) {
9132	if scope.Err() != nil {
9133		return
9134	}
9135	attrs := map[string]interface{}{"use_peephole": use_peephole}
9136	opspec := tf.OpSpec{
9137		Type: "LSTMBlockCellGrad",
9138		Input: []tf.Input{
9139			x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad,
9140		},
9141		Attrs: attrs,
9142	}
9143	op := scope.AddOperation(opspec)
9144	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
9145}
9146
9147// Computes gradients for the scaled exponential linear (Selu) operation.
9148//
9149// Arguments:
9150//	gradients: The backpropagated gradients to the corresponding Selu operation.
9151//	outputs: The outputs of the corresponding Selu operation.
9152//
9153// Returns The gradients: `gradients * (outputs + scale * alpha)`
9154// if outputs < 0, `scale * gradients` otherwise.
9155func SeluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output) {
9156	if scope.Err() != nil {
9157		return
9158	}
9159	opspec := tf.OpSpec{
9160		Type: "SeluGrad",
9161		Input: []tf.Input{
9162			gradients, outputs,
9163		},
9164	}
9165	op := scope.AddOperation(opspec)
9166	return op.Output(0)
9167}
9168
9169// Computes gradients for the exponential linear (Elu) operation.
9170//
9171// Arguments:
9172//	gradients: The backpropagated gradients to the corresponding Elu operation.
9173//	outputs: The outputs of the corresponding Elu operation.
9174//
9175// Returns The gradients: `gradients * (outputs + 1)` if outputs < 0,
9176// `gradients` otherwise.
9177func EluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output) {
9178	if scope.Err() != nil {
9179		return
9180	}
9181	opspec := tf.OpSpec{
9182		Type: "EluGrad",
9183		Input: []tf.Input{
9184			gradients, outputs,
9185		},
9186	}
9187	op := scope.AddOperation(opspec)
9188	return op.Output(0)
9189}
9190
9191// LeakyReluGradAttr is an optional argument to LeakyReluGrad.
9192type LeakyReluGradAttr func(optionalAttr)
9193
9194// LeakyReluGradAlpha sets the optional alpha attribute to value.
9195// If not specified, defaults to 0.2
9196func LeakyReluGradAlpha(value float32) LeakyReluGradAttr {
9197	return func(m optionalAttr) {
9198		m["alpha"] = value
9199	}
9200}
9201
9202// Computes rectified linear gradients for a LeakyRelu operation.
9203//
9204// Arguments:
9205//	gradients: The backpropagated gradients to the corresponding LeakyRelu operation.
9206//	features: The features passed as input to the corresponding LeakyRelu operation,
9207// OR the outputs of that operation (both work equivalently).
9208//
9209// Returns `gradients * (features > 0) + alpha * gradients * (features <= 0)`.
9210func LeakyReluGrad(scope *Scope, gradients tf.Output, features tf.Output, optional ...LeakyReluGradAttr) (backprops tf.Output) {
9211	if scope.Err() != nil {
9212		return
9213	}
9214	attrs := map[string]interface{}{}
9215	for _, a := range optional {
9216		a(attrs)
9217	}
9218	opspec := tf.OpSpec{
9219		Type: "LeakyReluGrad",
9220		Input: []tf.Input{
9221			gradients, features,
9222		},
9223		Attrs: attrs,
9224	}
9225	op := scope.AddOperation(opspec)
9226	return op.Output(0)
9227}
9228
9229// Computes rectified linear 6 gradients for a Relu6 operation.
9230//
9231// Arguments:
9232//	gradients: The backpropagated gradients to the corresponding Relu6 operation.
9233//	features: The features passed as input to the corresponding Relu6 operation, or
9234// its output; using either one produces the same result.
9235//
9236// Returns The gradients:
9237// `gradients * (features > 0) * (features < 6)`.
9238func Relu6Grad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
9239	if scope.Err() != nil {
9240		return
9241	}
9242	opspec := tf.OpSpec{
9243		Type: "Relu6Grad",
9244		Input: []tf.Input{
9245			gradients, features,
9246		},
9247	}
9248	op := scope.AddOperation(opspec)
9249	return op.Output(0)
9250}
9251
9252// Computes rectified linear gradients for a Relu operation.
9253//
9254// Arguments:
9255//	gradients: The backpropagated gradients to the corresponding Relu operation.
9256//	features: The features passed as input to the corresponding Relu operation, OR
9257// the outputs of that operation (both work equivalently).
9258//
9259// Returns `gradients * (features > 0)`.
9260func ReluGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
9261	if scope.Err() != nil {
9262		return
9263	}
9264	opspec := tf.OpSpec{
9265		Type: "ReluGrad",
9266		Input: []tf.Input{
9267			gradients, features,
9268		},
9269	}
9270	op := scope.AddOperation(opspec)
9271	return op.Output(0)
9272}
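
// Hand-written scalar references (illustrative, not generated) for the three
// rectifier gradients documented above: LeakyRelu scales the negative side by
// alpha, Relu6 passes gradients only where 0 < features < 6, and Relu passes
// gradients where features > 0.
func leakyReluGradRef(gradient, feature, alpha float64) float64 {
	if feature > 0 {
		return gradient
	}
	return alpha * gradient
}

func relu6GradRef(gradient, feature float64) float64 {
	if feature > 0 && feature < 6 {
		return gradient
	}
	return 0
}

func reluGradRef(gradient, feature float64) float64 {
	if feature > 0 {
		return gradient
	}
	return 0
}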
9273
9274// Computes the gradient of morphological 2-D dilation with respect to the filter.
9275//
9276// Arguments:
9277//	input: 4-D with shape `[batch, in_height, in_width, depth]`.
9278//	filter: 3-D with shape `[filter_height, filter_width, depth]`.
9279//	out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
9280//	strides: 1-D of length 4. The stride of the sliding window for each dimension of
9281// the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
9282//	rates: 1-D of length 4. The input stride for atrous morphological dilation.
9283// Must be: `[1, rate_height, rate_width, 1]`.
9284//	padding: The type of padding algorithm to use.
9285//
9286// Returns 3-D with shape `[filter_height, filter_width, depth]`.
9287func Dilation2DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (filter_backprop tf.Output) {
9288	if scope.Err() != nil {
9289		return
9290	}
9291	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
9292	opspec := tf.OpSpec{
9293		Type: "Dilation2DBackpropFilter",
9294		Input: []tf.Input{
9295			input, filter, out_backprop,
9296		},
9297		Attrs: attrs,
9298	}
9299	op := scope.AddOperation(opspec)
9300	return op.Output(0)
9301}
9302
9303// MaxPoolGradGradWithArgmaxAttr is an optional argument to MaxPoolGradGradWithArgmax.
9304type MaxPoolGradGradWithArgmaxAttr func(optionalAttr)
9305
9306// MaxPoolGradGradWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
9307//
9308// value: Whether to include batch dimension in flattened index of `argmax`.
9309// If not specified, defaults to false
9310func MaxPoolGradGradWithArgmaxIncludeBatchInIndex(value bool) MaxPoolGradGradWithArgmaxAttr {
9311	return func(m optionalAttr) {
9312		m["include_batch_in_index"] = value
9313	}
9314}
9315
9316// Computes second-order gradients of the maxpooling function.
9317//
9318// Arguments:
9319//	input: The original input.
9320//	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
9321// input of `max_pool`.
9322//	argmax: The indices of the maximum values chosen for each output of `max_pool`.
9323//	ksize: The size of the window for each dimension of the input tensor.
9324//	strides: The stride of the sliding window for each dimension of the
9325// input tensor.
9326//	padding: The type of padding algorithm to use.
9327//
9328// Returns Gradients of gradients w.r.t. the input of `max_pool`.
9329func MaxPoolGradGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradGradWithArgmaxAttr) (output tf.Output) {
9330	if scope.Err() != nil {
9331		return
9332	}
9333	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
9334	for _, a := range optional {
9335		a(attrs)
9336	}
9337	opspec := tf.OpSpec{
9338		Type: "MaxPoolGradGradWithArgmax",
9339		Input: []tf.Input{
9340			input, grad, argmax,
9341		},
9342		Attrs: attrs,
9343	}
9344	op := scope.AddOperation(opspec)
9345	return op.Output(0)
9346}
9347
9348// MaxPoolGradWithArgmaxAttr is an optional argument to MaxPoolGradWithArgmax.
9349type MaxPoolGradWithArgmaxAttr func(optionalAttr)
9350
9351// MaxPoolGradWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
9352//
9353// value: Whether to include batch dimension in flattened index of `argmax`.
9354// If not specified, defaults to false
9355func MaxPoolGradWithArgmaxIncludeBatchInIndex(value bool) MaxPoolGradWithArgmaxAttr {
9356	return func(m optionalAttr) {
9357		m["include_batch_in_index"] = value
9358	}
9359}
9360
9361// Computes gradients of the maxpooling function.
9362//
9363// Arguments:
9364//	input: The original input.
9365//	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
9366// output of `max_pool`.
9367//	argmax: The indices of the maximum values chosen for each output of `max_pool`.
9368//	ksize: The size of the window for each dimension of the input tensor.
9369//	strides: The stride of the sliding window for each dimension of the
9370// input tensor.
9371//	padding: The type of padding algorithm to use.
9372//
9373// Returns Gradients w.r.t. the input of `max_pool`.
9374func MaxPoolGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradWithArgmaxAttr) (output tf.Output) {
9375	if scope.Err() != nil {
9376		return
9377	}
9378	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
9379	for _, a := range optional {
9380		a(attrs)
9381	}
9382	opspec := tf.OpSpec{
9383		Type: "MaxPoolGradWithArgmax",
9384		Input: []tf.Input{
9385			input, grad, argmax,
9386		},
9387		Attrs: attrs,
9388	}
9389	op := scope.AddOperation(opspec)
9390	return op.Output(0)
9391}
9392
9393// MaxPoolWithArgmaxAttr is an optional argument to MaxPoolWithArgmax.
9394type MaxPoolWithArgmaxAttr func(optionalAttr)
9395
9396// MaxPoolWithArgmaxTargmax sets the optional Targmax attribute to value.
9397// If not specified, defaults to DT_INT64
9398func MaxPoolWithArgmaxTargmax(value tf.DataType) MaxPoolWithArgmaxAttr {
9399	return func(m optionalAttr) {
9400		m["Targmax"] = value
9401	}
9402}
9403
9404// MaxPoolWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
9405//
9406// value: Whether to include batch dimension in flattened index of `argmax`.
9407// If not specified, defaults to false
9408func MaxPoolWithArgmaxIncludeBatchInIndex(value bool) MaxPoolWithArgmaxAttr {
9409	return func(m optionalAttr) {
9410		m["include_batch_in_index"] = value
9411	}
9412}
9413
9414// Performs max pooling on the input and outputs both max values and indices.
9415//
9416// The indices in `argmax` are flattened, so that a maximum value at position
9417// `[b, y, x, c]` becomes flattened index:
9418// `(y * width + x) * channels + c` if `include_batch_in_index` is False;
9419// `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True.
9420//
9421// The indices returned are always in `[0, height) x [0, width)` before flattening,
9422// even if padding is involved and the mathematically correct answer is outside
9423// (either negative or too large).  This is a bug, but fixing it is difficult
9424// to do in a safe, backward-compatible way, especially due to flattening.
9425//
9426// Arguments:
9427//	input: 4-D with shape `[batch, height, width, channels]`.  Input to pool over.
9428//	ksize: The size of the window for each dimension of the input tensor.
9429//	strides: The stride of the sliding window for each dimension of the
9430// input tensor.
9431//	padding: The type of padding algorithm to use.
9432//
9433// Returns:
9434//	output: The max pooled output tensor.
9435//	argmax: 4-D.  The flattened indices of the max values chosen for each output.
9436func MaxPoolWithArgmax(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolWithArgmaxAttr) (output tf.Output, argmax tf.Output) {
9437	if scope.Err() != nil {
9438		return
9439	}
9440	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
9441	for _, a := range optional {
9442		a(attrs)
9443	}
9444	opspec := tf.OpSpec{
9445		Type: "MaxPoolWithArgmax",
9446		Input: []tf.Input{
9447			input,
9448		},
9449		Attrs: attrs,
9450	}
9451	op := scope.AddOperation(opspec)
9452	return op.Output(0), op.Output(1)
9453}
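
// The argmax flattening described above, as a hand-written helper
// (flattenedArgmaxIndex is illustrative, not generated).
func flattenedArgmaxIndex(b, y, x, c, height, width, channels int, includeBatchInIndex bool) int {
	if includeBatchInIndex {
		return ((b*height+y)*width+x)*channels + c
	}
	return (y*width+x)*channels + c
}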
9454
9455// MaxPoolGradGradV2Attr is an optional argument to MaxPoolGradGradV2.
9456type MaxPoolGradGradV2Attr func(optionalAttr)
9457
9458// MaxPoolGradGradV2DataFormat sets the optional data_format attribute to value.
9459//
9460// value: Specify the data format of the input and output data. With the
9461// default format "NHWC", the data is stored in the order of:
9462//     [batch, in_height, in_width, in_channels].
9463// Alternatively, the format could be "NCHW", the data storage order of:
9464//     [batch, in_channels, in_height, in_width].
9465// If not specified, defaults to "NHWC"
9466func MaxPoolGradGradV2DataFormat(value string) MaxPoolGradGradV2Attr {
9467	return func(m optionalAttr) {
9468		m["data_format"] = value
9469	}
9470}
9471
9472// Computes second-order gradients of the maxpooling function.
9473//
9474// Arguments:
9475//	orig_input: The original input tensor.
9476//	orig_output: The original output tensor.
9477//	grad: 4-D.  Gradients of gradients w.r.t. the input of `max_pool`.
9478//	ksize: The size of the window for each dimension of the input tensor.
9479//	strides: The stride of the sliding window for each dimension of the
9480// input tensor.
9481//	padding: The type of padding algorithm to use.
9482//
9483// Returns Gradients of gradients w.r.t. the input to `max_pool`.
9484func MaxPoolGradGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradGradV2Attr) (output tf.Output) {
9485	if scope.Err() != nil {
9486		return
9487	}
9488	attrs := map[string]interface{}{"padding": padding}
9489	for _, a := range optional {
9490		a(attrs)
9491	}
9492	opspec := tf.OpSpec{
9493		Type: "MaxPoolGradGradV2",
9494		Input: []tf.Input{
9495			orig_input, orig_output, grad, ksize, strides,
9496		},
9497		Attrs: attrs,
9498	}
9499	op := scope.AddOperation(opspec)
9500	return op.Output(0)
9501}
9502
9503// MaxPoolV2Attr is an optional argument to MaxPoolV2.
9504type MaxPoolV2Attr func(optionalAttr)
9505
9506// MaxPoolV2DataFormat sets the optional data_format attribute to value.
9507//
9508// value: Specify the data format of the input and output data. With the
9509// default format "NHWC", the data is stored in the order of:
9510//     [batch, in_height, in_width, in_channels].
9511// Alternatively, the format could be "NCHW", the data storage order of:
9512//     [batch, in_channels, in_height, in_width].
9513// If not specified, defaults to "NHWC"
9514func MaxPoolV2DataFormat(value string) MaxPoolV2Attr {
9515	return func(m optionalAttr) {
9516		m["data_format"] = value
9517	}
9518}
9519
9520// Performs max pooling on the input.
9521//
9522// Arguments:
9523//	input: 4-D input to pool over.
9524//	ksize: The size of the window for each dimension of the input tensor.
9525//	strides: The stride of the sliding window for each dimension of the
9526// input tensor.
9527//	padding: The type of padding algorithm to use.
9528//
9529// Returns The max pooled output tensor.
9530func MaxPoolV2(scope *Scope, input tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolV2Attr) (output tf.Output) {
9531	if scope.Err() != nil {
9532		return
9533	}
9534	attrs := map[string]interface{}{"padding": padding}
9535	for _, a := range optional {
9536		a(attrs)
9537	}
9538	opspec := tf.OpSpec{
9539		Type: "MaxPoolV2",
9540		Input: []tf.Input{
9541			input, ksize, strides,
9542		},
9543		Attrs: attrs,
9544	}
9545	op := scope.AddOperation(opspec)
9546	return op.Output(0)
9547}
9548
9549// LRNGradAttr is an optional argument to LRNGrad.
9550type LRNGradAttr func(optionalAttr)
9551
9552// LRNGradDepthRadius sets the optional depth_radius attribute to value.
9553//
9554// value: A depth radius.
9555// If not specified, defaults to 5
9556func LRNGradDepthRadius(value int64) LRNGradAttr {
9557	return func(m optionalAttr) {
9558		m["depth_radius"] = value
9559	}
9560}
9561
9562// LRNGradBias sets the optional bias attribute to value.
9563//
9564// value: An offset (usually > 0 to avoid dividing by 0).
9565// If not specified, defaults to 1
9566func LRNGradBias(value float32) LRNGradAttr {
9567	return func(m optionalAttr) {
9568		m["bias"] = value
9569	}
9570}
9571
9572// LRNGradAlpha sets the optional alpha attribute to value.
9573//
9574// value: A scale factor, usually positive.
9575// If not specified, defaults to 1
9576func LRNGradAlpha(value float32) LRNGradAttr {
9577	return func(m optionalAttr) {
9578		m["alpha"] = value
9579	}
9580}
9581
9582// LRNGradBeta sets the optional beta attribute to value.
9583//
9584// value: An exponent.
9585// If not specified, defaults to 0.5
9586func LRNGradBeta(value float32) LRNGradAttr {
9587	return func(m optionalAttr) {
9588		m["beta"] = value
9589	}
9590}
9591
9592// Gradients for Local Response Normalization.
9593//
9594// Arguments:
9595//	input_grads: 4-D with shape `[batch, height, width, channels]`.
9596//	input_image: 4-D with shape `[batch, height, width, channels]`.
9597//	output_image: 4-D with shape `[batch, height, width, channels]`.
9598//
9599// Returns The gradients for LRN.
9600func LRNGrad(scope *Scope, input_grads tf.Output, input_image tf.Output, output_image tf.Output, optional ...LRNGradAttr) (output tf.Output) {
9601	if scope.Err() != nil {
9602		return
9603	}
9604	attrs := map[string]interface{}{}
9605	for _, a := range optional {
9606		a(attrs)
9607	}
9608	opspec := tf.OpSpec{
9609		Type: "LRNGrad",
9610		Input: []tf.Input{
9611			input_grads, input_image, output_image,
9612		},
9613		Attrs: attrs,
9614	}
9615	op := scope.AddOperation(opspec)
9616	return op.Output(0)
9617}
9618
9619// MaxPool3DGradGradAttr is an optional argument to MaxPool3DGradGrad.
9620type MaxPool3DGradGradAttr func(optionalAttr)
9621
9622// MaxPool3DGradGradDataFormat sets the optional data_format attribute to value.
9623//
9624// value: The data format of the input and output data. With the
9625// default format "NDHWC", the data is stored in the order of:
9626//     [batch, in_depth, in_height, in_width, in_channels].
9627// Alternatively, the format could be "NCDHW", the data storage order is:
9628//     [batch, in_channels, in_depth, in_height, in_width].
9629// If not specified, defaults to "NDHWC"
9630func MaxPool3DGradGradDataFormat(value string) MaxPool3DGradGradAttr {
9631	return func(m optionalAttr) {
9632		m["data_format"] = value
9633	}
9634}
9635
9636// Computes second-order gradients of the maxpooling function.
9637//
9638// Arguments:
9639//	orig_input: The original input tensor.
9640//	orig_output: The original output tensor.
9641//	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
9642//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
9643// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
9644//	strides: 1-D tensor of length 5. The stride of the sliding window for each
9645// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
9646//	padding: The type of padding algorithm to use.
9647//
9648// Returns Gradients of gradients w.r.t. the input to `max_pool`.
9649func MaxPool3DGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradGradAttr) (output tf.Output) {
9650	if scope.Err() != nil {
9651		return
9652	}
9653	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
9654	for _, a := range optional {
9655		a(attrs)
9656	}
9657	opspec := tf.OpSpec{
9658		Type: "MaxPool3DGradGrad",
9659		Input: []tf.Input{
9660			orig_input, orig_output, grad,
9661		},
9662		Attrs: attrs,
9663	}
9664	op := scope.AddOperation(opspec)
9665	return op.Output(0)
9666}
9667
9668// ResourceApplyRMSPropAttr is an optional argument to ResourceApplyRMSProp.
9669type ResourceApplyRMSPropAttr func(optionalAttr)
9670
9671// ResourceApplyRMSPropUseLocking sets the optional use_locking attribute to value.
9672//
9673// value: If `True`, updating of the var, ms, and mom tensors is protected
9674// by a lock; otherwise the behavior is undefined, but may exhibit less
9675// contention.
9676// If not specified, defaults to false
9677func ResourceApplyRMSPropUseLocking(value bool) ResourceApplyRMSPropAttr {
9678	return func(m optionalAttr) {
9679		m["use_locking"] = value
9680	}
9681}
9682
9683// Update '*var' according to the RMSProp algorithm.
9684//
9685// Note that in the dense implementation of this algorithm, ms and mom will
9686// update even if the grad is zero, but in the sparse implementation, ms
9687// and mom will not update in iterations during which the grad is zero.
9688//
9689// mean_square = decay * mean_square + (1-decay) * gradient ** 2
9690// Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
9691//
9692// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
9693// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
9694// var <- var - mom
9695//
9696// Arguments:
9697//	var_: Should be from a Variable().
9698//	ms: Should be from a Variable().
9699//	mom: Should be from a Variable().
9700//	lr: Scaling factor. Must be a scalar.
9701//	rho: Decay rate. Must be a scalar.
9702//	momentum: Momentum. Must be a scalar.
9703//	epsilon: Ridge term. Must be a scalar.
9704//	grad: The gradient.
9705//
9706// Returns the created operation.
9707func ResourceApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyRMSPropAttr) (o *tf.Operation) {
9708	if scope.Err() != nil {
9709		return
9710	}
9711	attrs := map[string]interface{}{}
9712	for _, a := range optional {
9713		a(attrs)
9714	}
9715	opspec := tf.OpSpec{
9716		Type: "ResourceApplyRMSProp",
9717		Input: []tf.Input{
9718			var_, ms, mom, lr, rho, momentum, epsilon, grad,
9719		},
9720		Attrs: attrs,
9721	}
9722	return scope.AddOperation(opspec)
9723}
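
// The update rule above, as a hand-written scalar sketch (rmsPropStepRef is
// illustrative, not generated; sqrt is injected, e.g. math.Sqrt). It returns
// the new var, ms and mom values for a single parameter.
func rmsPropStepRef(v, ms, mom, lr, rho, momentum, epsilon, grad float64, sqrt func(float64) float64) (newVar, newMs, newMom float64) {
	newMs = rho*ms + (1-rho)*grad*grad
	newMom = momentum*mom + lr*grad/sqrt(newMs+epsilon)
	newVar = v - newMom
	return
}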
9724
9725// MaxPool3DGradAttr is an optional argument to MaxPool3DGrad.
9726type MaxPool3DGradAttr func(optionalAttr)
9727
9728// MaxPool3DGradDataFormat sets the optional data_format attribute to value.
9729//
9730// value: The data format of the input and output data. With the
9731// default format "NDHWC", the data is stored in the order of:
9732//     [batch, in_depth, in_height, in_width, in_channels].
9733// Alternatively, the format could be "NCDHW", the data storage order is:
9734//     [batch, in_channels, in_depth, in_height, in_width].
9735// If not specified, defaults to "NDHWC"
9736func MaxPool3DGradDataFormat(value string) MaxPool3DGradAttr {
9737	return func(m optionalAttr) {
9738		m["data_format"] = value
9739	}
9740}
9741
9742// Computes gradients of 3D max pooling function.
9743//
9744// Arguments:
9745//	orig_input: The original input tensor.
9746//	orig_output: The original output tensor.
9747//	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
9748//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
9749// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
9750//	strides: 1-D tensor of length 5. The stride of the sliding window for each
9751// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
9752//	padding: The type of padding algorithm to use.
9753func MaxPool3DGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradAttr) (output tf.Output) {
9754	if scope.Err() != nil {
9755		return
9756	}
9757	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
9758	for _, a := range optional {
9759		a(attrs)
9760	}
9761	opspec := tf.OpSpec{
9762		Type: "MaxPool3DGrad",
9763		Input: []tf.Input{
9764			orig_input, orig_output, grad,
9765		},
9766		Attrs: attrs,
9767	}
9768	op := scope.AddOperation(opspec)
9769	return op.Output(0)
9770}
9771
9772// MaxPool3DAttr is an optional argument to MaxPool3D.
9773type MaxPool3DAttr func(optionalAttr)
9774
9775// MaxPool3DDataFormat sets the optional data_format attribute to value.
9776//
9777// value: The data format of the input and output data. With the
9778// default format "NDHWC", the data is stored in the order of:
9779//     [batch, in_depth, in_height, in_width, in_channels].
9780// Alternatively, the format could be "NCDHW", the data storage order is:
9781//     [batch, in_channels, in_depth, in_height, in_width].
9782// If not specified, defaults to "NDHWC"
9783func MaxPool3DDataFormat(value string) MaxPool3DAttr {
9784	return func(m optionalAttr) {
9785		m["data_format"] = value
9786	}
9787}
9788
9789// Performs 3D max pooling on the input.
9790//
9791// Arguments:
9792//	input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
9793//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
9794// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
9795//	strides: 1-D tensor of length 5. The stride of the sliding window for each
9796// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
9797//	padding: The type of padding algorithm to use.
9798//
9799// Returns The max pooled output tensor.
9800func MaxPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DAttr) (output tf.Output) {
9801	if scope.Err() != nil {
9802		return
9803	}
9804	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
9805	for _, a := range optional {
9806		a(attrs)
9807	}
9808	opspec := tf.OpSpec{
9809		Type: "MaxPool3D",
9810		Input: []tf.Input{
9811			input,
9812		},
9813		Attrs: attrs,
9814	}
9815	op := scope.AddOperation(opspec)
9816	return op.Output(0)
9817}
9818
9819// AvgPool3DAttr is an optional argument to AvgPool3D.
9820type AvgPool3DAttr func(optionalAttr)
9821
9822// AvgPool3DDataFormat sets the optional data_format attribute to value.
9823//
9824// value: The data format of the input and output data. With the
9825// default format "NDHWC", the data is stored in the order of:
9826//     [batch, in_depth, in_height, in_width, in_channels].
9827// Alternatively, the format could be "NCDHW", the data storage order is:
9828//     [batch, in_channels, in_depth, in_height, in_width].
9829// If not specified, defaults to "NDHWC"
9830func AvgPool3DDataFormat(value string) AvgPool3DAttr {
9831	return func(m optionalAttr) {
9832		m["data_format"] = value
9833	}
9834}
9835
9836// Performs 3D average pooling on the input.
9837//
9838// Each entry in `output` is the mean of the corresponding size `ksize` window in
9839// `value`.
9840//
9841// Arguments:
9842//	input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
9843//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
9844// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
9845//	strides: 1-D tensor of length 5. The stride of the sliding window for each
9846// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
9847//	padding: The type of padding algorithm to use.
9848//
9849// Returns The average pooled output tensor.
9850func AvgPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DAttr) (output tf.Output) {
9851	if scope.Err() != nil {
9852		return
9853	}
9854	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
9855	for _, a := range optional {
9856		a(attrs)
9857	}
9858	opspec := tf.OpSpec{
9859		Type: "AvgPool3D",
9860		Input: []tf.Input{
9861			input,
9862		},
9863		Attrs: attrs,
9864	}
9865	op := scope.AddOperation(opspec)
9866	return op.Output(0)
9867}
9868
9869// QuantizedMatMulWithBiasAndReluAndRequantizeAttr is an optional argument to QuantizedMatMulWithBiasAndReluAndRequantize.
9870type QuantizedMatMulWithBiasAndReluAndRequantizeAttr func(optionalAttr)
9871
9872// QuantizedMatMulWithBiasAndReluAndRequantizeToutput sets the optional Toutput attribute to value.
9873// If not specified, defaults to DT_QUINT8
9874func QuantizedMatMulWithBiasAndReluAndRequantizeToutput(value tf.DataType) QuantizedMatMulWithBiasAndReluAndRequantizeAttr {
9875	return func(m optionalAttr) {
9876		m["Toutput"] = value
9877	}
9878}
9879
9880// QuantizedMatMulWithBiasAndReluAndRequantizeTransposeA sets the optional transpose_a attribute to value.
9881//
9882// value: If true, `a` is transposed before multiplication.
9883// If not specified, defaults to false
9884func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeA(value bool) QuantizedMatMulWithBiasAndReluAndRequantizeAttr {
9885	return func(m optionalAttr) {
9886		m["transpose_a"] = value
9887	}
9888}
9889
9890// QuantizedMatMulWithBiasAndReluAndRequantizeTransposeB sets the optional transpose_b attribute to value.
9891//
9892// value: If true, `b` is transposed before multiplication.
9893// If not specified, defaults to false
9894func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeB(value bool) QuantizedMatMulWithBiasAndReluAndRequantizeAttr {
9895	return func(m optionalAttr) {
9896		m["transpose_b"] = value
9897	}
9898}
9899
9900// QuantizedMatMulWithBiasAndReluAndRequantizeInputQuantMode sets the optional input_quant_mode attribute to value.
9901//
9902// value: Input data quantization mode. Either MIN_FIRST(default) or SCALED.
9903// If not specified, defaults to "MIN_FIRST"
9904func QuantizedMatMulWithBiasAndReluAndRequantizeInputQuantMode(value string) QuantizedMatMulWithBiasAndReluAndRequantizeAttr {
9905	return func(m optionalAttr) {
9906		m["input_quant_mode"] = value
9907	}
9908}
9909
9910// Performs a quantized matrix multiplication of `a` by the matrix `b`, with
9911// bias add, relu, and requantize fusion.
9912//
9913// The inputs must be two-dimensional matrices and a 1D bias vector, and the
9914// inner dimension of `a` (after being transposed if `transpose_a` is non-zero)
9915// must match the outer dimension of `b` (after being transposed if
9916// `transpose_b` is non-zero). A broadcast add of the bias values is then applied
9917// to the matrix multiplication result; the bias size must match the inner
9918// dimension of `b`. A relu activation then yields a non-negative result, and a
9919// final requantize operation produces the uint8 result.
9920//
9921// Arguments:
9922//	a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
9923//	b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
9924//	bias: A 1D bias tensor with size matching the inner dimension of `b` (after
9925// being transposed if `transpose_b` is non-zero).
9926//	min_a: The float value that the lowest quantized `a` value represents.
9927//	max_a: The float value that the highest quantized `a` value represents.
9928//	min_b: The float value that the lowest quantized `b` value represents.
9929//	max_b: The float value that the highest quantized `b` value represents.
9930//	min_freezed_output: The float value that the lowest quantized output value after requantize represents.
9931//	max_freezed_output: The float value that the highest quantized output value after requantize represents.
9932//
9933// Returns:
9934//	out
9935//	min_out: The float value that the lowest quantized output value represents.
9936//	max_out: The float value that the highest quantized output value represents.
9937func QuantizedMatMulWithBiasAndReluAndRequantize(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, min_freezed_output tf.Output, max_freezed_output tf.Output, optional ...QuantizedMatMulWithBiasAndReluAndRequantizeAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
9938	if scope.Err() != nil {
9939		return
9940	}
9941	attrs := map[string]interface{}{}
9942	for _, a := range optional {
9943		a(attrs)
9944	}
9945	opspec := tf.OpSpec{
9946		Type: "QuantizedMatMulWithBiasAndReluAndRequantize",
9947		Input: []tf.Input{
9948			a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output,
9949		},
9950		Attrs: attrs,
9951	}
9952	op := scope.AddOperation(opspec)
9953	return op.Output(0), op.Output(1), op.Output(2)
9954}
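
// The fusion order described above, as a hand-written float reference for a
// single output element (matMulBiasReluRef is illustrative, not generated).
// The final requantize step, which maps the float result onto the
// [min_freezed_output, max_freezed_output] range, is omitted here.
func matMulBiasReluRef(aRow, bCol []float64, bias float64) float64 {
	sum := bias // broadcast bias add
	for i := range aRow {
		sum += aRow[i] * bCol[i] // inner-product step of the matmul
	}
	if sum < 0 {
		return 0 // relu
	}
	return sum
}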
9955
9956// LRNAttr is an optional argument to LRN.
9957type LRNAttr func(optionalAttr)
9958
9959// LRNDepthRadius sets the optional depth_radius attribute to value.
9960//
9961// value: 0-D.  Half-width of the 1-D normalization window.
9962// If not specified, defaults to 5
9963func LRNDepthRadius(value int64) LRNAttr {
9964	return func(m optionalAttr) {
9965		m["depth_radius"] = value
9966	}
9967}
9968
9969// LRNBias sets the optional bias attribute to value.
9970//
9971// value: An offset (usually positive to avoid dividing by 0).
9972// If not specified, defaults to 1
9973func LRNBias(value float32) LRNAttr {
9974	return func(m optionalAttr) {
9975		m["bias"] = value
9976	}
9977}
9978
9979// LRNAlpha sets the optional alpha attribute to value.
9980//
9981// value: A scale factor, usually positive.
9982// If not specified, defaults to 1
9983func LRNAlpha(value float32) LRNAttr {
9984	return func(m optionalAttr) {
9985		m["alpha"] = value
9986	}
9987}
9988
9989// LRNBeta sets the optional beta attribute to value.
9990//
9991// value: An exponent.
9992// If not specified, defaults to 0.5
9993func LRNBeta(value float32) LRNAttr {
9994	return func(m optionalAttr) {
9995		m["beta"] = value
9996	}
9997}
9998
9999// Local Response Normalization.
10000//
10001// The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
10002// dimension), and each vector is normalized independently.  Within a given vector,
10003// each component is divided by the weighted, squared sum of inputs within
10004// `depth_radius`.  In detail,
10005//
10006//     sqr_sum[a, b, c, d] =
10007//         sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
10008//     output = input / (bias + alpha * sqr_sum) ** beta
10009//
10010// For details, see [Krizhevsky et al., ImageNet classification with deep
10011// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
10012//
10013// Arguments:
10014//	input: 4-D.
10015func LRN(scope *Scope, input tf.Output, optional ...LRNAttr) (output tf.Output) {
10016	if scope.Err() != nil {
10017		return
10018	}
10019	attrs := map[string]interface{}{}
10020	for _, a := range optional {
10021		a(attrs)
10022	}
10023	opspec := tf.OpSpec{
10024		Type: "LRN",
10025		Input: []tf.Input{
10026			input,
10027		},
10028		Attrs: attrs,
10029	}
10030	op := scope.AddOperation(opspec)
10031	return op.Output(0)
10032}
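
// The two formulas above, as a hand-written reference over the depth vector
// of a single pixel (lrnRef is illustrative, not generated; pow is injected,
// e.g. math.Pow).
func lrnRef(input []float64, depthRadius int, bias, alpha, beta float64, pow func(x, y float64) float64) []float64 {
	out := make([]float64, len(input))
	for d := range input {
		sqrSum := 0.0
		for j := d - depthRadius; j <= d+depthRadius; j++ {
			if j >= 0 && j < len(input) {
				sqrSum += input[j] * input[j]
			}
		}
		out[d] = input[d] / pow(bias+alpha*sqrSum, beta)
	}
	return out
}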
10033
10034// Conv3DBackpropInputV2Attr is an optional argument to Conv3DBackpropInputV2.
10035type Conv3DBackpropInputV2Attr func(optionalAttr)
10036
10037// Conv3DBackpropInputV2DataFormat sets the optional data_format attribute to value.
10038//
10039// value: The data format of the input and output data. With the
10040// default format "NDHWC", the data is stored in the order of:
10041//     [batch, in_depth, in_height, in_width, in_channels].
10042// Alternatively, the format could be "NCDHW", the data storage order is:
10043//     [batch, in_channels, in_depth, in_height, in_width].
10044// If not specified, defaults to "NDHWC"
10045func Conv3DBackpropInputV2DataFormat(value string) Conv3DBackpropInputV2Attr {
10046	return func(m optionalAttr) {
10047		m["data_format"] = value
10048	}
10049}
10050
10051// Conv3DBackpropInputV2Dilations sets the optional dilations attribute to value.
10052//
10053// value: 1-D tensor of length 5.  The dilation factor for each dimension of
10054// `input`. If set to k > 1, there will be k-1 skipped cells between each
10055// filter element on that dimension. The dimension order is determined by the
10056// value of `data_format`, see above for details. Dilations in the batch and
10057// depth dimensions must be 1.
10058// If not specified, defaults to [1, 1, 1, 1, 1]
10059func Conv3DBackpropInputV2Dilations(value []int64) Conv3DBackpropInputV2Attr {
10060	return func(m optionalAttr) {
10061		m["dilations"] = value
10062	}
10063}
10064
10065// Computes the gradients of 3-D convolution with respect to the input.
10066//
10067// Arguments:
10068//	input_sizes: An integer vector representing the tensor shape of `input`,
10069// where `input` is a 5-D
10070// `[batch, depth, rows, cols, in_channels]` tensor.
10071//	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
10072// `in_channels` must match between `input` and `filter`.
10073//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
10074// out_channels]`.
10075//	strides: 1-D tensor of length 5. The stride of the sliding window for each
10076// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
10077//	padding: The type of padding algorithm to use.
10078func Conv3DBackpropInputV2(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputV2Attr) (output tf.Output) {
10079	if scope.Err() != nil {
10080		return
10081	}
10082	attrs := map[string]interface{}{"strides": strides, "padding": padding}
10083	for _, a := range optional {
10084		a(attrs)
10085	}
10086	opspec := tf.OpSpec{
10087		Type: "Conv3DBackpropInputV2",
10088		Input: []tf.Input{
10089			input_sizes, filter, out_backprop,
10090		},
10091		Attrs: attrs,
10092	}
10093	op := scope.AddOperation(opspec)
10094	return op.Output(0)
10095}
10096
10097// Conv3DBackpropFilterAttr is an optional argument to Conv3DBackpropFilter.
10098type Conv3DBackpropFilterAttr func(optionalAttr)
10099
10100// Conv3DBackpropFilterDilations sets the optional dilations attribute to value.
10101// If not specified, defaults to [1, 1, 1, 1, 1]
10102func Conv3DBackpropFilterDilations(value []int64) Conv3DBackpropFilterAttr {
10103	return func(m optionalAttr) {
10104		m["dilations"] = value
10105	}
10106}
10107
10108// Computes the gradients of 3-D convolution with respect to the filter.
10109//
10110// DEPRECATED at GraphDef version 10: Use Conv3DBackpropFilterV2
10111//
10112// Arguments:
10113//	input: Shape `[batch, depth, rows, cols, in_channels]`.
10114//	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
10115// `in_channels` must match between `input` and `filter`.
10116//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
10117// out_channels]`.
10118//	strides: 1-D tensor of length 5. The stride of the sliding window for each
10119// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
10120//	padding: The type of padding algorithm to use.
10121func Conv3DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropFilterAttr) (output tf.Output) {
10122	if scope.Err() != nil {
10123		return
10124	}
10125	attrs := map[string]interface{}{"strides": strides, "padding": padding}
10126	for _, a := range optional {
10127		a(attrs)
10128	}
10129	opspec := tf.OpSpec{
10130		Type: "Conv3DBackpropFilter",
10131		Input: []tf.Input{
10132			input, filter, out_backprop,
10133		},
10134		Attrs: attrs,
10135	}
10136	op := scope.AddOperation(opspec)
10137	return op.Output(0)
10138}
10139
10140// Conv3DBackpropInputAttr is an optional argument to Conv3DBackpropInput.
10141type Conv3DBackpropInputAttr func(optionalAttr)
10142
10143// Conv3DBackpropInputDilations sets the optional dilations attribute to value.
10144// If not specified, defaults to [1, 1, 1, 1, 1]
10145func Conv3DBackpropInputDilations(value []int64) Conv3DBackpropInputAttr {
10146	return func(m optionalAttr) {
10147		m["dilations"] = value
10148	}
10149}
10150
10151// Computes the gradients of 3-D convolution with respect to the input.
10152//
10153// DEPRECATED at GraphDef version 10: Use Conv3DBackpropInputV2
10154//
10155// Arguments:
10156//	input: Shape `[batch, depth, rows, cols, in_channels]`.
10157//	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
10158// `in_channels` must match between `input` and `filter`.
10159//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
10160// out_channels]`.
10161//	strides: 1-D tensor of length 5. The stride of the sliding window for each
10162// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
10163//	padding: The type of padding algorithm to use.
10164func Conv3DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputAttr) (output tf.Output) {
10165	if scope.Err() != nil {
10166		return
10167	}
10168	attrs := map[string]interface{}{"strides": strides, "padding": padding}
10169	for _, a := range optional {
10170		a(attrs)
10171	}
10172	opspec := tf.OpSpec{
10173		Type: "Conv3DBackpropInput",
10174		Input: []tf.Input{
10175			input, filter, out_backprop,
10176		},
10177		Attrs: attrs,
10178	}
10179	op := scope.AddOperation(opspec)
10180	return op.Output(0)
10181}
10182
10183// Conv3DAttr is an optional argument to Conv3D.
10184type Conv3DAttr func(optionalAttr)
10185
10186// Conv3DDataFormat sets the optional data_format attribute to value.
10187//
10188// value: The data format of the input and output data. With the
10189// default format "NDHWC", the data is stored in the order of:
10190//     [batch, in_depth, in_height, in_width, in_channels].
10191// Alternatively, the format could be "NCDHW", the data storage order is:
10192//     [batch, in_channels, in_depth, in_height, in_width].
10193// If not specified, defaults to "NDHWC"
10194func Conv3DDataFormat(value string) Conv3DAttr {
10195	return func(m optionalAttr) {
10196		m["data_format"] = value
10197	}
10198}
10199
10200// Conv3DDilations sets the optional dilations attribute to value.
10201//
10202// value: 1-D tensor of length 5.  The dilation factor for each dimension of
10203// `input`. If set to k > 1, there will be k-1 skipped cells between each
10204// filter element on that dimension. The dimension order is determined by the
10205// value of `data_format`, see above for details. Dilations in the batch and
10206// depth dimensions must be 1.
10207// If not specified, defaults to [1, 1, 1, 1, 1]
10208func Conv3DDilations(value []int64) Conv3DAttr {
10209	return func(m optionalAttr) {
10210		m["dilations"] = value
10211	}
10212}
10213
10214// Computes a 3-D convolution given 5-D `input` and `filter` tensors.
10215//
10216// In signal processing, cross-correlation is a measure of similarity of
10217// two waveforms as a function of a time-lag applied to one of them. This
10218// is also known as a sliding dot product or sliding inner-product.
10219//
10220// Our Conv3D implements a form of cross-correlation.
10221//
10222// Arguments:
10223//	input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.
10224//	filter: Shape `[filter_depth, filter_height, filter_width, in_channels,
10225// out_channels]`. `in_channels` must match between `input` and `filter`.
10226//	strides: 1-D tensor of length 5. The stride of the sliding window for each
10227// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
10228//	padding: The type of padding algorithm to use.
10229func Conv3D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv3DAttr) (output tf.Output) {
10230	if scope.Err() != nil {
10231		return
10232	}
10233	attrs := map[string]interface{}{"strides": strides, "padding": padding}
10234	for _, a := range optional {
10235		a(attrs)
10236	}
10237	opspec := tf.OpSpec{
10238		Type: "Conv3D",
10239		Input: []tf.Input{
10240			input, filter,
10241		},
10242		Attrs: attrs,
10243	}
10244	op := scope.AddOperation(opspec)
10245	return op.Output(0)
10246}
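
// Example (illustrative sketch, not part of the generated API): wiring Conv3D
// into a graph with the default NDHWC layout. The placeholder names and the
// stride of 1 in every dimension are assumptions for the example.
//
//	s := op.NewScope()
//	input := op.Placeholder(s.SubScope("input"), tf.Float)   // [batch, d, h, w, in_channels]
//	filter := op.Placeholder(s.SubScope("filter"), tf.Float) // [fd, fh, fw, in_channels, out_channels]
//	out := op.Conv3D(s, input, filter, []int64{1, 1, 1, 1, 1}, "SAME")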
10247
10248// Adds a value to the current value of a variable.
10249//
10250// Any ReadVariableOp with a control dependency on this op is guaranteed to
10251// see the incremented value or a subsequent newer one.
10252//
10253// Arguments:
10254//	resource: handle to the resource in which to store the variable.
10255//	value: the value by which the variable will be incremented.
10256//
10257// Returns the created operation.
10258func AssignAddVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
10259	if scope.Err() != nil {
10260		return
10261	}
10262	opspec := tf.OpSpec{
10263		Type: "AssignAddVariableOp",
10264		Input: []tf.Input{
10265			resource, value,
10266		},
10267	}
10268	return scope.AddOperation(opspec)
10269}
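
// Example (illustrative sketch): incrementing a resource variable, assuming
// the VarHandleOp, AssignVariableOp, and ReadVariableOp wrappers defined
// elsewhere in this package.
//
//	s := op.NewScope()
//	v := op.VarHandleOp(s, tf.Float, tf.ScalarShape())
//	init := op.AssignVariableOp(s.SubScope("init"), v, op.Const(s, float32(0)))
//	add := op.AssignAddVariableOp(s.SubScope("add"), v, op.Const(s, float32(1)))
//	read := op.ReadVariableOp(s, v, tf.Float)
//
// Running `read` with a control dependency on `add` (after running `init`)
// is guaranteed to observe the incremented value.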
10270
10271// DepthwiseConv2dNativeBackpropInputAttr is an optional argument to DepthwiseConv2dNativeBackpropInput.
10272type DepthwiseConv2dNativeBackpropInputAttr func(optionalAttr)
10273
10274// DepthwiseConv2dNativeBackpropInputExplicitPaddings sets the optional explicit_paddings attribute to value.
10275// If not specified, defaults to {}
10276func DepthwiseConv2dNativeBackpropInputExplicitPaddings(value []int64) DepthwiseConv2dNativeBackpropInputAttr {
10277	return func(m optionalAttr) {
10278		m["explicit_paddings"] = value
10279	}
10280}
10281
10282// DepthwiseConv2dNativeBackpropInputDataFormat sets the optional data_format attribute to value.
10283//
10284// value: Specify the data format of the input and output data. With the
10285// default format "NHWC", the data is stored in the order of:
10286//     [batch, height, width, channels].
10287// Alternatively, the format could be "NCHW", the data storage order of:
10288//     [batch, channels, height, width].
10289// If not specified, defaults to "NHWC"
10290func DepthwiseConv2dNativeBackpropInputDataFormat(value string) DepthwiseConv2dNativeBackpropInputAttr {
10291	return func(m optionalAttr) {
10292		m["data_format"] = value
10293	}
10294}
10295
10296// DepthwiseConv2dNativeBackpropInputDilations sets the optional dilations attribute to value.
10297//
10298// value: 1-D tensor of length 4.  The dilation factor for each dimension of
10299// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
10300// element on that dimension. The dimension order is determined by the value of
10301// `data_format`, see above for details. Dilations in the batch and depth
10302// dimensions must be 1.
10303// If not specified, defaults to [1, 1, 1, 1]
10304func DepthwiseConv2dNativeBackpropInputDilations(value []int64) DepthwiseConv2dNativeBackpropInputAttr {
10305	return func(m optionalAttr) {
10306		m["dilations"] = value
10307	}
10308}
10309
10310// Computes the gradients of depthwise convolution with respect to the input.
10311//
10312// Arguments:
10313//	input_sizes: An integer vector representing the shape of `input`, based
10314// on `data_format`.  For example, if `data_format` is 'NHWC' then
10315//  `input` is a 4-D `[batch, height, width, channels]` tensor.
10316//	filter: 4-D with shape
10317// `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
10318//	out_backprop: 4-D with shape based on `data_format`.
10319// For example, if `data_format` is 'NHWC' then
10320// out_backprop shape is `[batch, out_height, out_width, out_channels]`.
10321// Gradients w.r.t. the output of the convolution.
10322//	strides: The stride of the sliding window for each dimension of the input
10323// of the convolution.
10324//	padding: The type of padding algorithm to use.
10325//
10326// Returns 4-D with shape according to `data_format`.  For example, if
10327// `data_format` is 'NHWC', output shape is `[batch, in_height,
10328// in_width, in_channels]`.  Gradient w.r.t. the input of the
10329// convolution.
10330func DepthwiseConv2dNativeBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropInputAttr) (output tf.Output) {
10331	if scope.Err() != nil {
10332		return
10333	}
10334	attrs := map[string]interface{}{"strides": strides, "padding": padding}
10335	for _, a := range optional {
10336		a(attrs)
10337	}
10338	opspec := tf.OpSpec{
10339		Type: "DepthwiseConv2dNativeBackpropInput",
10340		Input: []tf.Input{
10341			input_sizes, filter, out_backprop,
10342		},
10343		Attrs: attrs,
10344	}
10345	op := scope.AddOperation(opspec)
10346	return op.Output(0)
10347}
10348
10349// DepthwiseConv2dNativeAttr is an optional argument to DepthwiseConv2dNative.
10350type DepthwiseConv2dNativeAttr func(optionalAttr)
10351
10352// DepthwiseConv2dNativeExplicitPaddings sets the optional explicit_paddings attribute to value.
10353// If not specified, defaults to {}
10354func DepthwiseConv2dNativeExplicitPaddings(value []int64) DepthwiseConv2dNativeAttr {
10355	return func(m optionalAttr) {
10356		m["explicit_paddings"] = value
10357	}
10358}
10359
10360// DepthwiseConv2dNativeDataFormat sets the optional data_format attribute to value.
10361//
10362// value: Specify the data format of the input and output data. With the
10363// default format "NHWC", the data is stored in the order of:
10364//     [batch, height, width, channels].
10365// Alternatively, the format could be "NCHW", the data storage order of:
10366//     [batch, channels, height, width].
10367// If not specified, defaults to "NHWC"
10368func DepthwiseConv2dNativeDataFormat(value string) DepthwiseConv2dNativeAttr {
10369	return func(m optionalAttr) {
10370		m["data_format"] = value
10371	}
10372}
10373
10374// DepthwiseConv2dNativeDilations sets the optional dilations attribute to value.
10375//
10376// value: 1-D tensor of length 4.  The dilation factor for each dimension of
10377// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
10378// element on that dimension. The dimension order is determined by the value of
10379// `data_format`, see above for details. Dilations in the batch and depth
10380// dimensions must be 1.
10381// If not specified, defaults to [1, 1, 1, 1]
10382func DepthwiseConv2dNativeDilations(value []int64) DepthwiseConv2dNativeAttr {
10383	return func(m optionalAttr) {
10384		m["dilations"] = value
10385	}
10386}
10387
10388// Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
10389//
10390// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
10391// and a filter / kernel tensor of shape
10392// `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
10393// `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
10394// a different filter to each input channel (expanding from 1 channel to
10395// `channel_multiplier` channels for each), then concatenates the results
10396// together. Thus, the output has `in_channels * channel_multiplier` channels.
10397//
10398// ```
10399// for k in 0..in_channels-1
10400//   for q in 0..channel_multiplier-1
10401//     output[b, i, j, k * channel_multiplier + q] =
10402//       sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
10403//                         filter[di, dj, k, q]
10404// ```
10405//
10406// Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
10407// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
10408//
10409// Arguments:
10410//	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
10411//	filter: 4-D with shape `[filter_height, filter_width, in_channels, channel_multiplier]`.
10412//	strides: 1-D of length 4.  The stride of the sliding window for each dimension
10413// of `input`.
10414//	padding: The type of padding algorithm to use.
10415func DepthwiseConv2dNative(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeAttr) (output tf.Output) {
10416	if scope.Err() != nil {
10417		return
10418	}
10419	attrs := map[string]interface{}{"strides": strides, "padding": padding}
10420	for _, a := range optional {
10421		a(attrs)
10422	}
10423	opspec := tf.OpSpec{
10424		Type: "DepthwiseConv2dNative",
10425		Input: []tf.Input{
10426			input, filter,
10427		},
10428		Attrs: attrs,
10429	}
10430	op := scope.AddOperation(opspec)
10431	return op.Output(0)
10432}
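
// Example (illustrative sketch): a depthwise convolution over NHWC input.
// With in_channels = 3 and channel_multiplier = 2, the output below has
// 3 * 2 = 6 channels; the shapes are assumptions for the example.
//
//	s := op.NewScope()
//	input := op.Placeholder(s.SubScope("input"), tf.Float)   // [batch, h, w, 3]
//	filter := op.Placeholder(s.SubScope("filter"), tf.Float) // [fh, fw, 3, 2]
//	out := op.DepthwiseConv2dNative(s, input, filter, []int64{1, 1, 1, 1}, "SAME")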
10433
10434// Component-wise multiplies a SparseTensor by a dense Tensor.
10435//
10436// The output locations corresponding to the implicitly zero elements in the sparse
10437// tensor will be zero (i.e., will not take up storage space), regardless of the
10438// contents of the dense tensor (even if it contains +/-INF, despite INF*0 == NaN).
10439//
10440// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
10441// the other direction.
10442//
10443// Arguments:
10444//	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
10445// SparseTensor, possibly not in canonical ordering.
10446//	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
10447//	sp_shape: 1-D.  Shape of the input SparseTensor.
10448//	dense: `R`-D.  The dense Tensor operand.
10449//
10450// Returns 1-D.  The `N` values that are operated on.
10451func SparseDenseCwiseMul(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
10452	if scope.Err() != nil {
10453		return
10454	}
10455	opspec := tf.OpSpec{
10456		Type: "SparseDenseCwiseMul",
10457		Input: []tf.Input{
10458			sp_indices, sp_values, sp_shape, dense,
10459		},
10460	}
10461	op := scope.AddOperation(opspec)
10462	return op.Output(0)
10463}
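
// Example (illustrative sketch): multiplying a 2x3 SparseTensor with two
// non-zero entries by a dense 2x3 tensor; the concrete values are assumptions
// for the example.
//
//	s := op.NewScope()
//	indices := op.Const(s, [][]int64{{0, 0}, {1, 2}})
//	values := op.Const(s, []float32{3, 5})
//	shape := op.Const(s, []int64{2, 3})
//	dense := op.Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
//	out := op.SparseDenseCwiseMul(s, indices, values, shape, dense) // => [3, 30]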
10464
10465// Performs a padding as a preprocess during a convolution.
10466//
10467// Similar to FusedResizeAndPadConv2D, this op allows for an optimized
10468// implementation where the spatial padding transformation stage is fused with the
10469// im2col lookup, but in this case without the bilinear filtering required for
10470// resizing. Fusing the padding prevents the need to write out the intermediate
10471// results as whole tensors, reducing memory pressure, and we can get some latency
10472// gains by merging the transformation calculations.
10473// The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
10474// order is used instead.
10475// Internally this op uses a single per-graph scratch buffer, which means that it
10476// will block if multiple versions are being run in parallel. This is because this
10477// operator is primarily an optimization to minimize memory usage.
10478//
10479// Arguments:
10480//	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
10481//	paddings: A two-column matrix specifying the padding sizes. The number of
10482// rows must be the same as the rank of `input`.
10483//	filter: 4-D with shape
10484// `[filter_height, filter_width, in_channels, out_channels]`.
10485//	mode: The padding mode; must be "REFLECT" or "SYMMETRIC".
10486//	strides: 1-D of length 4.  The stride of the sliding window for each dimension
10487// of `input`. Must be in the same order as the dimension specified with format.
10488//	padding: The type of padding algorithm to use.
10489func FusedPadConv2D(scope *Scope, input tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string) (output tf.Output) {
10490	if scope.Err() != nil {
10491		return
10492	}
10493	attrs := map[string]interface{}{"mode": mode, "strides": strides, "padding": padding}
10494	opspec := tf.OpSpec{
10495		Type: "FusedPadConv2D",
10496		Input: []tf.Input{
10497			input, paddings, filter,
10498		},
10499		Attrs: attrs,
10500	}
10501	op := scope.AddOperation(opspec)
10502	return op.Output(0)
10503}
10504
10505// FusedResizeAndPadConv2DAttr is an optional argument to FusedResizeAndPadConv2D.
10506type FusedResizeAndPadConv2DAttr func(optionalAttr)
10507
10508// FusedResizeAndPadConv2DResizeAlignCorners sets the optional resize_align_corners attribute to value.
10509//
10510// value: If true, the centers of the 4 corner pixels of the input and output tensors are
10511// aligned, preserving the values at the corner pixels. Defaults to false.
10512// If not specified, defaults to false
10513func FusedResizeAndPadConv2DResizeAlignCorners(value bool) FusedResizeAndPadConv2DAttr {
10514	return func(m optionalAttr) {
10515		m["resize_align_corners"] = value
10516	}
10517}
10518
10519// Performs a resize and padding as a preprocess during a convolution.
10520//
10521// It's often possible to do spatial transformations more efficiently as part of
10522// the packing stage of a convolution, so this op allows for an optimized
10523// implementation where these stages are fused together. This prevents the need to
10524// write out the intermediate results as whole tensors, reducing memory pressure,
10525// and we can get some latency gains by merging the transformation calculations.
10526// The data_format attribute for Conv2D isn't supported by this op, and defaults to
10527// 'NHWC' order.
10528// Internally this op uses a single per-graph scratch buffer, which means that it
10529// will block if multiple versions are being run in parallel. This is because this
10530// operator is primarily an optimization to minimize memory usage.
10531//
10532// Arguments:
10533//	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
10534//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
10535// new size for the images.
10536//	paddings: A two-column matrix specifying the padding sizes. The number of
10537// rows must be the same as the rank of `input`.
10538//	filter: 4-D with shape
10539// `[filter_height, filter_width, in_channels, out_channels]`.
10540//	mode: The padding mode; must be "REFLECT" or "SYMMETRIC".
10541//	strides: 1-D of length 4.  The stride of the sliding window for each dimension
10542// of `input`. Must be in the same order as the dimension specified with format.
10543//	padding: The type of padding algorithm to use.
10544func FusedResizeAndPadConv2D(scope *Scope, input tf.Output, size tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string, optional ...FusedResizeAndPadConv2DAttr) (output tf.Output) {
10545	if scope.Err() != nil {
10546		return
10547	}
10548	attrs := map[string]interface{}{"mode": mode, "strides": strides, "padding": padding}
10549	for _, a := range optional {
10550		a(attrs)
10551	}
10552	opspec := tf.OpSpec{
10553		Type: "FusedResizeAndPadConv2D",
10554		Input: []tf.Input{
10555			input, size, paddings, filter,
10556		},
10557		Attrs: attrs,
10558	}
10559	op := scope.AddOperation(opspec)
10560	return op.Output(0)
10561}
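
// Example (illustrative sketch): resizing to 224x224 and reflect-padding the
// spatial dimensions before the convolution, all in one fused op. The sizes,
// paddings, and strides are assumptions for the example.
//
//	s := op.NewScope()
//	input := op.Placeholder(s.SubScope("input"), tf.Float) // [batch, h, w, in]
//	size := op.Const(s, []int32{224, 224})
//	paddings := op.Const(s, [][]int32{{0, 0}, {1, 1}, {1, 1}, {0, 0}})
//	filter := op.Placeholder(s.SubScope("filter"), tf.Float)
//	out := op.FusedResizeAndPadConv2D(s, input, size, paddings, filter,
//		"REFLECT", []int64{1, 1, 1, 1}, "SAME")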
10562
10563// Adds Tensor 'bias' to Tensor 'input' for Quantized types.
10564//
10565// Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
10566//
10567// Arguments:
10568//
10569//	bias: A 1D bias Tensor with size matching the last dimension of 'input'.
10570//	min_input: The float value that the lowest quantized input value represents.
10571//	max_input: The float value that the highest quantized input value represents.
10572//	min_bias: The float value that the lowest quantized bias value represents.
10573//	max_bias: The float value that the highest quantized bias value represents.
10574//
10575//
10576// Returns:
10577//	output
10578//	min_out: The float value that the lowest quantized output value represents.
10579//	max_out: The float value that the highest quantized output value represents.
10580func QuantizedBiasAdd(scope *Scope, input tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_bias tf.Output, max_bias tf.Output, out_type tf.DataType) (output tf.Output, min_out tf.Output, max_out tf.Output) {
10581	if scope.Err() != nil {
10582		return
10583	}
10584	attrs := map[string]interface{}{"out_type": out_type}
10585	opspec := tf.OpSpec{
10586		Type: "QuantizedBiasAdd",
10587		Input: []tf.Input{
10588			input, bias, min_input, max_input, min_bias, max_bias,
10589		},
10590		Attrs: attrs,
10591	}
10592	op := scope.AddOperation(opspec)
10593	return op.Output(0), op.Output(1), op.Output(2)
10594}
10595
10596// DataFormatVecPermuteAttr is an optional argument to DataFormatVecPermute.
10597type DataFormatVecPermuteAttr func(optionalAttr)
10598
10599// DataFormatVecPermuteSrcFormat sets the optional src_format attribute to value.
10600//
10601// value: source data format.
10602// If not specified, defaults to "NHWC"
10603func DataFormatVecPermuteSrcFormat(value string) DataFormatVecPermuteAttr {
10604	return func(m optionalAttr) {
10605		m["src_format"] = value
10606	}
10607}
10608
10609// DataFormatVecPermuteDstFormat sets the optional dst_format attribute to value.
10610//
10611// value: destination data format.
10612// If not specified, defaults to "NCHW"
10613func DataFormatVecPermuteDstFormat(value string) DataFormatVecPermuteAttr {
10614	return func(m optionalAttr) {
10615		m["dst_format"] = value
10616	}
10617}
10618
10619// Permute input tensor from `src_format` to `dst_format`.
10620//
10621// Input tensor must be a vector of size 4, or a 4x2 tensor.
10622//
10623// For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and inputs:
10624// ```
10625// [1, 2, 3, 4]
10626// ```
10627// and
10628// ```
10629// [[1, 2, 3, 4],
10630//  [5, 6, 7, 8]]
10631// ```
10632// then the outputs will be (respectively):
10633// ```
10634// [1, 4, 2, 3]
10635// ```
10636// and
10637// ```
10638// [[1, 4, 2, 3],
10639//  [5, 8, 6, 7]]
10640// ```
10641//
10642// Arguments:
10643//	x: Vector of size 4 or Tensor of shape (4, 2) in source data format.
10644//
10645// Returns Vector of size 4 or Tensor of shape (4, 2) in destination data format.
10646func DataFormatVecPermute(scope *Scope, x tf.Output, optional ...DataFormatVecPermuteAttr) (y tf.Output) {
10647	if scope.Err() != nil {
10648		return
10649	}
10650	attrs := map[string]interface{}{}
10651	for _, a := range optional {
10652		a(attrs)
10653	}
10654	opspec := tf.OpSpec{
10655		Type: "DataFormatVecPermute",
10656		Input: []tf.Input{
10657			x,
10658		},
10659		Attrs: attrs,
10660	}
10661	op := scope.AddOperation(opspec)
10662	return op.Output(0)
10663}
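
// Example (illustrative sketch): permuting an NHWC-ordered shape vector into
// NCHW order, mirroring the first example above.
//
//	s := op.NewScope()
//	shape := op.Const(s, []int32{1, 2, 3, 4})  // NHWC-ordered
//	nchw := op.DataFormatVecPermute(s, shape)  // => [1, 4, 2, 3]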
10664
10665// DataFormatDimMapAttr is an optional argument to DataFormatDimMap.
10666type DataFormatDimMapAttr func(optionalAttr)
10667
10668// DataFormatDimMapSrcFormat sets the optional src_format attribute to value.
10669//
10670// value: source data format.
10671// If not specified, defaults to "NHWC"
10672func DataFormatDimMapSrcFormat(value string) DataFormatDimMapAttr {
10673	return func(m optionalAttr) {
10674		m["src_format"] = value
10675	}
10676}
10677
10678// DataFormatDimMapDstFormat sets the optional dst_format attribute to value.
10679//
10680// value: destination data format.
10681// If not specified, defaults to "NCHW"
10682func DataFormatDimMapDstFormat(value string) DataFormatDimMapAttr {
10683	return func(m optionalAttr) {
10684		m["dst_format"] = value
10685	}
10686}
10687
10688// Returns the dimension index in the destination data format given the one in
10689//
10690// the source data format.
10691//
10692// Arguments:
10693//	x: A Tensor with each element as a dimension index in source data format.
10694// Must be in the range [-4, 4).
10695//
10696// Returns A Tensor with each element as a dimension index in destination data format.
10697func DataFormatDimMap(scope *Scope, x tf.Output, optional ...DataFormatDimMapAttr) (y tf.Output) {
10698	if scope.Err() != nil {
10699		return
10700	}
10701	attrs := map[string]interface{}{}
10702	for _, a := range optional {
10703		a(attrs)
10704	}
10705	opspec := tf.OpSpec{
10706		Type: "DataFormatDimMap",
10707		Input: []tf.Input{
10708			x,
10709		},
10710		Attrs: attrs,
10711	}
10712	op := scope.AddOperation(opspec)
10713	return op.Output(0)
10714}
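
// Example (illustrative sketch): mapping the NHWC channel dimension (index 3,
// or equivalently -1) to its index in NCHW.
//
//	s := op.NewScope()
//	dim := op.Const(s, int32(-1))          // channels in NHWC
//	mapped := op.DataFormatDimMap(s, dim)  // => 1, channels in NCHW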
10715
10716// Conv2DBackpropFilterAttr is an optional argument to Conv2DBackpropFilter.
10717type Conv2DBackpropFilterAttr func(optionalAttr)
10718
10719// Conv2DBackpropFilterUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
10720// If not specified, defaults to true
10721func Conv2DBackpropFilterUseCudnnOnGpu(value bool) Conv2DBackpropFilterAttr {
10722	return func(m optionalAttr) {
10723		m["use_cudnn_on_gpu"] = value
10724	}
10725}
10726
10727// Conv2DBackpropFilterExplicitPaddings sets the optional explicit_paddings attribute to value.
10728//
10729// value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
10730// dimension, the amount of padding inserted before and after the dimension is
10731// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
10732// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
10733// If not specified, defaults to {}
10734func Conv2DBackpropFilterExplicitPaddings(value []int64) Conv2DBackpropFilterAttr {
10735	return func(m optionalAttr) {
10736		m["explicit_paddings"] = value
10737	}
10738}
10739
10740// Conv2DBackpropFilterDataFormat sets the optional data_format attribute to value.
10741//
10742// value: Specify the data format of the input and output data. With the
10743// default format "NHWC", the data is stored in the order of:
10744//     [batch, in_height, in_width, in_channels].
10745// Alternatively, the format could be "NCHW", the data storage order of:
10746//     [batch, in_channels, in_height, in_width].
10747// If not specified, defaults to "NHWC"
10748func Conv2DBackpropFilterDataFormat(value string) Conv2DBackpropFilterAttr {
10749	return func(m optionalAttr) {
10750		m["data_format"] = value
10751	}
10752}
10753
10754// Conv2DBackpropFilterDilations sets the optional dilations attribute to value.
10755//
10756// value: 1-D tensor of length 4.  The dilation factor for each dimension of
10757// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
10758// element on that dimension. The dimension order is determined by the value of
10759// `data_format`, see above for details. Dilations in the batch and depth
10760// dimensions must be 1.
10761// If not specified, defaults to [1, 1, 1, 1]
10762func Conv2DBackpropFilterDilations(value []int64) Conv2DBackpropFilterAttr {
10763	return func(m optionalAttr) {
10764		m["dilations"] = value
10765	}
10766}
10767
10768// Computes the gradients of convolution with respect to the filter.
10769//
10770// Arguments:
10771//	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
10772//	filter_sizes: An integer vector representing the tensor shape of `filter`,
10773// where `filter` is a 4-D
10774// `[filter_height, filter_width, in_channels, out_channels]` tensor.
10775//	out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
10776// Gradients w.r.t. the output of the convolution.
10777//	strides: The stride of the sliding window for each dimension of the input
10778// of the convolution. Must be in the same order as the dimension specified with
10779// format.
10780//	padding: The type of padding algorithm to use.
10781//
10782// Returns 4-D with shape
10783// `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
10784// the `filter` input of the convolution.
10785func Conv2DBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropFilterAttr) (output tf.Output) {
10786	if scope.Err() != nil {
10787		return
10788	}
10789	attrs := map[string]interface{}{"strides": strides, "padding": padding}
10790	for _, a := range optional {
10791		a(attrs)
10792	}
10793	opspec := tf.OpSpec{
10794		Type: "Conv2DBackpropFilter",
10795		Input: []tf.Input{
10796			input, filter_sizes, out_backprop,
10797		},
10798		Attrs: attrs,
10799	}
10800	op := scope.AddOperation(opspec)
10801	return op.Output(0)
10802}
10803
10804// Adds `bias` to `value`.
10805//
10806// This is a deprecated version of BiasAdd and will soon be removed.
10807//
10808// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
10809// Broadcasting is supported, so `value` may have any number of dimensions.
10810//
10811// Arguments:
10812//	value: Any number of dimensions.
10813//	bias: 1-D with size the last dimension of `value`.
10814//
10815// Returns Broadcasted sum of `value` and `bias`.
10816func BiasAddV1(scope *Scope, value tf.Output, bias tf.Output) (output tf.Output) {
10817	if scope.Err() != nil {
10818		return
10819	}
10820	opspec := tf.OpSpec{
10821		Type: "BiasAddV1",
10822		Input: []tf.Input{
10823			value, bias,
10824		},
10825	}
10826	op := scope.AddOperation(opspec)
10827	return op.Output(0)
10828}
10829
10830// BiasAddGradAttr is an optional argument to BiasAddGrad.
10831type BiasAddGradAttr func(optionalAttr)
10832
10833// BiasAddGradDataFormat sets the optional data_format attribute to value.
10834//
10835// value: Specify the data format of the input and output data. With the
10836// default format "NHWC", the bias tensor will be added to the last dimension
10837// of the value tensor.
10838// Alternatively, the format could be "NCHW", the data storage order of:
10839//     [batch, in_channels, in_height, in_width].
10840// The bias tensor will be added to "in_channels", the third-to-last
10841// dimension.
10842// If not specified, defaults to "NHWC"
10843func BiasAddGradDataFormat(value string) BiasAddGradAttr {
10844	return func(m optionalAttr) {
10845		m["data_format"] = value
10846	}
10847}
10848
10849// The backward operation for "BiasAdd" on the "bias" tensor.
10850//
10851// It accumulates all the values from out_backprop into the feature dimension.
10852// For NHWC data format, the feature dimension is the last. For NCHW data format,
10853// the feature dimension is the third-to-last.
10854//
10855// Arguments:
10856//	out_backprop: Any number of dimensions.
10857//
10858// Returns 1-D with size the feature dimension of `out_backprop`.
10859func BiasAddGrad(scope *Scope, out_backprop tf.Output, optional ...BiasAddGradAttr) (output tf.Output) {
10860	if scope.Err() != nil {
10861		return
10862	}
10863	attrs := map[string]interface{}{}
10864	for _, a := range optional {
10865		a(attrs)
10866	}
10867	opspec := tf.OpSpec{
10868		Type: "BiasAddGrad",
10869		Input: []tf.Input{
10870			out_backprop,
10871		},
10872		Attrs: attrs,
10873	}
10874	op := scope.AddOperation(opspec)
10875	return op.Output(0)
10876}
10877
10878// BiasAddAttr is an optional argument to BiasAdd.
10879type BiasAddAttr func(optionalAttr)
10880
10881// BiasAddDataFormat sets the optional data_format attribute to value.
10882//
10883// value: Specify the data format of the input and output data. With the
10884// default format "NHWC", the bias tensor will be added to the last dimension
10885// of the value tensor.
10886// Alternatively, the format could be "NCHW", the data storage order of:
10887//     [batch, in_channels, in_height, in_width].
10888// The bias tensor will be added to "in_channels", the third-to-last
10889// dimension.
10890// If not specified, defaults to "NHWC"
10891func BiasAddDataFormat(value string) BiasAddAttr {
10892	return func(m optionalAttr) {
10893		m["data_format"] = value
10894	}
10895}
10896
10897// Adds `bias` to `value`.
10898//
10899// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
10900// Broadcasting is supported, so `value` may have any number of dimensions.
10901//
10902// Arguments:
10903//	value: Any number of dimensions.
10904//	bias: 1-D with size the last dimension of `value`.
10905//
10906// Returns Broadcasted sum of `value` and `bias`.
10907func BiasAdd(scope *Scope, value tf.Output, bias tf.Output, optional ...BiasAddAttr) (output tf.Output) {
10908	if scope.Err() != nil {
10909		return
10910	}
10911	attrs := map[string]interface{}{}
10912	for _, a := range optional {
10913		a(attrs)
10914	}
10915	opspec := tf.OpSpec{
10916		Type: "BiasAdd",
10917		Input: []tf.Input{
10918			value, bias,
10919		},
10920		Attrs: attrs,
10921	}
10922	op := scope.AddOperation(opspec)
10923	return op.Output(0)
10924}
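
// Example (illustrative sketch): adding a per-channel bias to an NHWC tensor;
// the two-channel bias values are assumptions for the example.
//
//	s := op.NewScope()
//	value := op.Placeholder(s.SubScope("value"), tf.Float) // [..., 2]
//	bias := op.Const(s, []float32{0.1, -0.1})
//	out := op.BiasAdd(s, value, bias, op.BiasAddDataFormat("NHWC"))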
10925
10926// FusedBatchNormGradV3Attr is an optional argument to FusedBatchNormGradV3.
10927type FusedBatchNormGradV3Attr func(optionalAttr)
10928
10929// FusedBatchNormGradV3Epsilon sets the optional epsilon attribute to value.
10930//
10931// value: A small float number added to the variance of x.
10932// If not specified, defaults to 0.0001
10933func FusedBatchNormGradV3Epsilon(value float32) FusedBatchNormGradV3Attr {
10934	return func(m optionalAttr) {
10935		m["epsilon"] = value
10936	}
10937}
10938
10939// FusedBatchNormGradV3DataFormat sets the optional data_format attribute to value.
10940//
10941// value: The data format for y_backprop, x, x_backprop.
10942// Either "NHWC" (default) or "NCHW".
10943// If not specified, defaults to "NHWC"
10944func FusedBatchNormGradV3DataFormat(value string) FusedBatchNormGradV3Attr {
10945	return func(m optionalAttr) {
10946		m["data_format"] = value
10947	}
10948}
10949
10950// FusedBatchNormGradV3IsTraining sets the optional is_training attribute to value.
10951//
10952// value: A bool value to indicate the operation is for training (default)
10953// or inference.
10954// If not specified, defaults to true
10955func FusedBatchNormGradV3IsTraining(value bool) FusedBatchNormGradV3Attr {
10956	return func(m optionalAttr) {
10957		m["is_training"] = value
10958	}
10959}
10960
10961// Gradient for batch normalization.
10962//
10963// Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
10964// The size of 1D Tensors matches the dimension C of the 4D Tensors.
10965//
10966// Arguments:
10967//	y_backprop: A 4D Tensor for the gradient with respect to y.
10968//	x: A 4D Tensor for input data.
10969//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
10970//	reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
10971// mean to be reused in gradient computation. When is_training is
10972// False, a 1D Tensor for the population mean to be reused in both
10973// 1st and 2nd order gradient computation.
10974//	reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
10975// variance (inverted variance in the cuDNN case) to be reused in
10976// gradient computation. When is_training is False, a 1D Tensor
10977// for the population variance to be reused in both 1st and 2nd
10978// order gradient computation.
10979//	reserve_space_3: When is_training is True, a 1D Tensor for some intermediate results to be reused
10980// in gradient computation. When is_training is False, a dummy empty Tensor will be
10981// created.
10982//
10983// Returns:
10984//	x_backprop: A 4D Tensor for the gradient with respect to x.
10985//	scale_backprop: A 1D Tensor for the gradient with respect to scale.
10986//	offset_backprop: A 1D Tensor for the gradient with respect to offset.
10987//	reserve_space_4: Unused placeholder to match the mean input in FusedBatchNorm.
10988//	reserve_space_5: Unused placeholder to match the variance input
10989// in FusedBatchNorm.
10990func FusedBatchNormGradV3(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, reserve_space_3 tf.Output, optional ...FusedBatchNormGradV3Attr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_4 tf.Output, reserve_space_5 tf.Output) {
10991	if scope.Err() != nil {
10992		return
10993	}
10994	attrs := map[string]interface{}{}
10995	for _, a := range optional {
10996		a(attrs)
10997	}
10998	opspec := tf.OpSpec{
10999		Type: "FusedBatchNormGradV3",
11000		Input: []tf.Input{
11001			y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3,
11002		},
11003		Attrs: attrs,
11004	}
11005	op := scope.AddOperation(opspec)
11006	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
11007}
11008
11009// Adds up a SparseTensor and a dense Tensor, using these special rules:
11010//
11011// (1) Broadcasts the dense side to have the same shape as the sparse side, if
11012//     eligible;
11013// (2) Then, only the dense values pointed to by the indices of the SparseTensor
11014//     participate in the cwise addition.
11015//
11016// By these rules, the result is a logical SparseTensor with exactly the same
11017// indices and shape, but possibly with different non-zero values.  The output of
11018// this Op is the resultant non-zero values.
11019//
11020// Arguments:
11021//	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
11022// SparseTensor, possibly not in canonical ordering.
11023//	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
11024//	sp_shape: 1-D.  Shape of the input SparseTensor.
11025//	dense: `R`-D.  The dense Tensor operand.
11026//
11027// Returns 1-D.  The `N` values that are operated on.
11028func SparseDenseCwiseAdd(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
11029	if scope.Err() != nil {
11030		return
11031	}
11032	opspec := tf.OpSpec{
11033		Type: "SparseDenseCwiseAdd",
11034		Input: []tf.Input{
11035			sp_indices, sp_values, sp_shape, dense,
11036		},
11037	}
11038	op := scope.AddOperation(opspec)
11039	return op.Output(0)
11040}
11041
11042// Gradients for batch normalization.
11043//
11044// DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
11045//
11046// This op is deprecated. See `tf.nn.batch_normalization`.
11047//
11048// Arguments:
11049//	t: A 4D input Tensor.
11050//	m: A 1D mean Tensor with size matching the last dimension of t.
11051// This is the first output from tf.nn.moments,
11052// or a saved moving average thereof.
11053//	v: A 1D variance Tensor with size matching the last dimension of t.
11054// This is the second output from tf.nn.moments,
11055// or a saved moving average thereof.
11056//	gamma: A 1D gamma Tensor with size matching the last dimension of t.
11057// If "scale_after_normalization" is true, this Tensor will be multiplied
11058// with the normalized Tensor.
11059//	backprop: 4D backprop Tensor.
11060//	variance_epsilon: A small float number to avoid dividing by 0.
11061//	scale_after_normalization: A bool indicating whether the resulted tensor
11062// needs to be multiplied with gamma.
11063//
11064// Returns:
11065//	dx: 4D backprop tensor for input.
11066//	dm: 1D backprop tensor for mean.
11067//	dv: 1D backprop tensor for variance.
11068//	db: 1D backprop tensor for beta.
11069//	dg: 1D backprop tensor for gamma.
11070func BatchNormWithGlobalNormalizationGrad(scope *Scope, t tf.Output, m tf.Output, v tf.Output, gamma tf.Output, backprop tf.Output, variance_epsilon float32, scale_after_normalization bool) (dx tf.Output, dm tf.Output, dv tf.Output, db tf.Output, dg tf.Output) {
11071	if scope.Err() != nil {
11072		return
11073	}
11074	attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
11075	opspec := tf.OpSpec{
11076		Type: "BatchNormWithGlobalNormalizationGrad",
11077		Input: []tf.Input{
11078			t, m, v, gamma, backprop,
11079		},
11080		Attrs: attrs,
11081	}
11082	op := scope.AddOperation(opspec)
11083	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
11084}
11085
11086// Conv2DAttr is an optional argument to Conv2D.
11087type Conv2DAttr func(optionalAttr)
11088
11089// Conv2DUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
11090// If not specified, defaults to true
11091func Conv2DUseCudnnOnGpu(value bool) Conv2DAttr {
11092	return func(m optionalAttr) {
11093		m["use_cudnn_on_gpu"] = value
11094	}
11095}
11096
11097// Conv2DExplicitPaddings sets the optional explicit_paddings attribute to value.
11098//
11099// value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
11100// dimension, the amount of padding inserted before and after the dimension is
11101// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
11102// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
11103// If not specified, defaults to {}
11104func Conv2DExplicitPaddings(value []int64) Conv2DAttr {
11105	return func(m optionalAttr) {
11106		m["explicit_paddings"] = value
11107	}
11108}
11109
11110// Conv2DDataFormat sets the optional data_format attribute to value.
11111//
11112// value: Specify the data format of the input and output data. With the
11113// default format "NHWC", the data is stored in the order of:
11114//     [batch, height, width, channels].
11115// Alternatively, the format could be "NCHW", the data storage order of:
11116//     [batch, channels, height, width].
11117// If not specified, defaults to "NHWC"
11118func Conv2DDataFormat(value string) Conv2DAttr {
11119	return func(m optionalAttr) {
11120		m["data_format"] = value
11121	}
11122}
11123
11124// Conv2DDilations sets the optional dilations attribute to value.
11125//
11126// value: 1-D tensor of length 4.  The dilation factor for each dimension of
11127// `input`. If set to k > 1, there will be k-1 skipped cells between each
11128// filter element on that dimension. The dimension order is determined by the
11129// value of `data_format`, see above for details. Dilations in the batch and
11130// depth dimensions must be 1.
11131// If not specified, defaults to [1, 1, 1, 1]
11132func Conv2DDilations(value []int64) Conv2DAttr {
11133	return func(m optionalAttr) {
11134		m["dilations"] = value
11135	}
11136}
11137
11138// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
11139//
11140// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
11141// and a filter / kernel tensor of shape
11142// `[filter_height, filter_width, in_channels, out_channels]`, this op
11143// performs the following:
11144//
11145// 1. Flattens the filter to a 2-D matrix with shape
11146//    `[filter_height * filter_width * in_channels, output_channels]`.
11147// 2. Extracts image patches from the input tensor to form a *virtual*
11148//    tensor of shape `[batch, out_height, out_width,
11149//    filter_height * filter_width * in_channels]`.
11150// 3. For each patch, right-multiplies the filter matrix and the image patch
11151//    vector.
11152//
11153// In detail, with the default NHWC format,
11154//
11155//     output[b, i, j, k] =
11156//         sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
11157//                         filter[di, dj, q, k]
11158//
11159// Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
11160// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
11161//
11162// Arguments:
11163//	input: A 4-D tensor. The dimension order is interpreted according to the value
11164// of `data_format`, see below for details.
11165//	filter: A 4-D tensor of shape
11166// `[filter_height, filter_width, in_channels, out_channels]`
11167//	strides: 1-D tensor of length 4.  The stride of the sliding window for each
11168// dimension of `input`. The dimension order is determined by the value of
11169// `data_format`, see below for details.
11170//	padding: The type of padding algorithm to use.
11171//
11172// Returns A 4-D tensor. The dimension order is determined by the value of
11173// `data_format`, see below for details.
11174func Conv2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv2DAttr) (output tf.Output) {
11175	if scope.Err() != nil {
11176		return
11177	}
11178	attrs := map[string]interface{}{"strides": strides, "padding": padding}
11179	for _, a := range optional {
11180		a(attrs)
11181	}
11182	opspec := tf.OpSpec{
11183		Type: "Conv2D",
11184		Input: []tf.Input{
11185			input, filter,
11186		},
11187		Attrs: attrs,
11188	}
11189	op := scope.AddOperation(opspec)
11190	return op.Output(0)
11191}
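
// Example (illustrative sketch): a stride-2 SAME convolution in the default
// NHWC layout; placeholder names and strides are assumptions for the example.
//
//	s := op.NewScope()
//	input := op.Placeholder(s.SubScope("input"), tf.Float)   // [batch, h, w, in]
//	filter := op.Placeholder(s.SubScope("filter"), tf.Float) // [fh, fw, in, out]
//	out := op.Conv2D(s, input, filter, []int64{1, 2, 2, 1}, "SAME")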
11192
11193// Reduces `input` from `num_devices` using `reduction` to a single device.
11194//
11196//
11197// The graph should be constructed so that all inputs have a valid device
11198// assignment, and the op itself is assigned one of these devices.
11199//
11200// input: The input to the reduction.
11201// data: the value of the reduction across all `num_devices` devices.
11202// reduction: the reduction operation to perform.
11203func NcclReduce(scope *Scope, input []tf.Output, reduction string) (data tf.Output) {
11204	if scope.Err() != nil {
11205		return
11206	}
11207	attrs := map[string]interface{}{"reduction": reduction}
11208	opspec := tf.OpSpec{
11209		Type: "NcclReduce",
11210		Input: []tf.Input{
11211			tf.OutputList(input),
11212		},
11213		Attrs: attrs,
11214	}
11215	op := scope.AddOperation(opspec)
11216	return op.Output(0)
11217}
11218
11219// QuantizedReluAttr is an optional argument to QuantizedRelu.
11220type QuantizedReluAttr func(optionalAttr)
11221
11222// QuantizedReluOutType sets the optional out_type attribute to value.
11223// If not specified, defaults to DT_QUINT8
11224func QuantizedReluOutType(value tf.DataType) QuantizedReluAttr {
11225	return func(m optionalAttr) {
11226		m["out_type"] = value
11227	}
11228}
11229
11230// Computes Quantized Rectified Linear: `max(features, 0)`
11231//
11232// Arguments:
11233//
11234//	min_features: The float value that the lowest quantized value represents.
11235//	max_features: The float value that the highest quantized value represents.
11236//
11237// Returns:
11238//	activations: Has the same output shape as "features".
11239//	min_activations: The float value that the lowest quantized value represents.
11240//	max_activations: The float value that the highest quantized value represents.
11241func QuantizedRelu(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
11242	if scope.Err() != nil {
11243		return
11244	}
11245	attrs := map[string]interface{}{}
11246	for _, a := range optional {
11247		a(attrs)
11248	}
11249	opspec := tf.OpSpec{
11250		Type: "QuantizedRelu",
11251		Input: []tf.Input{
11252			features, min_features, max_features,
11253		},
11254		Attrs: attrs,
11255	}
11256	op := scope.AddOperation(opspec)
11257	return op.Output(0), op.Output(1), op.Output(2)
11258}
11259
11260// Returns the next representable value of `x1` in the direction of `x2`, element-wise.
11261//
11262// This operation returns the same result as the C++ std::nextafter function.
11263//
11264// It can also return a subnormal number.
11265//
11266// @compatibility(cpp)
11267// Equivalent to C++ std::nextafter function.
11268// @end_compatibility
11269func NextAfter(scope *Scope, x1 tf.Output, x2 tf.Output) (output tf.Output) {
11270	if scope.Err() != nil {
11271		return
11272	}
11273	opspec := tf.OpSpec{
11274		Type: "NextAfter",
11275		Input: []tf.Input{
11276			x1, x2,
11277		},
11278	}
11279	op := scope.AddOperation(opspec)
11280	return op.Output(0)
11281}
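
// Example (illustrative sketch): the next representable float32 after 1 in
// the direction of 2, i.e. 1 + 2^-23 (about 1.0000001).
//
//	s := op.NewScope()
//	x1 := op.Const(s, float32(1))
//	x2 := op.Const(s, float32(2))
//	next := op.NextAfter(s, x1, x2)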
11282
11283// RequantizePerChannelAttr is an optional argument to RequantizePerChannel.
11284type RequantizePerChannelAttr func(optionalAttr)
11285
11286// RequantizePerChannelOutType sets the optional out_type attribute to value.
11287//
11288// value: The quantized type of output tensor that needs to be converted.
11289// If not specified, defaults to DT_QUINT8
11290func RequantizePerChannelOutType(value tf.DataType) RequantizePerChannelAttr {
11291	return func(m optionalAttr) {
11292		m["out_type"] = value
11293	}
11294}
11295
11296// Requantizes input with min and max values known per channel.
11297//
11298// Arguments:
11299//	input: The original input tensor.
11300//	input_min: The minimum value of the input tensor
11301//	input_max: The maximum value of the input tensor.
11302//	requested_output_min: The minimum value of the output tensor requested.
11303//	requested_output_max: The maximum value of the output tensor requested.
11304//
11305// Returns:
11306//	output: Output tensor.
11307//	output_min: The minimum value of the final output tensor
11308//	output_max: The maximum value of the final output tensor.
11309func RequantizePerChannel(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, requested_output_min tf.Output, requested_output_max tf.Output, optional ...RequantizePerChannelAttr) (output tf.Output, output_min tf.Output, output_max tf.Output) {
11310	if scope.Err() != nil {
11311		return
11312	}
11313	attrs := map[string]interface{}{}
11314	for _, a := range optional {
11315		a(attrs)
11316	}
11317	opspec := tf.OpSpec{
11318		Type: "RequantizePerChannel",
11319		Input: []tf.Input{
11320			input, input_min, input_max, requested_output_min, requested_output_max,
11321		},
11322		Attrs: attrs,
11323	}
11324	op := scope.AddOperation(opspec)
11325	return op.Output(0), op.Output(1), op.Output(2)
11326}
11327
11328// Bucketizes 'input' based on 'boundaries'.
11329//
11330// For example, if the inputs are
11331//     boundaries = [0, 10, 100]
11332//     input = [[-5, 10000]
11333//              [150,   10]
11334//              [5,    100]]
11335//
11336// then the output will be
11337//     output = [[0, 3]
11338//               [3, 2]
11339//               [1, 3]]
11340//
11341// Arguments:
11342//	input: Tensor of any shape containing int or float values.
11343//	boundaries: A sorted list of floats giving the boundaries of the buckets.
11344//
11345// Returns Same shape with 'input', each value of input replaced with bucket index.
11346//
11347// @compatibility(numpy)
11348// Equivalent to np.digitize.
11349// @end_compatibility
11350func Bucketize(scope *Scope, input tf.Output, boundaries []float32) (output tf.Output) {
11351	if scope.Err() != nil {
11352		return
11353	}
11354	attrs := map[string]interface{}{"boundaries": boundaries}
11355	opspec := tf.OpSpec{
11356		Type: "Bucketize",
11357		Input: []tf.Input{
11358			input,
11359		},
11360		Attrs: attrs,
11361	}
11362	op := scope.AddOperation(opspec)
11363	return op.Output(0)
11364}
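
// Example (illustrative sketch): reproducing the bucketization shown above.
//
//	s := op.NewScope()
//	vals := op.Const(s, [][]float32{{-5, 10000}, {150, 10}, {5, 100}})
//	idx := op.Bucketize(s, vals, []float32{0, 10, 100}) // => [[0 3] [3 2] [1 3]]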
11365
11366// Converts each string in the input Tensor to its hash modulo a number of buckets.
11367//
11368// The hash function is deterministic on the content of the string within the
11369// process.
11370//
11371// Note that the hash function may change from time to time.
11372// This functionality will be deprecated and it's recommended to use
11373// `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
11374//
11375// Arguments:
11376//
11377//	num_buckets: The number of buckets.
11378//
11379// Returns A Tensor of the same shape as the input `string_tensor`.
11380func StringToHashBucket(scope *Scope, string_tensor tf.Output, num_buckets int64) (output tf.Output) {
11381	if scope.Err() != nil {
11382		return
11383	}
11384	attrs := map[string]interface{}{"num_buckets": num_buckets}
11385	opspec := tf.OpSpec{
11386		Type: "StringToHashBucket",
11387		Input: []tf.Input{
11388			string_tensor,
11389		},
11390		Attrs: attrs,
11391	}
11392	op := scope.AddOperation(opspec)
11393	return op.Output(0)
11394}
11395
11396// Computes softsign: `features / (abs(features) + 1)`.
11397func Softsign(scope *Scope, features tf.Output) (activations tf.Output) {
11398	if scope.Err() != nil {
11399		return
11400	}
11401	opspec := tf.OpSpec{
11402		Type: "Softsign",
11403		Input: []tf.Input{
11404			features,
11405		},
11406	}
11407	op := scope.AddOperation(opspec)
11408	return op.Output(0)
11409}
11410
11411// QuantizedAddAttr is an optional argument to QuantizedAdd.
11412type QuantizedAddAttr func(optionalAttr)
11413
11414// QuantizedAddToutput sets the optional Toutput attribute to value.
11415// If not specified, defaults to DT_QINT32
11416func QuantizedAddToutput(value tf.DataType) QuantizedAddAttr {
11417	return func(m optionalAttr) {
11418		m["Toutput"] = value
11419	}
11420}
11421
11422// Returns x + y element-wise, working on quantized buffers.
11423//
11424// Arguments:
11425//
11426//
11427//	min_x: The float value that the lowest quantized `x` value represents.
11428//	max_x: The float value that the highest quantized `x` value represents.
11429//	min_y: The float value that the lowest quantized `y` value represents.
11430//	max_y: The float value that the highest quantized `y` value represents.
11431//
11432// Returns:
11433//	z
11434//	min_z: The float value that the lowest quantized output value represents.
11435//	max_z: The float value that the highest quantized output value represents.
11436//
11437// *NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about
11438// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
11439func QuantizedAdd(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedAddAttr) (z tf.Output, min_z tf.Output, max_z tf.Output) {
11440	if scope.Err() != nil {
11441		return
11442	}
11443	attrs := map[string]interface{}{}
11444	for _, a := range optional {
11445		a(attrs)
11446	}
11447	opspec := tf.OpSpec{
11448		Type: "QuantizedAdd",
11449		Input: []tf.Input{
11450			x, y, min_x, max_x, min_y, max_y,
11451		},
11452		Attrs: attrs,
11453	}
11454	op := scope.AddOperation(opspec)
11455	return op.Output(0), op.Output(1), op.Output(2)
11456}
11457
11458// QuantizedMatMulAttr is an optional argument to QuantizedMatMul.
11459type QuantizedMatMulAttr func(optionalAttr)
11460
11461// QuantizedMatMulToutput sets the optional Toutput attribute to value.
11462// If not specified, defaults to DT_QINT32
11463func QuantizedMatMulToutput(value tf.DataType) QuantizedMatMulAttr {
11464	return func(m optionalAttr) {
11465		m["Toutput"] = value
11466	}
11467}
11468
11469// QuantizedMatMulTransposeA sets the optional transpose_a attribute to value.
11470//
11471// value: If true, `a` is transposed before multiplication.
11472// If not specified, defaults to false
11473func QuantizedMatMulTransposeA(value bool) QuantizedMatMulAttr {
11474	return func(m optionalAttr) {
11475		m["transpose_a"] = value
11476	}
11477}
11478
11479// QuantizedMatMulTransposeB sets the optional transpose_b attribute to value.
11480//
11481// value: If true, `b` is transposed before multiplication.
11482// If not specified, defaults to false
11483func QuantizedMatMulTransposeB(value bool) QuantizedMatMulAttr {
11484	return func(m optionalAttr) {
11485		m["transpose_b"] = value
11486	}
11487}
11488
11489// QuantizedMatMulTactivation sets the optional Tactivation attribute to value.
11490//
11491// value: The type of output produced by activation function
11492// following this operation.
11493// If not specified, defaults to DT_QUINT8
11494func QuantizedMatMulTactivation(value tf.DataType) QuantizedMatMulAttr {
11495	return func(m optionalAttr) {
11496		m["Tactivation"] = value
11497	}
11498}
11499
11500// Performs a quantized matrix multiplication of `a` by the matrix `b`.
11501//
11502// The inputs must be two-dimensional matrices and the inner dimension of
11503// `a` (after being transposed if `transpose_a` is non-zero) must match the
11504// outer dimension of `b` (after being transposed if `transpose_b` is
11505// non-zero).
11506//
11507// Arguments:
11508//	a: Must be a two-dimensional tensor.
11509//	b: Must be a two-dimensional tensor.
11510//	min_a: The float value that the lowest quantized `a` value represents.
11511//	max_a: The float value that the highest quantized `a` value represents.
11512//	min_b: The float value that the lowest quantized `b` value represents.
11513//	max_b: The float value that the highest quantized `b` value represents.
11514//
11515// Returns:
11516//	out
11517//	min_out: The float value that the lowest quantized output value represents.
11518//	max_out: The float value that the highest quantized output value represents.
11519func QuantizedMatMul(scope *Scope, a tf.Output, b tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
11520	if scope.Err() != nil {
11521		return
11522	}
11523	attrs := map[string]interface{}{}
11524	for _, a := range optional {
11525		a(attrs)
11526	}
11527	opspec := tf.OpSpec{
11528		Type: "QuantizedMatMul",
11529		Input: []tf.Input{
11530			a, b, min_a, max_a, min_b, max_b,
11531		},
11532		Attrs: attrs,
11533	}
11534	op := scope.AddOperation(opspec)
11535	return op.Output(0), op.Output(1), op.Output(2)
11536}
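
// Example (illustrative sketch): multiplying two quint8 matrices whose
// quantized ranges are both assumed to be [0, 6]; the range constants are
// assumptions for the example.
//
//	s := op.NewScope()
//	a := op.Placeholder(s.SubScope("a"), tf.Quint8)
//	b := op.Placeholder(s.SubScope("b"), tf.Quint8)
//	lo := op.Const(s, float32(0))
//	hi := op.Const(s, float32(6))
//	out, minOut, maxOut := op.QuantizedMatMul(s, a, b, lo, hi, lo, hi)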
11537
11538// CumulativeLogsumexpAttr is an optional argument to CumulativeLogsumexp.
11539type CumulativeLogsumexpAttr func(optionalAttr)
11540
11541// CumulativeLogsumexpExclusive sets the optional exclusive attribute to value.
11542//
11543// value: If `True`, perform exclusive cumulative log-sum-exp.
11544// If not specified, defaults to false
11545func CumulativeLogsumexpExclusive(value bool) CumulativeLogsumexpAttr {
11546	return func(m optionalAttr) {
11547		m["exclusive"] = value
11548	}
11549}
11550
11551// CumulativeLogsumexpReverse sets the optional reverse attribute to value.
11552//
11553// value: A `bool` (default: False).
11554// If not specified, defaults to false
11555func CumulativeLogsumexpReverse(value bool) CumulativeLogsumexpAttr {
11556	return func(m optionalAttr) {
11557		m["reverse"] = value
11558	}
11559}
11560
11561// Computes the cumulative log-sum-exp of the tensor `x` along `axis`.
11562//
11563// By default, this op performs an inclusive cumulative log-sum-exp,
11564// which means that the first
11565// element of the input is identical to the first element of the output:
11566// ```python
11567// tf.math.cumulative_logsumexp([a, b, c])  # => [a, log(exp(a) + exp(b)), log(exp(a) + exp(b) + exp(c))]
11568// ```
11569//
11570// By setting the `exclusive` kwarg to `True`, an exclusive cumulative log-sum-exp is
11571// performed instead:
11572// ```python
// tf.cumulative_logsumexp([a, b, c], exclusive=True)  # => [-inf, a, log(exp(a) + exp(b))]
11574// ```
// Note that the neutral element of the log-sum-exp operation is `-inf`;
// however, for performance reasons, the minimal value representable by the
// floating-point type is used instead.
11578//
11579// By setting the `reverse` kwarg to `True`, the cumulative log-sum-exp is performed in the
11580// opposite direction.
11581//
11582// Arguments:
11583//	x: A `Tensor`. Must be one of the following types: `float16`, `float32`, `float64`.
11584//	axis: A `Tensor` of type `int32` (default: 0). Must be in the range
11585// `[-rank(x), rank(x))`.
11586func CumulativeLogsumexp(scope *Scope, x tf.Output, axis tf.Output, optional ...CumulativeLogsumexpAttr) (out tf.Output) {
11587	if scope.Err() != nil {
11588		return
11589	}
11590	attrs := map[string]interface{}{}
11591	for _, a := range optional {
11592		a(attrs)
11593	}
11594	opspec := tf.OpSpec{
11595		Type: "CumulativeLogsumexp",
11596		Input: []tf.Input{
11597			x, axis,
11598		},
11599		Attrs: attrs,
11600	}
11601	op := scope.AddOperation(opspec)
11602	return op.Output(0)
11603}
11604
11605// CumprodAttr is an optional argument to Cumprod.
11606type CumprodAttr func(optionalAttr)
11607
11608// CumprodExclusive sets the optional exclusive attribute to value.
11609//
11610// value: If `True`, perform exclusive cumprod.
11611// If not specified, defaults to false
11612func CumprodExclusive(value bool) CumprodAttr {
11613	return func(m optionalAttr) {
11614		m["exclusive"] = value
11615	}
11616}
11617
11618// CumprodReverse sets the optional reverse attribute to value.
11619//
11620// value: A `bool` (default: False).
11621// If not specified, defaults to false
11622func CumprodReverse(value bool) CumprodAttr {
11623	return func(m optionalAttr) {
11624		m["reverse"] = value
11625	}
11626}
11627
11628// Compute the cumulative product of the tensor `x` along `axis`.
11629//
11630// By default, this op performs an inclusive cumprod, which means that the first
11631// element of the input is identical to the first element of the output:
11632//
11633// ```python
11634// tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
11635// ```
11636//
11637// By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
11638// performed instead:
11639//
11640// ```python
11641// tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
11642// ```
11643//
11644// By setting the `reverse` kwarg to `True`, the cumprod is performed in the
11645// opposite direction:
11646//
11647// ```python
11648// tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
11649// ```
11650//
11651// This is more efficient than using separate `tf.reverse` ops.
11652//
11653// The `reverse` and `exclusive` kwargs can also be combined:
11654//
11655// ```python
11656// tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
11657// ```
11658//
11659// Arguments:
11660//	x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
11661// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
11662// `complex128`, `qint8`, `quint8`, `qint32`, `half`.
11663//	axis: A `Tensor` of type `int32` (default: 0). Must be in the range
11664// `[-rank(x), rank(x))`.
11665func Cumprod(scope *Scope, x tf.Output, axis tf.Output, optional ...CumprodAttr) (out tf.Output) {
11666	if scope.Err() != nil {
11667		return
11668	}
11669	attrs := map[string]interface{}{}
11670	for _, a := range optional {
11671		a(attrs)
11672	}
11673	opspec := tf.OpSpec{
11674		Type: "Cumprod",
11675		Input: []tf.Input{
11676			x, axis,
11677		},
11678		Attrs: attrs,
11679	}
11680	op := scope.AddOperation(opspec)
11681	return op.Output(0)
11682}
11683
11684// Performs gradient updates of embedding tables.
11685//
11686// Arguments:
11687//	inputs: A TensorList of gradients with which to update embedding tables.
11688// This argument has the same length and shapes as the return value of
11689// RecvTPUEmbeddingActivations, but contains gradients of the model's loss
11690// with respect to the embedding activations. The embedding tables are updated
11691// from these gradients via the optimizer specified in the TPU embedding
11692// configuration given to tpu.initialize_system.
11693//	learning_rates: A TensorList of float32 scalars, one for each dynamic learning
11694// rate tag: see the comments in
11695// //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto.
11696// Multiple tables can share the same dynamic learning rate tag as specified
11697// in the configuration. If the learning rates for all tables are constant,
11698// this list should be empty.
11699//	config: Serialized TPUEmbeddingConfiguration proto.
11700//
11701// Returns the created operation.
11702func SendTPUEmbeddingGradients(scope *Scope, inputs []tf.Output, learning_rates []tf.Output, config string) (o *tf.Operation) {
11703	if scope.Err() != nil {
11704		return
11705	}
11706	attrs := map[string]interface{}{"config": config}
11707	opspec := tf.OpSpec{
11708		Type: "SendTPUEmbeddingGradients",
11709		Input: []tf.Input{
11710			tf.OutputList(inputs), tf.OutputList(learning_rates),
11711		},
11712		Attrs: attrs,
11713	}
11714	return scope.AddOperation(opspec)
11715}
11716
11717// CumsumAttr is an optional argument to Cumsum.
11718type CumsumAttr func(optionalAttr)
11719
11720// CumsumExclusive sets the optional exclusive attribute to value.
11721//
11722// value: If `True`, perform exclusive cumsum.
11723// If not specified, defaults to false
11724func CumsumExclusive(value bool) CumsumAttr {
11725	return func(m optionalAttr) {
11726		m["exclusive"] = value
11727	}
11728}
11729
11730// CumsumReverse sets the optional reverse attribute to value.
11731//
11732// value: A `bool` (default: False).
11733// If not specified, defaults to false
11734func CumsumReverse(value bool) CumsumAttr {
11735	return func(m optionalAttr) {
11736		m["reverse"] = value
11737	}
11738}
11739
11740// Compute the cumulative sum of the tensor `x` along `axis`.
11741//
11742// By default, this op performs an inclusive cumsum, which means that the first
11743// element of the input is identical to the first element of the output:
11744//
11745// ```python
11746// tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
11747// ```
11748//
11749// By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
11750// performed instead:
11751//
11752// ```python
11753// tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
11754// ```
11755//
11756// By setting the `reverse` kwarg to `True`, the cumsum is performed in the
11757// opposite direction:
11758//
11759// ```python
11760// tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
11761// ```
11762//
11763// This is more efficient than using separate `tf.reverse` ops.
11764//
11765// The `reverse` and `exclusive` kwargs can also be combined:
11766//
11767// ```python
11768// tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
11769// ```
11770//
11771// Arguments:
11772//	x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
11773// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
11774// `complex128`, `qint8`, `quint8`, `qint32`, `half`.
11775//	axis: A `Tensor` of type `int32` (default: 0). Must be in the range
11776// `[-rank(x), rank(x))`.
11777func Cumsum(scope *Scope, x tf.Output, axis tf.Output, optional ...CumsumAttr) (out tf.Output) {
11778	if scope.Err() != nil {
11779		return
11780	}
11781	attrs := map[string]interface{}{}
11782	for _, a := range optional {
11783		a(attrs)
11784	}
11785	opspec := tf.OpSpec{
11786		Type: "Cumsum",
11787		Input: []tf.Input{
11788			x, axis,
11789		},
11790		Attrs: attrs,
11791	}
11792	op := scope.AddOperation(opspec)
11793	return op.Output(0)
11794}
11795
11796// RaggedBincountAttr is an optional argument to RaggedBincount.
11797type RaggedBincountAttr func(optionalAttr)
11798
11799// RaggedBincountBinaryOutput sets the optional binary_output attribute to value.
11800//
// value: bool; whether the kernel should record only the presence of each value
// (binary output) instead of the number of occurrences.
11802// If not specified, defaults to false
11803func RaggedBincountBinaryOutput(value bool) RaggedBincountAttr {
11804	return func(m optionalAttr) {
11805		m["binary_output"] = value
11806	}
11807}
11808
11809// Counts the number of occurrences of each value in an integer array.
11810//
// Outputs a vector with length `size` and the same dtype as `weights`. If
// `weights` are empty, then index `i` stores the number of times the value `i` is
// counted in `values`. If `weights` are non-empty, then index `i` stores the sum of
// the value in `weights` at each index where the corresponding value in `values` is
// `i`.
//
// Entries of `values` outside the range [0, size) are ignored.
11818//
11819// Arguments:
11820//	splits: 1D int64 `Tensor`.
11821//	values: 2D int `Tensor`.
11822//	size: non-negative int scalar `Tensor`.
//	weights: is an int32, int64, float32, or float64 `Tensor` with the same
// shape as `values`, or a length-0 `Tensor`, in which case it acts as all weights
// equal to 1.
11826//
// Returns 1D `Tensor` with length equal to `size` or 2D `Tensor` with shape `[batch_size, size]`.
11828// The counts or summed weights for each value in the range [0, size).
11829func RaggedBincount(scope *Scope, splits tf.Output, values tf.Output, size tf.Output, weights tf.Output, optional ...RaggedBincountAttr) (output tf.Output) {
11830	if scope.Err() != nil {
11831		return
11832	}
11833	attrs := map[string]interface{}{}
11834	for _, a := range optional {
11835		a(attrs)
11836	}
11837	opspec := tf.OpSpec{
11838		Type: "RaggedBincount",
11839		Input: []tf.Input{
11840			splits, values, size, weights,
11841		},
11842		Attrs: attrs,
11843	}
11844	op := scope.AddOperation(opspec)
11845	return op.Output(0)
11846}
11847
11848// SparseBincountAttr is an optional argument to SparseBincount.
11849type SparseBincountAttr func(optionalAttr)
11850
11851// SparseBincountBinaryOutput sets the optional binary_output attribute to value.
11852//
// value: bool; whether the kernel should record only the presence of each value
// (binary output) instead of the number of occurrences.
11854// If not specified, defaults to false
11855func SparseBincountBinaryOutput(value bool) SparseBincountAttr {
11856	return func(m optionalAttr) {
11857		m["binary_output"] = value
11858	}
11859}
11860
11861// Counts the number of occurrences of each value in an integer array.
11862//
// Outputs a vector with length `size` and the same dtype as `weights`. If
// `weights` are empty, then index `i` stores the number of times the value `i` is
// counted in `values`. If `weights` are non-empty, then index `i` stores the sum of
// the value in `weights` at each index where the corresponding value in `values` is
// `i`.
//
// Entries of `values` outside the range [0, size) are ignored.
11870//
11871// Arguments:
11872//	indices: 2D int64 `Tensor`.
11873//	values: 1D int `Tensor`.
11874//	dense_shape: 1D int64 `Tensor`.
11875//	size: non-negative int scalar `Tensor`.
//	weights: is an int32, int64, float32, or float64 `Tensor` with the same
// shape as `values`, or a length-0 `Tensor`, in which case it acts as all weights
// equal to 1.
11879//
// Returns 1D `Tensor` with length equal to `size` or 2D `Tensor` with shape `[batch_size, size]`.
11881// The counts or summed weights for each value in the range [0, size).
11882func SparseBincount(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, size tf.Output, weights tf.Output, optional ...SparseBincountAttr) (output tf.Output) {
11883	if scope.Err() != nil {
11884		return
11885	}
11886	attrs := map[string]interface{}{}
11887	for _, a := range optional {
11888		a(attrs)
11889	}
11890	opspec := tf.OpSpec{
11891		Type: "SparseBincount",
11892		Input: []tf.Input{
11893			indices, values, dense_shape, size, weights,
11894		},
11895		Attrs: attrs,
11896	}
11897	op := scope.AddOperation(opspec)
11898	return op.Output(0)
11899}
11900
// Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`
//
// if `features < 0`, `scale * features` otherwise.
11904//
11905// To be used together with
11906// `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`.
11907// For correct dropout, use `tf.contrib.nn.alpha_dropout`.
11908//
11909// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
11910func Selu(scope *Scope, features tf.Output) (activations tf.Output) {
11911	if scope.Err() != nil {
11912		return
11913	}
11914	opspec := tf.OpSpec{
11915		Type: "Selu",
11916		Input: []tf.Input{
11917			features,
11918		},
11919	}
11920	op := scope.AddOperation(opspec)
11921	return op.Output(0)
11922}
11923
11924// DenseBincountAttr is an optional argument to DenseBincount.
11925type DenseBincountAttr func(optionalAttr)
11926
11927// DenseBincountBinaryOutput sets the optional binary_output attribute to value.
11928//
// value: bool; whether the kernel should record only the presence of each value
// (binary output) instead of the number of occurrences.
11930// If not specified, defaults to false
11931func DenseBincountBinaryOutput(value bool) DenseBincountAttr {
11932	return func(m optionalAttr) {
11933		m["binary_output"] = value
11934	}
11935}
11936
11937// Counts the number of occurrences of each value in an integer array.
11938//
// Outputs a vector with length `size` and the same dtype as `weights`. If
// `weights` are empty, then index `i` stores the number of times the value `i` is
// counted in `input`. If `weights` are non-empty, then index `i` stores the sum of
// the value in `weights` at each index where the corresponding value in `input` is
// `i`.
//
// Entries of `input` outside the range [0, size) are ignored.
11946//
11947// Arguments:
11948//	input: 1D or 2D int `Tensor`.
11949//	size: non-negative int scalar `Tensor`.
//	weights: is an int32, int64, float32, or float64 `Tensor` with the same
// shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights
// equal to 1.
11953//
// Returns 1D `Tensor` with length equal to `size` or 2D `Tensor` with shape `[batch_size, size]`.
11955// The counts or summed weights for each value in the range [0, size).
11956func DenseBincount(scope *Scope, input tf.Output, size tf.Output, weights tf.Output, optional ...DenseBincountAttr) (output tf.Output) {
11957	if scope.Err() != nil {
11958		return
11959	}
11960	attrs := map[string]interface{}{}
11961	for _, a := range optional {
11962		a(attrs)
11963	}
11964	opspec := tf.OpSpec{
11965		Type: "DenseBincount",
11966		Input: []tf.Input{
11967			input, size, weights,
11968		},
11969		Attrs: attrs,
11970	}
11971	op := scope.AddOperation(opspec)
11972	return op.Output(0)
11973}
11974
11975// Counts the number of occurrences of each value in an integer array.
11976//
11977// Outputs a vector with length `size` and the same dtype as `weights`. If
11978// `weights` are empty, then index `i` stores the number of times the value `i` is
11979// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
11980// the value in `weights` at each index where the corresponding value in `arr` is
11981// `i`.
11982//
11983// Values in `arr` outside of the range [0, size) are ignored.
11984//
11985// Arguments:
11986//	arr: int32 `Tensor`.
11987//	size: non-negative int32 scalar `Tensor`.
11988//	weights: is an int32, int64, float32, or float64 `Tensor` with the same
11989// shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
11990// equal to 1.
11991//
11992// Returns 1D `Tensor` with length equal to `size`. The counts or summed weights for
11993// each value in the range [0, size).
11994func Bincount(scope *Scope, arr tf.Output, size tf.Output, weights tf.Output) (bins tf.Output) {
11995	if scope.Err() != nil {
11996		return
11997	}
11998	opspec := tf.OpSpec{
11999		Type: "Bincount",
12000		Input: []tf.Input{
12001			arr, size, weights,
12002		},
12003	}
12004	op := scope.AddOperation(opspec)
12005	return op.Output(0)
12006}
12007
12008// DestroyResourceOpAttr is an optional argument to DestroyResourceOp.
12009type DestroyResourceOpAttr func(optionalAttr)
12010
12011// DestroyResourceOpIgnoreLookupError sets the optional ignore_lookup_error attribute to value.
12012//
12013// value: whether to ignore the error when the resource
12014// doesn't exist.
12015// If not specified, defaults to true
12016func DestroyResourceOpIgnoreLookupError(value bool) DestroyResourceOpAttr {
12017	return func(m optionalAttr) {
12018		m["ignore_lookup_error"] = value
12019	}
12020}
12021
12022// Deletes the resource specified by the handle.
12023//
12024// All subsequent operations using the resource will result in a NotFound
12025// error status.
12026//
12027// Arguments:
12028//	resource: handle to the resource to delete.
12029//
12030// Returns the created operation.
12031func DestroyResourceOp(scope *Scope, resource tf.Output, optional ...DestroyResourceOpAttr) (o *tf.Operation) {
12032	if scope.Err() != nil {
12033		return
12034	}
12035	attrs := map[string]interface{}{}
12036	for _, a := range optional {
12037		a(attrs)
12038	}
12039	opspec := tf.OpSpec{
12040		Type: "DestroyResourceOp",
12041		Input: []tf.Input{
12042			resource,
12043		},
12044		Attrs: attrs,
12045	}
12046	return scope.AddOperation(opspec)
12047}
12048
12049// HistogramFixedWidthAttr is an optional argument to HistogramFixedWidth.
12050type HistogramFixedWidthAttr func(optionalAttr)
12051
12052// HistogramFixedWidthDtype sets the optional dtype attribute to value.
12053// If not specified, defaults to DT_INT32
12054func HistogramFixedWidthDtype(value tf.DataType) HistogramFixedWidthAttr {
12055	return func(m optionalAttr) {
12056		m["dtype"] = value
12057	}
12058}
12059
12060// Return histogram of values.
12061//
12062// Given the tensor `values`, this operation returns a rank 1 histogram counting
12063// the number of entries in `values` that fall into every bin.  The bins are
12064// equal width and determined by the arguments `value_range` and `nbins`.
12065//
12066// ```python
12067// # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
12068// nbins = 5
12069// value_range = [0.0, 5.0]
12070// new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
12071//
// with tf.compat.v1.Session() as sess:
//   hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
//   print(sess.run(hist))  # => [2, 1, 1, 0, 2]
12076// ```
12077//
12078// Arguments:
12079//	values: Numeric `Tensor`.
12080//	value_range: Shape [2] `Tensor` of same `dtype` as `values`.
12081// values <= value_range[0] will be mapped to hist[0],
12082// values >= value_range[1] will be mapped to hist[-1].
12083//	nbins: Scalar `int32 Tensor`.  Number of histogram bins.
12084//
12085// Returns A 1-D `Tensor` holding histogram of values.
12086func HistogramFixedWidth(scope *Scope, values tf.Output, value_range tf.Output, nbins tf.Output, optional ...HistogramFixedWidthAttr) (out tf.Output) {
12087	if scope.Err() != nil {
12088		return
12089	}
12090	attrs := map[string]interface{}{}
12091	for _, a := range optional {
12092		a(attrs)
12093	}
12094	opspec := tf.OpSpec{
12095		Type: "HistogramFixedWidth",
12096		Input: []tf.Input{
12097			values, value_range, nbins,
12098		},
12099		Attrs: attrs,
12100	}
12101	op := scope.AddOperation(opspec)
12102	return op.Output(0)
12103}
12104
12105// Compute the pairwise cross product.
12106//
12107// `a` and `b` must be the same shape; they can either be simple 3-element vectors,
12108// or any shape where the innermost dimension is 3. In the latter case, each pair
12109// of corresponding 3-element vectors is cross-multiplied independently.
12110//
12111// Arguments:
12112//	a: A tensor containing 3-element vectors.
12113//	b: Another tensor, of same type and shape as `a`.
12114//
12115// Returns Pairwise cross product of the vectors in `a` and `b`.
12116func Cross(scope *Scope, a tf.Output, b tf.Output) (product tf.Output) {
12117	if scope.Err() != nil {
12118		return
12119	}
12120	opspec := tf.OpSpec{
12121		Type: "Cross",
12122		Input: []tf.Input{
12123			a, b,
12124		},
12125	}
12126	op := scope.AddOperation(opspec)
12127	return op.Output(0)
12128}
12129
12130// Returns the complex conjugate of a complex number.
12131//
12132// Given a tensor `input` of complex numbers, this operation returns a tensor of
12133// complex numbers that are the complex conjugate of each element in `input`. The
12134// complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
12135// real part and *b* is the imaginary part.
12136//
12137// The complex conjugate returned by this operation is of the form \\(a - bj\\).
12138//
12139// For example:
12140//
12141// ```
12142// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
12143// tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
12144// ```
12145func Conj(scope *Scope, input tf.Output) (output tf.Output) {
12146	if scope.Err() != nil {
12147		return
12148	}
12149	opspec := tf.OpSpec{
12150		Type: "Conj",
12151		Input: []tf.Input{
12152			input,
12153		},
12154	}
12155	op := scope.AddOperation(opspec)
12156	return op.Output(0)
12157}
12158
12159// AngleAttr is an optional argument to Angle.
12160type AngleAttr func(optionalAttr)
12161
12162// AngleTout sets the optional Tout attribute to value.
12163// If not specified, defaults to DT_FLOAT
12164func AngleTout(value tf.DataType) AngleAttr {
12165	return func(m optionalAttr) {
12166		m["Tout"] = value
12167	}
12168}
12169
12170// Returns the argument of a complex number.
12171//
12172// Given a tensor `input` of complex numbers, this operation returns a tensor of
12173// type `float` that is the argument of each element in `input`. All elements in
12174// `input` must be complex numbers of the form \\(a + bj\\), where *a*
12175// is the real part and *b* is the imaginary part.
12176//
12177// The argument returned by this operation is of the form \\(atan2(b, a)\\).
12178//
12179// For example:
12180//
12181// ```
12182// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
12183// tf.angle(input) ==> [2.0132, 1.056]
12184// ```
12185//
12186// @compatibility(numpy)
12187// Equivalent to np.angle.
12188// @end_compatibility
12189func Angle(scope *Scope, input tf.Output, optional ...AngleAttr) (output tf.Output) {
12190	if scope.Err() != nil {
12191		return
12192	}
12193	attrs := map[string]interface{}{}
12194	for _, a := range optional {
12195		a(attrs)
12196	}
12197	opspec := tf.OpSpec{
12198		Type: "Angle",
12199		Input: []tf.Input{
12200			input,
12201		},
12202		Attrs: attrs,
12203	}
12204	op := scope.AddOperation(opspec)
12205	return op.Output(0)
12206}
12207
12208// ImagAttr is an optional argument to Imag.
12209type ImagAttr func(optionalAttr)
12210
12211// ImagTout sets the optional Tout attribute to value.
12212// If not specified, defaults to DT_FLOAT
12213func ImagTout(value tf.DataType) ImagAttr {
12214	return func(m optionalAttr) {
12215		m["Tout"] = value
12216	}
12217}
12218
12219// Returns the imaginary part of a complex number.
12220//
12221// Given a tensor `input` of complex numbers, this operation returns a tensor of
12222// type `float` that is the imaginary part of each element in `input`. All
// elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
// is the real part and *b* is the imaginary part, which this operation returns.
12225//
12226// For example:
12227//
12228// ```
12229// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
12230// tf.imag(input) ==> [4.75, 5.75]
12231// ```
12232func Imag(scope *Scope, input tf.Output, optional ...ImagAttr) (output tf.Output) {
12233	if scope.Err() != nil {
12234		return
12235	}
12236	attrs := map[string]interface{}{}
12237	for _, a := range optional {
12238		a(attrs)
12239	}
12240	opspec := tf.OpSpec{
12241		Type: "Imag",
12242		Input: []tf.Input{
12243			input,
12244		},
12245		Attrs: attrs,
12246	}
12247	op := scope.AddOperation(opspec)
12248	return op.Output(0)
12249}
12250
12251// Generates values in an interval.
12252//
// A sequence of `num` evenly-spaced values is generated beginning at `start`.
// If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`,
// so that the last one is exactly `stop`.
12256//
12257// For example:
12258//
12259// ```
12260// tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
12261// ```
12262//
12263// Arguments:
12264//	start: 0-D tensor. First entry in the range.
12265//	stop: 0-D tensor. Last entry in the range.
12266//	num: 0-D tensor. Number of values to generate.
12267//
12268// Returns 1-D. The generated values.
12269func LinSpace(scope *Scope, start tf.Output, stop tf.Output, num tf.Output) (output tf.Output) {
12270	if scope.Err() != nil {
12271		return
12272	}
12273	opspec := tf.OpSpec{
12274		Type: "LinSpace",
12275		Input: []tf.Input{
12276			start, stop, num,
12277		},
12278	}
12279	op := scope.AddOperation(opspec)
12280	return op.Output(0)
12281}
12282
12283// AnyAttr is an optional argument to Any.
12284type AnyAttr func(optionalAttr)
12285
12286// AnyKeepDims sets the optional keep_dims attribute to value.
12287//
12288// value: If true, retain reduced dimensions with length 1.
12289// If not specified, defaults to false
12290func AnyKeepDims(value bool) AnyAttr {
12291	return func(m optionalAttr) {
12292		m["keep_dims"] = value
12293	}
12294}
12295
12296// Computes the "logical or" of elements across dimensions of a tensor.
12297//
12298// Reduces `input` along the dimensions given in `axis`. Unless
12299// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
12300// `axis`. If `keep_dims` is true, the reduced dimensions are
12301// retained with length 1.
12302//
12303// Arguments:
12304//	input: The tensor to reduce.
12305//	axis: The dimensions to reduce. Must be in the range
12306// `[-rank(input), rank(input))`.
12307//
12308// Returns The reduced tensor.
12309func Any(scope *Scope, input tf.Output, axis tf.Output, optional ...AnyAttr) (output tf.Output) {
12310	if scope.Err() != nil {
12311		return
12312	}
12313	attrs := map[string]interface{}{}
12314	for _, a := range optional {
12315		a(attrs)
12316	}
12317	opspec := tf.OpSpec{
12318		Type: "Any",
12319		Input: []tf.Input{
12320			input, axis,
12321		},
12322		Attrs: attrs,
12323	}
12324	op := scope.AddOperation(opspec)
12325	return op.Output(0)
12326}
12327
12328// Computes the gradient of morphological 2-D dilation with respect to the input.
12329//
12330// Arguments:
12331//	input: 4-D with shape `[batch, in_height, in_width, depth]`.
12332//	filter: 3-D with shape `[filter_height, filter_width, depth]`.
12333//	out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
12334//	strides: 1-D of length 4. The stride of the sliding window for each dimension of
12335// the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
12336//	rates: 1-D of length 4. The input stride for atrous morphological dilation.
12337// Must be: `[1, rate_height, rate_width, 1]`.
12338//	padding: The type of padding algorithm to use.
12339//
12340// Returns 4-D with shape `[batch, in_height, in_width, depth]`.
12341func Dilation2DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (in_backprop tf.Output) {
12342	if scope.Err() != nil {
12343		return
12344	}
12345	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
12346	opspec := tf.OpSpec{
12347		Type: "Dilation2DBackpropInput",
12348		Input: []tf.Input{
12349			input, filter, out_backprop,
12350		},
12351		Attrs: attrs,
12352	}
12353	op := scope.AddOperation(opspec)
12354	return op.Output(0)
12355}
12356
12357// AllAttr is an optional argument to All.
12358type AllAttr func(optionalAttr)
12359
12360// AllKeepDims sets the optional keep_dims attribute to value.
12361//
12362// value: If true, retain reduced dimensions with length 1.
12363// If not specified, defaults to false
12364func AllKeepDims(value bool) AllAttr {
12365	return func(m optionalAttr) {
12366		m["keep_dims"] = value
12367	}
12368}
12369
12370// Computes the "logical and" of elements across dimensions of a tensor.
12371//
12372// Reduces `input` along the dimensions given in `axis`. Unless
12373// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
12374// `axis`. If `keep_dims` is true, the reduced dimensions are
12375// retained with length 1.
12376//
12377// Arguments:
12378//	input: The tensor to reduce.
12379//	axis: The dimensions to reduce. Must be in the range
12380// `[-rank(input), rank(input))`.
12381//
12382// Returns The reduced tensor.
12383func All(scope *Scope, input tf.Output, axis tf.Output, optional ...AllAttr) (output tf.Output) {
12384	if scope.Err() != nil {
12385		return
12386	}
12387	attrs := map[string]interface{}{}
12388	for _, a := range optional {
12389		a(attrs)
12390	}
12391	opspec := tf.OpSpec{
12392		Type: "All",
12393		Input: []tf.Input{
12394			input, axis,
12395		},
12396		Attrs: attrs,
12397	}
12398	op := scope.AddOperation(opspec)
12399	return op.Output(0)
12400}
12401
12402// Computes gradients for SparseSegmentSqrtN.
12403//
12404// Returns tensor "output" with same shape as grad, except for dimension 0 whose
12405// value is output_dim0.
12406//
12407// Arguments:
12408//	grad: gradient propagated to the SparseSegmentSqrtN op.
12409//	indices: indices passed to the corresponding SparseSegmentSqrtN op.
12410//	segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.
12411//	output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.
12412func SparseSegmentSqrtNGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
12413	if scope.Err() != nil {
12414		return
12415	}
12416	opspec := tf.OpSpec{
12417		Type: "SparseSegmentSqrtNGrad",
12418		Input: []tf.Input{
12419			grad, indices, segment_ids, output_dim0,
12420		},
12421	}
12422	op := scope.AddOperation(opspec)
12423	return op.Output(0)
12424}
12425
12426// Computes the mean along sparse segments of a tensor.
12427//
12428// See `tf.sparse.segment_sum` for usage examples.
12429//
12430// Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
12431// dimension, selecting a subset of dimension 0, specified by `indices`.
12432//
12433// Arguments:
12434//
12435//	indices: A 1-D tensor. Has same rank as `segment_ids`.
12436//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
12437//
12438// Returns Has same shape as data, except for dimension 0 which
12439// has size `k`, the number of segments.
12440func SparseSegmentMean(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
12441	if scope.Err() != nil {
12442		return
12443	}
12444	opspec := tf.OpSpec{
12445		Type: "SparseSegmentMean",
12446		Input: []tf.Input{
12447			data, indices, segment_ids,
12448		},
12449	}
12450	op := scope.AddOperation(opspec)
12451	return op.Output(0)
12452}
12453
12454// Computes gradients for SparseSegmentSum.
12455//
12456// Returns tensor "output" with same shape as grad, except for dimension 0 whose
12457// value is output_dim0.
12458//
12459// Arguments:
12460//	grad: gradient propagated to the SparseSegmentSum op.
12461//	indices: indices passed to the corresponding SparseSegmentSum op.
12462//	segment_ids: segment_ids passed to the corresponding SparseSegmentSum op.
12463//	output_dim0: dimension 0 of "data" passed to SparseSegmentSum op.
12464func SparseSegmentSumGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
12465	if scope.Err() != nil {
12466		return
12467	}
12468	opspec := tf.OpSpec{
12469		Type: "SparseSegmentSumGrad",
12470		Input: []tf.Input{
12471			grad, indices, segment_ids, output_dim0,
12472		},
12473	}
12474	op := scope.AddOperation(opspec)
12475	return op.Output(0)
12476}
12477
12478// Computes the sum along sparse segments of a tensor.
12479//
12480// Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
12481// missing, the `output` tensor at that position will be zeroed.
12482//
12483// Read
12484// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation)
12485// for an explanation of segments.
12486//
12487// For example:
12488//
12489// ```python
12490// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
12491//
12492// tf.sparse_segment_sum_with_num_segments(
12493//     c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
12494// # => [[0 0 0 0]
12495// #     [0 0 0 0]
12496// #     [0 0 0 0]]
12497//
12498// tf.sparse_segment_sum_with_num_segments(c,
12499//                                         tf.constant([0, 1]),
//                                         tf.constant([0, 2]),
//                                         num_segments=4)
12502// # => [[ 1  2  3  4]
12503// #     [ 0  0  0  0]
12504// #     [-1 -2 -3 -4]
12505// #     [ 0  0  0  0]]
12506// ```
12507//
12508// Arguments:
12509//
12510//	indices: A 1-D tensor. Has same rank as `segment_ids`.
12511//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
12512//	num_segments: Should equal the number of distinct segment IDs.
12513//
12514// Returns Has same shape as data, except for dimension 0 which
12515// has size `num_segments`.
12516func SparseSegmentSumWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
12517	if scope.Err() != nil {
12518		return
12519	}
12520	opspec := tf.OpSpec{
12521		Type: "SparseSegmentSumWithNumSegments",
12522		Input: []tf.Input{
12523			data, indices, segment_ids, num_segments,
12524		},
12525	}
12526	op := scope.AddOperation(opspec)
12527	return op.Output(0)
12528}
12529
12530// Computes the product along segments of a tensor.
12531//
12532// Read
12533// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
12534// for an explanation of segments.
12535//
12536// This operator is similar to the unsorted segment sum operator found
12537// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
12538// Instead of computing the sum over segments, it computes the product of all
12539// entries belonging to a segment such that:
12540//
12541// \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples
12542// `j...` such that `segment_ids[j...] == i`.
12543//
12544// For example:
12545//
12546// ``` python
12547// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
12548// tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2)
12549// # ==> [[ 4,  6, 6, 4],
12550// #       [5,  6, 7, 8]]
12551// ```
12552//
12553// If there is no entry for a given segment ID `i`, it outputs 1.
12554//
12555// If the given segment ID `i` is negative, then the corresponding value is
12556// dropped, and will not be included in the result.
12557//
12558// Arguments:
12559//
12560//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
12561//
12562//
12563// Returns Has same shape as data, except for the first `segment_ids.rank`
12564// dimensions, which are replaced with a single dimension which has size
12565// `num_segments`.
12566func UnsortedSegmentProd(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
12567	if scope.Err() != nil {
12568		return
12569	}
12570	opspec := tf.OpSpec{
12571		Type: "UnsortedSegmentProd",
12572		Input: []tf.Input{
12573			data, segment_ids, num_segments,
12574		},
12575	}
12576	op := scope.AddOperation(opspec)
12577	return op.Output(0)
12578}
12579
12580// ResourceScatterNdSubAttr is an optional argument to ResourceScatterNdSub.
12581type ResourceScatterNdSubAttr func(optionalAttr)
12582
12583// ResourceScatterNdSubUseLocking sets the optional use_locking attribute to value.
12584//
12585// value: An optional bool. Defaults to True. If True, the assignment will
12586// be protected by a lock; otherwise the behavior is undefined,
12587// but may exhibit less contention.
12588// If not specified, defaults to true
12589func ResourceScatterNdSubUseLocking(value bool) ResourceScatterNdSubAttr {
12590	return func(m optionalAttr) {
12591		m["use_locking"] = value
12592	}
12593}
12594
12595// Applies sparse subtraction to individual values or slices in a Variable.
12596//
12597// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
12598//
// `indices` must be an integer tensor containing indices into `ref`.
12600// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
12601//
12602// The innermost dimension of `indices` (with length `K`) corresponds to
12603// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
12604// dimension of `ref`.
12605//
// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
12607//
12608// ```
12609// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
12610// ```
12611//
12612// For example, say we want to subtract 4 scattered elements from a rank-1 tensor
12613// with 8 elements. In Python, that subtraction would look like this:
12614//
12615// ```python
12616// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
12617// indices = tf.constant([[4], [3], [1], [7]])
12618// updates = tf.constant([9, 10, 11, 12])
12619// sub = tf.scatter_nd_sub(ref, indices, updates)
12620// with tf.Session() as sess:
//   print(sess.run(sub))
12622// ```
12623//
12624// The resulting update to ref would look like this:
12625//
12626//     [1, -9, 3, -6, -4, 6, 7, -4]
12627//
12628// See `tf.scatter_nd` for more details about how to make updates to
12629// slices.
12630//
12631// Arguments:
12632//	ref: A resource handle. Must be from a VarHandleOp.
12633//	indices: A Tensor. Must be one of the following types: int32, int64.
12634// A tensor of indices into ref.
12635//	updates: A Tensor. Must have the same type as ref. A tensor of
12636// values to add to ref.
12637//
12638// Returns the created operation.
12639func ResourceScatterNdSub(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdSubAttr) (o *tf.Operation) {
12640	if scope.Err() != nil {
12641		return
12642	}
12643	attrs := map[string]interface{}{}
12644	for _, a := range optional {
12645		a(attrs)
12646	}
12647	opspec := tf.OpSpec{
12648		Type: "ResourceScatterNdSub",
12649		Input: []tf.Input{
12650			ref, indices, updates,
12651		},
12652		Attrs: attrs,
12653	}
12654	return scope.AddOperation(opspec)
12655}
12656
12657// Computes the minimum along segments of a tensor.
12658//
12659// Read
12660// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
12661// for an explanation of segments.
12662//
12663// This operator is similar to the unsorted segment sum operator found
12664// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
12665// Instead of computing the sum over segments, it computes the minimum such that:
12666//
// \\(output_i = \min_{j...} data[j...]\\) where min is over tuples `j...` such
// that `segment_ids[j...] == i`.
12669//
12670// If the minimum is empty for a given segment ID `i`, it outputs the largest
12671// possible value for the specific numeric type,
12672// `output[i] = numeric_limits<T>::max()`.
12673//
12674// For example:
12675//
12676// ``` python
12677// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
12678// tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2)
12679// # ==> [[ 1,  2, 2, 1],
12680// #       [5,  6, 7, 8]]
12681// ```
12682//
12683// If the given segment ID `i` is negative, then the corresponding value is
12684// dropped, and will not be included in the result.
12685//
12686// Arguments:
12687//
12688//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
12689//
12690//
12691// Returns Has same shape as data, except for the first `segment_ids.rank`
12692// dimensions, which are replaced with a single dimension which has size
12693// `num_segments`.
12694func UnsortedSegmentMin(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
12695	if scope.Err() != nil {
12696		return
12697	}
12698	opspec := tf.OpSpec{
12699		Type: "UnsortedSegmentMin",
12700		Input: []tf.Input{
12701			data, segment_ids, num_segments,
12702		},
12703	}
12704	op := scope.AddOperation(opspec)
12705	return op.Output(0)
12706}
12707
12708// Computes the maximum along segments of a tensor.
12709//
12710// Read
12711// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
12712// for an explanation of segments.
12713//
12714// This operator is similar to the unsorted segment sum operator found
12715// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
12716// Instead of computing the sum over segments, it computes the maximum such that:
12717//
12718// \\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such
12719// that `segment_ids[j...] == i`.
12720//
12721// If the maximum is empty for a given segment ID `i`, it outputs the smallest
12722// possible value for the specific numeric type,
12723// `output[i] = numeric_limits<T>::lowest()`.
12724//
12725// If the given segment ID `i` is negative, then the corresponding value is
12726// dropped, and will not be included in the result.
12727//
12728// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12729// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
12730// </div>
12731//
12732// For example:
12733//
12734// ``` python
12735// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
12736// tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2)
12737// # ==> [[ 4,  3, 3, 4],
12738// #       [5,  6, 7, 8]]
12739// ```
12740//
12741//
12742// Arguments:
12743//
12744//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
12745//
12746//
12747// Returns Has same shape as data, except for the first `segment_ids.rank`
12748// dimensions, which are replaced with a single dimension which has size
12749// `num_segments`.
12750func UnsortedSegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
12751	if scope.Err() != nil {
12752		return
12753	}
12754	opspec := tf.OpSpec{
12755		Type: "UnsortedSegmentMax",
12756		Input: []tf.Input{
12757			data, segment_ids, num_segments,
12758		},
12759	}
12760	op := scope.AddOperation(opspec)
12761	return op.Output(0)
12762}
12763
12764// Computes the minimum along segments of a tensor.
12765//
12766// Read
12767// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
12768// for an explanation of segments.
12769//
12770// Computes a tensor such that
12771// \\(output_i = \min_j(data_j)\\) where `min` is over `j` such
12772// that `segment_ids[j] == i`.
12773//
12774// If the min is empty for a given segment ID `i`, `output[i] = 0`.
12775//
12776// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12777// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
12778// </div>
12779//
12780// For example:
12781//
12782// ```
12783// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
12784// tf.segment_min(c, tf.constant([0, 0, 1]))
12785// # ==> [[1, 2, 2, 1],
12786// #      [5, 6, 7, 8]]
12787// ```
12788//
12789// Arguments:
12790//
12791//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
12792// first dimension.  Values should be sorted and can be repeated.
12793//
12794// Returns Has same shape as data, except for dimension 0 which
12795// has size `k`, the number of segments.
12796func SegmentMin(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
12797	if scope.Err() != nil {
12798		return
12799	}
12800	opspec := tf.OpSpec{
12801		Type: "SegmentMin",
12802		Input: []tf.Input{
12803			data, segment_ids,
12804		},
12805	}
12806	op := scope.AddOperation(opspec)
12807	return op.Output(0)
12808}
12809
12810// Computes the mean along segments of a tensor.
12811//
12812// Read
12813// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
12814// for an explanation of segments.
12815//
12816// Computes a tensor such that
// \\(output_i = \frac{\sum_j data_j}{N}\\) where the sum is
// over `j` such that `segment_ids[j] == i` and `N` is the total number of
// values summed.
12820//
12821// If the mean is empty for a given segment ID `i`, `output[i] = 0`.
12822//
12823// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12824// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
12825// </div>
12826//
12827// For example:
12828//
12829// ```
12830// c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
12831// tf.segment_mean(c, tf.constant([0, 0, 1]))
12832// # ==> [[2.5, 2.5, 2.5, 2.5],
12833// #      [5, 6, 7, 8]]
12834// ```
12835//
12836//
12837// Arguments:
12838//
12839//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
12840// first dimension.  Values should be sorted and can be repeated.
12841//
12842// Returns Has same shape as data, except for dimension 0 which
12843// has size `k`, the number of segments.
12844func SegmentMean(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
12845	if scope.Err() != nil {
12846		return
12847	}
12848	opspec := tf.OpSpec{
12849		Type: "SegmentMean",
12850		Input: []tf.Input{
12851			data, segment_ids,
12852		},
12853	}
12854	op := scope.AddOperation(opspec)
12855	return op.Output(0)
12856}
12857
12858// ResourceApplyAdamWithAmsgradAttr is an optional argument to ResourceApplyAdamWithAmsgrad.
12859type ResourceApplyAdamWithAmsgradAttr func(optionalAttr)
12860
12861// ResourceApplyAdamWithAmsgradUseLocking sets the optional use_locking attribute to value.
12862//
12863// value: If `True`, updating of the var, m, and v tensors will be protected
12864// by a lock; otherwise the behavior is undefined, but may exhibit less
12865// contention.
12866// If not specified, defaults to false
12867func ResourceApplyAdamWithAmsgradUseLocking(value bool) ResourceApplyAdamWithAmsgradAttr {
12868	return func(m optionalAttr) {
12869		m["use_locking"] = value
12870	}
12871}
12872
12873// Update '*var' according to the Adam algorithm.
12874//
// $$\text{lr}_t := \mathrm{learning\_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
// $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
// $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$
// $$\hat{v}_t := \max\{\hat{v}_{t-1}, v_t\}$$
// $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$
12880//
12881// Arguments:
12882//	var_: Should be from a Variable().
12883//	m: Should be from a Variable().
12884//	v: Should be from a Variable().
12885//	vhat: Should be from a Variable().
12886//	beta1_power: Must be a scalar.
12887//	beta2_power: Must be a scalar.
12888//	lr: Scaling factor. Must be a scalar.
12889//	beta1: Momentum factor. Must be a scalar.
12890//	beta2: Momentum factor. Must be a scalar.
12891//	epsilon: Ridge term. Must be a scalar.
12892//	grad: The gradient.
12893//
12894// Returns the created operation.
12895func ResourceApplyAdamWithAmsgrad(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, vhat tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamWithAmsgradAttr) (o *tf.Operation) {
12896	if scope.Err() != nil {
12897		return
12898	}
12899	attrs := map[string]interface{}{}
12900	for _, a := range optional {
12901		a(attrs)
12902	}
12903	opspec := tf.OpSpec{
12904		Type: "ResourceApplyAdamWithAmsgrad",
12905		Input: []tf.Input{
12906			var_, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad,
12907		},
12908		Attrs: attrs,
12909	}
12910	return scope.AddOperation(opspec)
12911}
12912
12913// Computes the sum along segments of a tensor.
12914//
12915// Read
12916// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
12917// for an explanation of segments.
12918//
12919// Computes a tensor such that
// \\(output_i = \sum_j data_j\\) where the sum is over `j` such
// that `segment_ids[j] == i`.
12922//
12923// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
12924//
12925// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12926// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
12927// </div>
12928//
12929// For example:
12930//
12931// ```
12932// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
12933// tf.segment_sum(c, tf.constant([0, 0, 1]))
12934// # ==> [[5, 5, 5, 5],
12935// #      [5, 6, 7, 8]]
12936// ```
12937//
12938//
12939// Arguments:
12940//
12941//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
12942// first dimension.  Values should be sorted and can be repeated.
12943//
12944// Returns Has same shape as data, except for dimension 0 which
12945// has size `k`, the number of segments.
12946func SegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
12947	if scope.Err() != nil {
12948		return
12949	}
12950	opspec := tf.OpSpec{
12951		Type: "SegmentSum",
12952		Input: []tf.Input{
12953			data, segment_ids,
12954		},
12955	}
12956	op := scope.AddOperation(opspec)
12957	return op.Output(0)
12958}
12959
12960// ArgMinAttr is an optional argument to ArgMin.
12961type ArgMinAttr func(optionalAttr)
12962
12963// ArgMinOutputType sets the optional output_type attribute to value.
12964// If not specified, defaults to DT_INT64
12965func ArgMinOutputType(value tf.DataType) ArgMinAttr {
12966	return func(m optionalAttr) {
12967		m["output_type"] = value
12968	}
12969}
12970
12971// Returns the index with the smallest value across dimensions of a tensor.
12972//
12973// Note that in case of ties the identity of the return value is not guaranteed.
12974//
12975// Usage:
12976//   ```python
12977//   import tensorflow as tf
12978//   a = [1, 10, 26.9, 2.8, 166.32, 62.3]
12979//   b = tf.math.argmin(input = a)
12980//   c = tf.keras.backend.eval(b)
12981//   # c = 0
12982//   # here a[0] = 1 which is the smallest element of a across axis 0
12983//   ```
12984//
12985// Arguments:
12986//
12987//	dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
12988// Describes which dimension of the input Tensor to reduce across. For vectors,
12989// use dimension = 0.
12990func ArgMin(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMinAttr) (output tf.Output) {
12991	if scope.Err() != nil {
12992		return
12993	}
12994	attrs := map[string]interface{}{}
12995	for _, a := range optional {
12996		a(attrs)
12997	}
12998	opspec := tf.OpSpec{
12999		Type: "ArgMin",
13000		Input: []tf.Input{
13001			input, dimension,
13002		},
13003		Attrs: attrs,
13004	}
13005	op := scope.AddOperation(opspec)
13006	return op.Output(0)
13007}
13008
13009// SumAttr is an optional argument to Sum.
13010type SumAttr func(optionalAttr)
13011
13012// SumKeepDims sets the optional keep_dims attribute to value.
13013//
13014// value: If true, retain reduced dimensions with length 1.
13015// If not specified, defaults to false
13016func SumKeepDims(value bool) SumAttr {
13017	return func(m optionalAttr) {
13018		m["keep_dims"] = value
13019	}
13020}
13021
13022// Computes the sum of elements across dimensions of a tensor.
13023//
13024// Reduces `input` along the dimensions given in `axis`. Unless
13025// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
13026// `axis`. If `keep_dims` is true, the reduced dimensions are
13027// retained with length 1.
13028//
13029// Arguments:
13030//	input: The tensor to reduce.
13031//	axis: The dimensions to reduce. Must be in the range
13032// `[-rank(input), rank(input))`.
13033//
13034// Returns The reduced tensor.
13035func Sum(scope *Scope, input tf.Output, axis tf.Output, optional ...SumAttr) (output tf.Output) {
13036	if scope.Err() != nil {
13037		return
13038	}
13039	attrs := map[string]interface{}{}
13040	for _, a := range optional {
13041		a(attrs)
13042	}
13043	opspec := tf.OpSpec{
13044		Type: "Sum",
13045		Input: []tf.Input{
13046			input, axis,
13047		},
13048		Attrs: attrs,
13049	}
13050	op := scope.AddOperation(opspec)
13051	return op.Output(0)
13052}
13053
13054// Selects elements from `x` or `y`, depending on `condition`.
13055//
// The `x` and `y` tensors must have the same shape, and the
// output will also have that shape.
13058//
13059// The `condition` tensor must be a scalar if `x` and `y` are scalars.
13060// If `x` and `y` are vectors or higher rank, then `condition` must be either a
13061// scalar, a vector with size matching the first dimension of `x`, or must have
13062// the same shape as `x`.
13063//
13064// The `condition` tensor acts as a mask that chooses, based on the value at each
13065// element, whether the corresponding element / row in the output should be
13066// taken from `x` (if true) or `y` (if false).
13067//
13068// If `condition` is a vector and `x` and `y` are higher rank matrices, then
13069// it chooses which row (outer dimension) to copy from `x` and `y`.
13070// If `condition` has the same shape as `x` and `y`, then it chooses which
13071// element to copy from `x` and `y`.
13072//
13073// For example:
13074//
13075// ```python
13076// # 'condition' tensor is [[True,  False]
13077// #                        [False, True]]
13078// # 't' is [[1, 2],
13079// #         [3, 4]]
13080// # 'e' is [[5, 6],
13081// #         [7, 8]]
13082// select(condition, t, e)  # => [[1, 6], [7, 4]]
13083//
13084//
13085// # 'condition' tensor is [True, False]
13086// # 't' is [[1, 2],
13087// #         [3, 4]]
13088// # 'e' is [[5, 6],
13089// #         [7, 8]]
13090// select(condition, t, e) ==> [[1, 2],
13091//                              [7, 8]]
13092//
13093// ```
13094//
13095// Arguments:
13096//
13097//	x: = A `Tensor` which may have the same shape as `condition`.
13098// If `condition` is rank 1, `x` may have higher rank,
13099// but its first dimension must match the size of `condition`.
13100//	y: = A `Tensor` with the same type and shape as `x`.
13101//
13102// Returns = A `Tensor` with the same type and shape as `x` and `y`.
13103func Select(scope *Scope, condition tf.Output, x tf.Output, y tf.Output) (output tf.Output) {
13104	if scope.Err() != nil {
13105		return
13106	}
13107	opspec := tf.OpSpec{
13108		Type: "Select",
13109		Input: []tf.Input{
13110			condition, x, y,
13111		},
13112	}
13113	op := scope.AddOperation(opspec)
13114	return op.Output(0)
13115}
13116
13117// Returns the truth value of x OR y element-wise.
13118//
13119// *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
13120// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
13121func LogicalOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13122	if scope.Err() != nil {
13123		return
13124	}
13125	opspec := tf.OpSpec{
13126		Type: "LogicalOr",
13127		Input: []tf.Input{
13128			x, y,
13129		},
13130	}
13131	op := scope.AddOperation(opspec)
13132	return op.Output(0)
13133}
13134
// Outputs deterministic pseudorandom values from a Poisson distribution.
13136//
13137// Outputs random values from a Poisson distribution.
13138//
13139// The outputs are a deterministic function of `shape`, `seed`, and `lam`.
13140//
13141// Arguments:
13142//	shape: The shape of the output tensor.
13143//	seed: 2 seeds (shape [2]).
13144//	lam: The rate of the Poisson distribution. Shape must match the rightmost dimensions
13145// of `shape`.
13146//	dtype: The type of the output.
13147//
13148// Returns Random values with specified shape.
13149func StatelessRandomPoisson(scope *Scope, shape tf.Output, seed tf.Output, lam tf.Output, dtype tf.DataType) (output tf.Output) {
13150	if scope.Err() != nil {
13151		return
13152	}
13153	attrs := map[string]interface{}{"dtype": dtype}
13154	opspec := tf.OpSpec{
13155		Type: "StatelessRandomPoisson",
13156		Input: []tf.Input{
13157			shape, seed, lam,
13158		},
13159		Attrs: attrs,
13160	}
13161	op := scope.AddOperation(opspec)
13162	return op.Output(0)
13163}
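
// Illustration (an editor's sketch, not machine generated): because the
// output is a deterministic function of `shape`, `seed`, and `lam`, two nodes
// built from identical inputs evaluate to identical values. Names are
// illustrative; session execution is omitted.
//
// ```go
// s := op.NewScope()
// shape := op.Const(s, []int32{10})
// seed := op.Const(s, []int64{1, 2}) // 2 seeds (shape [2])
// lam := op.Const(s, float32(3.0))
// a := op.StatelessRandomPoisson(s, shape, seed, lam, tf.Float)
// b := op.StatelessRandomPoisson(s, shape, seed, lam, tf.Float)
// // a and b evaluate to the same 10 values.
// ```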
13164
13165// Returns the truth value of `NOT x` element-wise.
13166//
13167// Arguments:
13168//	x: A `Tensor` of type `bool`.
13169//
13170// Returns A `Tensor` of type `bool` with the same shape as `x`. The logical negation of `x`.
13171func LogicalNot(scope *Scope, x tf.Output) (y tf.Output) {
13172	if scope.Err() != nil {
13173		return
13174	}
13175	opspec := tf.OpSpec{
13176		Type: "LogicalNot",
13177		Input: []tf.Input{
13178			x,
13179		},
13180	}
13181	op := scope.AddOperation(opspec)
13182	return op.Output(0)
13183}
13184
13185// Writes a graph summary.
13186//
13187// Writes TensorFlow graph `tensor` at `step` using summary `writer`.
13188//
13189// Returns the created operation.
13190func WriteGraphSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output) (o *tf.Operation) {
13191	if scope.Err() != nil {
13192		return
13193	}
13194	opspec := tf.OpSpec{
13195		Type: "WriteGraphSummary",
13196		Input: []tf.Input{
13197			writer, step, tensor,
13198		},
13199	}
13200	return scope.AddOperation(opspec)
13201}
13202
13203// ApproximateEqualAttr is an optional argument to ApproximateEqual.
13204type ApproximateEqualAttr func(optionalAttr)
13205
13206// ApproximateEqualTolerance sets the optional tolerance attribute to value.
13207// If not specified, defaults to 1e-05
13208func ApproximateEqualTolerance(value float32) ApproximateEqualAttr {
13209	return func(m optionalAttr) {
13210		m["tolerance"] = value
13211	}
13212}
13213
13214// Returns the truth value of abs(x-y) < tolerance element-wise.
13215func ApproximateEqual(scope *Scope, x tf.Output, y tf.Output, optional ...ApproximateEqualAttr) (z tf.Output) {
13216	if scope.Err() != nil {
13217		return
13218	}
13219	attrs := map[string]interface{}{}
13220	for _, a := range optional {
13221		a(attrs)
13222	}
13223	opspec := tf.OpSpec{
13224		Type: "ApproximateEqual",
13225		Input: []tf.Input{
13226			x, y,
13227		},
13228		Attrs: attrs,
13229	}
13230	op := scope.AddOperation(opspec)
13231	return op.Output(0)
13232}
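
// Example of the optional-attribute pattern (an editor's sketch, not machine
// generated): attributes are passed as trailing functional options. Names are
// illustrative.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{1.0, 2.0})
// y := op.Const(s, []float32{1.004, 2.5})
// z := op.ApproximateEqual(s, x, y, op.ApproximateEqualTolerance(0.01))
// // z evaluates to [true, false].
// ```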
13233
13234// EqualAttr is an optional argument to Equal.
13235type EqualAttr func(optionalAttr)
13236
13237// EqualIncompatibleShapeError sets the optional incompatible_shape_error attribute to value.
13238// If not specified, defaults to true
13239func EqualIncompatibleShapeError(value bool) EqualAttr {
13240	return func(m optionalAttr) {
13241		m["incompatible_shape_error"] = value
13242	}
13243}
13244
13245// Returns the truth value of (x == y) element-wise.
13246//
13247// *NOTE*: `Equal` supports broadcasting. More about broadcasting
13248// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
13249//
13250// ```python
13251// x = tf.constant([2, 4])
13252// y = tf.constant(2)
13253// tf.math.equal(x, y) ==> array([True, False])
13254//
13255// x = tf.constant([2, 4])
13256// y = tf.constant([2, 4])
13257// tf.math.equal(x, y) ==> array([True,  True])
13258// ```
13259func Equal(scope *Scope, x tf.Output, y tf.Output, optional ...EqualAttr) (z tf.Output) {
13260	if scope.Err() != nil {
13261		return
13262	}
13263	attrs := map[string]interface{}{}
13264	for _, a := range optional {
13265		a(attrs)
13266	}
13267	opspec := tf.OpSpec{
13268		Type: "Equal",
13269		Input: []tf.Input{
13270			x, y,
13271		},
13272		Attrs: attrs,
13273	}
13274	op := scope.AddOperation(opspec)
13275	return op.Output(0)
13276}
13277
13278// Returns the truth value of (x <= y) element-wise.
13279//
13280// *NOTE*: `LessEqual` supports broadcasting. More about broadcasting
13281// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
13282//
13283// Example:
13284//
13285// ```python
13286// x = tf.constant([5, 4, 6])
13287// y = tf.constant([5])
13288// tf.math.less_equal(x, y) ==> [True, True, False]
13289//
13290// x = tf.constant([5, 4, 6])
13291// y = tf.constant([5, 6, 6])
13292// tf.math.less_equal(x, y) ==> [True, True, True]
13293// ```
13294func LessEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13295	if scope.Err() != nil {
13296		return
13297	}
13298	opspec := tf.OpSpec{
13299		Type: "LessEqual",
13300		Input: []tf.Input{
13301			x, y,
13302		},
13303	}
13304	op := scope.AddOperation(opspec)
13305	return op.Output(0)
13306}
13307
13308// Returns the truth value of (x < y) element-wise.
13309//
13310// *NOTE*: `Less` supports broadcasting. More about broadcasting
13311// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
13312//
13313// Example:
13314//
13315// ```python
13316// x = tf.constant([5, 4, 6])
13317// y = tf.constant([5])
13318// tf.math.less(x, y) ==> [False, True, False]
13319//
13320// x = tf.constant([5, 4, 6])
13321// y = tf.constant([5, 6, 7])
13322// tf.math.less(x, y) ==> [False, True, True]
13323// ```
13324func Less(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13325	if scope.Err() != nil {
13326		return
13327	}
13328	opspec := tf.OpSpec{
13329		Type: "Less",
13330		Input: []tf.Input{
13331			x, y,
13332		},
13333	}
13334	op := scope.AddOperation(opspec)
13335	return op.Output(0)
13336}
13337
13338// Reduces sparse updates into the variable referenced by `resource` using the `max` operation.
13339//
13340// This operation computes
13341//
13342//     # Scalar indices
13343//     ref[indices, ...] = max(ref[indices, ...], updates[...])
13344//
13345//     # Vector indices (for each i)
13346//     ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])
13347//
13348//     # High rank indices (for each i, ..., j)
13349//     ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
13350//
13351// Duplicate entries are handled correctly: if multiple `indices` reference
13352// the same location, their contributions are combined.
13353//
13354// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
13355//
13356// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
13357// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
13358// </div>
13359//
13360// Arguments:
13361//	resource: Should be from a `Variable` node.
13362//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of values to combine with `ref` using the `max` operation.
13364//
13365// Returns the created operation.
13366func ResourceScatterMax(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
13367	if scope.Err() != nil {
13368		return
13369	}
13370	opspec := tf.OpSpec{
13371		Type: "ResourceScatterMax",
13372		Input: []tf.Input{
13373			resource, indices, updates,
13374		},
13375	}
13376	return scope.AddOperation(opspec)
13377}
13378
13379// Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
13380//
13381// The regularized incomplete beta integral is defined as:
13382//
13383//
13384// \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
13385//
13386// where
13387//
13388//
13389// \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
13390//
13391//
13392// is the incomplete beta function and \\(B(a, b)\\) is the *complete*
13393// beta function.
13394func Betainc(scope *Scope, a tf.Output, b tf.Output, x tf.Output) (z tf.Output) {
13395	if scope.Err() != nil {
13396		return
13397	}
13398	opspec := tf.OpSpec{
13399		Type: "Betainc",
13400		Input: []tf.Input{
13401			a, b, x,
13402		},
13403	}
13404	op := scope.AddOperation(opspec)
13405	return op.Output(0)
13406}
13407
13408// Computes the product along segments of a tensor.
13409//
13410// Read
13411// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
13412// for an explanation of segments.
13413//
13414// Computes a tensor such that
13415// \\(output_i = \prod_j data_j\\) where the product is over `j` such
13416// that `segment_ids[j] == i`.
13417//
13418// If the product is empty for a given segment ID `i`, `output[i] = 1`.
13419//
13420// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
13421// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
13422// </div>
13423//
13424// For example:
13425//
13426// ```
13427// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
13428// tf.segment_prod(c, tf.constant([0, 0, 1]))
13429// # ==> [[4, 6, 6, 4],
13430// #      [5, 6, 7, 8]]
13431// ```
13432//
13433//
13434// Arguments:
13435//
13436//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
13437// first dimension.  Values should be sorted and can be repeated.
13438//
13439// Returns Has same shape as data, except for dimension 0 which
13440// has size `k`, the number of segments.
13441func SegmentProd(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
13442	if scope.Err() != nil {
13443		return
13444	}
13445	opspec := tf.OpSpec{
13446		Type: "SegmentProd",
13447		Input: []tf.Input{
13448			data, segment_ids,
13449		},
13450	}
13451	op := scope.AddOperation(opspec)
13452	return op.Output(0)
13453}
13454
13455// Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
13456//
13457// This is the angle \\( \theta \in [-\pi, \pi] \\) such that
13458// \\[ x = r \cos(\theta) \\]
13459// and
13460// \\[ y = r \sin(\theta) \\]
13461// where \\(r = \sqrt{x^2 + y^2} \\).
13462//
13463// For example:
13464//
13465// >>> x = [1., 1.]
13466// >>> y = [1., -1.]
13467// >>> print((tf.math.atan2(y,x) * (180 / np.pi)).numpy())
13468// [ 45. -45.]
13469//
13470//
13471func Atan2(scope *Scope, y tf.Output, x tf.Output) (z tf.Output) {
13472	if scope.Err() != nil {
13473		return
13474	}
13475	opspec := tf.OpSpec{
13476		Type: "Atan2",
13477		Input: []tf.Input{
13478			y, x,
13479		},
13480	}
13481	op := scope.AddOperation(opspec)
13482	return op.Output(0)
13483}
13484
13485// Compute the polygamma function \\(\psi^{(n)}(x)\\).
13486//
13487// The polygamma function is defined as:
13488//
13489//
13490// \\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\)
13491//
13492// where \\(\psi(x)\\) is the digamma function.
// The polygamma function is defined only for non-negative integer orders \\(a\\).
13494func Polygamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
13495	if scope.Err() != nil {
13496		return
13497	}
13498	opspec := tf.OpSpec{
13499		Type: "Polygamma",
13500		Input: []tf.Input{
13501			a, x,
13502		},
13503	}
13504	op := scope.AddOperation(opspec)
13505	return op.Output(0)
13506}
13507
13508// SparseReduceSumAttr is an optional argument to SparseReduceSum.
13509type SparseReduceSumAttr func(optionalAttr)
13510
13511// SparseReduceSumKeepDims sets the optional keep_dims attribute to value.
13512//
13513// value: If true, retain reduced dimensions with length 1.
13514// If not specified, defaults to false
13515func SparseReduceSumKeepDims(value bool) SparseReduceSumAttr {
13516	return func(m optionalAttr) {
13517		m["keep_dims"] = value
13518	}
13519}
13520
13521// Computes the sum of elements across dimensions of a SparseTensor.
13522//
13523// This Op takes a SparseTensor and is the sparse counterpart to
13524// `tf.reduce_sum()`.  In particular, this Op also returns a dense `Tensor`
13525// instead of a sparse one.
13526//
13527// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
13528// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
13529// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
13530// with length 1.
13531//
13532// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
13533// with a single element is returned.  Additionally, the axes can be negative,
13534// which are interpreted according to the indexing rules in Python.
13535//
13536// Arguments:
13537//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
13538// SparseTensor, possibly not in canonical ordering.
13539//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
13540//	input_shape: 1-D.  Shape of the input SparseTensor.
13541//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
13542//
13543// Returns `R-K`-D.  The reduced Tensor.
13544func SparseReduceSum(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumAttr) (output tf.Output) {
13545	if scope.Err() != nil {
13546		return
13547	}
13548	attrs := map[string]interface{}{}
13549	for _, a := range optional {
13550		a(attrs)
13551	}
13552	opspec := tf.OpSpec{
13553		Type: "SparseReduceSum",
13554		Input: []tf.Input{
13555			input_indices, input_values, input_shape, reduction_axes,
13556		},
13557		Attrs: attrs,
13558	}
13559	op := scope.AddOperation(opspec)
13560	return op.Output(0)
13561}
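
// Example (an editor's sketch, not machine generated): reducing a 2x3
// SparseTensor with value 1 at [0, 0] and value 2 at [1, 2] over axis 1.
// Names are illustrative; session execution is omitted.
//
// ```go
// s := op.NewScope()
// indices := op.Const(s, [][]int64{{0, 0}, {1, 2}})
// values := op.Const(s, []float32{1, 2})
// shape := op.Const(s, []int64{2, 3})
// axes := op.Const(s, []int32{1})
// sum := op.SparseReduceSum(s, indices, values, shape, axes,
// 	op.SparseReduceSumKeepDims(true)) // evaluates to [[1], [2]]
// ```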
13562
13563// Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
13564//
13565// The Hurwitz zeta function is defined as:
13566//
13567//
13568// \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
13569func Zeta(scope *Scope, x tf.Output, q tf.Output) (z tf.Output) {
13570	if scope.Err() != nil {
13571		return
13572	}
13573	opspec := tf.OpSpec{
13574		Type: "Zeta",
13575		Input: []tf.Input{
13576			x, q,
13577		},
13578	}
13579	op := scope.AddOperation(opspec)
13580	return op.Output(0)
13581}
13582
13583// StringSplitV2Attr is an optional argument to StringSplitV2.
13584type StringSplitV2Attr func(optionalAttr)
13585
13586// StringSplitV2Maxsplit sets the optional maxsplit attribute to value.
13587//
13588// value: An `int`. If `maxsplit > 0`, limit of the split of the result.
13589// If not specified, defaults to -1
13590func StringSplitV2Maxsplit(value int64) StringSplitV2Attr {
13591	return func(m optionalAttr) {
13592		m["maxsplit"] = value
13593	}
13594}
13595
13596// Split elements of `source` based on `sep` into a `SparseTensor`.
13597//
13598// Let N be the size of source (typically N will be the batch size). Split each
13599// element of `source` based on `sep` and return a `SparseTensor`
13600// containing the split tokens. Empty tokens are ignored.
13601//
13602// For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',
13603// then the output will be
13604// ```
13605// st.indices = [0, 0;
13606//               0, 1;
13607//               1, 0;
13608//               1, 1;
13609//               1, 2]
13610// st.shape = [2, 3]
13611// st.values = ['hello', 'world', 'a', 'b', 'c']
13612// ```
13613//
13614// If `sep` is given, consecutive delimiters are not grouped together and are
13615// deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and
13616// sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
// string, consecutive whitespace characters are regarded as a single
// separator, and the result will contain no empty strings at the start or end
// if the string has leading or trailing whitespace.
13620//
// Note that the above-mentioned behavior matches Python's `str.split`.
13622//
13623// Arguments:
13624//	input: `1-D` string `Tensor`, the strings to split.
13625//	sep: `0-D` string `Tensor`, the delimiter character.
13626func StringSplitV2(scope *Scope, input tf.Output, sep tf.Output, optional ...StringSplitV2Attr) (indices tf.Output, values tf.Output, shape tf.Output) {
13627	if scope.Err() != nil {
13628		return
13629	}
13630	attrs := map[string]interface{}{}
13631	for _, a := range optional {
13632		a(attrs)
13633	}
13634	opspec := tf.OpSpec{
13635		Type: "StringSplitV2",
13636		Input: []tf.Input{
13637			input, sep,
13638		},
13639		Attrs: attrs,
13640	}
13641	op := scope.AddOperation(opspec)
13642	return op.Output(0), op.Output(1), op.Output(2)
13643}
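
// Example (an editor's sketch, not machine generated): splitting two strings
// on whitespace, with `maxsplit` limiting the number of splits per element.
// The three outputs describe the resulting SparseTensor; names are
// illustrative.
//
// ```go
// s := op.NewScope()
// src := op.Const(s, []string{"hello world", "a b c"})
// sep := op.Const(s, "") // empty sep: split on whitespace
// idx, vals, shp := op.StringSplitV2(s, src, sep, op.StringSplitV2Maxsplit(1))
// // vals evaluates to ["hello", "world", "a", "b c"], shp to [2, 2].
// ```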
13644
13645// Compute the lower regularized incomplete Gamma function `P(a, x)`.
13646//
13647// The lower regularized incomplete Gamma function is defined as:
13648//
13649//
13650// \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
13651//
13652// where
13653//
13654// \\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\)
13655//
13656// is the lower incomplete Gamma function.
13657//
// Note, above `Q(a, x)` (`Igammac`) is the upper regularized incomplete
// Gamma function.
13660func Igamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
13661	if scope.Err() != nil {
13662		return
13663	}
13664	opspec := tf.OpSpec{
13665		Type: "Igamma",
13666		Input: []tf.Input{
13667			a, x,
13668		},
13669	}
13670	op := scope.AddOperation(opspec)
13671	return op.Output(0)
13672}
13673
13674// Compute the upper regularized incomplete Gamma function `Q(a, x)`.
13675//
13676// The upper regularized incomplete Gamma function is defined as:
13677//
13678// \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
13679//
13680// where
13681//
13682// \\(Gamma(a, x) = \int_{x}^{\infty} t^{a-1} exp(-t) dt\\)
13683//
13684// is the upper incomplete Gamma function.
13685//
// Note, above `P(a, x)` (`Igamma`) is the lower regularized incomplete
// Gamma function.
13688func Igammac(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
13689	if scope.Err() != nil {
13690		return
13691	}
13692	opspec := tf.OpSpec{
13693		Type: "Igammac",
13694		Input: []tf.Input{
13695			a, x,
13696		},
13697	}
13698	op := scope.AddOperation(opspec)
13699	return op.Output(0)
13700}
13701
13702// Returns element-wise remainder of division. This emulates C semantics in that
13703//
13704// the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
13705// y + truncate_mod(x, y) = x`.
13706//
13707// *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
13708// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
13709func TruncateMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13710	if scope.Err() != nil {
13711		return
13712	}
13713	opspec := tf.OpSpec{
13714		Type: "TruncateMod",
13715		Input: []tf.Input{
13716			x, y,
13717		},
13718	}
13719	op := scope.AddOperation(opspec)
13720	return op.Output(0)
13721}
13722
13723// Returns element-wise remainder of division. When `x < 0` xor `y < 0` is
13724//
13725// true, this follows Python semantics in that the result here is consistent
13726// with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
13727//
13728// *NOTE*: `FloorMod` supports broadcasting. More about broadcasting
13729// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
13730func FloorMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13731	if scope.Err() != nil {
13732		return
13733	}
13734	opspec := tf.OpSpec{
13735		Type: "FloorMod",
13736		Input: []tf.Input{
13737			x, y,
13738		},
13739	}
13740	op := scope.AddOperation(opspec)
13741	return op.Output(0)
13742}
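
// To make the TruncateMod / FloorMod distinction concrete, here is the same
// arithmetic in plain Go (an editor's sketch, not machine generated). Go's
// native % is a truncated remainder, matching `TruncateMod`; `FloorMod`'s
// result instead takes the sign of the divisor, matching Python's %.
//
// ```go
// // Truncated remainder (C semantics): -7 % 3 == -1,
// // since truncate(-7.0/3.0) == -2 and -7 - (-2)*3 == -1.
// // Floored remainder (Python semantics): floorMod(-7, 3) == 2,
// // since floor(-7.0/3.0) == -3 and -7 - (-3)*3 == 2.
// func floorMod(x, y int) int {
// 	m := x % y
// 	if m != 0 && (m < 0) != (y < 0) {
// 		m += y
// 	}
// 	return m
// }
// ```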
13743
13744// Returns element-wise remainder of division. This emulates C semantics in that
13745//
13746// the result here is consistent with a truncating divide. E.g.
13747// `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
13748//
13749// *NOTE*: `Mod` supports broadcasting. More about broadcasting
13750// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
13751func Mod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13752	if scope.Err() != nil {
13753		return
13754	}
13755	opspec := tf.OpSpec{
13756		Type: "Mod",
13757		Input: []tf.Input{
13758			x, y,
13759		},
13760	}
13761	op := scope.AddOperation(opspec)
13762	return op.Output(0)
13763}
13764
13765// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
13766//
13767// N is the size of the segment being reduced.
13768//
13769// See `tf.sparse.segment_sum` for usage examples.
13770//
13771//
13772// Arguments:
13773//
13774//	indices: A 1-D tensor. Has same rank as `segment_ids`.
13775//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
13776//
13777// Returns Has same shape as data, except for dimension 0 which
13778// has size `k`, the number of segments.
13779func SparseSegmentSqrtN(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
13780	if scope.Err() != nil {
13781		return
13782	}
13783	opspec := tf.OpSpec{
13784		Type: "SparseSegmentSqrtN",
13785		Input: []tf.Input{
13786			data, indices, segment_ids,
13787		},
13788	}
13789	op := scope.AddOperation(opspec)
13790	return op.Output(0)
13791}
13792
13793// Returns the max of x and y (i.e. x > y ? x : y) element-wise.
13794//
13795// *NOTE*: `Maximum` supports broadcasting. More about broadcasting
13796// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
13797func Maximum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13798	if scope.Err() != nil {
13799		return
13800	}
13801	opspec := tf.OpSpec{
13802		Type: "Maximum",
13803		Input: []tf.Input{
13804			x, y,
13805		},
13806	}
13807	op := scope.AddOperation(opspec)
13808	return op.Output(0)
13809}
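
// Broadcasting example (an editor's sketch, not machine generated): a scalar
// second operand is broadcast against a vector, per the NumPy rules linked
// above. Names are illustrative; session execution is omitted.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []int32{1, 5, 3})
// y := op.Const(s, int32(4))
// z := op.Maximum(s, x, y) // evaluates to [4, 5, 4]
// ```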
13810
// Returns 0 if x == 0, and x / y otherwise, element-wise.
13812func Xdivy(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13813	if scope.Err() != nil {
13814		return
13815	}
13816	opspec := tf.OpSpec{
13817		Type: "Xdivy",
13818		Input: []tf.Input{
13819			x, y,
13820		},
13821	}
13822	op := scope.AddOperation(opspec)
13823	return op.Output(0)
13824}
13825
// Returns 0 if x == 0, and x * log(y) otherwise, element-wise.
13827func Xlogy(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13828	if scope.Err() != nil {
13829		return
13830	}
13831	opspec := tf.OpSpec{
13832		Type: "Xlogy",
13833		Input: []tf.Input{
13834			x, y,
13835		},
13836	}
13837	op := scope.AddOperation(opspec)
13838	return op.Output(0)
13839}
13840
13841// Increments variable pointed to by 'resource' until it reaches 'limit'.
13842//
13843// Arguments:
13844//	resource: Should be from a scalar `Variable` node.
13845//	limit: If incrementing ref would bring it above limit, instead generates an
13846// 'OutOfRange' error.
13847//
13848//
13849// Returns A copy of the input before increment. If nothing else modifies the
13850// input, the values produced will all be distinct.
13851func ResourceCountUpTo(scope *Scope, resource tf.Output, limit int64, T tf.DataType) (output tf.Output) {
13852	if scope.Err() != nil {
13853		return
13854	}
13855	attrs := map[string]interface{}{"limit": limit, "T": T}
13856	opspec := tf.OpSpec{
13857		Type: "ResourceCountUpTo",
13858		Input: []tf.Input{
13859			resource,
13860		},
13861		Attrs: attrs,
13862	}
13863	op := scope.AddOperation(opspec)
13864	return op.Output(0)
13865}
13866
13867// StatefulStandardNormalAttr is an optional argument to StatefulStandardNormal.
13868type StatefulStandardNormalAttr func(optionalAttr)
13869
13870// StatefulStandardNormalDtype sets the optional dtype attribute to value.
13871//
13872// value: The type of the output.
13873// If not specified, defaults to DT_FLOAT
13874func StatefulStandardNormalDtype(value tf.DataType) StatefulStandardNormalAttr {
13875	return func(m optionalAttr) {
13876		m["dtype"] = value
13877	}
13878}
13879
// Outputs random values from a normal distribution. This op is deprecated in favor of 'StatefulStandardNormalV2'.
13881//
13882// DEPRECATED at GraphDef version 29: Use StatefulStandardNormalV2 instead
13883//
13884// The generated values will have mean 0 and standard deviation 1.
13885//
13886// Arguments:
13887//	resource: The handle of the resource variable that stores the state of the RNG.
13888//	shape: The shape of the output tensor.
13889//
13890// Returns A tensor of the specified shape filled with random normal values.
13891func StatefulStandardNormal(scope *Scope, resource tf.Output, shape tf.Output, optional ...StatefulStandardNormalAttr) (output tf.Output) {
13892	if scope.Err() != nil {
13893		return
13894	}
13895	attrs := map[string]interface{}{}
13896	for _, a := range optional {
13897		a(attrs)
13898	}
13899	opspec := tf.OpSpec{
13900		Type: "StatefulStandardNormal",
13901		Input: []tf.Input{
13902			resource, shape,
13903		},
13904		Attrs: attrs,
13905	}
13906	op := scope.AddOperation(opspec)
13907	return op.Output(0)
13908}
13909
13910// Returns x / y element-wise for real types.
13911//
13912// If `x` and `y` are reals, this will return the floating-point division.
13913//
// *NOTE*: `RealDiv` supports broadcasting. More about broadcasting
13915// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
13916func RealDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13917	if scope.Err() != nil {
13918		return
13919	}
13920	opspec := tf.OpSpec{
13921		Type: "RealDiv",
13922		Input: []tf.Input{
13923			x, y,
13924		},
13925	}
13926	op := scope.AddOperation(opspec)
13927	return op.Output(0)
13928}
13929
13930// Returns x / y element-wise for integer types.
13931//
13932// Truncation designates that negative numbers will round fractional quantities
// toward zero. I.e. -7 / 5 = -1. This matches C semantics but is different
// from Python semantics. See `FloorDiv` for a division function that matches
// Python semantics.
13936//
13937// *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
13938// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
13939func TruncateDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13940	if scope.Err() != nil {
13941		return
13942	}
13943	opspec := tf.OpSpec{
13944		Type: "TruncateDiv",
13945		Input: []tf.Input{
13946			x, y,
13947		},
13948	}
13949	op := scope.AddOperation(opspec)
13950	return op.Output(0)
13951}
13952
13953// Returns x // y element-wise.
13954//
13955// *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
13956// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
13957func FloorDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13958	if scope.Err() != nil {
13959		return
13960	}
13961	opspec := tf.OpSpec{
13962		Type: "FloorDiv",
13963		Input: []tf.Input{
13964			x, y,
13965		},
13966	}
13967	op := scope.AddOperation(opspec)
13968	return op.Output(0)
13969}
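
// The TruncateDiv / FloorDiv pair mirrors the remainder ops above (an
// editor's sketch, not machine generated): Go's integer division truncates
// toward zero like `TruncateDiv`, while `FloorDiv` rounds toward negative
// infinity like Python's //.
//
// ```go
// // TruncateDiv (C semantics, Go's /): -7 / 5 == -1.
// // FloorDiv (Python's //): floorDiv(-7, 5) == -2.
// func floorDiv(x, y int) int {
// 	q := x / y
// 	if x%y != 0 && (x < 0) != (y < 0) {
// 		q--
// 	}
// 	return q
// }
// ```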
13970
13971// Writes a serialized proto summary.
13972//
13973// Writes `tensor`, a serialized proto at `step` using summary `writer`.
13974//
13975// Returns the created operation.
13976func WriteRawProtoSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output) (o *tf.Operation) {
13977	if scope.Err() != nil {
13978		return
13979	}
13980	opspec := tf.OpSpec{
13981		Type: "WriteRawProtoSummary",
13982		Input: []tf.Input{
13983			writer, step, tensor,
13984		},
13985	}
13986	return scope.AddOperation(opspec)
13987}
13988
// Returns x / y element-wise, or 0 if the denominator `y` is zero.
//
13992// *NOTE*: `DivNoNan` supports broadcasting. More about broadcasting
13993// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
13994func DivNoNan(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13995	if scope.Err() != nil {
13996		return
13997	}
13998	opspec := tf.OpSpec{
13999		Type: "DivNoNan",
14000		Input: []tf.Input{
14001			x, y,
14002		},
14003	}
14004	op := scope.AddOperation(opspec)
14005	return op.Output(0)
14006}
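
// Example (an editor's sketch, not machine generated): zeros in the
// denominator yield 0 rather than Inf or NaN. Names are illustrative.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{4, 1, 0})
// y := op.Const(s, []float32{2, 0, 0})
// z := op.DivNoNan(s, x, y) // evaluates to [2, 0, 0]
// ```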
14007
14008// Returns x / y element-wise.
14009//
14010// *NOTE*: `Div` supports broadcasting. More about broadcasting
14011// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
14012func Div(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
14013	if scope.Err() != nil {
14014		return
14015	}
14016	opspec := tf.OpSpec{
14017		Type: "Div",
14018		Input: []tf.Input{
14019			x, y,
14020		},
14021	}
14022	op := scope.AddOperation(opspec)
14023	return op.Output(0)
14024}
14025
14026// Says whether the targets are in the top `K` predictions.
14027//
14028// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
14029// prediction for the target class is among the top `k` predictions among
14030// all predictions for example `i`. Note that the behavior of `InTopK` differs
14031// from the `TopK` op in its handling of ties; if multiple classes have the
14032// same prediction value and straddle the top-`k` boundary, all of those
14033// classes are considered to be in the top `k`.
14034//
14035// More formally, let
14036//
14037//   \\(predictions_i\\) be the predictions for all classes for example `i`,
14038//   \\(targets_i\\) be the target class for example `i`,
14039//   \\(out_i\\) be the output for example `i`,
14040//
14041// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
14042//
14043// Arguments:
14044//	predictions: A `batch_size` x `classes` tensor.
14045//	targets: A `batch_size` vector of class ids.
14046//	k: Number of top elements to look at for computing precision.
14047//
14048// Returns Computed Precision at `k` as a `bool Tensor`.
14049func InTopK(scope *Scope, predictions tf.Output, targets tf.Output, k int64) (precision tf.Output) {
14050	if scope.Err() != nil {
14051		return
14052	}
14053	attrs := map[string]interface{}{"k": k}
14054	opspec := tf.OpSpec{
14055		Type: "InTopK",
14056		Input: []tf.Input{
14057			predictions, targets,
14058		},
14059		Attrs: attrs,
14060	}
14061	op := scope.AddOperation(opspec)
14062	return op.Output(0)
14063}
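
// Example (an editor's sketch, not machine generated): with k = 2, target
// class 0 is among the top 2 predictions for the first row but not for the
// second. Names are illustrative; session execution is omitted.
//
// ```go
// s := op.NewScope()
// predictions := op.Const(s, [][]float32{{0.8, 0.1, 0.5}, {0.1, 0.9, 0.6}})
// targets := op.Const(s, []int32{0, 0})
// prec := op.InTopK(s, predictions, targets, 2) // evaluates to [true, false]
// ```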
14064
14065// Returns x - y element-wise.
14066//
14067// *NOTE*: `Subtract` supports broadcasting. More about broadcasting
14068// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
14069func Sub(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
14070	if scope.Err() != nil {
14071		return
14072	}
14073	opspec := tf.OpSpec{
14074		Type: "Sub",
14075		Input: []tf.Input{
14076			x, y,
14077		},
14078	}
14079	op := scope.AddOperation(opspec)
14080	return op.Output(0)
14081}
14082
14083// AsStringAttr is an optional argument to AsString.
14084type AsStringAttr func(optionalAttr)
14085
14086// AsStringPrecision sets the optional precision attribute to value.
14087//
14088// value: The post-decimal precision to use for floating point numbers.
14089// Only used if precision > -1.
14090// If not specified, defaults to -1
14091func AsStringPrecision(value int64) AsStringAttr {
14092	return func(m optionalAttr) {
14093		m["precision"] = value
14094	}
14095}
14096
14097// AsStringScientific sets the optional scientific attribute to value.
14098//
14099// value: Use scientific notation for floating point numbers.
14100// If not specified, defaults to false
14101func AsStringScientific(value bool) AsStringAttr {
14102	return func(m optionalAttr) {
14103		m["scientific"] = value
14104	}
14105}
14106
14107// AsStringShortest sets the optional shortest attribute to value.
14108//
14109// value: Use shortest representation (either scientific or standard) for
14110// floating point numbers.
14111// If not specified, defaults to false
14112func AsStringShortest(value bool) AsStringAttr {
14113	return func(m optionalAttr) {
14114		m["shortest"] = value
14115	}
14116}
14117
14118// AsStringWidth sets the optional width attribute to value.
14119//
14120// value: Pad pre-decimal numbers to this width.
14121// Applies to both floating point and integer numbers.
14122// Only used if width > -1.
14123// If not specified, defaults to -1
14124func AsStringWidth(value int64) AsStringAttr {
14125	return func(m optionalAttr) {
14126		m["width"] = value
14127	}
14128}
14129
14130// AsStringFill sets the optional fill attribute to value.
14131//
14132// value: The value to pad if width > -1.  If empty, pads with spaces.
14133// Another typical value is '0'.  String cannot be longer than 1 character.
14134// If not specified, defaults to ""
14135func AsStringFill(value string) AsStringAttr {
14136	return func(m optionalAttr) {
14137		m["fill"] = value
14138	}
14139}
14140
14141// Converts each entry in the given tensor to strings.
14142//
14143// Supports many numeric types and boolean.
14144//
// For Unicode, see the
// [Working with Unicode text](https://www.tensorflow.org/tutorials/representation/unicode)
// tutorial.
14148//
14149// Examples:
14150//
14151// >>> tf.strings.as_string([3, 2])
14152// <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'3', b'2'], dtype=object)>
14153// >>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy()
14154// array([b'3.14', b'2.72'], dtype=object)
14155func AsString(scope *Scope, input tf.Output, optional ...AsStringAttr) (output tf.Output) {
14156	if scope.Err() != nil {
14157		return
14158	}
14159	attrs := map[string]interface{}{}
14160	for _, a := range optional {
14161		a(attrs)
14162	}
14163	opspec := tf.OpSpec{
14164		Type: "AsString",
14165		Input: []tf.Input{
14166			input,
14167		},
14168		Attrs: attrs,
14169	}
14170	op := scope.AddOperation(opspec)
14171	return op.Output(0)
14172}
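
// Example combining several of the optional attributes above (an editor's
// sketch, not machine generated). The formatting is printf-style, so with
// fill "0", width 6 and precision 2 the result should follow "%06.2f"; names
// are illustrative.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{3.1415926, 2.71828})
// str := op.AsString(s, x,
// 	op.AsStringPrecision(2),
// 	op.AsStringWidth(6),
// 	op.AsStringFill("0"))
// // str should evaluate to ["003.14", "002.72"].
// ```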
14173
14174// Conv3DBackpropFilterV2Attr is an optional argument to Conv3DBackpropFilterV2.
14175type Conv3DBackpropFilterV2Attr func(optionalAttr)
14176
14177// Conv3DBackpropFilterV2DataFormat sets the optional data_format attribute to value.
14178//
14179// value: The data format of the input and output data. With the
14180// default format "NDHWC", the data is stored in the order of:
14181//     [batch, in_depth, in_height, in_width, in_channels].
14182// Alternatively, the format could be "NCDHW", the data storage order is:
14183//     [batch, in_channels, in_depth, in_height, in_width].
14184// If not specified, defaults to "NDHWC"
14185func Conv3DBackpropFilterV2DataFormat(value string) Conv3DBackpropFilterV2Attr {
14186	return func(m optionalAttr) {
14187		m["data_format"] = value
14188	}
14189}
14190
14191// Conv3DBackpropFilterV2Dilations sets the optional dilations attribute to value.
14192//
14193// value: 1-D tensor of length 5.  The dilation factor for each dimension of
14194// `input`. If set to k > 1, there will be k-1 skipped cells between each
14195// filter element on that dimension. The dimension order is determined by the
14196// value of `data_format`, see above for details. Dilations in the batch and
14197// depth dimensions must be 1.
// If not specified, defaults to [1, 1, 1, 1, 1]
14199func Conv3DBackpropFilterV2Dilations(value []int64) Conv3DBackpropFilterV2Attr {
14200	return func(m optionalAttr) {
14201		m["dilations"] = value
14202	}
14203}
14204
14205// Computes the gradients of 3-D convolution with respect to the filter.
14206//
14207// Arguments:
14208//	input: Shape `[batch, depth, rows, cols, in_channels]`.
14209//	filter_sizes: An integer vector representing the tensor shape of `filter`,
14210// where `filter` is a 5-D
14211// `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
14212// tensor.
14213//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
14214// out_channels]`.
14215//	strides: 1-D tensor of length 5. The stride of the sliding window for each
14216// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
14217//	padding: The type of padding algorithm to use.
14218func Conv3DBackpropFilterV2(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropFilterV2Attr) (output tf.Output) {
14219	if scope.Err() != nil {
14220		return
14221	}
14222	attrs := map[string]interface{}{"strides": strides, "padding": padding}
14223	for _, a := range optional {
14224		a(attrs)
14225	}
14226	opspec := tf.OpSpec{
14227		Type: "Conv3DBackpropFilterV2",
14228		Input: []tf.Input{
14229			input, filter_sizes, out_backprop,
14230		},
14231		Attrs: attrs,
14232	}
14233	op := scope.AddOperation(opspec)
14234	return op.Output(0)
14235}
14236
14237// Returns x + y element-wise.
14238//
// *NOTE*: `AddV2` supports broadcasting; `AddN` does not. More about broadcasting
14240// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
14241func AddV2(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
14242	if scope.Err() != nil {
14243		return
14244	}
14245	opspec := tf.OpSpec{
14246		Type: "AddV2",
14247		Input: []tf.Input{
14248			x, y,
14249		},
14250	}
14251	op := scope.AddOperation(opspec)
14252	return op.Output(0)
14253}
14254
14255// Returns element-wise integer closest to x.
14256//
14257// If the result is midway between two representable values,
// the even representable value is chosen.
14259// For example:
14260//
14261// ```
14262// rint(-1.5) ==> -2.0
14263// rint(0.5000001) ==> 1.0
14264// rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
14265// ```
14266func Rint(scope *Scope, x tf.Output) (y tf.Output) {
14267	if scope.Err() != nil {
14268		return
14269	}
14270	opspec := tf.OpSpec{
14271		Type: "Rint",
14272		Input: []tf.Input{
14273			x,
14274		},
14275	}
14276	op := scope.AddOperation(opspec)
14277	return op.Output(0)
14278}
14279
14280// Returns element-wise smallest integer not less than x.
14281func Ceil(scope *Scope, x tf.Output) (y tf.Output) {
14282	if scope.Err() != nil {
14283		return
14284	}
14285	opspec := tf.OpSpec{
14286		Type: "Ceil",
14287		Input: []tf.Input{
14288			x,
14289		},
14290	}
14291	op := scope.AddOperation(opspec)
14292	return op.Output(0)
14293}
14294
14295// Returns element-wise largest integer not greater than x.
14296func Floor(scope *Scope, x tf.Output) (y tf.Output) {
14297	if scope.Err() != nil {
14298		return
14299	}
14300	opspec := tf.OpSpec{
14301		Type: "Floor",
14302		Input: []tf.Input{
14303			x,
14304		},
14305	}
14306	op := scope.AddOperation(opspec)
14307	return op.Output(0)
14308}
14309
14310// Returns an element-wise indication of the sign of a number.
14311//
14312// `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
14313//
14314// For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
14315//
14316// Example usage:
14317// >>> tf.math.sign([0., 2., -3.])
14318// <tf.Tensor: shape=(3,), dtype=float32, numpy=array([ 0.,  1., -1.], dtype=float32)>
14319func Sign(scope *Scope, x tf.Output) (y tf.Output) {
14320	if scope.Err() != nil {
14321		return
14322	}
14323	opspec := tf.OpSpec{
14324		Type: "Sign",
14325		Input: []tf.Input{
14326			x,
14327		},
14328	}
14329	op := scope.AddOperation(opspec)
14330	return op.Output(0)
14331}
14332
14333// Returns which elements of x are finite.
14334//
14335// @compatibility(numpy)
14336// Equivalent to np.isfinite
14337// @end_compatibility
14338//
14339// Example:
14340//
14341// ```python
14342// x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan])
14343// tf.math.is_finite(x) ==> [True, True, True, False, False]
14344// ```
14345func IsFinite(scope *Scope, x tf.Output) (y tf.Output) {
14346	if scope.Err() != nil {
14347		return
14348	}
14349	opspec := tf.OpSpec{
14350		Type: "IsFinite",
14351		Input: []tf.Input{
14352			x,
14353		},
14354	}
14355	op := scope.AddOperation(opspec)
14356	return op.Output(0)
14357}
14358
14359// Returns which elements of x are Inf.
14360//
14361// @compatibility(numpy)
14362// Equivalent to np.isinf
14363// @end_compatibility
14364//
14365// Example:
14366//
14367// ```python
14368// x = tf.constant([5.0, np.inf, 6.8, np.inf])
14369// tf.math.is_inf(x) ==> [False, True, False, True]
14370// ```
14371func IsInf(scope *Scope, x tf.Output) (y tf.Output) {
14372	if scope.Err() != nil {
14373		return
14374	}
14375	opspec := tf.OpSpec{
14376		Type: "IsInf",
14377		Input: []tf.Input{
14378			x,
14379		},
14380	}
14381	op := scope.AddOperation(opspec)
14382	return op.Output(0)
14383}
14384
14385// Returns which elements of x are NaN.
14386//
14387// @compatibility(numpy)
14388// Equivalent to np.isnan
14389// @end_compatibility
14390//
14391// Example:
14392//
14393// ```python
14394// x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf])
14395// tf.math.is_nan(x) ==> [False, True, False, True, False]
14396// ```
14397func IsNan(scope *Scope, x tf.Output) (y tf.Output) {
14398	if scope.Err() != nil {
14399		return
14400	}
14401	opspec := tf.OpSpec{
14402		Type: "IsNan",
14403		Input: []tf.Input{
14404			x,
14405		},
14406	}
14407	op := scope.AddOperation(opspec)
14408	return op.Output(0)
14409}
14410
// Computes the trigonometric inverse tangent of x element-wise.
14412//
14413// The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that
14414// if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`.
14415//
14416// **Note**: The output of `tf.math.atan` will lie within the invertible range
// of tan, i.e. (-pi/2, pi/2).
14418//
14419// For example:
14420//
14421// ```python
14422// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
14423// x = tf.constant([1.047, 0.785])
14424// y = tf.math.tan(x) # [1.731261, 0.99920404]
14425//
14426// tf.math.atan(y) # [1.047, 0.785] = x
14427// ```
14428//
14429func Atan(scope *Scope, x tf.Output) (y tf.Output) {
14430	if scope.Err() != nil {
14431		return
14432	}
14433	opspec := tf.OpSpec{
14434		Type: "Atan",
14435		Input: []tf.Input{
14436			x,
14437		},
14438	}
14439	op := scope.AddOperation(opspec)
14440	return op.Output(0)
14441}
14442
14443// ResourceApplyProximalAdagradAttr is an optional argument to ResourceApplyProximalAdagrad.
14444type ResourceApplyProximalAdagradAttr func(optionalAttr)
14445
14446// ResourceApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
14447//
14448// value: If True, updating of the var and accum tensors will be protected by
14449// a lock; otherwise the behavior is undefined, but may exhibit less contention.
14450// If not specified, defaults to false
14451func ResourceApplyProximalAdagradUseLocking(value bool) ResourceApplyProximalAdagradAttr {
14452	return func(m optionalAttr) {
14453		m["use_locking"] = value
14454	}
14455}
14456
14457// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
14458//
14459// accum += grad * grad
14460// prox_v = var - lr * grad * (1 / sqrt(accum))
14461// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
14462//
14463// Arguments:
14464//	var_: Should be from a Variable().
14465//	accum: Should be from a Variable().
14466//	lr: Scaling factor. Must be a scalar.
14467//	l1: L1 regularization. Must be a scalar.
14468//	l2: L2 regularization. Must be a scalar.
14469//	grad: The gradient.
14470//
14471// Returns the created operation.
14472func ResourceApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, optional ...ResourceApplyProximalAdagradAttr) (o *tf.Operation) {
14473	if scope.Err() != nil {
14474		return
14475	}
14476	attrs := map[string]interface{}{}
14477	for _, a := range optional {
14478		a(attrs)
14479	}
14480	opspec := tf.OpSpec{
14481		Type: "ResourceApplyProximalAdagrad",
14482		Input: []tf.Input{
14483			var_, accum, lr, l1, l2, grad,
14484		},
14485		Attrs: attrs,
14486	}
14487	return scope.AddOperation(opspec)
14488}
14489
14490// Divides sparse updates into the variable referenced by `resource`.
14491//
14492// This operation computes
14493//
14494//     # Scalar indices
14495//     ref[indices, ...] /= updates[...]
14496//
14497//     # Vector indices (for each i)
14498//     ref[indices[i], ...] /= updates[i, ...]
14499//
14500//     # High rank indices (for each i, ..., j)
14501//     ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
14502//
14503// Duplicate entries are handled correctly: if multiple `indices` reference
14504// the same location, their contributions multiply.
14505//
14506// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
14507//
14508// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
14509// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
14510// </div>
14511//
14512// Arguments:
14513//	resource: Should be from a `Variable` node.
14514//	indices: A tensor of indices into the first dimension of `ref`.
//	updates: A tensor of values by which `ref` is divided.
14516//
14517// Returns the created operation.
14518func ResourceScatterDiv(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
14519	if scope.Err() != nil {
14520		return
14521	}
14522	opspec := tf.OpSpec{
14523		Type: "ResourceScatterDiv",
14524		Input: []tf.Input{
14525			resource, indices, updates,
14526		},
14527	}
14528	return scope.AddOperation(opspec)
14529}
14530
// Computes the trigonometric inverse sine of x element-wise.
14532//
14533// The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that
14534// if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`.
14535//
14536// **Note**: The output of `tf.math.asin` will lie within the invertible range
// of sine, i.e. [-pi/2, pi/2].
14538//
14539// For example:
14540//
14541// ```python
14542// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
14543// x = tf.constant([1.047, 0.785])
14544// y = tf.math.sin(x) # [0.8659266, 0.7068252]
14545//
14546// tf.math.asin(y) # [1.047, 0.785] = x
14547// ```
14548//
14549func Asin(scope *Scope, x tf.Output) (y tf.Output) {
14550	if scope.Err() != nil {
14551		return
14552	}
14553	opspec := tf.OpSpec{
14554		Type: "Asin",
14555		Input: []tf.Input{
14556			x,
14557		},
14558	}
14559	op := scope.AddOperation(opspec)
14560	return op.Output(0)
14561}
14562
14563// Writes a histogram summary.
14564//
14565// Writes histogram `values` at `step` with `tag` using summary `writer`.
14566//
14567// Returns the created operation.
14568func WriteHistogramSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, values tf.Output) (o *tf.Operation) {
14569	if scope.Err() != nil {
14570		return
14571	}
14572	opspec := tf.OpSpec{
14573		Type: "WriteHistogramSummary",
14574		Input: []tf.Input{
14575			writer, step, tag, values,
14576		},
14577	}
14578	return scope.AddOperation(opspec)
14579}
14580
14581// Computes tan of x element-wise.
14582//
14583//   Given an input tensor, this function computes tangent of every
14584//   element in the tensor. Input range is `(-inf, inf)` and
//   output range is `(-inf, inf)`. For inputs of `-inf` or `inf`, `nan`
//   is returned.
14587//
14588//   ```python
14589//   x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
14590//   tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan]
14591//   ```
14592func Tan(scope *Scope, x tf.Output) (y tf.Output) {
14593	if scope.Err() != nil {
14594		return
14595	}
14596	opspec := tf.OpSpec{
14597		Type: "Tan",
14598		Input: []tf.Input{
14599			x,
14600		},
14601	}
14602	op := scope.AddOperation(opspec)
14603	return op.Output(0)
14604}
14605
14606// FusedBatchNormV2Attr is an optional argument to FusedBatchNormV2.
14607type FusedBatchNormV2Attr func(optionalAttr)
14608
14609// FusedBatchNormV2Epsilon sets the optional epsilon attribute to value.
14610//
14611// value: A small float number added to the variance of x.
14612// If not specified, defaults to 0.0001
14613func FusedBatchNormV2Epsilon(value float32) FusedBatchNormV2Attr {
14614	return func(m optionalAttr) {
14615		m["epsilon"] = value
14616	}
14617}
14618
14619// FusedBatchNormV2ExponentialAvgFactor sets the optional exponential_avg_factor attribute to value.
14620// If not specified, defaults to 1
14621func FusedBatchNormV2ExponentialAvgFactor(value float32) FusedBatchNormV2Attr {
14622	return func(m optionalAttr) {
14623		m["exponential_avg_factor"] = value
14624	}
14625}
14626
14627// FusedBatchNormV2DataFormat sets the optional data_format attribute to value.
14628//
14629// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
14630// If not specified, defaults to "NHWC"
14631func FusedBatchNormV2DataFormat(value string) FusedBatchNormV2Attr {
14632	return func(m optionalAttr) {
14633		m["data_format"] = value
14634	}
14635}
14636
14637// FusedBatchNormV2IsTraining sets the optional is_training attribute to value.
14638//
14639// value: A bool value to indicate the operation is for training (default)
14640// or inference.
14641// If not specified, defaults to true
14642func FusedBatchNormV2IsTraining(value bool) FusedBatchNormV2Attr {
14643	return func(m optionalAttr) {
14644		m["is_training"] = value
14645	}
14646}
14647
14648// Batch normalization.
14649//
// Note that the layout of 4D Tensors is defined by either "NHWC" or "NCHW".
14651// The size of 1D Tensors matches the dimension C of the 4D Tensors.
14652//
14653// Arguments:
14654//	x: A 4D Tensor for input data.
14655//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
//	offset: A 1D Tensor for offset, to shift the normalized x.
14657//	mean: A 1D Tensor for population mean. Used for inference only;
14658// must be empty for training.
14659//	variance: A 1D Tensor for population variance. Used for inference only;
14660// must be empty for training.
14661//
14662// Returns:
14663//	y: A 4D Tensor for output data.
14664//	batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
14665// to compute the running mean.
14666//	batch_variance: A 1D Tensor for the computed batch variance, to be used by
14667// TensorFlow to compute the running variance.
14668//	reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
14669// in the gradient computation.
14670//	reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
14671// in the cuDNN case), to be reused in the gradient computation.
14672func FusedBatchNormV2(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormV2Attr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output) {
14673	if scope.Err() != nil {
14674		return
14675	}
14676	attrs := map[string]interface{}{}
14677	for _, a := range optional {
14678		a(attrs)
14679	}
14680	opspec := tf.OpSpec{
14681		Type: "FusedBatchNormV2",
14682		Input: []tf.Input{
14683			x, scale, offset, mean, variance,
14684		},
14685		Attrs: attrs,
14686	}
14687	op := scope.AddOperation(opspec)
14688	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
14689}
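
// Inference-mode example (an editor's sketch, not machine generated): with
// FusedBatchNormV2IsTraining(false), the precomputed population `mean` and
// `variance` are consumed rather than estimated from the batch. Names and
// shapes below are illustrative; session execution is omitted.
//
// ```go
// s := op.NewScope()
// x := op.Placeholder(s, tf.Float) // NHWC input, e.g. [batch, h, w, 2]
// scale := op.Const(s, []float32{1, 1})
// offset := op.Const(s, []float32{0, 0})
// mean := op.Const(s, []float32{0.5, 0.1})
// variance := op.Const(s, []float32{1.0, 2.0})
// y, _, _, _, _ := op.FusedBatchNormV2(s, x, scale, offset, mean, variance,
// 	op.FusedBatchNormV2IsTraining(false))
// _ = y // feed y into the rest of the graph
// ```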
14690
14691// Computes sine of x element-wise.
14692//
14693//   Given an input tensor, this function computes sine of every
14694//   element in the tensor. Input range is `(-inf, inf)` and
14695//   output range is `[-1,1]`.
14696//
14697//   ```python
14698//   x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")])
14699//   tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan]
14700//   ```
14701func Sin(scope *Scope, x tf.Output) (y tf.Output) {
14702	if scope.Err() != nil {
14703		return
14704	}
14705	opspec := tf.OpSpec{
14706		Type: "Sin",
14707		Input: []tf.Input{
14708			x,
14709		},
14710	}
14711	op := scope.AddOperation(opspec)
14712	return op.Output(0)
14713}
14714
14715// ResourceApplyAdamAttr is an optional argument to ResourceApplyAdam.
14716type ResourceApplyAdamAttr func(optionalAttr)
14717
14718// ResourceApplyAdamUseLocking sets the optional use_locking attribute to value.
14719//
14720// value: If `True`, updating of the var, m, and v tensors will be protected
14721// by a lock; otherwise the behavior is undefined, but may exhibit less
14722// contention.
14723// If not specified, defaults to false
14724func ResourceApplyAdamUseLocking(value bool) ResourceApplyAdamAttr {
14725	return func(m optionalAttr) {
14726		m["use_locking"] = value
14727	}
14728}
14729
14730// ResourceApplyAdamUseNesterov sets the optional use_nesterov attribute to value.
14731//
14732// value: If `True`, uses the nesterov update.
14733// If not specified, defaults to false
14734func ResourceApplyAdamUseNesterov(value bool) ResourceApplyAdamAttr {
14735	return func(m optionalAttr) {
14736		m["use_nesterov"] = value
14737	}
14738}
14739
14740// Update '*var' according to the Adam algorithm.
14741//
14742// $$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$
14743// $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$
14744// $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$
14745// $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\\\  \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$
14746//
14747// Arguments:
14748//	var_: Should be from a Variable().
14749//	m: Should be from a Variable().
14750//	v: Should be from a Variable().
14751//	beta1_power: Must be a scalar.
14752//	beta2_power: Must be a scalar.
14753//	lr: Scaling factor. Must be a scalar.
14754//	beta1: Momentum factor. Must be a scalar.
14755//	beta2: Momentum factor. Must be a scalar.
14756//	epsilon: Ridge term. Must be a scalar.
14757//	grad: The gradient.
14758//
14759// Returns the created operation.
14760func ResourceApplyAdam(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamAttr) (o *tf.Operation) {
14761	if scope.Err() != nil {
14762		return
14763	}
14764	attrs := map[string]interface{}{}
14765	for _, a := range optional {
14766		a(attrs)
14767	}
14768	opspec := tf.OpSpec{
14769		Type: "ResourceApplyAdam",
14770		Input: []tf.Input{
14771			var_, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad,
14772		},
14773		Attrs: attrs,
14774	}
14775	return scope.AddOperation(opspec)
14776}
14777
14778// ResourceSparseApplyAdadeltaAttr is an optional argument to ResourceSparseApplyAdadelta.
14779type ResourceSparseApplyAdadeltaAttr func(optionalAttr)
14780
14781// ResourceSparseApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
14782//
14783// value: If True, updating of the var and accum tensors will be protected by
14784// a lock; otherwise the behavior is undefined, but may exhibit less contention.
14785// If not specified, defaults to false
14786func ResourceSparseApplyAdadeltaUseLocking(value bool) ResourceSparseApplyAdadeltaAttr {
14787	return func(m optionalAttr) {
14788		m["use_locking"] = value
14789	}
14790}
14791
// Update '*var' according to the adadelta scheme.
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	accum_update: Should be from a Variable().
14798//	lr: Learning rate. Must be a scalar.
14799//	rho: Decay factor. Must be a scalar.
14800//	epsilon: Constant factor. Must be a scalar.
14801//	grad: The gradient.
14802//	indices: A vector of indices into the first dimension of var and accum.
14803//
14804// Returns the created operation.
14805func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdadeltaAttr) (o *tf.Operation) {
14806	if scope.Err() != nil {
14807		return
14808	}
14809	attrs := map[string]interface{}{}
14810	for _, a := range optional {
14811		a(attrs)
14812	}
14813	opspec := tf.OpSpec{
14814		Type: "ResourceSparseApplyAdadelta",
14815		Input: []tf.Input{
14816			var_, accum, accum_update, lr, rho, epsilon, grad, indices,
14817		},
14818		Attrs: attrs,
14819	}
14820	return scope.AddOperation(opspec)
14821}
14822
14823// Computes sigmoid of `x` element-wise.
14824//
14825// Specifically, `y = 1 / (1 + exp(-x))`.
14826func Sigmoid(scope *Scope, x tf.Output) (y tf.Output) {
14827	if scope.Err() != nil {
14828		return
14829	}
14830	opspec := tf.OpSpec{
14831		Type: "Sigmoid",
14832		Input: []tf.Input{
14833			x,
14834		},
14835	}
14836	op := scope.AddOperation(opspec)
14837	return op.Output(0)
14838}
14839
14840// Computes the complementary error function of `x` element-wise.
14841func Erfc(scope *Scope, x tf.Output) (y tf.Output) {
14842	if scope.Err() != nil {
14843		return
14844	}
14845	opspec := tf.OpSpec{
14846		Type: "Erfc",
14847		Input: []tf.Input{
14848			x,
14849		},
14850	}
14851	op := scope.AddOperation(opspec)
14852	return op.Output(0)
14853}
14854
14855// Computes the Approximate Minimum Degree (AMD) ordering of `input`.
14856//
14857// Computes the Approximate Minimum Degree (AMD) ordering for a sparse matrix.
14858//
14859// The returned permutation may be used to permute the rows and columns of the
// given sparse matrix. This typically results in the permuted sparse matrix's
// sparse Cholesky (or other) decomposition having less zero fill-in compared
// to the decomposition of the original matrix.
//
// The input sparse matrix may have rank 2 or rank 3. The output Tensor,
// representing the permutation, would then have rank 1 or 2 respectively,
// with the same batch shape as the input.
14867//
14868// Each component of the input sparse matrix must represent a square symmetric
// matrix; only the lower triangular part of the matrix is read. The values of the
// sparse matrix do not affect the returned permutation; only the sparsity
// pattern of the sparse matrix is used. Hence, a single AMD ordering may be
14872// reused for the Cholesky decompositions of sparse matrices with the same sparsity
14873// pattern but with possibly different values.
14874//
14875// Each batch component of the output permutation represents a permutation of `N`
14876// elements, where the input sparse matrix components each have `N` rows. That is,
14877// the component contains each of the integers `{0, .. N-1}` exactly once. The
14878// `i`th element represents the row index that the `i`th row maps to.
14879//
14880// Usage example:
14881//
14882// ```python
14883//     from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
14884//
14885//     a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])
14886//     a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)
14887//     a_dense_shape = [4, 4]
14888//
14889//     with tf.Session() as sess:
14890//       # Define (COO format) SparseTensor over Numpy array.
14891//       a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
14892//
14893//       # Convert SparseTensors to CSR SparseMatrix.
14894//       a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
14895//           a_st.indices, a_st.values, a_st.dense_shape)
14896//
14897//       # Obtain the AMD Ordering for the CSR SparseMatrix.
//       ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(a_sm)
14899//
14900//       ordering_amd_value = sess.run(ordering_amd)
14901// ```
14902//
14903// `ordering_amd_value` stores the AMD ordering: `[1 2 3 0]`.
14904//
14907// Arguments:
14908//	input: A `CSRSparseMatrix`.
14909//
14910// Returns The Approximate Minimum Degree (AMD) ordering of `input`.
14911func SparseMatrixOrderingAMD(scope *Scope, input tf.Output) (output tf.Output) {
14912	if scope.Err() != nil {
14913		return
14914	}
14915	opspec := tf.OpSpec{
14916		Type: "SparseMatrixOrderingAMD",
14917		Input: []tf.Input{
14918			input,
14919		},
14920	}
14921	op := scope.AddOperation(opspec)
14922	return op.Output(0)
14923}
14924
14925// RandomStandardNormalAttr is an optional argument to RandomStandardNormal.
14926type RandomStandardNormalAttr func(optionalAttr)
14927
14928// RandomStandardNormalSeed sets the optional seed attribute to value.
14929//
// value: If either `seed` or `seed2` is set to be non-zero, the random number
14931// generator is seeded by the given seed.  Otherwise, it is seeded by a
14932// random seed.
14933// If not specified, defaults to 0
14934func RandomStandardNormalSeed(value int64) RandomStandardNormalAttr {
14935	return func(m optionalAttr) {
14936		m["seed"] = value
14937	}
14938}
14939
14940// RandomStandardNormalSeed2 sets the optional seed2 attribute to value.
14941//
14942// value: A second seed to avoid seed collision.
14943// If not specified, defaults to 0
14944func RandomStandardNormalSeed2(value int64) RandomStandardNormalAttr {
14945	return func(m optionalAttr) {
14946		m["seed2"] = value
14947	}
14948}
14949
14950// Outputs random values from a normal distribution.
14951//
14952// The generated values will have mean 0 and standard deviation 1.
14953//
14954// Arguments:
14955//	shape: The shape of the output tensor.
14956//	dtype: The type of the output.
14957//
14958// Returns A tensor of the specified shape filled with random normal values.
14959func RandomStandardNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomStandardNormalAttr) (output tf.Output) {
14960	if scope.Err() != nil {
14961		return
14962	}
14963	attrs := map[string]interface{}{"dtype": dtype}
14964	for _, a := range optional {
14965		a(attrs)
14966	}
14967	opspec := tf.OpSpec{
14968		Type: "RandomStandardNormal",
14969		Input: []tf.Input{
14970			shape,
14971		},
14972		Attrs: attrs,
14973	}
14974	op := scope.AddOperation(opspec)
14975	return op.Output(0)
14976}
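
// An illustrative sketch (not generated code) of the functional-options
// pattern used for optional attributes: each XxxAttr value mutates the attrs
// map before the op is added. Assumes the package's NewScope and Const.
func exampleRandomStandardNormal() tf.Output {
	s := NewScope()
	shape := Const(s, []int32{2, 3}) // output shape [2, 3]
	// Fixing both seeds makes the op deterministic across runs.
	return RandomStandardNormal(s, shape, tf.Float,
		RandomStandardNormalSeed(42),
		RandomStandardNormalSeed2(7))
}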
14977
// Computes the [Gauss error function](https://en.wikipedia.org/wiki/Error_function) of `x` element-wise.
//
// In statistics, for non-negative values of $x$, the error function has the
// following interpretation: for a random variable $Y$ that is normally
// distributed with mean 0 and standard deviation $1/\sqrt{2}$, $erf(x)$ is the
// probability that $Y$ falls in the range $[-x, x]$.
14979func Erf(scope *Scope, x tf.Output) (y tf.Output) {
14980	if scope.Err() != nil {
14981		return
14982	}
14983	opspec := tf.OpSpec{
14984		Type: "Erf",
14985		Input: []tf.Input{
14986			x,
14987		},
14988	}
14989	op := scope.AddOperation(opspec)
14990	return op.Output(0)
14991}
14992
// Computes Psi, the derivative of Lgamma, element-wise.
//
// Lgamma is the log of the absolute value of `Gamma(x)`.
14996func Digamma(scope *Scope, x tf.Output) (y tf.Output) {
14997	if scope.Err() != nil {
14998		return
14999	}
15000	opspec := tf.OpSpec{
15001		Type: "Digamma",
15002		Input: []tf.Input{
15003			x,
15004		},
15005	}
15006	op := scope.AddOperation(opspec)
15007	return op.Output(0)
15008}
15009
15010// Reads the value of a variable.
15011//
15012// The tensor returned by this operation is immutable.
15013//
15014// The value returned by this operation is guaranteed to be influenced by all the
15015// writes on which this operation depends directly or indirectly, and to not be
15016// influenced by any of the writes which depend directly or indirectly on this
15017// operation.
15018//
15019// Arguments:
15020//	resource: handle to the resource in which to store the variable.
15021//	dtype: the dtype of the value.
15022func ReadVariableOp(scope *Scope, resource tf.Output, dtype tf.DataType) (value tf.Output) {
15023	if scope.Err() != nil {
15024		return
15025	}
15026	attrs := map[string]interface{}{"dtype": dtype}
15027	opspec := tf.OpSpec{
15028		Type: "ReadVariableOp",
15029		Input: []tf.Input{
15030			resource,
15031		},
15032		Attrs: attrs,
15033	}
15034	op := scope.AddOperation(opspec)
15035	return op.Output(0)
15036}
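
// An illustrative sketch (not generated code) of reading a resource variable
// after a write. It assumes the package's VarHandleOp and AssignVariableOp
// wrappers; the control dependency orders the read after the assignment.
func exampleReadVariableOp() tf.Output {
	s := NewScope()
	v := VarHandleOp(s, tf.Float, tf.ScalarShape())
	assign := AssignVariableOp(s, v, Const(s, float32(3)))
	// Without the control dependency the read could observe the
	// uninitialized variable.
	return ReadVariableOp(s.WithControlDependencies(assign), v, tf.Float)
}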
15037
15038// Computes a range that covers the actual values present in a quantized tensor.
15039//
15040// Given a quantized tensor described by `(input, input_min, input_max)`, outputs a
15041// range that covers the actual values present in that tensor. This op is typically
15042// used to produce the `requested_output_min` and `requested_output_max` for
15043// `Requantize`.
15044//
15045// Arguments:
15046//
15047//	input_min: The float value that the minimum quantized input value represents.
15048//	input_max: The float value that the maximum quantized input value represents.
15049//
15050// Returns:
15051//	output_min: The computed min output.
//	output_max: The computed max output.
15053func RequantizationRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output) (output_min tf.Output, output_max tf.Output) {
15054	if scope.Err() != nil {
15055		return
15056	}
15057	opspec := tf.OpSpec{
15058		Type: "RequantizationRange",
15059		Input: []tf.Input{
15060			input, input_min, input_max,
15061		},
15062	}
15063	op := scope.AddOperation(opspec)
15064	return op.Output(0), op.Output(1)
15065}
15066
15067// Computes the log of the absolute value of `Gamma(x)` element-wise.
15068//
//   For positive integers, this function computes log((input - 1)!) for every element in the tensor.
15070//   `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539`
15071//
15072// Example:
15073//
15074// ```python
15075// x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6])
15076// tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685]
15077// ```
15078func Lgamma(scope *Scope, x tf.Output) (y tf.Output) {
15079	if scope.Err() != nil {
15080		return
15081	}
15082	opspec := tf.OpSpec{
15083		Type: "Lgamma",
15084		Input: []tf.Input{
15085			x,
15086		},
15087	}
15088	op := scope.AddOperation(opspec)
15089	return op.Output(0)
15090}
15091
15092// Computes the gradient for the tanh of `x` wrt its input.
15093//
15094// Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
15095// is the corresponding input gradient.
15096func TanhGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
15097	if scope.Err() != nil {
15098		return
15099	}
15100	opspec := tf.OpSpec{
15101		Type: "TanhGrad",
15102		Input: []tf.Input{
15103			y, dy,
15104		},
15105	}
15106	op := scope.AddOperation(opspec)
15107	return op.Output(0)
15108}
15109
15110// Computes inverse hyperbolic tangent of x element-wise.
15111//
15112//   Given an input tensor, this function computes inverse hyperbolic tangent
15113//   for every element in the tensor. Input range is `[-1,1]` and output range is
15114//   `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the
15115//   input is `1`, output will be `inf`. Values outside the range will have
15116//   `nan` as output.
15117//
15118//   ```python
15119//   x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")])
15120//   tf.math.atanh(x) ==> [nan -inf -0.54930615 inf  0. 0.54930615 nan nan]
15121//   ```
15122func Atanh(scope *Scope, x tf.Output) (y tf.Output) {
15123	if scope.Err() != nil {
15124		return
15125	}
15126	opspec := tf.OpSpec{
15127		Type: "Atanh",
15128		Input: []tf.Input{
15129			x,
15130		},
15131	}
15132	op := scope.AddOperation(opspec)
15133	return op.Output(0)
15134}
15135
15136// Computes hyperbolic cosine of x element-wise.
15137//
15138//   Given an input tensor, this function computes hyperbolic cosine of every
15139//   element in the tensor. Input range is `[-inf, inf]` and output range
15140//   is `[1, inf]`.
15141//
15142//   ```python
15143//   x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
15144//   tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf]
15145//   ```
15146func Cosh(scope *Scope, x tf.Output) (y tf.Output) {
15147	if scope.Err() != nil {
15148		return
15149	}
15150	opspec := tf.OpSpec{
15151		Type: "Cosh",
15152		Input: []tf.Input{
15153			x,
15154		},
15155	}
15156	op := scope.AddOperation(opspec)
15157	return op.Output(0)
15158}
15159
15160// Computes natural logarithm of (1 + x) element-wise.
15161//
15162// I.e., \\(y = \log_e (1 + x)\\).
15163//
15164// Example:
15165//
15166// ```python
15167// x = tf.constant([0, 0.5, 1, 5])
15168// tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595]
15169// ```
15170func Log1p(scope *Scope, x tf.Output) (y tf.Output) {
15171	if scope.Err() != nil {
15172		return
15173	}
15174	opspec := tf.OpSpec{
15175		Type: "Log1p",
15176		Input: []tf.Input{
15177			x,
15178		},
15179	}
15180	op := scope.AddOperation(opspec)
15181	return op.Output(0)
15182}
15183
15184// Conv2DBackpropInputAttr is an optional argument to Conv2DBackpropInput.
15185type Conv2DBackpropInputAttr func(optionalAttr)
15186
15187// Conv2DBackpropInputUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
15188// If not specified, defaults to true
15189func Conv2DBackpropInputUseCudnnOnGpu(value bool) Conv2DBackpropInputAttr {
15190	return func(m optionalAttr) {
15191		m["use_cudnn_on_gpu"] = value
15192	}
15193}
15194
15195// Conv2DBackpropInputExplicitPaddings sets the optional explicit_paddings attribute to value.
15196//
15197// value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
15198// dimension, the amount of padding inserted before and after the dimension is
15199// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
15200// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
15201// If not specified, defaults to {}
15202func Conv2DBackpropInputExplicitPaddings(value []int64) Conv2DBackpropInputAttr {
15203	return func(m optionalAttr) {
15204		m["explicit_paddings"] = value
15205	}
15206}
15207
15208// Conv2DBackpropInputDataFormat sets the optional data_format attribute to value.
15209//
15210// value: Specify the data format of the input and output data. With the
15211// default format "NHWC", the data is stored in the order of:
15212//     [batch, in_height, in_width, in_channels].
15213// Alternatively, the format could be "NCHW", the data storage order of:
15214//     [batch, in_channels, in_height, in_width].
15215// If not specified, defaults to "NHWC"
15216func Conv2DBackpropInputDataFormat(value string) Conv2DBackpropInputAttr {
15217	return func(m optionalAttr) {
15218		m["data_format"] = value
15219	}
15220}
15221
15222// Conv2DBackpropInputDilations sets the optional dilations attribute to value.
15223//
15224// value: 1-D tensor of length 4.  The dilation factor for each dimension of
15225// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
15226// element on that dimension. The dimension order is determined by the value of
15227// `data_format`, see above for details. Dilations in the batch and depth
15228// dimensions must be 1.
15229// If not specified, defaults to {i:1 i:1 i:1 i:1}
15230func Conv2DBackpropInputDilations(value []int64) Conv2DBackpropInputAttr {
15231	return func(m optionalAttr) {
15232		m["dilations"] = value
15233	}
15234}
15235
15236// Computes the gradients of convolution with respect to the input.
15237//
15238// Arguments:
15239//	input_sizes: An integer vector representing the shape of `input`,
15240// where `input` is a 4-D `[batch, height, width, channels]` tensor.
15241//	filter: 4-D with shape
15242// `[filter_height, filter_width, in_channels, out_channels]`.
15243//	out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
15244// Gradients w.r.t. the output of the convolution.
15245//	strides: The stride of the sliding window for each dimension of the input
15246// of the convolution. Must be in the same order as the dimension specified with
15247// format.
15248//	padding: The type of padding algorithm to use.
15249//
15250// Returns 4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
15251// w.r.t. the input of the convolution.
15252func Conv2DBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropInputAttr) (output tf.Output) {
15253	if scope.Err() != nil {
15254		return
15255	}
15256	attrs := map[string]interface{}{"strides": strides, "padding": padding}
15257	for _, a := range optional {
15258		a(attrs)
15259	}
15260	opspec := tf.OpSpec{
15261		Type: "Conv2DBackpropInput",
15262		Input: []tf.Input{
15263			input_sizes, filter, out_backprop,
15264		},
15265		Attrs: attrs,
15266	}
15267	op := scope.AddOperation(opspec)
15268	return op.Output(0)
15269}
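
// An illustrative sketch (not generated code): computing the input gradient
// of a convolution. Shapes and attribute values are hypothetical; it assumes
// the package's Const and Placeholder wrappers.
func exampleConv2DBackpropInput() tf.Output {
	s := NewScope()
	inputSizes := Const(s, []int32{1, 8, 8, 3}) // [batch, h, w, in_channels]
	filter := Placeholder(s, tf.Float)          // e.g. [3, 3, 3, 16]
	outBackprop := Placeholder(s, tf.Float)     // e.g. [1, 8, 8, 16] for "SAME"
	return Conv2DBackpropInput(s, inputSizes, filter, outBackprop,
		[]int64{1, 1, 1, 1}, "SAME",
		Conv2DBackpropInputDataFormat("NHWC"))
}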
15270
15271// Computes `exp(x) - 1` element-wise.
15272//
15273//   i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor.
15274//   `e` denotes Euler's number and is approximately equal to 2.718281.
15275//
15276//   ```python
15277//   x = tf.constant(2.0)
15278//   tf.math.expm1(x) ==> 6.389056
15279//
15280//   x = tf.constant([2.0, 8.0])
15281//   tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32)
15282//
15283//   x = tf.constant(1 + 1j)
15284//   tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j)
15285//   ```
15286func Expm1(scope *Scope, x tf.Output) (y tf.Output) {
15287	if scope.Err() != nil {
15288		return
15289	}
15290	opspec := tf.OpSpec{
15291		Type: "Expm1",
15292		Input: []tf.Input{
15293			x,
15294		},
15295	}
15296	op := scope.AddOperation(opspec)
15297	return op.Output(0)
15298}
15299
15300// Computes exponential of x element-wise.  \\(y = e^x\\).
15301//
15302//   This function computes the exponential of every element in the input tensor.
15303//   i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor.
15304//   `e` denotes Euler's number and is approximately equal to 2.718281.
15305//   Output is positive for any real input.
15306//
15307//   ```python
15308//   x = tf.constant(2.0)
15309//   tf.math.exp(x) ==> 7.389056
15310//
15311//   x = tf.constant([2.0, 8.0])
15312//   tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32)
15313//   ```
15314//
15315//   For complex numbers, the exponential value is calculated as follows:
15316//
15317//   ```
15318//   e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y)
15319//   ```
15320//
15321//   Let's consider complex number 1+1j as an example.
15322//   e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j)
15323//
15324//   ```python
15325//   x = tf.constant(1 + 1j)
15326//   tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j
15327//   ```
15328func Exp(scope *Scope, x tf.Output) (y tf.Output) {
15329	if scope.Err() != nil {
15330		return
15331	}
15332	opspec := tf.OpSpec{
15333		Type: "Exp",
15334		Input: []tf.Input{
15335			x,
15336		},
15337	}
15338	op := scope.AddOperation(opspec)
15339	return op.Output(0)
15340}
15341
15342// Converts the given variant tensor to an iterator and stores it in the given resource.
15343//
15344// Arguments:
15345//	resource_handle: A handle to an iterator resource.
15346//	serialized: A variant tensor storing the state of the iterator contained in the
15347// resource.
15348//
15349// Returns the created operation.
15350func DeserializeIterator(scope *Scope, resource_handle tf.Output, serialized tf.Output) (o *tf.Operation) {
15351	if scope.Err() != nil {
15352		return
15353	}
15354	opspec := tf.OpSpec{
15355		Type: "DeserializeIterator",
15356		Input: []tf.Input{
15357			resource_handle, serialized,
15358		},
15359	}
15360	return scope.AddOperation(opspec)
15361}
15362
15363// Computes the gradient for the rsqrt of `x` wrt its input.
15364//
15365// Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
15366// is the corresponding input gradient.
15367func RsqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
15368	if scope.Err() != nil {
15369		return
15370	}
15371	opspec := tf.OpSpec{
15372		Type: "RsqrtGrad",
15373		Input: []tf.Input{
15374			y, dy,
15375		},
15376	}
15377	op := scope.AddOperation(opspec)
15378	return op.Output(0)
15379}
15380
15381// FusedBatchNormV3Attr is an optional argument to FusedBatchNormV3.
15382type FusedBatchNormV3Attr func(optionalAttr)
15383
15384// FusedBatchNormV3Epsilon sets the optional epsilon attribute to value.
15385//
15386// value: A small float number added to the variance of x.
15387// If not specified, defaults to 0.0001
15388func FusedBatchNormV3Epsilon(value float32) FusedBatchNormV3Attr {
15389	return func(m optionalAttr) {
15390		m["epsilon"] = value
15391	}
15392}
15393
15394// FusedBatchNormV3ExponentialAvgFactor sets the optional exponential_avg_factor attribute to value.
15395// If not specified, defaults to 1
15396func FusedBatchNormV3ExponentialAvgFactor(value float32) FusedBatchNormV3Attr {
15397	return func(m optionalAttr) {
15398		m["exponential_avg_factor"] = value
15399	}
15400}
15401
15402// FusedBatchNormV3DataFormat sets the optional data_format attribute to value.
15403//
15404// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
15405// If not specified, defaults to "NHWC"
15406func FusedBatchNormV3DataFormat(value string) FusedBatchNormV3Attr {
15407	return func(m optionalAttr) {
15408		m["data_format"] = value
15409	}
15410}
15411
15412// FusedBatchNormV3IsTraining sets the optional is_training attribute to value.
15413//
15414// value: A bool value to indicate the operation is for training (default)
15415// or inference.
15416// If not specified, defaults to true
15417func FusedBatchNormV3IsTraining(value bool) FusedBatchNormV3Attr {
15418	return func(m optionalAttr) {
15419		m["is_training"] = value
15420	}
15421}
15422
15423// Batch normalization.
15424//
// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
15426// The size of 1D Tensors matches the dimension C of the 4D Tensors.
15427//
15428// Arguments:
15429//	x: A 4D Tensor for input data.
15430//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
15431//	offset: A 1D Tensor for offset, to shift to the normalized x.
15432//	mean: A 1D Tensor for population mean. Used for inference only;
15433// must be empty for training.
15434//	variance: A 1D Tensor for population variance. Used for inference only;
15435// must be empty for training.
15436//
15437// Returns:
15438//	y: A 4D Tensor for output data.
15439//	batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
15440// to compute the running mean.
15441//	batch_variance: A 1D Tensor for the computed batch variance, to be used by
15442// TensorFlow to compute the running variance.
15443//	reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
15444// in the gradient computation.
15445//	reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
15446// in the cuDNN case), to be reused in the gradient computation.
15447//	reserve_space_3: A 1D Tensor for some intermediate results, to be reused in the gradient
15448// computation for better efficiency.
15449func FusedBatchNormV3(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormV3Attr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, reserve_space_3 tf.Output) {
15450	if scope.Err() != nil {
15451		return
15452	}
15453	attrs := map[string]interface{}{}
15454	for _, a := range optional {
15455		a(attrs)
15456	}
15457	opspec := tf.OpSpec{
15458		Type: "FusedBatchNormV3",
15459		Input: []tf.Input{
15460			x, scale, offset, mean, variance,
15461		},
15462		Attrs: attrs,
15463	}
15464	op := scope.AddOperation(opspec)
15465	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5)
15466}
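
// An illustrative sketch (not generated code) of a training-mode call; the
// six outputs include the batch statistics and three reserve tensors that the
// gradient op consumes. All shapes are hypothetical placeholders.
func exampleFusedBatchNormV3() (y, batchMean, batchVariance tf.Output) {
	s := NewScope()
	x := Placeholder(s, tf.Float)      // 4-D [batch, h, w, c]
	scale := Placeholder(s, tf.Float)  // 1-D [c]
	offset := Placeholder(s, tf.Float) // 1-D [c]
	mean := Placeholder(s, tf.Float)   // must be an empty 1-D tensor in training mode
	variance := Placeholder(s, tf.Float)
	y, batchMean, batchVariance, _, _, _ = FusedBatchNormV3(
		s, x, scale, offset, mean, variance,
		FusedBatchNormV3Epsilon(1e-3),
		FusedBatchNormV3IsTraining(true))
	return y, batchMean, batchVariance
}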
15467
15468// QuantizedInstanceNormAttr is an optional argument to QuantizedInstanceNorm.
15469type QuantizedInstanceNormAttr func(optionalAttr)
15470
15471// QuantizedInstanceNormOutputRangeGiven sets the optional output_range_given attribute to value.
15472//
// value: If True, `given_y_min` and `given_y_max` are used as the output
// range. Otherwise, the implementation computes the output range.
15476// If not specified, defaults to false
15477func QuantizedInstanceNormOutputRangeGiven(value bool) QuantizedInstanceNormAttr {
15478	return func(m optionalAttr) {
15479		m["output_range_given"] = value
15480	}
15481}
15482
15483// QuantizedInstanceNormGivenYMin sets the optional given_y_min attribute to value.
15484//
15485// value: Output in `y_min` if `output_range_given` is True.
15486// If not specified, defaults to 0
15487func QuantizedInstanceNormGivenYMin(value float32) QuantizedInstanceNormAttr {
15488	return func(m optionalAttr) {
15489		m["given_y_min"] = value
15490	}
15491}
15492
15493// QuantizedInstanceNormGivenYMax sets the optional given_y_max attribute to value.
15494//
15495// value: Output in `y_max` if `output_range_given` is True.
15496// If not specified, defaults to 0
15497func QuantizedInstanceNormGivenYMax(value float32) QuantizedInstanceNormAttr {
15498	return func(m optionalAttr) {
15499		m["given_y_max"] = value
15500	}
15501}
15502
15503// QuantizedInstanceNormVarianceEpsilon sets the optional variance_epsilon attribute to value.
15504//
15505// value: A small float number to avoid dividing by 0.
15506// If not specified, defaults to 1e-05
15507func QuantizedInstanceNormVarianceEpsilon(value float32) QuantizedInstanceNormAttr {
15508	return func(m optionalAttr) {
15509		m["variance_epsilon"] = value
15510	}
15511}
15512
15513// QuantizedInstanceNormMinSeparation sets the optional min_separation attribute to value.
15514//
15515// value: Minimum value of `y_max - y_min`
15516// If not specified, defaults to 0.001
15517func QuantizedInstanceNormMinSeparation(value float32) QuantizedInstanceNormAttr {
15518	return func(m optionalAttr) {
15519		m["min_separation"] = value
15520	}
15521}
15522
15523// Quantized Instance normalization.
15524//
15525// Arguments:
15526//	x: A 4D input Tensor.
15527//	x_min: The value represented by the lowest quantized input.
15528//	x_max: The value represented by the highest quantized input.
15529//
15530// Returns:
15531//	y: A 4D Tensor.
15532//	y_min: The value represented by the lowest quantized output.
15533//	y_max: The value represented by the highest quantized output.
15534func QuantizedInstanceNorm(scope *Scope, x tf.Output, x_min tf.Output, x_max tf.Output, optional ...QuantizedInstanceNormAttr) (y tf.Output, y_min tf.Output, y_max tf.Output) {
15535	if scope.Err() != nil {
15536		return
15537	}
15538	attrs := map[string]interface{}{}
15539	for _, a := range optional {
15540		a(attrs)
15541	}
15542	opspec := tf.OpSpec{
15543		Type: "QuantizedInstanceNorm",
15544		Input: []tf.Input{
15545			x, x_min, x_max,
15546		},
15547		Attrs: attrs,
15548	}
15549	op := scope.AddOperation(opspec)
15550	return op.Output(0), op.Output(1), op.Output(2)
15551}
15552
15553// Computes reciprocal of square root of x element-wise.
15554//
15555// I.e., \\(y = 1 / \sqrt{x}\\).
15556func Rsqrt(scope *Scope, x tf.Output) (y tf.Output) {
15557	if scope.Err() != nil {
15558		return
15559	}
15560	opspec := tf.OpSpec{
15561		Type: "Rsqrt",
15562		Input: []tf.Input{
15563			x,
15564		},
15565	}
15566	op := scope.AddOperation(opspec)
15567	return op.Output(0)
15568}
15569
15570// Computes square root of x element-wise.
15571//
15572// I.e., \\(y = \sqrt{x} = x^{1/2}\\).
15573func Sqrt(scope *Scope, x tf.Output) (y tf.Output) {
15574	if scope.Err() != nil {
15575		return
15576	}
15577	opspec := tf.OpSpec{
15578		Type: "Sqrt",
15579		Input: []tf.Input{
15580			x,
15581		},
15582	}
15583	op := scope.AddOperation(opspec)
15584	return op.Output(0)
15585}
15586
15587// Computes the gradient for the inverse of `x` wrt its input.
15588//
15589// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
15590// is the corresponding input gradient.
15591func InvGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
15592	if scope.Err() != nil {
15593		return
15594	}
15595	opspec := tf.OpSpec{
15596		Type: "InvGrad",
15597		Input: []tf.Input{
15598			y, dy,
15599		},
15600	}
15601	op := scope.AddOperation(opspec)
15602	return op.Output(0)
15603}
15604
15605// Computes the reciprocal of x element-wise.
15606//
15607// I.e., \\(y = 1 / x\\).
15608func Inv(scope *Scope, x tf.Output) (y tf.Output) {
15609	if scope.Err() != nil {
15610		return
15611	}
15612	opspec := tf.OpSpec{
15613		Type: "Inv",
15614		Input: []tf.Input{
15615			x,
15616		},
15617	}
15618	op := scope.AddOperation(opspec)
15619	return op.Output(0)
15620}
15621
15622// Produces a summary of any statistics recorded by the given statistics manager.
15623func ExperimentalStatsAggregatorSummary(scope *Scope, iterator tf.Output) (summary tf.Output) {
15624	if scope.Err() != nil {
15625		return
15626	}
15627	opspec := tf.OpSpec{
15628		Type: "ExperimentalStatsAggregatorSummary",
15629		Input: []tf.Input{
15630			iterator,
15631		},
15632	}
15633	op := scope.AddOperation(opspec)
15634	return op.Output(0)
15635}
15636
15637// BatchMatMulV3Attr is an optional argument to BatchMatMulV3.
15638type BatchMatMulV3Attr func(optionalAttr)
15639
15640// BatchMatMulV3AdjX sets the optional adj_x attribute to value.
15641//
15642// value: If `True`, adjoint the slices of `x`. Defaults to `False`.
15643// If not specified, defaults to false
15644func BatchMatMulV3AdjX(value bool) BatchMatMulV3Attr {
15645	return func(m optionalAttr) {
15646		m["adj_x"] = value
15647	}
15648}
15649
15650// BatchMatMulV3AdjY sets the optional adj_y attribute to value.
15651//
15652// value: If `True`, adjoint the slices of `y`. Defaults to `False`.
15653// If not specified, defaults to false
15654func BatchMatMulV3AdjY(value bool) BatchMatMulV3Attr {
15655	return func(m optionalAttr) {
15656		m["adj_y"] = value
15657	}
15658}
15659
15660// Multiplies slices of two tensors in batches.
15661//
15662// Multiplies all slices of `Tensor` `x` and `y` (each slice can be
15663// viewed as an element of a batch), and arranges the individual results
15664// in a single output tensor of the same batch size. Each of the
15665// individual slices can optionally be adjointed (to adjoint a matrix
15666// means to transpose and conjugate it) before multiplication by setting
15667// the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
15668//
15669// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
15670// and `[..., r_y, c_y]`.
15671//
15672// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
15673//
15674//     r_o = c_x if adj_x else r_x
15675//     c_o = r_y if adj_y else c_y
15676//
15677// It is computed as:
15678//
15679//     output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
15680//
15681// *NOTE*: `BatchMatMulV3` supports broadcasting in the batch dimensions. More
15682// about broadcasting
15683// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
15684//
15685//
15686// Arguments:
15687//	x: 2-D or higher with shape `[..., r_x, c_x]`.
15688//	y: 2-D or higher with shape `[..., r_y, c_y]`.
//	Tout: If not specified, Tout is the same type as the input type.
15690//
// Returns 2-D or higher with shape `[..., r_o, c_o]`
15692func BatchMatMulV3(scope *Scope, x tf.Output, y tf.Output, Tout tf.DataType, optional ...BatchMatMulV3Attr) (output tf.Output) {
15693	if scope.Err() != nil {
15694		return
15695	}
15696	attrs := map[string]interface{}{"Tout": Tout}
15697	for _, a := range optional {
15698		a(attrs)
15699	}
15700	opspec := tf.OpSpec{
15701		Type: "BatchMatMulV3",
15702		Input: []tf.Input{
15703			x, y,
15704		},
15705		Attrs: attrs,
15706	}
15707	op := scope.AddOperation(opspec)
15708	return op.Output(0)
15709}
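
// An illustrative sketch (not generated code): batched matmul with the
// second operand adjointed, so each slice of y is conjugate-transposed
// before the multiply. Here Tout simply echoes the input type.
func exampleBatchMatMulV3() tf.Output {
	s := NewScope()
	x := Placeholder(s, tf.Float) // [..., r_x, c_x]
	y := Placeholder(s, tf.Float) // [..., r_y, c_y]
	return BatchMatMulV3(s, x, y, tf.Float, BatchMatMulV3AdjY(true))
}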
15710
15711// RaggedTensorFromVariantAttr is an optional argument to RaggedTensorFromVariant.
15712type RaggedTensorFromVariantAttr func(optionalAttr)
15713
15714// RaggedTensorFromVariantTsplits sets the optional Tsplits attribute to value.
15715// If not specified, defaults to DT_INT64
15716func RaggedTensorFromVariantTsplits(value tf.DataType) RaggedTensorFromVariantAttr {
15717	return func(m optionalAttr) {
15718		m["Tsplits"] = value
15719	}
15720}
15721
15722// Decodes a `variant` Tensor into a `RaggedTensor`.
15723//
15724// Decodes the given `variant` Tensor and returns a `RaggedTensor`. The input
15725// could be a scalar, meaning it encodes a single `RaggedTensor` with ragged_rank
15726// `output_ragged_rank`. It could also have an arbitrary rank, in which case each
15727// element is decoded into a `RaggedTensor` with ragged_rank `input_ragged_rank`
15728// and these are then stacked according to the input shape to output a single
15729// `RaggedTensor` with ragged_rank `output_ragged_rank`. Each `variant` element in
15730// the input Tensor is decoded by retrieving from the element a 1-D `variant`
15731// Tensor with `input_ragged_rank + 1` Tensors, corresponding to the splits and
15732// values of the decoded `RaggedTensor`. If `input_ragged_rank` is -1, then it is
15733// inferred as `output_ragged_rank` - `rank(encoded_ragged)`. See
15734// `RaggedTensorToVariant` for the corresponding encoding logic.
15735//
15736//
15737// Arguments:
15738//	encoded_ragged: A `variant` Tensor containing encoded `RaggedTensor`s.
15739//	input_ragged_rank: The ragged rank of each encoded `RaggedTensor` component in the input. If set to
15740// -1, this is inferred as `output_ragged_rank` - `rank(encoded_ragged)`
15741//	output_ragged_rank: The expected ragged rank of the output `RaggedTensor`. The following must hold:
15742// `output_ragged_rank = rank(encoded_ragged) + input_ragged_rank`.
15743//
15744//
15745// Returns:
15746//	output_nested_splits: A list of one or more Tensors representing the splits of the output
15747// `RaggedTensor`.
15748//	output_dense_values: A Tensor representing the values of the output `RaggedTensor`.
15749func RaggedTensorFromVariant(scope *Scope, encoded_ragged tf.Output, input_ragged_rank int64, output_ragged_rank int64, Tvalues tf.DataType, optional ...RaggedTensorFromVariantAttr) (output_nested_splits []tf.Output, output_dense_values tf.Output) {
15750	if scope.Err() != nil {
15751		return
15752	}
15753	attrs := map[string]interface{}{"input_ragged_rank": input_ragged_rank, "output_ragged_rank": output_ragged_rank, "Tvalues": Tvalues}
15754	for _, a := range optional {
15755		a(attrs)
15756	}
15757	opspec := tf.OpSpec{
15758		Type: "RaggedTensorFromVariant",
15759		Input: []tf.Input{
15760			encoded_ragged,
15761		},
15762		Attrs: attrs,
15763	}
15764	op := scope.AddOperation(opspec)
15765	if scope.Err() != nil {
15766		return
15767	}
15768	var idx int
15769	var err error
15770	if output_nested_splits, idx, err = makeOutputList(op, idx, "output_nested_splits"); err != nil {
15771		scope.UpdateErr("RaggedTensorFromVariant", err)
15772		return
15773	}
15774	output_dense_values = op.Output(idx)
15775	return output_nested_splits, output_dense_values
15776}
15777
15778// BatchMatMulAttr is an optional argument to BatchMatMul.
15779type BatchMatMulAttr func(optionalAttr)
15780
15781// BatchMatMulAdjX sets the optional adj_x attribute to value.
15782//
15783// value: If `True`, adjoint the slices of `x`. Defaults to `False`.
15784// If not specified, defaults to false
15785func BatchMatMulAdjX(value bool) BatchMatMulAttr {
15786	return func(m optionalAttr) {
15787		m["adj_x"] = value
15788	}
15789}
15790
15791// BatchMatMulAdjY sets the optional adj_y attribute to value.
15792//
15793// value: If `True`, adjoint the slices of `y`. Defaults to `False`.
15794// If not specified, defaults to false
15795func BatchMatMulAdjY(value bool) BatchMatMulAttr {
15796	return func(m optionalAttr) {
15797		m["adj_y"] = value
15798	}
15799}
15800
15801// Multiplies slices of two tensors in batches.
15802//
15803// Multiplies all slices of `Tensor` `x` and `y` (each slice can be
15804// viewed as an element of a batch), and arranges the individual results
15805// in a single output tensor of the same batch size. Each of the
15806// individual slices can optionally be adjointed (to adjoint a matrix
15807// means to transpose and conjugate it) before multiplication by setting
15808// the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
15809//
15810// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
15811// and `[..., r_y, c_y]`.
15812//
15813// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
15814//
15815//     r_o = c_x if adj_x else r_x
15816//     c_o = r_y if adj_y else c_y
15817//
15818// It is computed as:
15819//
15820//     output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
15821//
15822// Arguments:
15823//	x: 2-D or higher with shape `[..., r_x, c_x]`.
15824//	y: 2-D or higher with shape `[..., r_y, c_y]`.
15825//
// Returns 2-D or higher with shape `[..., r_o, c_o]`
15827func BatchMatMul(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMulAttr) (output tf.Output) {
15828	if scope.Err() != nil {
15829		return
15830	}
15831	attrs := map[string]interface{}{}
15832	for _, a := range optional {
15833		a(attrs)
15834	}
15835	opspec := tf.OpSpec{
15836		Type: "BatchMatMul",
15837		Input: []tf.Input{
15838			x, y,
15839		},
15840		Attrs: attrs,
15841	}
15842	op := scope.AddOperation(opspec)
15843	return op.Output(0)
15844}
15845
15846// Returns the element-wise sum of a list of tensors.
15847//
15848// `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
15849// wait for all of its inputs to be ready before beginning to sum. This can
15850// save memory if inputs are ready at different times, since minimum temporary
// storage is proportional to the output size rather than the total size of the inputs.
15852//
15853// Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.
15854//
15855// Returns a `Tensor` of same shape and type as the elements of `inputs`.
15856//
15857// Arguments:
15858//	inputs: A list of `Tensor` objects, each with same shape and type.
15859//	shape: Shape of elements of `inputs`.
15860func AccumulateNV2(scope *Scope, inputs []tf.Output, shape tf.Shape) (sum tf.Output) {
15861	if scope.Err() != nil {
15862		return
15863	}
15864	attrs := map[string]interface{}{"shape": shape}
15865	opspec := tf.OpSpec{
15866		Type: "AccumulateNV2",
15867		Input: []tf.Input{
15868			tf.OutputList(inputs),
15869		},
15870		Attrs: attrs,
15871	}
15872	op := scope.AddOperation(opspec)
15873	return op.Output(0)
15874}
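
// An illustrative sketch (not generated code): summing a list of tensors
// whose common shape must be passed as the `shape` attribute. Assumes
// tf.MakeShape from the parent package.
func exampleAccumulateNV2() tf.Output {
	s := NewScope()
	a := Const(s, []float32{1, 2})
	b := Const(s, []float32{3, 4})
	// sum ==> [4, 6]; shape [2] must match every element of inputs.
	return AccumulateNV2(s, []tf.Output{a, b}, tf.MakeShape(2))
}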
15875
15876// MaxPoolGradAttr is an optional argument to MaxPoolGrad.
15877type MaxPoolGradAttr func(optionalAttr)
15878
15879// MaxPoolGradExplicitPaddings sets the optional explicit_paddings attribute to value.
15880// If not specified, defaults to {}
15881func MaxPoolGradExplicitPaddings(value []int64) MaxPoolGradAttr {
15882	return func(m optionalAttr) {
15883		m["explicit_paddings"] = value
15884	}
15885}
15886
15887// MaxPoolGradDataFormat sets the optional data_format attribute to value.
15888//
15889// value: Specify the data format of the input and output data. With the
15890// default format "NHWC", the data is stored in the order of:
15891//     [batch, in_height, in_width, in_channels].
15892// Alternatively, the format could be "NCHW", the data storage order of:
15893//     [batch, in_channels, in_height, in_width].
15894// If not specified, defaults to "NHWC"
15895func MaxPoolGradDataFormat(value string) MaxPoolGradAttr {
15896	return func(m optionalAttr) {
15897		m["data_format"] = value
15898	}
15899}
15900
15901// Computes gradients of the maxpooling function.
15902//
15903// Arguments:
15904//	orig_input: The original input tensor.
15905//	orig_output: The original output tensor.
15906//	grad: 4-D.  Gradients w.r.t. the output of `max_pool`.
15907//	ksize: The size of the window for each dimension of the input tensor.
15908//	strides: The stride of the sliding window for each dimension of the
15909// input tensor.
15910//	padding: The type of padding algorithm to use.
15911//
15912// Returns Gradients w.r.t. the input to `max_pool`.
15913func MaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradAttr) (output tf.Output) {
15914	if scope.Err() != nil {
15915		return
15916	}
15917	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
15918	for _, a := range optional {
15919		a(attrs)
15920	}
15921	opspec := tf.OpSpec{
15922		Type: "MaxPoolGrad",
15923		Input: []tf.Input{
15924			orig_input, orig_output, grad,
15925		},
15926		Attrs: attrs,
15927	}
15928	op := scope.AddOperation(opspec)
15929	return op.Output(0)
15930}
15931
15932// Rolls the elements of a tensor along an axis.
15933//
15934// The elements are shifted positively (towards larger indices) by the offset of
15935// `shift` along the dimension of `axis`. Negative `shift` values will shift
15936// elements in the opposite direction. Elements that roll passed the last position
15937// will wrap around to the first and vice versa. Multiple shifts along multiple
15938// axes may be specified.
15939//
15940// For example:
15941//
15942// ```
15943// # 't' is [0, 1, 2, 3, 4]
15944// roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]
15945//
15946// # shifting along multiple dimensions
15947// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
15948// roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]
15949//
15950// # shifting along the same axis multiple times
15951// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
15952// roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
15953// ```
15954//
15955// Arguments:
15956//
15957//	shift: Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which
15958// elements are shifted positively (towards larger indices) along the dimension
15959// specified by `axis[i]`. Negative shifts will roll the elements in the opposite
15960// direction.
//	axis: Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension in which the shift
// `shift[i]` should occur. If the same axis is referenced more than once, the
15963// total shift for that axis will be the sum of all the shifts that belong to that
15964// axis.
15965//
15966// Returns Has the same shape and size as the input. The elements are shifted
15967// positively (towards larger indices) by the offsets of `shift` along the
15968// dimensions of `axis`.
15969func Roll(scope *Scope, input tf.Output, shift tf.Output, axis tf.Output) (output tf.Output) {
15970	if scope.Err() != nil {
15971		return
15972	}
15973	opspec := tf.OpSpec{
15974		Type: "Roll",
15975		Input: []tf.Input{
15976			input, shift, axis,
15977		},
15978	}
15979	op := scope.AddOperation(opspec)
15980	return op.Output(0)
15981}
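
// An illustrative sketch (not generated code) mirroring the first example in
// the comment above: a positive shift of 2 along axis 0.
func exampleRoll() tf.Output {
	s := NewScope()
	t := Const(s, []int32{0, 1, 2, 3, 4})
	shift := Const(s, int32(2))
	axis := Const(s, int32(0))
	return Roll(s, t, shift, axis) // ==> [3, 4, 0, 1, 2]
}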
15982
15983// Converts a tensor to a scalar predicate.
15984//
15985// Converts a tensor to a scalar predicate with the following rules:
15986//
15987// - For 0D tensors, truthiness is determined by comparing against a "zero"
15988//   value. For numerical types it is the obvious zero. For strings it is the
15989//   empty string.
15990//
15991// - For >0D tensors, truthiness is determined by looking at the number of
//   elements. If it has zero elements, then the result is false. Otherwise the
15993//   result is true.
15994//
15995// This matches the behavior of If and While for determining if a tensor counts
15996// as true/false for a branch condition.
15997func ToBool(scope *Scope, input tf.Output) (output tf.Output) {
15998	if scope.Err() != nil {
15999		return
16000	}
16001	opspec := tf.OpSpec{
16002		Type: "ToBool",
16003		Input: []tf.Input{
16004			input,
16005		},
16006	}
16007	op := scope.AddOperation(opspec)
16008	return op.Output(0)
16009}
16010
16011// GenerateBoundingBoxProposalsAttr is an optional argument to GenerateBoundingBoxProposals.
16012type GenerateBoundingBoxProposalsAttr func(optionalAttr)
16013
16014// GenerateBoundingBoxProposalsPostNmsTopn sets the optional post_nms_topn attribute to value.
16015//
16016// value: An integer. Maximum number of rois in the output.
16017// If not specified, defaults to 300
16018func GenerateBoundingBoxProposalsPostNmsTopn(value int64) GenerateBoundingBoxProposalsAttr {
16019	return func(m optionalAttr) {
16020		m["post_nms_topn"] = value
16021	}
16022}
16023
// This op produces Regions of Interest from given bounding boxes (bbox_deltas) encoded wrt anchors according to eq. 2 in arXiv:1506.01497
16025//
16026//       The op selects top `pre_nms_topn` scoring boxes, decodes them with respect to anchors,
16027//       applies non-maximal suppression on overlapping boxes with higher than
//       `nms_threshold` intersection-over-union (iou) value, discarding boxes where the shorter
16029//       side is less than `min_size`.
16030//       Inputs:
16031//       `scores`: A 4D tensor of shape [Batch, Height, Width, Num Anchors] containing the scores per anchor at given position
16032//       `bbox_deltas`: is a tensor of shape [Batch, Height, Width, 4 x Num Anchors] boxes encoded to each anchor
16033//       `anchors`: A 1D tensor of shape [4 x Num Anchors], representing the anchors.
16034//       Outputs:
16035//       `rois`: output RoIs, a 3D tensor of shape [Batch, post_nms_topn, 4], padded by 0 if less than post_nms_topn candidates found.
16036//       `roi_probabilities`: probability scores of each roi in 'rois', a 2D tensor of shape [Batch,post_nms_topn], padded with 0 if needed, sorted by scores.
16037//
16038// Arguments:
//	scores: A 4-D float tensor of shape `[num_images, height, width, num_anchors]` containing scores of the boxes for given anchors, can be unsorted.
//	bbox_deltas: A 4-D float tensor of shape `[num_images, height, width, 4 x num_anchors]` encoding boxes with respect to each anchor.
16041// Coordinates are given in the form [dy, dx, dh, dw].
16042//	image_info: A 2-D float tensor of shape `[num_images, 5]` containing image information Height, Width, Scale.
16043//	anchors: A 2-D float tensor of shape `[num_anchors, 4]` describing the anchor boxes. Boxes are formatted in the form [y1, x1, y2, x2].
16044//	nms_threshold: A scalar float tensor for non-maximal-suppression threshold.
16045//	pre_nms_topn: A scalar int tensor for the number of top scoring boxes to be used as input.
16046//	min_size: A scalar float tensor. Any box that has a smaller size than min_size will be discarded.
16047//
16048// Returns:
16049//	rois: A 3-D float tensor of shape `[num_images,post_nms_topn,4]` representing the selected
16050// region of interest boxes. Sorted in descending order in scores.
16051//	roi_probabilities: A 2-D float tensor of shape `[num_images, post_nms_topn]` representing the score of the
16052// region of interest box in `rois` tensor at the same index.
16053func GenerateBoundingBoxProposals(scope *Scope, scores tf.Output, bbox_deltas tf.Output, image_info tf.Output, anchors tf.Output, nms_threshold tf.Output, pre_nms_topn tf.Output, min_size tf.Output, optional ...GenerateBoundingBoxProposalsAttr) (rois tf.Output, roi_probabilities tf.Output) {
16054	if scope.Err() != nil {
16055		return
16056	}
16057	attrs := map[string]interface{}{}
16058	for _, a := range optional {
16059		a(attrs)
16060	}
16061	opspec := tf.OpSpec{
16062		Type: "GenerateBoundingBoxProposals",
16063		Input: []tf.Input{
16064			scores, bbox_deltas, image_info, anchors, nms_threshold, pre_nms_topn, min_size,
16065		},
16066		Attrs: attrs,
16067	}
16068	op := scope.AddOperation(opspec)
16069	return op.Output(0), op.Output(1)
16070}
16071
16072// InitializeTableFromTextFileV2Attr is an optional argument to InitializeTableFromTextFileV2.
16073type InitializeTableFromTextFileV2Attr func(optionalAttr)
16074
16075// InitializeTableFromTextFileV2VocabSize sets the optional vocab_size attribute to value.
16076//
16077// value: Number of elements of the file, use -1 if unknown.
16078// If not specified, defaults to -1
16079//
16080// REQUIRES: value >= -1
16081func InitializeTableFromTextFileV2VocabSize(value int64) InitializeTableFromTextFileV2Attr {
16082	return func(m optionalAttr) {
16083		m["vocab_size"] = value
16084	}
16085}
16086
16087// InitializeTableFromTextFileV2Delimiter sets the optional delimiter attribute to value.
16088//
16089// value: Delimiter to separate fields in a line.
16090// If not specified, defaults to "\t"
16091func InitializeTableFromTextFileV2Delimiter(value string) InitializeTableFromTextFileV2Attr {
16092	return func(m optionalAttr) {
16093		m["delimiter"] = value
16094	}
16095}
16096
16097// InitializeTableFromTextFileV2Offset sets the optional offset attribute to value.
16098// If not specified, defaults to 0
16099func InitializeTableFromTextFileV2Offset(value int64) InitializeTableFromTextFileV2Attr {
16100	return func(m optionalAttr) {
16101		m["offset"] = value
16102	}
16103}
16104
16105// Initializes a table from a text file.
16106//
16107// It inserts one key-value pair into the table for each line of the file.
// The key and value are extracted from the whole line content, from elements of
// the line split on `delimiter`, or from the line number (starting from zero).
16110// Where to extract the key and value from a line is specified by `key_index` and
16111// `value_index`.
16112//
// - A value of -1 means use the line number (starting from zero); expects `int64`.
// - A value of -2 means use the whole line content; expects `string`.
16115// - A value >= 0 means use the index (starting at zero) of the split line based
16116//   on `delimiter`.
16117//
16118// Arguments:
16119//	table_handle: Handle to a table which will be initialized.
16120//	filename: Filename of a vocabulary text file.
16121//	key_index: Column index in a line to get the table `key` values from.
16122//	value_index: Column index that represents information of a line to get the table
16123// `value` values from.
16124//
16125// Returns the created operation.
16126func InitializeTableFromTextFileV2(scope *Scope, table_handle tf.Output, filename tf.Output, key_index int64, value_index int64, optional ...InitializeTableFromTextFileV2Attr) (o *tf.Operation) {
16127	if scope.Err() != nil {
16128		return
16129	}
16130	attrs := map[string]interface{}{"key_index": key_index, "value_index": value_index}
16131	for _, a := range optional {
16132		a(attrs)
16133	}
16134	opspec := tf.OpSpec{
16135		Type: "InitializeTableFromTextFileV2",
16136		Input: []tf.Input{
16137			table_handle, filename,
16138		},
16139		Attrs: attrs,
16140	}
16141	return scope.AddOperation(opspec)
16142}
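
// An illustrative sketch (not generated code): initializing a string->int64
// table from a vocabulary file. The filename is hypothetical; key_index -2
// takes the whole line as the key and value_index -1 uses the line number.
func exampleInitializeTableFromTextFile() *tf.Operation {
	s := NewScope()
	table := HashTableV2(s, tf.String, tf.Int64)
	filename := Const(s, "vocab.txt") // hypothetical path
	return InitializeTableFromTextFileV2(s, table, filename, -2, -1,
		InitializeTableFromTextFileV2VocabSize(10000),
		InitializeTableFromTextFileV2Delimiter("\t"))
}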
16143
16144// MutableDenseHashTableV2Attr is an optional argument to MutableDenseHashTableV2.
16145type MutableDenseHashTableV2Attr func(optionalAttr)
16146
16147// MutableDenseHashTableV2Container sets the optional container attribute to value.
16148//
16149// value: If non-empty, this table is placed in the given container.
16150// Otherwise, a default container is used.
16151// If not specified, defaults to ""
16152func MutableDenseHashTableV2Container(value string) MutableDenseHashTableV2Attr {
16153	return func(m optionalAttr) {
16154		m["container"] = value
16155	}
16156}
16157
16158// MutableDenseHashTableV2SharedName sets the optional shared_name attribute to value.
16159//
16160// value: If non-empty, this table is shared under the given name across
16161// multiple sessions.
16162// If not specified, defaults to ""
16163func MutableDenseHashTableV2SharedName(value string) MutableDenseHashTableV2Attr {
16164	return func(m optionalAttr) {
16165		m["shared_name"] = value
16166	}
16167}
16168
16169// MutableDenseHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
16170// If not specified, defaults to false
16171func MutableDenseHashTableV2UseNodeNameSharing(value bool) MutableDenseHashTableV2Attr {
16172	return func(m optionalAttr) {
16173		m["use_node_name_sharing"] = value
16174	}
16175}
16176
16177// MutableDenseHashTableV2ValueShape sets the optional value_shape attribute to value.
16178//
16179// value: The shape of each value.
16180// If not specified, defaults to {}
16181func MutableDenseHashTableV2ValueShape(value tf.Shape) MutableDenseHashTableV2Attr {
16182	return func(m optionalAttr) {
16183		m["value_shape"] = value
16184	}
16185}
16186
16187// MutableDenseHashTableV2InitialNumBuckets sets the optional initial_num_buckets attribute to value.
16188//
// value: The initial number of hash table buckets. Must be a power
// of 2.
16191// If not specified, defaults to 131072
16192func MutableDenseHashTableV2InitialNumBuckets(value int64) MutableDenseHashTableV2Attr {
16193	return func(m optionalAttr) {
16194		m["initial_num_buckets"] = value
16195	}
16196}
16197
16198// MutableDenseHashTableV2MaxLoadFactor sets the optional max_load_factor attribute to value.
16199//
16200// value: The maximum ratio between number of entries and number of
16201// buckets before growing the table. Must be between 0 and 1.
16202// If not specified, defaults to 0.8
16203func MutableDenseHashTableV2MaxLoadFactor(value float32) MutableDenseHashTableV2Attr {
16204	return func(m optionalAttr) {
16205		m["max_load_factor"] = value
16206	}
16207}
16208
16209// Creates an empty hash table that uses tensors as the backing store.
16210//
16211// It uses "open addressing" with quadratic reprobing to resolve
16212// collisions.
16213//
16214// This op creates a mutable hash table, specifying the type of its keys and
16215// values. Each value must be a scalar. Data can be inserted into the table using
16216// the insert operations. It does not support the initialization operation.
16217//
16218// Arguments:
16219//	empty_key: The key used to represent empty key buckets internally. Must not
16220// be used in insert or lookup operations.
16221//
16222//	value_dtype: Type of the table values.
16223//
16224// Returns Handle to a table.
16225func MutableDenseHashTableV2(scope *Scope, empty_key tf.Output, deleted_key tf.Output, value_dtype tf.DataType, optional ...MutableDenseHashTableV2Attr) (table_handle tf.Output) {
16226	if scope.Err() != nil {
16227		return
16228	}
16229	attrs := map[string]interface{}{"value_dtype": value_dtype}
16230	for _, a := range optional {
16231		a(attrs)
16232	}
16233	opspec := tf.OpSpec{
16234		Type: "MutableDenseHashTableV2",
16235		Input: []tf.Input{
16236			empty_key, deleted_key,
16237		},
16238		Attrs: attrs,
16239	}
16240	op := scope.AddOperation(opspec)
16241	return op.Output(0)
16242}
16243
16244// MutableHashTableV2Attr is an optional argument to MutableHashTableV2.
16245type MutableHashTableV2Attr func(optionalAttr)
16246
16247// MutableHashTableV2Container sets the optional container attribute to value.
16248//
16249// value: If non-empty, this table is placed in the given container.
16250// Otherwise, a default container is used.
16251// If not specified, defaults to ""
16252func MutableHashTableV2Container(value string) MutableHashTableV2Attr {
16253	return func(m optionalAttr) {
16254		m["container"] = value
16255	}
16256}
16257
16258// MutableHashTableV2SharedName sets the optional shared_name attribute to value.
16259//
16260// value: If non-empty, this table is shared under the given name across
16261// multiple sessions.
16262// If not specified, defaults to ""
16263func MutableHashTableV2SharedName(value string) MutableHashTableV2Attr {
16264	return func(m optionalAttr) {
16265		m["shared_name"] = value
16266	}
16267}
16268
16269// MutableHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
16270//
16271// value: If true and shared_name is empty, the table is shared
16272// using the node name.
16273// If not specified, defaults to false
16274func MutableHashTableV2UseNodeNameSharing(value bool) MutableHashTableV2Attr {
16275	return func(m optionalAttr) {
16276		m["use_node_name_sharing"] = value
16277	}
16278}
16279
16280// Creates an empty hash table.
16281//
16282// This op creates a mutable hash table, specifying the type of its keys and
16283// values. Each value must be a scalar. Data can be inserted into the table using
16284// the insert operations. It does not support the initialization operation.
16285//
16286// Arguments:
16287//	key_dtype: Type of the table keys.
16288//	value_dtype: Type of the table values.
16289//
16290// Returns Handle to a table.
16291func MutableHashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableV2Attr) (table_handle tf.Output) {
16292	if scope.Err() != nil {
16293		return
16294	}
16295	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
16296	for _, a := range optional {
16297		a(attrs)
16298	}
16299	opspec := tf.OpSpec{
16300		Type: "MutableHashTableV2",
16301
16302		Attrs: attrs,
16303	}
16304	op := scope.AddOperation(opspec)
16305	return op.Output(0)
16306}
16307
// Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior.
//
// Returns a boolean indicating whether to continue centering.
16309//
16310// Arguments:
16311//	tree_ensemble_handle: Handle to the tree ensemble.
16312//	mean_gradients: A tensor with shape=[logits_dimension] with mean of gradients for a first node.
16313//	mean_hessians: A tensor with shape=[logits_dimension] mean of hessians for a first node.
16314//	l1: l1 regularization factor on leaf weights, per instance based.
16315//	l2: l2 regularization factor on leaf weights, per instance based.
16316//
16317// Returns Bool, whether to continue bias centering.
16318func BoostedTreesCenterBias(scope *Scope, tree_ensemble_handle tf.Output, mean_gradients tf.Output, mean_hessians tf.Output, l1 tf.Output, l2 tf.Output) (continue_centering tf.Output) {
16319	if scope.Err() != nil {
16320		return
16321	}
16322	opspec := tf.OpSpec{
16323		Type: "BoostedTreesCenterBias",
16324		Input: []tf.Input{
16325			tree_ensemble_handle, mean_gradients, mean_hessians, l1, l2,
16326		},
16327	}
16328	op := scope.AddOperation(opspec)
16329	return op.Output(0)
16330}
16331
16332// HashTableV2Attr is an optional argument to HashTableV2.
16333type HashTableV2Attr func(optionalAttr)
16334
16335// HashTableV2Container sets the optional container attribute to value.
16336//
16337// value: If non-empty, this table is placed in the given container.
16338// Otherwise, a default container is used.
16339// If not specified, defaults to ""
16340func HashTableV2Container(value string) HashTableV2Attr {
16341	return func(m optionalAttr) {
16342		m["container"] = value
16343	}
16344}
16345
16346// HashTableV2SharedName sets the optional shared_name attribute to value.
16347//
16348// value: If non-empty, this table is shared under the given name across
16349// multiple sessions.
16350// If not specified, defaults to ""
16351func HashTableV2SharedName(value string) HashTableV2Attr {
16352	return func(m optionalAttr) {
16353		m["shared_name"] = value
16354	}
16355}
16356
16357// HashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
16358//
16359// value: If true and shared_name is empty, the table is shared
16360// using the node name.
16361// If not specified, defaults to false
16362func HashTableV2UseNodeNameSharing(value bool) HashTableV2Attr {
16363	return func(m optionalAttr) {
16364		m["use_node_name_sharing"] = value
16365	}
16366}
16367
16368// Creates a non-initialized hash table.
16369//
16370// This op creates a hash table, specifying the type of its keys and values.
16371// Before using the table you will have to initialize it.  After initialization the
16372// table will be immutable.
16373//
16374// Arguments:
16375//	key_dtype: Type of the table keys.
16376//	value_dtype: Type of the table values.
16377//
16378// Returns Handle to a table.
16379func HashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...HashTableV2Attr) (table_handle tf.Output) {
16380	if scope.Err() != nil {
16381		return
16382	}
16383	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
16384	for _, a := range optional {
16385		a(attrs)
16386	}
16387	opspec := tf.OpSpec{
16388		Type: "HashTableV2",
16389
16390		Attrs: attrs,
16391	}
16392	op := scope.AddOperation(opspec)
16393	return op.Output(0)
16394}
16395
16396// Check if the input matches the regex pattern.
16397//
16398// The input is a string tensor of any shape. The pattern is a scalar
16399// string tensor which is applied to every element of the input tensor.
16400// The boolean values (True or False) of the output tensor indicate
16401// if the input matches the regex pattern provided.
16402//
16403// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
16404//
16405// Examples:
16406//
16407// >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*lib$")
16408// <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
16409// >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*TF$")
16410// <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  True])>
16411//
16412// Arguments:
16413//	input: A string tensor of the text to be processed.
16414//	pattern: A scalar string tensor containing the regular expression to match the input.
16415//
16416// Returns A bool tensor with the same shape as `input`.
16417func RegexFullMatch(scope *Scope, input tf.Output, pattern tf.Output) (output tf.Output) {
16418	if scope.Err() != nil {
16419		return
16420	}
16421	opspec := tf.OpSpec{
16422		Type: "RegexFullMatch",
16423		Input: []tf.Input{
16424			input, pattern,
16425		},
16426	}
16427	op := scope.AddOperation(opspec)
16428	return op.Output(0)
16429}
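
// Editor's note: a minimal usage sketch (not machine generated) mirroring the
// Python example above.
//
// ```
// s := NewScope()
// input := Const(s, []string{"TF lib", "lib TF"})
// pattern := Const(s, ".*lib$")
// matched := RegexFullMatch(s, input, pattern)
// // Fetching `matched` in a session yields [true false].
// ```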
16430
16431// MatrixDiagV3Attr is an optional argument to MatrixDiagV3.
16432type MatrixDiagV3Attr func(optionalAttr)
16433
16434// MatrixDiagV3Align sets the optional align attribute to value.
16435//
16436// value: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is
// a string specifying how superdiagonals and subdiagonals should be aligned.
// There are four possible alignments: "RIGHT_LEFT" (default),
16439// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals
16440// to the right (left-pads the row) and subdiagonals to the left (right-pads the
16441// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is
16442// the opposite alignment.
16443// If not specified, defaults to "RIGHT_LEFT"
16444func MatrixDiagV3Align(value string) MatrixDiagV3Attr {
16445	return func(m optionalAttr) {
16446		m["align"] = value
16447	}
16448}
16449
16450// Returns a batched diagonal tensor with given batched diagonal values.
16451//
16452// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
16453// diagonals of a matrix, with everything else padded with `padding`. `num_rows`
16454// and `num_cols` specify the dimension of the innermost matrix of the output. If
16455// both are not specified, the op assumes the innermost matrix is square and infers
16456// its size from `k` and the innermost dimension of `diagonal`. If only one of them
16457// is specified, the op assumes the unspecified value is the smallest possible
16458// based on other criteria.
16459//
16460// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has
16461// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one
16462// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank
16463// `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
16464//
16465// The second innermost dimension of `diagonal` has double meaning.
16466// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size
16467// [I, J, ..., M], and the output tensor is:
16468//
16469// ```
16470// output[i, j, ..., l, m, n]
16471//   = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
16472//     padding_value                             ; otherwise
16473// ```
16474//
16475// Otherwise, `M` is treated as the number of diagonals for the matrix in the
16476// same batch (`M = k[1]-k[0]+1`), and the output tensor is:
16477//
16478// ```
16479// output[i, j, ..., l, m, n]
16480//   = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
16481//     padding_value                                     ; otherwise
16482// ```
// where `d = n - m`, `diag_index = k[1] - d`, and
16484// `index_in_diag = n - max(d, 0) + offset`.
16485//
16486// `offset` is zero except when the alignment of the diagonal is to the right.
16487// ```
16488// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
16489//                                            and `d >= 0`) or
16490//                                          (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
16491//                                            and `d <= 0`)
16492//          0                          ; otherwise
16493// ```
16494// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
16495//
16496// For example:
16497//
16498// ```
16499// # The main diagonal.
16500// diagonal = np.array([[1, 2, 3, 4],            # Input shape: (2, 4)
16501//                      [5, 6, 7, 8]])
16502// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0],  # Output shape: (2, 4, 4)
16503//                                [0, 2, 0, 0],
16504//                                [0, 0, 3, 0],
16505//                                [0, 0, 0, 4]],
16506//                               [[5, 0, 0, 0],
16507//                                [0, 6, 0, 0],
16508//                                [0, 0, 7, 0],
16509//                                [0, 0, 0, 8]]]
16510//
16511// # A superdiagonal (per batch).
16512// diagonal = np.array([[1, 2, 3],  # Input shape: (2, 3)
16513//                      [4, 5, 6]])
16514// tf.matrix_diag(diagonal, k = 1)
16515//   ==> [[[0, 1, 0, 0],  # Output shape: (2, 4, 4)
16516//         [0, 0, 2, 0],
16517//         [0, 0, 0, 3],
16518//         [0, 0, 0, 0]],
16519//        [[0, 4, 0, 0],
16520//         [0, 0, 5, 0],
16521//         [0, 0, 0, 6],
16522//         [0, 0, 0, 0]]]
16523//
16524// # A tridiagonal band (per batch).
16525// diagonals = np.array([[[0, 8, 9],  # Input shape: (2, 2, 3)
16526//                        [1, 2, 3],
16527//                        [4, 5, 0]],
16528//                       [[0, 2, 3],
16529//                        [6, 7, 9],
16530//                        [9, 1, 0]]])
16531// tf.matrix_diag(diagonals, k = (-1, 1))
16532//   ==> [[[1, 8, 0],  # Output shape: (2, 3, 3)
16533//         [4, 2, 9],
16534//         [0, 5, 3]],
16535//        [[6, 2, 0],
16536//         [9, 7, 3],
16537//         [0, 1, 9]]]
16538//
16539// # LEFT_RIGHT alignment.
16540// diagonals = np.array([[[8, 9, 0],  # Input shape: (2, 2, 3)
16541//                        [1, 2, 3],
16542//                        [0, 4, 5]],
16543//                       [[2, 3, 0],
16544//                        [6, 7, 9],
16545//                        [0, 9, 1]]])
16546// tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT")
16547//   ==> [[[1, 8, 0],  # Output shape: (2, 3, 3)
16548//         [4, 2, 9],
16549//         [0, 5, 3]],
16550//        [[6, 2, 0],
16551//         [9, 7, 3],
16552//         [0, 1, 9]]]
16553//
16554// # Rectangular matrix.
16555// diagonal = np.array([1, 2])  # Input shape: (2)
16556// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
16557//   ==> [[0, 0, 0, 0],  # Output shape: (3, 4)
16558//        [1, 0, 0, 0],
16559//        [0, 2, 0, 0]]
16560//
16561// # Rectangular matrix with inferred num_cols and padding_value = 9.
16562// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
16563//   ==> [[9, 9],  # Output shape: (3, 2)
16564//        [1, 9],
16565//        [9, 2]]
16566//
16567// ```
16568//
16569// Arguments:
16570//	diagonal: Rank `r`, where `r >= 1`
16571//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
16572// diagonal, and negative value means subdiagonals. `k` can be a single integer
16573// (for a single diagonal) or a pair of integers specifying the low and high ends
16574// of a matrix band. `k[0]` must not be larger than `k[1]`.
16575//	num_rows: The number of rows of the output matrix. If it is not provided, the op assumes
16576// the output matrix is a square matrix and infers the matrix size from k and the
16577// innermost dimension of `diagonal`.
16578//	num_cols: The number of columns of the output matrix. If it is not provided, the op
16579// assumes the output matrix is a square matrix and infers the matrix size from
16580// k and the innermost dimension of `diagonal`.
16581//	padding_value: The number to fill the area outside the specified diagonal band with.
16582// Default is 0.
16583//
16584// Returns Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.
16585func MatrixDiagV3(scope *Scope, diagonal tf.Output, k tf.Output, num_rows tf.Output, num_cols tf.Output, padding_value tf.Output, optional ...MatrixDiagV3Attr) (output tf.Output) {
16586	if scope.Err() != nil {
16587		return
16588	}
16589	attrs := map[string]interface{}{}
16590	for _, a := range optional {
16591		a(attrs)
16592	}
16593	opspec := tf.OpSpec{
16594		Type: "MatrixDiagV3",
16595		Input: []tf.Input{
16596			diagonal, k, num_rows, num_cols, padding_value,
16597		},
16598		Attrs: attrs,
16599	}
16600	op := scope.AddOperation(opspec)
16601	return op.Output(0)
16602}
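
// Editor's note: a minimal usage sketch (not machine generated) for the
// "main diagonal" case above. num_rows and num_cols are -1 so the op infers a
// square output, and padding_value is 0.
//
// ```
// s := NewScope()
// diag := Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}})
// k := Const(s, int32(0))
// inferDim := Const(s, int32(-1))
// pad := Const(s, int32(0))
// out := MatrixDiagV3(s, diag, k, inferDim, inferDim, pad,
// 	MatrixDiagV3Align("RIGHT_LEFT"))
// // Fetching `out` yields the two 4x4 diagonal matrices shown above.
// ```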
16603
16604// Greedily selects a subset of bounding boxes in descending order of score,
16605//
16606// pruning away boxes that have high overlaps
16607// with previously selected boxes.  Bounding boxes with score less than
// `score_threshold` are removed. N-by-n overlap values are supplied as a square matrix,
// which allows for defining a custom overlap criterion (e.g. intersection over union,
// intersection over area, etc.).
16611//
16612// The output of this operation is a set of integers indexing into the input
16613// collection of bounding boxes representing the selected boxes.  The bounding
16614// box coordinates corresponding to the selected indices can then be obtained
// using the `tf.gather` operation.  For example:
16616//
16617//   selected_indices = tf.image.non_max_suppression_with_overlaps(
16618//       overlaps, scores, max_output_size, overlap_threshold, score_threshold)
16619//   selected_boxes = tf.gather(boxes, selected_indices)
16620//
16621// Arguments:
16622//	overlaps: A 2-D float tensor of shape `[num_boxes, num_boxes]` representing
16623// the n-by-n box overlap values.
16624//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
16625// score corresponding to each box (each row of boxes).
16626//	max_output_size: A scalar integer tensor representing the maximum number of
16627// boxes to be selected by non max suppression.
16628//	overlap_threshold: A 0-D float tensor representing the threshold for deciding whether
// boxes overlap too much.
16630//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
16631// boxes based on score.
16632//
16633// Returns A 1-D integer tensor of shape `[M]` representing the selected
16634// indices from the boxes tensor, where `M <= max_output_size`.
16635func NonMaxSuppressionWithOverlaps(scope *Scope, overlaps tf.Output, scores tf.Output, max_output_size tf.Output, overlap_threshold tf.Output, score_threshold tf.Output) (selected_indices tf.Output) {
16636	if scope.Err() != nil {
16637		return
16638	}
16639	opspec := tf.OpSpec{
16640		Type: "NonMaxSuppressionWithOverlaps",
16641		Input: []tf.Input{
16642			overlaps, scores, max_output_size, overlap_threshold, score_threshold,
16643		},
16644	}
16645	op := scope.AddOperation(opspec)
16646	return op.Output(0)
16647}
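
// Editor's note: a usage sketch (not machine generated) of the gather pattern
// described above, with tiny constant inputs; Gather is the generated wrapper
// for tf.gather in this package.
//
// ```
// s := NewScope()
// overlaps := Const(s, [][]float32{{1.0, 0.8}, {0.8, 1.0}})
// scores := Const(s, []float32{0.9, 0.7})
// boxes := Const(s, [][]float32{{0, 0, 1, 1}, {0, 0, 0.9, 0.9}})
// selected := NonMaxSuppressionWithOverlaps(s, overlaps, scores,
// 	Const(s, int32(10)), Const(s, float32(0.5)), Const(s, float32(0.1)))
// selectedBoxes := Gather(s, boxes, selected)
// // With these inputs box 1 overlaps box 0 too much (0.8 > 0.5), so only
// // box 0 is selected.
// ```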
16648
16649// Outputs all keys and values in the table.
16650//
16651// Arguments:
16652//	table_handle: Handle to the table.
16653//
//	Tkeys: Type of the table keys.
//	Tvalues: Type of the table values.
16656// Returns:
16657//	keys: Vector of all keys present in the table.
16658//	values: Tensor of all values in the table. Indexed in parallel with `keys`.
16659func LookupTableExportV2(scope *Scope, table_handle tf.Output, Tkeys tf.DataType, Tvalues tf.DataType) (keys tf.Output, values tf.Output) {
16660	if scope.Err() != nil {
16661		return
16662	}
16663	attrs := map[string]interface{}{"Tkeys": Tkeys, "Tvalues": Tvalues}
16664	opspec := tf.OpSpec{
16665		Type: "LookupTableExportV2",
16666		Input: []tf.Input{
16667			table_handle,
16668		},
16669		Attrs: attrs,
16670	}
16671	op := scope.AddOperation(opspec)
16672	return op.Output(0), op.Output(1)
16673}
16674
16675// Assigns a new value to a variable.
16676//
16677// Any ReadVariableOp with a control dependency on this op is guaranteed to return
16678// this value or a subsequent newer value of the variable.
16679//
16680// Arguments:
16681//	resource: handle to the resource in which to store the variable.
//	value: the value to assign to the variable.
16683//
16684// Returns the created operation.
16685func AssignVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
16686	if scope.Err() != nil {
16687		return
16688	}
16689	opspec := tf.OpSpec{
16690		Type: "AssignVariableOp",
16691		Input: []tf.Input{
16692			resource, value,
16693		},
16694	}
16695	return scope.AddOperation(opspec)
16696}
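
// Editor's note: a minimal usage sketch (not machine generated) pairing this
// op with VarHandleOp and ReadVariableOp, the generated wrappers for resource
// variables in this package.
//
// ```
// s := NewScope()
// v := VarHandleOp(s, tf.Float, tf.MakeShape(2))
// assign := AssignVariableOp(s, v, Const(s, []float32{1, 2}))
// read := ReadVariableOp(s, v, tf.Float)
// // Run `assign` as a target before fetching `read`; per the guarantee
// // above, a read ordered after the assign sees the new value.
// ```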
16697
16698// PaddingFIFOQueueV2Attr is an optional argument to PaddingFIFOQueueV2.
16699type PaddingFIFOQueueV2Attr func(optionalAttr)
16700
16701// PaddingFIFOQueueV2Shapes sets the optional shapes attribute to value.
16702//
16703// value: The shape of each component in a value. The length of this attr must
16704// be either 0 or the same as the length of component_types.
16705// Shapes of fixed rank but variable size are allowed by setting
16706// any shape dimension to -1.  In this case, the inputs' shape may vary along
16707// the given dimension, and DequeueMany will pad the given dimension with
16708// zeros up to the maximum shape of all elements in the given batch.
16709// If the length of this attr is 0, different queue elements may have
16710// different ranks and shapes, but only one element may be dequeued at a time.
16711// If not specified, defaults to {}
16712//
16713// REQUIRES: len(value) >= 0
16714func PaddingFIFOQueueV2Shapes(value []tf.Shape) PaddingFIFOQueueV2Attr {
16715	return func(m optionalAttr) {
16716		m["shapes"] = value
16717	}
16718}
16719
16720// PaddingFIFOQueueV2Capacity sets the optional capacity attribute to value.
16721//
16722// value: The upper bound on the number of elements in this queue.
16723// Negative numbers mean no limit.
16724// If not specified, defaults to -1
16725func PaddingFIFOQueueV2Capacity(value int64) PaddingFIFOQueueV2Attr {
16726	return func(m optionalAttr) {
16727		m["capacity"] = value
16728	}
16729}
16730
16731// PaddingFIFOQueueV2Container sets the optional container attribute to value.
16732//
16733// value: If non-empty, this queue is placed in the given container.
16734// Otherwise, a default container is used.
16735// If not specified, defaults to ""
16736func PaddingFIFOQueueV2Container(value string) PaddingFIFOQueueV2Attr {
16737	return func(m optionalAttr) {
16738		m["container"] = value
16739	}
16740}
16741
16742// PaddingFIFOQueueV2SharedName sets the optional shared_name attribute to value.
16743//
16744// value: If non-empty, this queue will be shared under the given name
16745// across multiple sessions.
16746// If not specified, defaults to ""
16747func PaddingFIFOQueueV2SharedName(value string) PaddingFIFOQueueV2Attr {
16748	return func(m optionalAttr) {
16749		m["shared_name"] = value
16750	}
16751}
16752
16753// A queue that produces elements in first-in first-out order.
16754//
// Variable-size shapes are allowed by setting the corresponding shape dimensions
// to -1 in the shapes attr.  In this case DequeueMany will pad up to the maximum
// size of any given element in the minibatch.  See the shapes attribute above for details.
16758//
16759// Arguments:
16760//	component_types: The type of each component in a value.
16761//
16762// Returns The handle to the queue.
16763func PaddingFIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...PaddingFIFOQueueV2Attr) (handle tf.Output) {
16764	if scope.Err() != nil {
16765		return
16766	}
16767	attrs := map[string]interface{}{"component_types": component_types}
16768	for _, a := range optional {
16769		a(attrs)
16770	}
16771	opspec := tf.OpSpec{
16772		Type: "PaddingFIFOQueueV2",
16773
16774		Attrs: attrs,
16775	}
16776	op := scope.AddOperation(opspec)
16777	return op.Output(0)
16778}
16779
16780// Returns whether the given key exists in the map.
16781//
16782// input_handle: the input map
16783// key: the key to check
16784// has_key: whether the key is already in the map or not
16785func TensorMapHasKey(scope *Scope, input_handle tf.Output, key tf.Output) (has_key tf.Output) {
16786	if scope.Err() != nil {
16787		return
16788	}
16789	opspec := tf.OpSpec{
16790		Type: "TensorMapHasKey",
16791		Input: []tf.Input{
16792			input_handle, key,
16793		},
16794	}
16795	op := scope.AddOperation(opspec)
16796	return op.Output(0)
16797}
16798
16799// Returns a tensor map with item from given key erased.
16800//
16801// input_handle: the original map
16802// output_handle: the map with value from given key removed
16803// key: the key of the value to be erased
16804func TensorMapErase(scope *Scope, input_handle tf.Output, key tf.Output, value_dtype tf.DataType) (output_handle tf.Output) {
16805	if scope.Err() != nil {
16806		return
16807	}
16808	attrs := map[string]interface{}{"value_dtype": value_dtype}
16809	opspec := tf.OpSpec{
16810		Type: "TensorMapErase",
16811		Input: []tf.Input{
16812			input_handle, key,
16813		},
16814		Attrs: attrs,
16815	}
16816	op := scope.AddOperation(opspec)
16817	return op.Output(0)
16818}
16819
16820// MaxPoolGradGradAttr is an optional argument to MaxPoolGradGrad.
16821type MaxPoolGradGradAttr func(optionalAttr)
16822
16823// MaxPoolGradGradDataFormat sets the optional data_format attribute to value.
16824//
16825// value: Specify the data format of the input and output data. With the
16826// default format "NHWC", the data is stored in the order of:
16827//     [batch, in_height, in_width, in_channels].
16828// Alternatively, the format could be "NCHW", the data storage order of:
16829//     [batch, in_channels, in_height, in_width].
16830// If not specified, defaults to "NHWC"
16831func MaxPoolGradGradDataFormat(value string) MaxPoolGradGradAttr {
16832	return func(m optionalAttr) {
16833		m["data_format"] = value
16834	}
16835}
16836
16837// Computes second-order gradients of the maxpooling function.
16838//
16839// Arguments:
16840//	orig_input: The original input tensor.
16841//	orig_output: The original output tensor.
16842//	grad: 4-D.  Gradients of gradients w.r.t. the input of `max_pool`.
16843//	ksize: The size of the window for each dimension of the input tensor.
16844//	strides: The stride of the sliding window for each dimension of the
16845// input tensor.
16846//	padding: The type of padding algorithm to use.
16847//
16848// Returns Gradients of gradients w.r.t. the input to `max_pool`.
16849func MaxPoolGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradGradAttr) (output tf.Output) {
16850	if scope.Err() != nil {
16851		return
16852	}
16853	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
16854	for _, a := range optional {
16855		a(attrs)
16856	}
16857	opspec := tf.OpSpec{
16858		Type: "MaxPoolGradGrad",
16859		Input: []tf.Input{
16860			orig_input, orig_output, grad,
16861		},
16862		Attrs: attrs,
16863	}
16864	op := scope.AddOperation(opspec)
16865	return op.Output(0)
16866}
16867
16868// Returns the number of tensors in the input tensor map.
16869//
16870// input_handle: the input map
16871// size: the number of tensors in the map
16872func TensorMapSize(scope *Scope, input_handle tf.Output) (size tf.Output) {
16873	if scope.Err() != nil {
16874		return
16875	}
16876	opspec := tf.OpSpec{
16877		Type: "TensorMapSize",
16878		Input: []tf.Input{
16879			input_handle,
16880		},
16881	}
16882	op := scope.AddOperation(opspec)
16883	return op.Output(0)
16884}
16885
16886// Creates a TensorList by indexing into a Tensor.
16887//
16888// Each member of the TensorList corresponds to one row of the input tensor,
16889// specified by the given index (see `tf.gather`).
16890//
16891// tensor: The input tensor.
16892// indices: The indices used to index into the list.
// element_shape: The shape of the elements in the list (may be less specific than
//   the shape of the tensor).
16895// output_handle: The TensorList.
16896func TensorListScatter(scope *Scope, tensor tf.Output, indices tf.Output, element_shape tf.Output) (output_handle tf.Output) {
16897	if scope.Err() != nil {
16898		return
16899	}
16900	opspec := tf.OpSpec{
16901		Type: "TensorListScatter",
16902		Input: []tf.Input{
16903			tensor, indices, element_shape,
16904		},
16905	}
16906	op := scope.AddOperation(opspec)
16907	return op.Output(0)
16908}
16909
16910// Sets the index-th position of the list to contain the given tensor.
16911//
16912// input_handle: the list
16913// index: the position in the list to which the tensor will be assigned
16914// item: the element to be assigned to that position
16915// output_handle: the new list, with the element in the proper position
16916//
16917func TensorListSetItem(scope *Scope, input_handle tf.Output, index tf.Output, item tf.Output) (output_handle tf.Output) {
16918	if scope.Err() != nil {
16919		return
16920	}
16921	opspec := tf.OpSpec{
16922		Type: "TensorListSetItem",
16923		Input: []tf.Input{
16924			input_handle, index, item,
16925		},
16926	}
16927	op := scope.AddOperation(opspec)
16928	return op.Output(0)
16929}
16930
16931// Clips tensor values to a specified min and max.
16932//
16933// Given a tensor `t`, this operation returns a tensor of the same type and
16934// shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
16935// Any values less than `clip_value_min` are set to `clip_value_min`. Any values
16936// greater than `clip_value_max` are set to `clip_value_max`.
16937//
16938// Arguments:
16939//	t: A `Tensor`.
16940//	clip_value_min: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
16941// as `t`. The minimum value to clip by.
16942//	clip_value_max: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
16943// as `t`. The maximum value to clip by.
16944//
16945// Returns A clipped `Tensor` with the same shape as input 't'.
16946func ClipByValue(scope *Scope, t tf.Output, clip_value_min tf.Output, clip_value_max tf.Output) (output tf.Output) {
16947	if scope.Err() != nil {
16948		return
16949	}
16950	opspec := tf.OpSpec{
16951		Type: "ClipByValue",
16952		Input: []tf.Input{
16953			t, clip_value_min, clip_value_max,
16954		},
16955	}
16956	op := scope.AddOperation(opspec)
16957	return op.Output(0)
16958}
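
// Editor's note: a minimal usage sketch (not machine generated) clipping a
// vector to the range [0, 5] with scalar bounds.
//
// ```
// s := NewScope()
// t := Const(s, []float32{-3, 0.5, 7})
// clipped := ClipByValue(s, t, Const(s, float32(0)), Const(s, float32(5)))
// // Fetching `clipped` yields [0 0.5 5].
// ```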
16959
// Creates a list of the given size with empty elements.
16961//
16962// element_shape: the shape of the future elements of the list
16963// num_elements: the number of elements to reserve
16964// handle: the output list
16965// element_dtype: the desired type of elements in the list.
16966func TensorListReserve(scope *Scope, element_shape tf.Output, num_elements tf.Output, element_dtype tf.DataType) (handle tf.Output) {
16967	if scope.Err() != nil {
16968		return
16969	}
16970	attrs := map[string]interface{}{"element_dtype": element_dtype}
16971	opspec := tf.OpSpec{
16972		Type: "TensorListReserve",
16973		Input: []tf.Input{
16974			element_shape, num_elements,
16975		},
16976		Attrs: attrs,
16977	}
16978	op := scope.AddOperation(opspec)
16979	return op.Output(0)
16980}
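
// Editor's note: a minimal usage sketch (not machine generated) that reserves
// a list of two length-2 float vectors, fills both slots with
// TensorListSetItem, and stacks the result back into a 2x2 tensor.
//
// ```
// s := NewScope()
// elemShape := Const(s, []int32{2})
// list := TensorListReserve(s, elemShape, Const(s, int32(2)), tf.Float)
// list = TensorListSetItem(s, list, Const(s, int32(0)), Const(s, []float32{1, 2}))
// list = TensorListSetItem(s, list, Const(s, int32(1)), Const(s, []float32{3, 4}))
// stacked := TensorListStack(s, list, elemShape, tf.Float)
// ```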
16981
16982// ExperimentalThreadPoolHandleAttr is an optional argument to ExperimentalThreadPoolHandle.
16983type ExperimentalThreadPoolHandleAttr func(optionalAttr)
16984
16985// ExperimentalThreadPoolHandleMaxIntraOpParallelism sets the optional max_intra_op_parallelism attribute to value.
16986//
16987// value: The maximum degree of parallelism to use within operations that execute on this
16988// threadpool.
16989// If not specified, defaults to 1
16990func ExperimentalThreadPoolHandleMaxIntraOpParallelism(value int64) ExperimentalThreadPoolHandleAttr {
16991	return func(m optionalAttr) {
16992		m["max_intra_op_parallelism"] = value
16993	}
16994}
16995
16996// ExperimentalThreadPoolHandleContainer sets the optional container attribute to value.
16997// If not specified, defaults to ""
16998func ExperimentalThreadPoolHandleContainer(value string) ExperimentalThreadPoolHandleAttr {
16999	return func(m optionalAttr) {
17000		m["container"] = value
17001	}
17002}
17003
17004// ExperimentalThreadPoolHandleSharedName sets the optional shared_name attribute to value.
17005// If not specified, defaults to ""
17006func ExperimentalThreadPoolHandleSharedName(value string) ExperimentalThreadPoolHandleAttr {
17007	return func(m optionalAttr) {
17008		m["shared_name"] = value
17009	}
17010}
17011
17012// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
17013//
17014// Arguments:
17015//	num_threads: The number of threads in the thread pool.
17016//	display_name: A human-readable name for the threads that may be visible in some
17017// visualizations.
17019//
17020// Returns A resource that can be consumed by one or more ExperimentalThreadPoolDataset
17021// ops.
17022func ExperimentalThreadPoolHandle(scope *Scope, num_threads int64, display_name string, optional ...ExperimentalThreadPoolHandleAttr) (handle tf.Output) {
17023	if scope.Err() != nil {
17024		return
17025	}
17026	attrs := map[string]interface{}{"num_threads": num_threads, "display_name": display_name}
17027	for _, a := range optional {
17028		a(attrs)
17029	}
17030	opspec := tf.OpSpec{
17031		Type: "ExperimentalThreadPoolHandle",
17032
17033		Attrs: attrs,
17034	}
17035	op := scope.AddOperation(opspec)
17036	return op.Output(0)
17037}
17038
17039// Creates a TensorList which, when stacked, has the value of `tensor`.
17040//
17041// Each tensor in the result list corresponds to one row of the input tensor.
17042//
17043// tensor: The input tensor.
17044// output_handle: The list.
17045func TensorListFromTensor(scope *Scope, tensor tf.Output, element_shape tf.Output) (output_handle tf.Output) {
17046	if scope.Err() != nil {
17047		return
17048	}
17049	opspec := tf.OpSpec{
17050		Type: "TensorListFromTensor",
17051		Input: []tf.Input{
17052			tensor, element_shape,
17053		},
17054	}
17055	op := scope.AddOperation(opspec)
17056	return op.Output(0)
17057}
17058
17059// Concats all tensors in the list along the 0th dimension.
17060//
17061// Requires that all tensors have the same shape except the first dimension.
17062//
17063// input_handle: The input list.
17064// element_shape: The shape of the uninitialized elements in the list. If the first
17065//   dimension is not -1, it is assumed that all list elements have the same
17066//   leading dim.
17067// leading_dims: The list of leading dims of uninitialized list elements. Used if
17068//   the leading dim of input_handle.element_shape or the element_shape input arg
17069//   is not already set.
// tensor: The concatenated result.
17071// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
17072//
17073func TensorListConcatV2(scope *Scope, input_handle tf.Output, element_shape tf.Output, leading_dims tf.Output, element_dtype tf.DataType) (tensor tf.Output, lengths tf.Output) {
17074	if scope.Err() != nil {
17075		return
17076	}
17077	attrs := map[string]interface{}{"element_dtype": element_dtype}
17078	opspec := tf.OpSpec{
17079		Type: "TensorListConcatV2",
17080		Input: []tf.Input{
17081			input_handle, element_shape, leading_dims,
17082		},
17083		Attrs: attrs,
17084	}
17085	op := scope.AddOperation(opspec)
17086	return op.Output(0), op.Output(1)
17087}
17088
17089// TensorListStackAttr is an optional argument to TensorListStack.
17090type TensorListStackAttr func(optionalAttr)
17091
17092// TensorListStackNumElements sets the optional num_elements attribute to value.
17093// If not specified, defaults to -1
17094func TensorListStackNumElements(value int64) TensorListStackAttr {
17095	return func(m optionalAttr) {
17096		m["num_elements"] = value
17097	}
17098}
17099
17100// Stacks all tensors in the list.
17101//
17102// Requires that all tensors have the same shape.
17103//
17104// input_handle: the input list
17105// tensor: the gathered result
17106// num_elements: optional. If not -1, the number of elements in the list.
17107//
17108func TensorListStack(scope *Scope, input_handle tf.Output, element_shape tf.Output, element_dtype tf.DataType, optional ...TensorListStackAttr) (tensor tf.Output) {
17109	if scope.Err() != nil {
17110		return
17111	}
17112	attrs := map[string]interface{}{"element_dtype": element_dtype}
17113	for _, a := range optional {
17114		a(attrs)
17115	}
17116	opspec := tf.OpSpec{
17117		Type: "TensorListStack",
17118		Input: []tf.Input{
17119			input_handle, element_shape,
17120		},
17121		Attrs: attrs,
17122	}
17123	op := scope.AddOperation(opspec)
17124	return op.Output(0)
17125}
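
// Editor's note: a minimal round-trip sketch (not machine generated):
// TensorListFromTensor splits a tensor into a list and TensorListStack
// reassembles it, so the fetched result equals the input.
//
// ```
// s := NewScope()
// x := Const(s, [][]float32{{1, 2}, {3, 4}})
// elemShape := Const(s, []int32{2})
// list := TensorListFromTensor(s, x, elemShape)
// y := TensorListStack(s, list, elemShape, tf.Float,
// 	TensorListStackNumElements(2))
// ```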
17126
17127// Creates a dataset that contains `count` elements from the `input_dataset`.
17128//
17129// Arguments:
17130//
17131//	count: A scalar representing the number of elements from the `input_dataset`
17132// that should be taken. A value of `-1` indicates that all of `input_dataset`
17133// is taken.
17134//
17135//
17136func TakeDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
17137	if scope.Err() != nil {
17138		return
17139	}
17140	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
17141	opspec := tf.OpSpec{
17142		Type: "TakeDataset",
17143		Input: []tf.Input{
17144			input_dataset, count,
17145		},
17146		Attrs: attrs,
17147	}
17148	op := scope.AddOperation(opspec)
17149	return op.Output(0)
17150}
17151
17152// Scatter `updates` into an existing tensor according to `indices`.
17153//
17154// This operation creates a new tensor by applying sparse `updates` to the passed
17155// in `tensor`.
17156// This operation is very similar to `tf.scatter_nd`, except that the updates are
17157// scattered onto an existing tensor (as opposed to a zero-tensor). If the memory
17158// for the existing tensor cannot be re-used, a copy is made and updated.
17159//
17160// If `indices` contains duplicates, then we pick the last update for the index.
17161//
17162// If an out of bound index is found on CPU, an error is returned.
17163//
17164// **WARNING**: There are some GPU specific semantics for this operation.
17165// - If an out of bound index is found, the index is ignored.
17166// - The order in which updates are applied is nondeterministic, so the output
17167// will be nondeterministic if `indices` contains duplicates.
17168//
17169// `indices` is an integer tensor containing indices into a new tensor of shape
17170// `shape`.
17171//
17172// * `indices` must have at least 2 axes: `(num_updates, index_depth)`.
// * The last axis of `indices` is how deep to index into `tensor`, so this index
//   depth must be at most the rank of `tensor`: `indices.shape[-1] <= tensor.ndim`.
//
// If `indices.shape[-1] == tensor.rank`, this Op indexes and updates scalar elements.
// If `indices.shape[-1] < tensor.rank`, it indexes and updates slices of the input
// `tensor`.
17179//
17180// Each `update` has a rank of `tensor.rank - indices.shape[-1]`.
17181// The overall shape of `updates` is:
17182//
17183// ```
17184// indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
17185// ```
17186//
17187// For usage examples see the python [tf.tensor_scatter_nd_update](
17188// https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) function
17189//
17190//
17191// Arguments:
17192//	tensor: Tensor to copy/update.
17193//	indices: Index tensor.
17194//	updates: Updates to scatter into output.
17195//
17196// Returns A new tensor with the given shape and updates applied according
17197// to the indices.
17198func TensorScatterUpdate(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
17199	if scope.Err() != nil {
17200		return
17201	}
17202	opspec := tf.OpSpec{
17203		Type: "TensorScatterUpdate",
17204		Input: []tf.Input{
17205			tensor, indices, updates,
17206		},
17207	}
17208	op := scope.AddOperation(opspec)
17209	return op.Output(0)
17210}
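
// Editor's note: a minimal usage sketch (not machine generated) of the rank-1
// case: the index depth equals the tensor rank, so scalar elements are
// replaced.
//
// ```
// s := NewScope()
// t := Const(s, []int32{1, 2, 3, 4, 5, 6, 7, 8})
// idx := Const(s, [][]int32{{4}, {3}, {1}, {7}})
// upd := Const(s, []int32{9, 10, 11, 12})
// out := TensorScatterUpdate(s, t, idx, upd)
// // Fetching `out` yields [1 11 3 10 9 6 7 12].
// ```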
17211
17212// Returns the last element of the input list as well as a list with all but that element.
17213//
17214// Fails if the list is empty.
17215//
17216// input_handle: the input list
17217// tensor: the withdrawn last element of the list
17218// element_dtype: the type of elements in the list
17219// element_shape: the shape of the output tensor
17220func TensorListPopBack(scope *Scope, input_handle tf.Output, element_shape tf.Output, element_dtype tf.DataType) (output_handle tf.Output, tensor tf.Output) {
17221	if scope.Err() != nil {
17222		return
17223	}
17224	attrs := map[string]interface{}{"element_dtype": element_dtype}
17225	opspec := tf.OpSpec{
17226		Type: "TensorListPopBack",
17227		Input: []tf.Input{
17228			input_handle, element_shape,
17229		},
17230		Attrs: attrs,
17231	}
17232	op := scope.AddOperation(opspec)
17233	return op.Output(0), op.Output(1)
17234}
17235
17236// Returns the number of tensors in the input tensor list.
17237//
17238// input_handle: the input list
17239// length: the number of tensors in the list
17240func TensorListLength(scope *Scope, input_handle tf.Output) (length tf.Output) {
17241	if scope.Err() != nil {
17242		return
17243	}
17244	opspec := tf.OpSpec{
17245		Type: "TensorListLength",
17246		Input: []tf.Input{
17247			input_handle,
17248		},
17249	}
17250	op := scope.AddOperation(opspec)
17251	return op.Output(0)
17252}
17253
17254// Computes the maximum along segments of a tensor.
17255//
17256// Read
17257// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
17258// for an explanation of segments.
17259//
17260// Computes a tensor such that
17261// \\(output_i = \max_j(data_j)\\) where `max` is over `j` such
17262// that `segment_ids[j] == i`.
17263//
17264// If the max is empty for a given segment ID `i`, `output[i] = 0`.
17265//
17266// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
17267// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
17268// </div>
17269//
17270// For example:
17271//
17272// ```
17273// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
17274// tf.segment_max(c, tf.constant([0, 0, 1]))
17275// # ==> [[4, 3, 3, 4],
17276// #      [5, 6, 7, 8]]
17277// ```
17278//
17279//
17280// Arguments:
17281//
17282//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
17283// first dimension.  Values should be sorted and can be repeated.
17284//
17285// Returns Has same shape as data, except for dimension 0 which
17286// has size `k`, the number of segments.
17287func SegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
17288	if scope.Err() != nil {
17289		return
17290	}
17291	opspec := tf.OpSpec{
17292		Type: "SegmentMax",
17293		Input: []tf.Input{
17294			data, segment_ids,
17295		},
17296	}
17297	op := scope.AddOperation(opspec)
17298	return op.Output(0)
17299}
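
// Editor's note: a minimal usage sketch (not machine generated) of the same
// computation as the snippet above.
//
// ```
// s := NewScope()
// c := Const(s, [][]int32{{1, 2, 3, 4}, {4, 3, 2, 1}, {5, 6, 7, 8}})
// ids := Const(s, []int32{0, 0, 1})
// out := SegmentMax(s, c, ids)
// // Fetching `out` yields [[4 3 3 4] [5 6 7 8]].
// ```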
17300
17301// CastAttr is an optional argument to Cast.
17302type CastAttr func(optionalAttr)
17303
17304// CastTruncate sets the optional Truncate attribute to value.
17305// If not specified, defaults to false
17306func CastTruncate(value bool) CastAttr {
17307	return func(m optionalAttr) {
17308		m["Truncate"] = value
17309	}
17310}
17311
17312// Cast x of type SrcT to y of DstT.
17313func Cast(scope *Scope, x tf.Output, DstT tf.DataType, optional ...CastAttr) (y tf.Output) {
17314	if scope.Err() != nil {
17315		return
17316	}
17317	attrs := map[string]interface{}{"DstT": DstT}
17318	for _, a := range optional {
17319		a(attrs)
17320	}
17321	opspec := tf.OpSpec{
17322		Type: "Cast",
17323		Input: []tf.Input{
17324			x,
17325		},
17326		Attrs: attrs,
17327	}
17328	op := scope.AddOperation(opspec)
17329	return op.Output(0)
17330}
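
// Editor's note: a minimal usage sketch (not machine generated) casting int32
// values to float32; CastTruncate is left at its default of false.
//
// ```
// s := NewScope()
// x := Const(s, []int32{1, 2, 3})
// y := Cast(s, x, tf.Float)
// ```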
17331
17332// MeanAttr is an optional argument to Mean.
17333type MeanAttr func(optionalAttr)
17334
17335// MeanKeepDims sets the optional keep_dims attribute to value.
17336//
17337// value: If true, retain reduced dimensions with length 1.
17338// If not specified, defaults to false
17339func MeanKeepDims(value bool) MeanAttr {
17340	return func(m optionalAttr) {
17341		m["keep_dims"] = value
17342	}
17343}
17344
17345// Computes the mean of elements across dimensions of a tensor.
17346//
17347// Reduces `input` along the dimensions given in `axis`. Unless
17348// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
17349// `axis`. If `keep_dims` is true, the reduced dimensions are
17350// retained with length 1.
17351//
17352// Arguments:
17353//	input: The tensor to reduce.
17354//	axis: The dimensions to reduce. Must be in the range
17355// `[-rank(input), rank(input))`.
17356//
17357// Returns The reduced tensor.
17358func Mean(scope *Scope, input tf.Output, axis tf.Output, optional ...MeanAttr) (output tf.Output) {
17359	if scope.Err() != nil {
17360		return
17361	}
17362	attrs := map[string]interface{}{}
17363	for _, a := range optional {
17364		a(attrs)
17365	}
17366	opspec := tf.OpSpec{
17367		Type: "Mean",
17368		Input: []tf.Input{
17369			input, axis,
17370		},
17371		Attrs: attrs,
17372	}
17373	op := scope.AddOperation(opspec)
17374	return op.Output(0)
17375}
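
// Editor's note: a minimal usage sketch (not machine generated) of a column
// mean that keeps the reduced dimension, showing how optional attributes are
// passed.
//
// ```
// s := NewScope()
// x := Const(s, [][]float32{{1, 2}, {3, 4}})
// m := Mean(s, x, Const(s, []int32{0}), MeanKeepDims(true))
// // Fetching `m` yields [[2 3]], with shape (1, 2).
// ```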
17376
17377// Wraps the XLA ConvGeneralDilated operator, documented at
// https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution.
17381//
17382// Arguments:
17383//	lhs: the input tensor
17384//	rhs: the kernel tensor
17385//	window_strides: the inter-window strides
//	padding: the padding to apply at the start and end of each input dimension
17387//	lhs_dilation: dilation to apply between input elements
17388//	rhs_dilation: dilation to apply between kernel elements
17389//	feature_group_count: number of feature groups for grouped convolution.
17390//	dimension_numbers: a serialized xla::ConvolutionDimensionNumbers proto.
17391//	precision_config: a serialized xla::PrecisionConfig proto.
17392//	preferred_element_type: The type of the tensor.
17393func XlaConvV2(scope *Scope, lhs tf.Output, rhs tf.Output, window_strides tf.Output, padding tf.Output, lhs_dilation tf.Output, rhs_dilation tf.Output, feature_group_count tf.Output, dimension_numbers string, precision_config string, preferred_element_type tf.DataType) (output tf.Output) {
17394	if scope.Err() != nil {
17395		return
17396	}
17397	attrs := map[string]interface{}{"dimension_numbers": dimension_numbers, "precision_config": precision_config, "preferred_element_type": preferred_element_type}
17398	opspec := tf.OpSpec{
17399		Type: "XlaConvV2",
17400		Input: []tf.Input{
17401			lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count,
17402		},
17403		Attrs: attrs,
17404	}
17405	op := scope.AddOperation(opspec)
17406	return op.Output(0)
17407}
17408
17409// Op that reshards on-device TPU variables to specified state.
17410//
// Internal use only.
17412//
17413// The sharding state is represented as the key of the compilation that generated
17414// the sharding/unsharding programs along with the main program. new_format_key
17415// specifies the desired state, and format_state_var is the current state of the
17416// variables.
17417//
17418// Returns the created operation.
17419func TPUReshardVariables(scope *Scope, vars []tf.Output, new_format_key tf.Output, format_state_var tf.Output) (o *tf.Operation) {
17420	if scope.Err() != nil {
17421		return
17422	}
17423	opspec := tf.OpSpec{
17424		Type: "TPUReshardVariables",
17425		Input: []tf.Input{
17426			tf.OutputList(vars), new_format_key, format_state_var,
17427		},
17428	}
17429	return scope.AddOperation(opspec)
17430}
17431
17432// Returns the cardinality of `input_dataset`.
17433//
17436// Arguments:
17437//	input_dataset: A variant tensor representing the dataset to return cardinality for.
17438//
17439// Returns The cardinality of `input_dataset`. Named constants are used to represent
17440// infinite and unknown cardinality.
17441func DatasetCardinality(scope *Scope, input_dataset tf.Output) (cardinality tf.Output) {
17442	if scope.Err() != nil {
17443		return
17444	}
17445	opspec := tf.OpSpec{
17446		Type: "DatasetCardinality",
17447		Input: []tf.Input{
17448			input_dataset,
17449		},
17450	}
17451	op := scope.AddOperation(opspec)
17452	return op.Output(0)
17453}
17454
17455// Outputs random integers from a uniform distribution.
17456//
17457// The generated values are uniform integers in the range `[minval, maxval)`.
17458// The lower bound `minval` is included in the range, while the upper bound
17459// `maxval` is excluded.
17460//
17461// The random integers are slightly biased unless `maxval - minval` is an exact
17462// power of two.  The bias is small for values of `maxval - minval` significantly
17463// smaller than the range of the output (either `2^32` or `2^64`).
17464//
17465// Arguments:
17466//	resource: The handle of the resource variable that stores the state of the RNG.
17467//	algorithm: The RNG algorithm.
17468//	shape: The shape of the output tensor.
17469//	minval: Minimum value (inclusive, scalar).
17470//	maxval: Maximum value (exclusive, scalar).
17471//
17472// Returns Random values with specified shape.
17473func StatefulUniformInt(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, minval tf.Output, maxval tf.Output) (output tf.Output) {
17474	if scope.Err() != nil {
17475		return
17476	}
17477	opspec := tf.OpSpec{
17478		Type: "StatefulUniformInt",
17479		Input: []tf.Input{
17480			resource, algorithm, shape, minval, maxval,
17481		},
17482	}
17483	op := scope.AddOperation(opspec)
17484	return op.Output(0)
17485}
17486
17487// DebugNumericSummaryAttr is an optional argument to DebugNumericSummary.
17488type DebugNumericSummaryAttr func(optionalAttr)
17489
17490// DebugNumericSummaryDeviceName sets the optional device_name attribute to value.
17491// If not specified, defaults to ""
17492func DebugNumericSummaryDeviceName(value string) DebugNumericSummaryAttr {
17493	return func(m optionalAttr) {
17494		m["device_name"] = value
17495	}
17496}
17497
17498// DebugNumericSummaryTensorName sets the optional tensor_name attribute to value.
17499//
17500// value: Name of the input tensor.
17501// If not specified, defaults to ""
17502func DebugNumericSummaryTensorName(value string) DebugNumericSummaryAttr {
17503	return func(m optionalAttr) {
17504		m["tensor_name"] = value
17505	}
17506}
17507
17508// DebugNumericSummaryDebugUrls sets the optional debug_urls attribute to value.
17509//
17510// value: List of URLs to debug targets, e.g.,
//   file:///foo/tfdbg_dump, grpc://localhost:11011.
17512// If not specified, defaults to {}
17513func DebugNumericSummaryDebugUrls(value []string) DebugNumericSummaryAttr {
17514	return func(m optionalAttr) {
17515		m["debug_urls"] = value
17516	}
17517}
17518
17519// DebugNumericSummaryLowerBound sets the optional lower_bound attribute to value.
17520//
// value: (float) Values <= this bound are included in the
//   generalized -inf count. Default: -inf.
17523// If not specified, defaults to -inf
17524func DebugNumericSummaryLowerBound(value float32) DebugNumericSummaryAttr {
17525	return func(m optionalAttr) {
17526		m["lower_bound"] = value
17527	}
17528}
17529
17530// DebugNumericSummaryUpperBound sets the optional upper_bound attribute to value.
17531//
// value: (float) Values >= this bound are included in the
//   generalized +inf count. Default: +inf.
17534// If not specified, defaults to inf
17535func DebugNumericSummaryUpperBound(value float32) DebugNumericSummaryAttr {
17536	return func(m optionalAttr) {
17537		m["upper_bound"] = value
17538	}
17539}
17540
17541// DebugNumericSummaryMuteIfHealthy sets the optional mute_if_healthy attribute to value.
17542//
17543// value: (bool) Do not send data to the debug URLs unless at least one
17544//   of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and
17545//   inf counts) is non-zero.
17546// If not specified, defaults to false
17547func DebugNumericSummaryMuteIfHealthy(value bool) DebugNumericSummaryAttr {
17548	return func(m optionalAttr) {
17549		m["mute_if_healthy"] = value
17550	}
17551}
17552
17553// DebugNumericSummaryGatedGrpc sets the optional gated_grpc attribute to value.
17554//
17555// value: Whether this op will be gated. If any of the debug_urls of this
17556//   debug node is of the grpc:// scheme, when the value of this attribute is set
17557//   to True, the data will not actually be sent via the grpc stream unless this
17558//   debug op has been enabled at the debug_url. If all of the debug_urls of this
17559//   debug node are of the grpc:// scheme and the debug op is enabled at none of
17560//   them, the output will be an empty Tensor.
17561// If not specified, defaults to false
17562func DebugNumericSummaryGatedGrpc(value bool) DebugNumericSummaryAttr {
17563	return func(m optionalAttr) {
17564		m["gated_grpc"] = value
17565	}
17566}
17567
17568// Debug Numeric Summary Op.
17569//
// Provides a basic summary of numeric value types, range, and distribution.
17571//
17572// output: A double tensor of shape [14 + nDimensions], where nDimensions is the
17573//   number of dimensions of the tensor's shape. The elements of output are:
17574//   [0]: is initialized (1.0) or not (0.0).
17575//   [1]: total number of elements
17576//   [2]: NaN element count
17577//   [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by
17578//     default.
17579//   [4]: negative element count (excluding -inf), if lower_bound is the default
17580//     -inf. Otherwise, this is the count of elements > lower_bound and < 0.
17581//   [5]: zero element count
17582//   [6]: positive element count (excluding +inf), if upper_bound is the default
17583//     +inf. Otherwise, this is the count of elements < upper_bound and > 0.
17584//   [7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by
17585//     default.
17586// Output elements [1:8] are all zero, if the tensor is uninitialized.
17587//   [8]: minimum of all non-inf and non-NaN elements.
17588//        If uninitialized or no such element exists: +inf.
17589//   [9]: maximum of all non-inf and non-NaN elements.
17590//        If uninitialized or no such element exists: -inf.
17591//   [10]: mean of all non-inf and non-NaN elements.
17592//         If uninitialized or no such element exists: NaN.
17593//   [11]: variance of all non-inf and non-NaN elements.
17594//         If uninitialized or no such element exists: NaN.
17595//   [12]: Data type of the tensor encoded as an enum integer. See the DataType
17596//         proto for more details.
17597//   [13]: Number of dimensions of the tensor (ndims).
17598//   [14+]: Sizes of the dimensions.
17599//
17600//
17601// Arguments:
17602//	input: Input tensor, non-Reference type.
17603func DebugNumericSummary(scope *Scope, input tf.Output, optional ...DebugNumericSummaryAttr) (output tf.Output) {
17604	if scope.Err() != nil {
17605		return
17606	}
17607	attrs := map[string]interface{}{}
17608	for _, a := range optional {
17609		a(attrs)
17610	}
17611	opspec := tf.OpSpec{
17612		Type: "DebugNumericSummary",
17613		Input: []tf.Input{
17614			input,
17615		},
17616		Attrs: attrs,
17617	}
17618	op := scope.AddOperation(opspec)
17619	return op.Output(0)
17620}
17621
17622// An Op to exchange data across TPU replicas.
17623//
17624// On each replica, the input is split into `split_count` blocks along
// `split_dimension` and sent to the other replicas given group_assignment. After
17626// receiving `split_count` - 1 blocks from other replicas, we concatenate the
17627// blocks along `concat_dimension` as the output.
17628//
17629// For example, suppose there are 2 TPU replicas:
17630// replica 0 receives input: `[[A, B]]`
17631// replica 1 receives input: `[[C, D]]`
17632//
17633// group_assignment=`[[0, 1]]`
17634// concat_dimension=0
17635// split_dimension=1
17636// split_count=2
17637//
17638// replica 0's output: `[[A], [C]]`
17639// replica 1's output: `[[B], [D]]`
17640//
17641// Arguments:
//	input: The local input to the all-to-all exchange.
17643//	group_assignment: An int32 tensor with shape
17644// [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
17645// replica ids in the ith subgroup.
17646//	concat_dimension: The dimension number to concatenate.
17647//	split_dimension: The dimension number to split.
//	split_count: The number of splits; this number must equal the sub-group
// size (group_assignment.get_shape()[1]).
17650//
17651// Returns The exchanged result.
17652func AllToAll(scope *Scope, input tf.Output, group_assignment tf.Output, concat_dimension int64, split_dimension int64, split_count int64) (output tf.Output) {
17653	if scope.Err() != nil {
17654		return
17655	}
17656	attrs := map[string]interface{}{"concat_dimension": concat_dimension, "split_dimension": split_dimension, "split_count": split_count}
17657	opspec := tf.OpSpec{
17658		Type: "AllToAll",
17659		Input: []tf.Input{
17660			input, group_assignment,
17661		},
17662		Attrs: attrs,
17663	}
17664	op := scope.AddOperation(opspec)
17665	return op.Output(0)
17666}
17667
17668// TridiagonalSolveAttr is an optional argument to TridiagonalSolve.
17669type TridiagonalSolveAttr func(optionalAttr)
17670
17671// TridiagonalSolvePartialPivoting sets the optional partial_pivoting attribute to value.
17672//
17673// value: Whether to apply partial pivoting. Partial pivoting makes the procedure more
17674// stable, but slower.
17675// If not specified, defaults to true
17676func TridiagonalSolvePartialPivoting(value bool) TridiagonalSolveAttr {
17677	return func(m optionalAttr) {
17678		m["partial_pivoting"] = value
17679	}
17680}
17681
17682// TridiagonalSolvePerturbSingular sets the optional perturb_singular attribute to value.
17683// If not specified, defaults to false
17684func TridiagonalSolvePerturbSingular(value bool) TridiagonalSolveAttr {
17685	return func(m optionalAttr) {
17686		m["perturb_singular"] = value
17687	}
17688}
17689
17690// Solves tridiagonal systems of equations.
17691//
//   Supports batch dimensions and multiple right-hand sides per left-hand
//   side.
17695//   On CPU, solution is computed via Gaussian elimination with or without partial
17696//   pivoting, depending on `partial_pivoting` attribute. On GPU, Nvidia's cuSPARSE
17697//   library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv
17698//   Partial pivoting is not yet supported by XLA backends.
17699//
17700// Arguments:
17701//	diagonals: Tensor of shape `[..., 3, M]` whose innermost 2 dimensions represent the
// tridiagonal matrices with three rows being the superdiagonal, diagonal, and
// subdiagonal, in order. The last element of the superdiagonal and the first
// element of the subdiagonal are ignored.
17705//	rhs: Tensor of shape `[..., M, K]`, representing K right-hand sides per each
17706// left-hand side.
17707//
// Returns Tensor of shape `[..., M, K]` containing the solutions.
17709func TridiagonalSolve(scope *Scope, diagonals tf.Output, rhs tf.Output, optional ...TridiagonalSolveAttr) (output tf.Output) {
17710	if scope.Err() != nil {
17711		return
17712	}
17713	attrs := map[string]interface{}{}
17714	for _, a := range optional {
17715		a(attrs)
17716	}
17717	opspec := tf.OpSpec{
17718		Type: "TridiagonalSolve",
17719		Input: []tf.Input{
17720			diagonals, rhs,
17721		},
17722		Attrs: attrs,
17723	}
17724	op := scope.AddOperation(opspec)
17725	return op.Output(0)
17726}
17727
17728// MatrixTriangularSolveAttr is an optional argument to MatrixTriangularSolve.
17729type MatrixTriangularSolveAttr func(optionalAttr)
17730
17731// MatrixTriangularSolveLower sets the optional lower attribute to value.
17732//
17733// value: Boolean indicating whether the innermost matrices in `matrix` are
17734// lower or upper triangular.
17735// If not specified, defaults to true
17736func MatrixTriangularSolveLower(value bool) MatrixTriangularSolveAttr {
17737	return func(m optionalAttr) {
17738		m["lower"] = value
17739	}
17740}
17741
17742// MatrixTriangularSolveAdjoint sets the optional adjoint attribute to value.
17743//
17744// value: Boolean indicating whether to solve with `matrix` or its (block-wise)
17745//          adjoint.
17746//
17747// @compatibility(numpy)
17748// Equivalent to scipy.linalg.solve_triangular
17749// @end_compatibility
17750// If not specified, defaults to false
17751func MatrixTriangularSolveAdjoint(value bool) MatrixTriangularSolveAttr {
17752	return func(m optionalAttr) {
17753		m["adjoint"] = value
17754	}
17755}
17756
17757// Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.
17758//
17759//
17760// `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
17761// square matrices. If `lower` is `True` then the strictly upper triangular part
17762// of each inner-most matrix is assumed to be zero and not accessed.
17763// If `lower` is False then the strictly lower triangular part of each inner-most
17764// matrix is assumed to be zero and not accessed.
17765// `rhs` is a tensor of shape `[..., M, N]`.
17766//
// The output is a tensor of shape `[..., M, N]`. If `adjoint` is
// `False` then the innermost matrices in `output` satisfy matrix equations
// `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
// If `adjoint` is `True` then the innermost matrices in
// `output` satisfy matrix equations
// `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
17773//
17774// Note, the batch shapes for the inputs only need to broadcast.
17775//
17776// Example:
17777// ```python
17778//
17779// a = tf.constant([[3,  0,  0,  0],
17780//                  [2,  1,  0,  0],
17781//                  [1,  0,  1,  0],
17782//                  [1,  1,  1,  1]], dtype=tf.float32)
17783//
17784// b = tf.constant([[4],
17785//                  [2],
17786//                  [4],
17787//                  [2]], dtype=tf.float32)
17788//
17789// x = tf.linalg.triangular_solve(a, b, lower=True)
17790// x
17791// # <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
17792// # array([[ 1.3333334 ],
17793// #        [-0.66666675],
17794// #        [ 2.6666665 ],
17795// #        [-1.3333331 ]], dtype=float32)>
17796//
17797// # in python3 one can use `a@x`
17798// tf.matmul(a, x)
17799// # <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
17800// # array([[4.       ],
17801// #        [2.       ],
17802// #        [4.       ],
17803// #        [1.9999999]], dtype=float32)>
17804// ```
17805//
17806// Arguments:
17807//	matrix: Shape is `[..., M, M]`.
17808//	rhs: Shape is `[..., M, K]`.
17809//
17810// Returns Shape is `[..., M, K]`.
17811func MatrixTriangularSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixTriangularSolveAttr) (output tf.Output) {
17812	if scope.Err() != nil {
17813		return
17814	}
17815	attrs := map[string]interface{}{}
17816	for _, a := range optional {
17817		a(attrs)
17818	}
17819	opspec := tf.OpSpec{
17820		Type: "MatrixTriangularSolve",
17821		Input: []tf.Input{
17822			matrix, rhs,
17823		},
17824		Attrs: attrs,
17825	}
17826	op := scope.AddOperation(opspec)
17827	return op.Output(0)
17828}
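
// The following is an illustrative sketch, not generated code: it mirrors the
// Python example above using this package's hand-written Scope and Const
// helpers (assumed from the surrounding bindings, not part of this op).
func exampleMatrixTriangularSolve(s *Scope) tf.Output {
	a := Const(s, [][]float32{
		{3, 0, 0, 0},
		{2, 1, 0, 0},
		{1, 0, 1, 0},
		{1, 1, 1, 1},
	})
	b := Const(s, [][]float32{{4}, {2}, {4}, {2}})
	// lower defaults to true; it is passed explicitly here for clarity.
	return MatrixTriangularSolve(s, a, b, MatrixTriangularSolveLower(true))
}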

// Applies sparse addition to `input` using individual values or slices
//
// from `updates` according to indices `indices`.  The updates are non-aliasing:
// `input` is only modified in-place if no other operations will use it.
// Otherwise, a copy of `input` is made.  This operation has a gradient with
// respect to both `input` and `updates`.
//
// `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
//
// `indices` must be an integer tensor, containing indices into `input`.
// It must have shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`.
//
// The innermost dimension of `indices` (with length `K`) corresponds to
// indices into elements (if `K = P`) or `(P-K)`-dimensional slices
// (if `K < P`) along the `K`th dimension of `input`.
//
// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
//
// $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$
//
// For example, say we want to add 4 scattered elements to a rank-1 tensor with
// 8 elements. In Python, that addition would look like this:
//
//     input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
//     indices = tf.constant([[4], [3], [1], [7]])
//     updates = tf.constant([9, 10, 11, 12])
//     output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
//     with tf.Session() as sess:
//       print(sess.run(output))
//
// The resulting value `output` would look like this:
//
//     [1, 13, 3, 14, 14, 6, 7, 20]
//
// See `tf.scatter_nd` for more details about how to make updates to slices.
//
// Arguments:
//	input: A Tensor.
//	indices: A Tensor. Must be one of the following types: `int32`, `int64`.
// A tensor of indices into `input`.
//	updates: A Tensor. Must have the same type as `input`. A tensor of updated values
// to add to `input`.
//
// Returns A `Tensor` with the same shape as `input`, containing values of `input`
// updated with `updates`.
func ScatterNdNonAliasingAdd(scope *Scope, input tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ScatterNdNonAliasingAdd",
		Input: []tf.Input{
			input, indices, updates,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
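
// A minimal sketch (not generated) reproducing the rank-1 example above with
// this package's Const helper; the helper is assumed from the surrounding
// bindings rather than from this op's definition.
func exampleScatterNdNonAliasingAdd(s *Scope) tf.Output {
	input := Const(s, []int32{1, 2, 3, 4, 5, 6, 7, 8})
	indices := Const(s, [][]int32{{4}, {3}, {1}, {7}})
	updates := Const(s, []int32{9, 10, 11, 12})
	// Evaluates to [1, 13, 3, 14, 14, 6, 7, 20].
	return ScatterNdNonAliasingAdd(s, input, indices, updates)
}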

// ProdAttr is an optional argument to Prod.
type ProdAttr func(optionalAttr)

// ProdKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func ProdKeepDims(value bool) ProdAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the product of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Prod(scope *Scope, input tf.Output, axis tf.Output, optional ...ProdAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Prod",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
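
// An illustrative sketch (not generated): reducing a [2, 3] matrix along axis
// 0 with keep_dims set. Const is assumed from this package's hand-written
// helpers.
func exampleProd(s *Scope) tf.Output {
	x := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
	axis := Const(s, []int32{0})
	// Column-wise products [4, 10, 18], kept as shape [1, 3].
	return Prod(s, x, axis, ProdKeepDims(true))
}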

// SampleDistortedBoundingBoxV2Attr is an optional argument to SampleDistortedBoundingBoxV2.
type SampleDistortedBoundingBoxV2Attr func(optionalAttr)

// SampleDistortedBoundingBoxV2Seed sets the optional seed attribute to value.
//
// value: If either `seed` or `seed2` is set to non-zero, the random number
// generator is seeded by the given `seed`.  Otherwise, it is seeded by a random
// seed.
// If not specified, defaults to 0
func SampleDistortedBoundingBoxV2Seed(value int64) SampleDistortedBoundingBoxV2Attr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// SampleDistortedBoundingBoxV2Seed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func SampleDistortedBoundingBoxV2Seed2(value int64) SampleDistortedBoundingBoxV2Attr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// SampleDistortedBoundingBoxV2AspectRatioRange sets the optional aspect_ratio_range attribute to value.
//
// value: The cropped area of the image must have an aspect ratio =
// width / height within this range.
// If not specified, defaults to {f:0.75 f:1.33}
func SampleDistortedBoundingBoxV2AspectRatioRange(value []float32) SampleDistortedBoundingBoxV2Attr {
	return func(m optionalAttr) {
		m["aspect_ratio_range"] = value
	}
}

// SampleDistortedBoundingBoxV2AreaRange sets the optional area_range attribute to value.
//
// value: The cropped area of the image must contain a fraction of the
// supplied image within this range.
// If not specified, defaults to {f:0.05 f:1}
func SampleDistortedBoundingBoxV2AreaRange(value []float32) SampleDistortedBoundingBoxV2Attr {
	return func(m optionalAttr) {
		m["area_range"] = value
	}
}

// SampleDistortedBoundingBoxV2MaxAttempts sets the optional max_attempts attribute to value.
//
// value: Number of attempts at generating a cropped region of the image that
// satisfies the specified constraints. After `max_attempts` failures, the
// entire image is returned.
// If not specified, defaults to 100
func SampleDistortedBoundingBoxV2MaxAttempts(value int64) SampleDistortedBoundingBoxV2Attr {
	return func(m optionalAttr) {
		m["max_attempts"] = value
	}
}

// SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
//
// value: Controls behavior if no bounding boxes are supplied.
// If true, assume an implicit bounding box covering the whole input. If false,
// raise an error.
// If not specified, defaults to false
func SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxV2Attr {
	return func(m optionalAttr) {
		m["use_image_if_no_bounding_boxes"] = value
	}
}

// Generate a single randomly distorted bounding box for an image.
//
// Bounding box annotations are often supplied in addition to ground-truth labels
// in image recognition or object localization tasks. A common technique for
// training such a system is to randomly distort an image while preserving
// its content, i.e. *data augmentation*. This Op outputs a randomly distorted
// localization of an object, i.e. bounding box, given an `image_size`,
// `bounding_boxes` and a series of constraints.
//
// The output of this Op is a single bounding box that may be used to crop the
// original image. The output is returned as 3 tensors: `begin`, `size` and
// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
// what the bounding box looks like.
//
// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
// height of the underlying image.
//
// For example,
//
// ```python
//     # Generate a single distorted bounding box.
//     begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
//         tf.shape(image),
//         bounding_boxes=bounding_boxes)
//
//     # Draw the bounding box in an image summary.
//     image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
//                                                   bbox_for_draw)
//     tf.summary.image('images_with_box', image_with_box)
//
//     # Employ the bounding box to distort the image.
//     distorted_image = tf.slice(image, begin, size)
// ```
//
// Note that if no bounding box information is available, setting
// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
// false and no bounding boxes are supplied, an error is raised.
//
// Arguments:
//	image_size: 1-D, containing `[height, width, channels]`.
//	bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
// associated with the image.
//	min_object_covered: The cropped area of the image must contain at least this
// fraction of any bounding box supplied. The value of this parameter should be
// non-negative. In the case of 0, the cropped area does not need to overlap
// any of the bounding boxes supplied.
//
// Returns:
//	begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
// `tf.slice`.
//	size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
// `tf.slice`.
//	bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
// Provide as input to `tf.image.draw_bounding_boxes`.
func SampleDistortedBoundingBoxV2(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, min_object_covered tf.Output, optional ...SampleDistortedBoundingBoxV2Attr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SampleDistortedBoundingBoxV2",
		Input: []tf.Input{
			image_size, bounding_boxes, min_object_covered,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
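
// A hypothetical sketch (not generated) of the Python example above in Go,
// assuming the generated Shape wrapper and this package's Const helper are
// available. The box coordinates are illustrative values only.
func exampleSampleDistortedBoundingBoxV2(s *Scope, image tf.Output) (begin, size, bboxes tf.Output) {
	imageSize := Shape(s, image)
	boxes := Const(s, [][][]float32{{{0.1, 0.1, 0.9, 0.9}}}) // [batch=1, N=1, 4]
	minCovered := Const(s, float32(0.1))
	// begin and size can then be fed to Slice to crop the image.
	return SampleDistortedBoundingBoxV2(s, imageSize, boxes, minCovered,
		SampleDistortedBoundingBoxV2Seed(42))
}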

// EigAttr is an optional argument to Eig.
type EigAttr func(optionalAttr)

// EigComputeV sets the optional compute_v attribute to value.
//
// value: If `True` then eigenvectors will be computed and returned in `v`.
// Otherwise, only the eigenvalues will be computed.
// If not specified, defaults to true
func EigComputeV(value bool) EigAttr {
	return func(m optionalAttr) {
		m["compute_v"] = value
	}
}

// Computes the eigen decomposition of one or more square matrices.
//
// Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in
// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues
// are sorted in non-decreasing order.
//
// ```python
// # a is a tensor.
// # e is a tensor of eigenvalues.
// # v is a tensor of eigenvectors.
// e, v = eig(a)
// e = eig(a, compute_v=False)
// ```
//
// Arguments:
//	input: `Tensor` input of shape `[N, N]`.
//
//
// Returns:
//	e: Eigenvalues. Shape is `[N]`.
//	v: Eigenvectors. Shape is `[N, N]`.
func Eig(scope *Scope, input tf.Output, Tout tf.DataType, optional ...EigAttr) (e tf.Output, v tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"Tout": Tout}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Eig",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
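
// A minimal sketch (not generated): eigenvalues of a real matrix are complex
// in general, so a complex Tout is requested. tf.Complex128 is assumed to be
// the DataType constant exposed by the tensorflow Go bindings.
func exampleEig(s *Scope, a tf.Output) (e, v tf.Output) {
	return Eig(s, a, tf.Complex128, EigComputeV(true))
}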

// Computes the eigendecomposition of a batch of square self-adjoint matrices.
//
// DEPRECATED at GraphDef version 11: Use SelfAdjointEigV2 instead.
//
// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
// form square matrices, with the same constraints as the single matrix
// SelfAdjointEig.
//
// The result is a `[..., M+1, M]` tensor with `[..., 0, :]` containing the
// eigenvalues and `[..., 1:, :]` containing the eigenvectors. The eigenvalues
// are sorted in non-decreasing order.
//
// Arguments:
//	input: Shape is `[..., M, M]`.
//
// Returns Shape is `[..., M+1, M]`.
func SelfAdjointEig(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SelfAdjointEig",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
//
// For an explanation see "Differentiation of the Cholesky algorithm" by
// Iain Murray http://arxiv.org/abs/1602.07527.
//
// Arguments:
//	l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.
// Algorithm depends only on lower triangular part of the innermost matrices of
// this tensor.
//	grad: df/dl where f is some scalar function. Shape is `[..., M, M]`.
// Algorithm depends only on lower triangular part of the innermost matrices of
// this tensor.
//
// Returns Symmetrized version of df/dA. Shape is `[..., M, M]`.
func CholeskyGrad(scope *Scope, l tf.Output, grad tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "CholeskyGrad",
		Input: []tf.Input{
			l, grad,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Check if the input matches the regex pattern.
//
// The input is a string tensor of any shape. The pattern is the
// regular expression to be matched with every element of the input tensor.
// The boolean values (True or False) of the output tensor indicate
// whether the input matches the regex pattern provided.
//
// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax).
//
// Arguments:
//	input: A string tensor of the text to be processed.
//	pattern: The regular expression to match the input.
//
// Returns A bool tensor with the same shape as `input`.
func StaticRegexFullMatch(scope *Scope, input tf.Output, pattern string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"pattern": pattern}
	opspec := tf.OpSpec{
		Type: "StaticRegexFullMatch",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
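
// An illustrative sketch (not generated): the pattern must match each entire
// element, not just a substring. Const is assumed from this package's
// hand-written helpers.
func exampleStaticRegexFullMatch(s *Scope) tf.Output {
	inputs := Const(s, []string{"abc123", "xyz"})
	// Evaluates to [true, false]: "xyz" has no trailing digits.
	return StaticRegexFullMatch(s, inputs, `[a-z]+[0-9]+`)
}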

// ParseSingleSequenceExampleAttr is an optional argument to ParseSingleSequenceExample.
type ParseSingleSequenceExampleAttr func(optionalAttr)

// ParseSingleSequenceExampleContextSparseTypes sets the optional context_sparse_types attribute to value.
//
// value: A list of Ncontext_sparse types; the data types of data in
// each context Feature given in context_sparse_keys.
// Currently, ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleContextSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["context_sparse_types"] = value
	}
}

// ParseSingleSequenceExampleFeatureListDenseTypes sets the optional feature_list_dense_types attribute to value.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_dense_types"] = value
	}
}

// ParseSingleSequenceExampleContextDenseShapes sets the optional context_dense_shapes attribute to value.
//
// value: A list of Ncontext_dense shapes; the shapes of data in
// each context Feature given in context_dense_keys.
// The number of elements in the Feature corresponding to context_dense_key[j]
// must always equal context_dense_shapes[j].NumEntries().
// The shape of context_dense_values[j] will match context_dense_shapes[j].
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleContextDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["context_dense_shapes"] = value
	}
}

// ParseSingleSequenceExampleFeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
//
// value: A list of Nfeature_list_sparse types; the data types
// of data in each FeatureList given in feature_list_sparse_keys.
// Currently, ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_sparse_types"] = value
	}
}

// ParseSingleSequenceExampleFeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
//
// value: A list of Nfeature_list_dense shapes; the shapes of
// data in each FeatureList given in feature_list_dense_keys.
// The shape of each Feature in the FeatureList corresponding to
// feature_list_dense_key[j] must always equal
// feature_list_dense_shapes[j].NumEntries().
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_dense_shapes"] = value
	}
}

// Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors.
//
// Arguments:
//	serialized: A scalar containing a binary serialized SequenceExample proto.
//	feature_list_dense_missing_assumed_empty: A vector listing the
// FeatureList keys which may be missing from the SequenceExample.  If the
// associated FeatureList is missing, it is treated as empty.  By default,
// any FeatureList not listed in this vector must exist in the SequenceExample.
//	context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars).
// The keys expected in the Examples' features associated with context_sparse
// values.
//	context_dense_keys: A list of Ncontext_dense string Tensors (scalars).
// The keys expected in the SequenceExamples' context features associated with
// dense values.
//	feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors
// (scalars).  The keys expected in the FeatureLists associated with sparse
// values.
//	feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars).
// The keys expected in the SequenceExamples' feature_lists associated
// with lists of dense values.
//	context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
// context_dense_defaults[j] provides default values
// when the SequenceExample's context map lacks context_dense_key[j].
// If an empty Tensor is provided for context_dense_defaults[j],
// then the Feature context_dense_keys[j] is required.
// The input type is inferred from context_dense_defaults[j], even when it's
// empty.  If context_dense_defaults[j] is not empty, its shape must match
// context_dense_shapes[j].
//	debug_name: A scalar containing the name of the serialized proto.
// May contain, for example, table key (descriptive) name for the
// corresponding serialized proto.  This is purely useful for debugging
// purposes, and the presence of values here has no effect on the output.
// May also be an empty scalar if no name is available.
func ParseSingleSequenceExample(scope *Scope, serialized tf.Output, feature_list_dense_missing_assumed_empty tf.Output, context_sparse_keys []tf.Output, context_dense_keys []tf.Output, feature_list_sparse_keys []tf.Output, feature_list_dense_keys []tf.Output, context_dense_defaults []tf.Output, debug_name tf.Output, optional ...ParseSingleSequenceExampleAttr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ParseSingleSequenceExample",
		Input: []tf.Input{
			serialized, feature_list_dense_missing_assumed_empty, tf.OutputList(context_sparse_keys), tf.OutputList(context_dense_keys), tf.OutputList(feature_list_sparse_keys), tf.OutputList(feature_list_dense_keys), tf.OutputList(context_dense_defaults), debug_name,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if context_sparse_indices, idx, err = makeOutputList(op, idx, "context_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if context_sparse_values, idx, err = makeOutputList(op, idx, "context_sparse_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if context_sparse_shapes, idx, err = makeOutputList(op, idx, "context_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if context_dense_values, idx, err = makeOutputList(op, idx, "context_dense_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_sparse_indices, idx, err = makeOutputList(op, idx, "feature_list_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_sparse_values, idx, err = makeOutputList(op, idx, "feature_list_sparse_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_sparse_shapes, idx, err = makeOutputList(op, idx, "feature_list_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_dense_values, idx, err = makeOutputList(op, idx, "feature_list_dense_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	return context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values
}

// Splits a tensor into a list.
//
// list[i] corresponds to lengths[i] tensors from the input tensor.
// The tensor must have rank at least 1 and contain exactly sum(lengths) elements.
//
// tensor: The input tensor.
// element_shape: A shape compatible with that of elements in the tensor.
// lengths: Vector of sizes of the 0th dimension of tensors in the list.
// output_handle: The list.
func TensorListSplit(scope *Scope, tensor tf.Output, element_shape tf.Output, lengths tf.Output) (output_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorListSplit",
		Input: []tf.Input{
			tensor, element_shape, lengths,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
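
// A minimal sketch (not generated): splitting a 5-element vector into list
// entries of lengths 2 and 3. The -1 in element_shape is assumed here to mark
// the leading dimension as unknown, since entry lengths differ; Const is
// assumed from this package's helpers.
func exampleTensorListSplit(s *Scope) tf.Output {
	tensor := Const(s, []float32{1, 2, 3, 4, 5})
	elementShape := Const(s, []int64{-1})
	lengths := Const(s, []int64{2, 3})
	// list[0] holds [1, 2]; list[1] holds [3, 4, 5].
	return TensorListSplit(s, tensor, elementShape, lengths)
}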

// RetrieveTPUEmbeddingAdagradParametersAttr is an optional argument to RetrieveTPUEmbeddingAdagradParameters.
type RetrieveTPUEmbeddingAdagradParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingAdagradParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingAdagradParametersTableId(value int64) RetrieveTPUEmbeddingAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingAdagradParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingAdagradParametersTableName(value string) RetrieveTPUEmbeddingAdagradParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingAdagradParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingAdagradParametersConfig(value string) RetrieveTPUEmbeddingAdagradParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve Adagrad embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: The embedding parameters updated by the Adagrad optimization algorithm.
//	accumulators: The Adagrad accumulators updated by the optimization algorithm.
func RetrieveTPUEmbeddingAdagradParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdagradParametersAttr) (parameters tf.Output, accumulators tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingAdagradParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
//
// N is the size of the segment being reduced.
//
// Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is
// missing, the `output` tensor at that position will be zeroed.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
// for an explanation of segments.
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//	num_segments: Should equal the number of distinct segment IDs.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `num_segments`.
func SparseSegmentSqrtNWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentSqrtNWithNumSegments",
		Input: []tf.Input{
			data, indices, segment_ids, num_segments,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
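
// An illustrative sketch (not generated): segments 1 and 3 receive no ids and
// therefore come back as zero rows. Const is assumed from this package's
// hand-written helpers.
func exampleSparseSegmentSqrtNWithNumSegments(s *Scope) tf.Output {
	data := Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}})
	indices := Const(s, []int32{0, 1})
	segmentIDs := Const(s, []int32{0, 2})
	numSegments := Const(s, int32(4))
	// Output shape [4, 2]: rows 0 and 2 hold data[0] and data[1] (N = 1 each).
	return SparseSegmentSqrtNWithNumSegments(s, data, indices, segmentIDs, numSegments)
}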

// Computes the Cholesky decomposition of one or more square matrices.
//
// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
// form square matrices.
//
// The input has to be symmetric and positive definite. Only the lower-triangular
// part of the input will be used for this operation. The upper-triangular part
// will not be read.
//
// The output is a tensor of the same shape as the input
// containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
//
// **Note**: The gradient computation on GPU is faster for large matrices but
// not for large batch dimensions when the submatrices are small. In this
// case it might be faster to use the CPU.
//
// Arguments:
//	input: Shape is `[..., M, M]`.
//
// Returns Shape is `[..., M, M]`.
func Cholesky(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Cholesky",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
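
// A hypothetical end-to-end sketch (not generated): build, finalize, and run
// a graph that factors a small SPD matrix. NewScope, Const, and the
// tf.Session API are assumed from the hand-written parts of these bindings.
func exampleCholesky() (*tf.Tensor, error) {
	s := NewScope()
	a := Const(s, [][]float32{{4, 2}, {2, 3}}) // symmetric positive definite
	l := Cholesky(s, a)
	graph, err := s.Finalize()
	if err != nil {
		return nil, err
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, err
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{l}, nil)
	if err != nil {
		return nil, err
	}
	return out[0], nil // lower-triangular L with L * transpose(L) = a
}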

// Computes the determinant of one or more square matrices.
//
// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
// form square matrices. The output is a tensor containing the determinants
// for all input submatrices `[..., :, :]`.
//
// Arguments:
//	input: Shape is `[..., M, M]`.
//
// Returns Shape is `[...]`.
func MatrixDeterminant(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MatrixDeterminant",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the set of files matching one or more glob patterns.
//
// Note that this routine only supports wildcard characters in the
// basename portion of the pattern, not in the directory portion.
// Note also that the order of filenames returned is deterministic.
//
// Arguments:
//	pattern: Shell wildcard pattern(s). Scalar or vector of type string.
//
// Returns A vector of matching filenames.
func MatchingFiles(scope *Scope, pattern tf.Output) (filenames tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MatchingFiles",
		Input: []tf.Input{
			pattern,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// WriteImageSummaryAttr is an optional argument to WriteImageSummary.
type WriteImageSummaryAttr func(optionalAttr)

// WriteImageSummaryMaxImages sets the optional max_images attribute to value.
// If not specified, defaults to 3
//
// REQUIRES: value >= 1
func WriteImageSummaryMaxImages(value int64) WriteImageSummaryAttr {
	return func(m optionalAttr) {
		m["max_images"] = value
	}
}

// Writes an image summary.
//
// Writes image `tensor` at `step` with `tag` using summary `writer`.
// `tensor` is an image with shape `[height, width, channels]`.
//
// Returns the created operation.
func WriteImageSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Output, bad_color tf.Output, optional ...WriteImageSummaryAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "WriteImageSummary",
		Input: []tf.Input{
			writer, step, tag, tensor, bad_color,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// MatrixSolveAttr is an optional argument to MatrixSolve.
type MatrixSolveAttr func(optionalAttr)

// MatrixSolveAdjoint sets the optional adjoint attribute to value.
//
// value: Boolean indicating whether to solve with `matrix` or its (block-wise)
// adjoint.
// If not specified, defaults to false
func MatrixSolveAdjoint(value bool) MatrixSolveAttr {
	return func(m optionalAttr) {
		m["adjoint"] = value
	}
}

// Solves systems of linear equations.
//
// `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
// form square matrices. `rhs` is a tensor of shape `[..., M, K]`. The `output` is
// a tensor of shape `[..., M, K]`.  If `adjoint` is `False` then each output matrix
// satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
// If `adjoint` is `True` then each output matrix satisfies
// `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
//
// Arguments:
//	matrix: Shape is `[..., M, M]`.
//	rhs: Shape is `[..., M, K]`.
//
// Returns Shape is `[..., M, K]`.
func MatrixSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixSolveAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MatrixSolve",
		Input: []tf.Input{
			matrix, rhs,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
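
// A minimal sketch (not generated): solving a diagonal 2x2 system, so the
// expected solution [[1], [2]] is easy to verify by hand. Const is assumed
// from this package's hand-written helpers.
func exampleMatrixSolve(s *Scope) tf.Output {
	a := Const(s, [][]float32{{2, 0}, {0, 4}})
	rhs := Const(s, [][]float32{{2}, {8}})
	return MatrixSolve(s, a, rhs)
}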

// TensorArrayConcatV2Attr is an optional argument to TensorArrayConcatV2.
type TensorArrayConcatV2Attr func(optionalAttr)

// TensorArrayConcatV2ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
// If not specified, defaults to {unknown_rank:true}
func TensorArrayConcatV2ElementShapeExcept0(value tf.Shape) TensorArrayConcatV2Attr {
	return func(m optionalAttr) {
		m["element_shape_except0"] = value
	}
}

// Deprecated. Use TensorArrayConcatV3.
func TensorArrayConcatV2(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV2Attr) (value tf.Output, lengths tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TensorArrayConcatV2",
		Input: []tf.Input{
			handle, flow_in,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Writes contents to the file at input filename. Creates the file and recursively
//
// creates the directory if it does not exist.
//
// Arguments:
//	filename: scalar. The name of the file to which we write the contents.
//	contents: scalar. The content to be written to the output file.
//
// Returns the created operation.
func WriteFile(scope *Scope, filename tf.Output, contents tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "WriteFile",
		Input: []tf.Input{
			filename, contents,
		},
	}
	return scope.AddOperation(opspec)
}
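
// An illustrative sketch (not generated): the path below is hypothetical.
// The returned *tf.Operation must be passed as a session Run target, since
// the op produces no outputs. Const is assumed from this package's helpers.
func exampleWriteFile(s *Scope) *tf.Operation {
	filename := Const(s, "/tmp/op_example/hello.txt")
	contents := Const(s, "hello from the op package")
	return WriteFile(s, filename, contents)
}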

// TruncatedNormalAttr is an optional argument to TruncatedNormal.
type TruncatedNormalAttr func(optionalAttr)

// TruncatedNormalSeed sets the optional seed attribute to value.
//
// value: If either `seed` or `seed2` is set to non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func TruncatedNormalSeed(value int64) TruncatedNormalAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// TruncatedNormalSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func TruncatedNormalSeed2(value int64) TruncatedNormalAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Outputs random values from a truncated normal distribution.
//
// The generated values follow a normal distribution with mean 0 and standard
// deviation 1, except that values whose magnitude is more than 2 standard
// deviations from the mean are dropped and re-picked.
//
// Arguments:
//	shape: The shape of the output tensor.
//	dtype: The type of the output.
//
// Returns A tensor of the specified shape filled with random truncated normal
// values.
func TruncatedNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...TruncatedNormalAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TruncatedNormal",
		Input: []tf.Input{
			shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
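
// A minimal sketch (not generated): sampling a [2, 3] float tensor. Fixing
// both seeds makes the draw reproducible across runs; Const is assumed from
// this package's hand-written helpers.
func exampleTruncatedNormal(s *Scope) tf.Output {
	shape := Const(s, []int32{2, 3})
	return TruncatedNormal(s, shape, tf.Float,
		TruncatedNormalSeed(1), TruncatedNormalSeed2(2))
}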

// Computes requantization range per channel.
//
// Arguments:
//	input: The original input tensor.
//	input_min: The minimum value of the input tensor.
//	input_max: The maximum value of the input tensor.
//	clip_value_max: The maximum value of the output that needs to be clipped.
// Example: set this to 6 for Relu6.
//
// Returns:
//	output_min: The minimum value of the final output tensor.
//	output_max: The maximum value of the final output tensor.
func RequantizationRangePerChannel(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, clip_value_max float32) (output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"clip_value_max": clip_value_max}
	opspec := tf.OpSpec{
		Type: "RequantizationRangePerChannel",
		Input: []tf.Input{
			input, input_min, input_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Creates a dataset that takes a Bernoulli sample of the contents of another dataset.
//
// There is no transformation in the `tf.data` Python API for creating this dataset.
// Instead, it is created as a result of the `filter_with_random_uniform_fusion`
// static optimization. Whether this optimization is performed is determined by the
// `experimental_optimization.filter_with_random_uniform_fusion` option of
// `tf.data.Options`.
//
// Arguments:
//
//	rate: A scalar representing the sample rate. Each element of `input_dataset` is
// retained with this probability, independent of all other elements.
//	seed: A scalar representing seed of random number generator.
//	seed2: A scalar representing seed2 of random number generator.
//
//
func SamplingDataset(scope *Scope, input_dataset tf.Output, rate tf.Output, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "SamplingDataset",
		Input: []tf.Input{
			input_dataset, rate, seed, seed2,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Reads and outputs the entire contents of the input filename.
func ReadFile(scope *Scope, filename tf.Output) (contents tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReadFile",
		Input: []tf.Input{
			filename,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// FakeQuantWithMinMaxVarsPerChannelGradientAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannelGradient.
type FakeQuantWithMinMaxVarsPerChannelGradientAttr func(optionalAttr)

// FakeQuantWithMinMaxVarsPerChannelGradientNumBits sets the optional num_bits attribute to value.
//
// value: The bitwidth of the quantization; between 2 and 16, inclusive.
// If not specified, defaults to 8
func FakeQuantWithMinMaxVarsPerChannelGradientNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange sets the optional narrow_range attribute to value.
//
// value: Whether to quantize into 2^num_bits - 1 distinct values.
// If not specified, defaults to false
func FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
//
// Arguments:
//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation,
// shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.
//	inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
//   same as `gradients`.
// min, max: Quantization interval, floats of shape `[d]`.
//
//
//
// Returns:
//	backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as
// `inputs`:
//   `gradients * (inputs >= min && inputs <= max)`.
//	backprop_wrt_min: Backpropagated gradients w.r.t. min parameter, shape `[d]`:
// `sum_per_d(gradients * (inputs < min))`.
//	backprop_wrt_max: Backpropagated gradients w.r.t. max parameter, shape `[d]`:
// `sum_per_d(gradients * (inputs > max))`.
func FakeQuantWithMinMaxVarsPerChannelGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxVarsPerChannelGradient",
		Input: []tf.Input{
			gradients, inputs, min, max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// PrintV2Attr is an optional argument to PrintV2.
type PrintV2Attr func(optionalAttr)

// PrintV2OutputStream sets the optional output_stream attribute to value.
//
// value: A string specifying the output stream or logging level to print to.
// If not specified, defaults to "stderr"
func PrintV2OutputStream(value string) PrintV2Attr {
	return func(m optionalAttr) {
		m["output_stream"] = value
	}
}

// PrintV2End sets the optional end attribute to value.
// If not specified, defaults to "\n"
func PrintV2End(value string) PrintV2Attr {
	return func(m optionalAttr) {
		m["end"] = value
	}
}

// Prints a string scalar.
//
// Prints a string scalar to the desired output_stream.
//
// Arguments:
//	input: The string scalar to print.
//
// Returns the created operation.
func PrintV2(scope *Scope, input tf.Output, optional ...PrintV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PrintV2",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
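
// An illustrative sketch (not generated): routing output to stdout rather
// than the default stderr. The returned operation is a session Run target;
// Const is assumed from this package's hand-written helpers.
func examplePrintV2(s *Scope) *tf.Operation {
	msg := Const(s, "training step complete")
	return PrintV2(s, msg, PrintV2OutputStream("stdout"))
}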

// Restore a Reader to its initial clean state.
//
// Arguments:
//	reader_handle: Handle to a Reader.
//
// Returns the created operation.
func ReaderResetV2(scope *Scope, reader_handle tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReaderResetV2",
		Input: []tf.Input{
			reader_handle,
		},
	}
	return scope.AddOperation(opspec)
}

// Split a `SparseTensor` into `num_split` tensors along one dimension.
//
// If `shape[split_dim]` is not an integer multiple of `num_split`, slices
// `[0 : shape[split_dim] % num_split]` get one extra dimension.
// For example, if `split_dim = 1` and `num_split = 2` and the input is
//
//     input_tensor = shape = [2, 7]
//     [    a   d e  ]
//     [b c          ]
//
// Graphically the output tensors are:
//
//     output_tensor[0] = shape = [2, 4]
//     [    a  ]
//     [b c    ]
//
//     output_tensor[1] = shape = [2, 3]
//     [ d e  ]
//     [      ]
//
// Arguments:
//	split_dim: 0-D.  The dimension along which to split.  Must be in the range
// `[0, rank(shape))`.
//	indices: 2-D tensor representing the indices of the sparse tensor.
//	values: 1-D tensor representing the values of the sparse tensor.
//	shape: 1-D tensor representing the shape of the sparse tensor.
//	num_split: The number of ways to split.
//
// Returns:
//	output_indices: A list of 1-D tensors representing the indices of the output
// sparse tensors.
//	output_values: A list of 1-D tensors representing the values of the output sparse
// tensors.
//	output_shape: A list of 1-D tensors representing the shape of the output sparse
// tensors.
func SparseSplit(scope *Scope, split_dim tf.Output, indices tf.Output, values tf.Output, shape tf.Output, num_split int64) (output_indices []tf.Output, output_values []tf.Output, output_shape []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_split": num_split}
	opspec := tf.OpSpec{
		Type: "SparseSplit",
		Input: []tf.Input{
			split_dim, indices, values, shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output_indices, idx, err = makeOutputList(op, idx, "output_indices"); err != nil {
		scope.UpdateErr("SparseSplit", err)
		return
	}
	if output_values, idx, err = makeOutputList(op, idx, "output_values"); err != nil {
		scope.UpdateErr("SparseSplit", err)
		return
	}
	if output_shape, idx, err = makeOutputList(op, idx, "output_shape"); err != nil {
		scope.UpdateErr("SparseSplit", err)
		return
	}
	return output_indices, output_values, output_shape
}
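
// A hypothetical sketch (not generated) of the [2, 7] example above, split
// two ways along dimension 1. Const is assumed from this package's helpers.
func exampleSparseSplit(s *Scope) (indices, values, shapes []tf.Output) {
	splitDim := Const(s, int64(1))
	ind := Const(s, [][]int64{{0, 2}, {0, 4}, {0, 5}, {1, 0}, {1, 1}})
	vals := Const(s, []string{"a", "d", "e", "b", "c"})
	shape := Const(s, []int64{2, 7})
	// Yields two sparse tensors with dense shapes [2, 4] and [2, 3].
	return SparseSplit(s, splitDim, ind, vals, shape, 2)
}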

// RaggedRangeAttr is an optional argument to RaggedRange.
type RaggedRangeAttr func(optionalAttr)

// RaggedRangeTsplits sets the optional Tsplits attribute to value.
// If not specified, defaults to DT_INT64
func RaggedRangeTsplits(value tf.DataType) RaggedRangeAttr {
	return func(m optionalAttr) {
		m["Tsplits"] = value
	}
}

// Returns a `RaggedTensor` containing the specified sequences of numbers.
//
// Returns a `RaggedTensor` `result` composed from `rt_dense_values` and
// `rt_nested_splits`, such that
// `result[i] = range(starts[i], limits[i], deltas[i])`.
//
// ```python
// (rt_nested_splits, rt_dense_values) = ragged_range(
//       starts=[2, 5, 8], limits=[3, 5, 12], deltas=1)
// result = tf.ragged.from_row_splits(rt_dense_values, rt_nested_splits)
// print(result)
// <tf.RaggedTensor [[2], [], [8, 9, 10, 11]]>
// ```
//
// The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.
// The vector inputs must all have the same size.  Scalar inputs are broadcast
// to match the size of the vector inputs.
//
// Arguments:
//	starts: The starts of each range.
//	limits: The limits of each range.
//	deltas: The deltas of each range.
//
// Returns:
//	rt_nested_splits: The `row_splits` for the returned `RaggedTensor`.
//	rt_dense_values: The `flat_values` for the returned `RaggedTensor`.
func RaggedRange(scope *Scope, starts tf.Output, limits tf.Output, deltas tf.Output, optional ...RaggedRangeAttr) (rt_nested_splits tf.Output, rt_dense_values tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RaggedRange",
		Input: []tf.Input{
			starts, limits, deltas,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
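
// A minimal sketch (not generated) of the example above: the scalar delta is
// broadcast against the vector starts and limits. Const is assumed from this
// package's hand-written helpers.
func exampleRaggedRange(s *Scope) (splits, values tf.Output) {
	starts := Const(s, []int32{2, 5, 8})
	limits := Const(s, []int32{3, 5, 12})
	deltas := Const(s, int32(1))
	// Yields row splits [0, 1, 1, 5] and flat values [2, 8, 9, 10, 11].
	return RaggedRange(s, starts, limits, deltas)
}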
19099
19100// Computes rectified linear 6: `min(max(features, 0), 6)`.
19101func Relu6(scope *Scope, features tf.Output) (activations tf.Output) {
19102	if scope.Err() != nil {
19103		return
19104	}
19105	opspec := tf.OpSpec{
19106		Type: "Relu6",
19107		Input: []tf.Input{
19108			features,
19109		},
19110	}
19111	op := scope.AddOperation(opspec)
19112	return op.Output(0)
19113}
19114
19115// Produce a string tensor that encodes the state of a Reader.
19116//
19117// Not all Readers support being serialized, so this can produce an
19118// Unimplemented error.
19119//
19120// Arguments:
19121//	reader_handle: Handle to a Reader.
19122func ReaderSerializeStateV2(scope *Scope, reader_handle tf.Output) (state tf.Output) {
19123	if scope.Err() != nil {
19124		return
19125	}
19126	opspec := tf.OpSpec{
19127		Type: "ReaderSerializeStateV2",
19128		Input: []tf.Input{
19129			reader_handle,
19130		},
19131	}
19132	op := scope.AddOperation(opspec)
19133	return op.Output(0)
19134}
19135
19136// FractionalMaxPoolAttr is an optional argument to FractionalMaxPool.
19137type FractionalMaxPoolAttr func(optionalAttr)
19138
19139// FractionalMaxPoolPseudoRandom sets the optional pseudo_random attribute to value.
19140//
19141// value: When set to True, generates the pooling sequence in a
19142// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
19143// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
19144// difference between pseudorandom and random.
19145// If not specified, defaults to false
19146func FractionalMaxPoolPseudoRandom(value bool) FractionalMaxPoolAttr {
19147	return func(m optionalAttr) {
19148		m["pseudo_random"] = value
19149	}
19150}
19151
19152// FractionalMaxPoolOverlapping sets the optional overlapping attribute to value.
19153//
19154// value: When set to True, it means when pooling, the values at the boundary
19155// of adjacent pooling cells are used by both cells. For example:
19156//
19157// `index  0  1  2  3  4`
19158//
19159// `value  20 5  16 3  7`
19160//
19161// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
19162// The result would be [20, 16] for fractional max pooling.
// If not specified, defaults to false
func FractionalMaxPoolOverlapping(value bool) FractionalMaxPoolAttr {
	return func(m optionalAttr) {
		m["overlapping"] = value
	}
}

// FractionalMaxPoolDeterministic sets the optional deterministic attribute to value.
//
// value: When set to True, a fixed pooling region will be used when
// iterating over a FractionalMaxPool node in the computation graph. Mainly used
// in unit tests to make FractionalMaxPool deterministic.
// If not specified, defaults to false
func FractionalMaxPoolDeterministic(value bool) FractionalMaxPoolAttr {
	return func(m optionalAttr) {
		m["deterministic"] = value
	}
}

// FractionalMaxPoolSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 is set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func FractionalMaxPoolSeed(value int64) FractionalMaxPoolAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// FractionalMaxPoolSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func FractionalMaxPoolSeed2(value int64) FractionalMaxPoolAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Performs fractional max pooling on the input.
//
// Fractional max pooling is slightly different from regular max pooling.  In
// regular max pooling, you downsize an input set by taking the maximum value of
// smaller N x N subsections of the set (often 2x2), and try to reduce the set by
// a factor of N, where N is an integer.  Fractional max pooling, as you might
// expect from the word "fractional", means that the overall reduction ratio N
// does not have to be an integer.
//
// The sizes of the pooling regions are generated randomly but are fairly uniform.
// For example, let's look at the height dimension, and the constraints on the
// list of rows that will be pool boundaries.
//
// First we define the following:
//
// 1.  input_row_length : the number of rows from the input set
// 2.  output_row_length : which will be smaller than the input
// 3.  alpha = input_row_length / output_row_length : our reduction ratio
// 4.  K = floor(alpha)
// 5.  row_pooling_sequence : this is the result list of pool boundary rows
//
// Then, row_pooling_sequence should satisfy:
//
// 1.  a[0] = 0 : the first value of the sequence is 0
// 2.  a[end] = input_row_length : the last value of the sequence is the size
// 3.  K <= (a[i+1] - a[i]) <= K+1 : all intervals are of size K or K+1
// 4.  length(row_pooling_sequence) = output_row_length+1
//
// For more details on fractional max pooling, see this paper:
// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
//
// Arguments:
//	value: 4-D with shape `[batch, height, width, channels]`.
//	pooling_ratio: Pooling ratio for each dimension of `value`, currently only
// supports the row and col dimensions and should be >= 1.0. For example, a valid
// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
// must be 1.0 because we don't allow pooling on the batch and channels
// dimensions. 1.44 and 1.73 are the pooling ratios on the height and width
// dimensions respectively.
//
// Returns:
//	output: output tensor after fractional max pooling.
//	row_pooling_sequence: row pooling sequence, needed to calculate gradient.
//	col_pooling_sequence: column pooling sequence, needed to calculate gradient.
func FractionalMaxPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalMaxPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"pooling_ratio": pooling_ratio}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FractionalMaxPool",
		Input: []tf.Input{
			value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
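
// The example below is an editor-added usage sketch, not machine-generated
// code. It assumes Const and NewScope from this package and shows how the
// optional FractionalMaxPool*Attr setters above are threaded through the
// variadic parameter.
func exampleFractionalMaxPool() (*tf.Graph, error) {
	s := NewScope()
	// A 1x4x4x1 input: one image, 4x4 spatial extent, one channel.
	value := Const(s, [][][][]float32{{
		{{1}, {2}, {3}, {4}},
		{{5}, {6}, {7}, {8}},
		{{9}, {10}, {11}, {12}},
		{{13}, {14}, {15}, {16}},
	}})
	// Pool only over height and width; batch and channel ratios must be 1.0.
	out, rows, cols := FractionalMaxPool(s, value, []float32{1.0, 1.44, 1.73, 1.0},
		FractionalMaxPoolOverlapping(true),
		FractionalMaxPoolDeterministic(true),
		FractionalMaxPoolSeed(7))
	_, _, _ = out, rows, cols
	return s.Finalize()
}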

// Computes the reciprocal of x element-wise.
//
// I.e., \\(y = 1 / x\\).
func Reciprocal(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Reciprocal",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the number of work units this Reader has finished processing.
//
// Arguments:
//	reader_handle: Handle to a Reader.
func ReaderNumWorkUnitsCompletedV2(scope *Scope, reader_handle tf.Output) (units_completed tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReaderNumWorkUnitsCompletedV2",
		Input: []tf.Input{
			reader_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the next record (key, value pair) produced by a Reader.
//
// Will dequeue from the input queue if necessary (e.g. when the
// Reader needs to start reading from a new file since it has finished
// with the previous file).
//
// Arguments:
//	reader_handle: Handle to a Reader.
//	queue_handle: Handle to a Queue, with string work items.
//
// Returns:
//	key: A scalar.
//	value: A scalar.
func ReaderReadV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output) (key tf.Output, value tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReaderReadV2",
		Input: []tf.Input{
			reader_handle, queue_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Computes square of x element-wise.
//
// I.e., \\(y = x * x = x^2\\).
func Square(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Square",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Return a slice from 'input'.
//
// The output tensor is a tensor with dimensions described by 'size'
// whose values are extracted from 'input' starting at the offsets in
// 'begin'.
//
// *Requirements*:
//   0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)
//
// Arguments:
//
//	begin: begin[i] specifies the offset into the 'i'th dimension of
// 'input' to slice from.
//	size: size[i] specifies the number of elements of the 'i'th dimension
// of 'input' to slice. If size[i] is -1, all remaining elements in dimension
// i are included in the slice (i.e. this is equivalent to setting
// size[i] = input.dim_size(i) - begin[i]).
func Slice(scope *Scope, input tf.Output, begin tf.Output, size tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Slice",
		Input: []tf.Input{
			input, begin, size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
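
// The example below is an editor-added sketch, not machine-generated code. It
// exercises the begin/size convention documented above, including the
// size[i] = -1 shorthand, and assumes the standard tensorflow/go session API.
func exampleSlice() ([]*tf.Tensor, error) {
	s := NewScope()
	input := Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
	begin := Const(s, []int32{0, 1}) // start at row 0, column 1
	size := Const(s, []int32{-1, 2}) // all remaining rows, two columns
	out := Slice(s, input, begin, size)
	graph, err := s.Finalize()
	if err != nil {
		return nil, err
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, err
	}
	defer sess.Close()
	// Fetches [[2 3] [5 6]].
	return sess.Run(nil, []tf.Output{out}, nil)
}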

// TFRecordReaderV2Attr is an optional argument to TFRecordReaderV2.
type TFRecordReaderV2Attr func(optionalAttr)

// TFRecordReaderV2Container sets the optional container attribute to value.
//
// value: If non-empty, this reader is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func TFRecordReaderV2Container(value string) TFRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// TFRecordReaderV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this reader is named in the given bucket
// with this shared_name. Otherwise, the node name is used instead.
// If not specified, defaults to ""
func TFRecordReaderV2SharedName(value string) TFRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// TFRecordReaderV2CompressionType sets the optional compression_type attribute to value.
// If not specified, defaults to ""
func TFRecordReaderV2CompressionType(value string) TFRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["compression_type"] = value
	}
}

// A Reader that outputs the records from a TensorFlow Records file.
//
// Returns The handle to reference the Reader.
func TFRecordReaderV2(scope *Scope, optional ...TFRecordReaderV2Attr) (reader_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TFRecordReaderV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// IdentityReaderV2Attr is an optional argument to IdentityReaderV2.
type IdentityReaderV2Attr func(optionalAttr)

// IdentityReaderV2Container sets the optional container attribute to value.
//
// value: If non-empty, this reader is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func IdentityReaderV2Container(value string) IdentityReaderV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// IdentityReaderV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this reader is named in the given bucket
// with this shared_name. Otherwise, the node name is used instead.
// If not specified, defaults to ""
func IdentityReaderV2SharedName(value string) IdentityReaderV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// A Reader that outputs the queued work as both the key and value.
//
// To use, enqueue strings in a Queue.  ReaderRead will take the front
// work string and output (work, work).
//
// Returns The handle to reference the Reader.
func IdentityReaderV2(scope *Scope, optional ...IdentityReaderV2Attr) (reader_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "IdentityReaderV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
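
// The example below is an editor-added sketch, not machine-generated code. It
// wires a string queue to a reader via ReaderReadV2 as described above, and
// assumes the FIFOQueueV2 and QueueEnqueueV2 wrappers defined elsewhere in
// this package. Running it would require executing the enqueue operation
// before fetching key/value.
func exampleIdentityReader() (*tf.Graph, error) {
	s := NewScope()
	queue := FIFOQueueV2(s, []tf.DataType{tf.String})
	enqueue := QueueEnqueueV2(s, queue, []tf.Output{Const(s, "unit-of-work")})
	reader := IdentityReaderV2(s)
	key, value := ReaderReadV2(s, reader, queue) // both will be "unit-of-work"
	_, _, _ = enqueue, key, value
	return s.Finalize()
}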

// ParseExampleDatasetAttr is an optional argument to ParseExampleDataset.
type ParseExampleDatasetAttr func(optionalAttr)

// ParseExampleDatasetSloppy sets the optional sloppy attribute to value.
// If not specified, defaults to false
func ParseExampleDatasetSloppy(value bool) ParseExampleDatasetAttr {
	return func(m optionalAttr) {
		m["sloppy"] = value
	}
}

// ParseExampleDatasetRaggedKeys sets the optional ragged_keys attribute to value.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseExampleDatasetRaggedKeys(value []string) ParseExampleDatasetAttr {
	return func(m optionalAttr) {
		m["ragged_keys"] = value
	}
}

// ParseExampleDatasetRaggedValueTypes sets the optional ragged_value_types attribute to value.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseExampleDatasetRaggedValueTypes(value []tf.DataType) ParseExampleDatasetAttr {
	return func(m optionalAttr) {
		m["ragged_value_types"] = value
	}
}

// ParseExampleDatasetRaggedSplitTypes sets the optional ragged_split_types attribute to value.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseExampleDatasetRaggedSplitTypes(value []tf.DataType) ParseExampleDatasetAttr {
	return func(m optionalAttr) {
		m["ragged_split_types"] = value
	}
}

// Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.
//
// Arguments:
//
//
//	dense_defaults: A dict mapping string keys to `Tensor`s.
// The keys of the dict must match the dense_keys of the feature.
//	sparse_keys: A list of string keys in the examples features.
// The results for these keys will be returned as `SparseTensor` objects.
//	dense_keys: A list of Ndense string Tensors (scalars).
// The keys expected in the Examples features associated with dense values.
//	sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
// and `tf.string` (`BytesList`) are supported.
//	dense_shapes: List of tuples with the same length as `dense_keys`.
// The shape of the data for each dense feature referenced by `dense_keys`.
// Required for any input tensors identified by `dense_keys`.  Must be
// either fully defined, or may contain an unknown first dimension.
// An unknown first dimension means the feature is treated as having
// a variable number of blocks, and the output shape along this dimension
// is considered unknown at graph build time.  Padding is applied for
// minibatch elements smaller than the maximum number of blocks for the
// given feature along this dimension.
//	output_types: The type list for the return values.
//	output_shapes: The list of shapes being produced.
func ParseExampleDataset(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, dense_defaults []tf.Output, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ParseExampleDatasetAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes, "output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ParseExampleDataset",
		Input: []tf.Input{
			input_dataset, num_parallel_calls, tf.OutputList(dense_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// FixedLengthRecordReaderV2Attr is an optional argument to FixedLengthRecordReaderV2.
type FixedLengthRecordReaderV2Attr func(optionalAttr)

// FixedLengthRecordReaderV2HeaderBytes sets the optional header_bytes attribute to value.
//
// value: Number of bytes in the header, defaults to 0.
// If not specified, defaults to 0
func FixedLengthRecordReaderV2HeaderBytes(value int64) FixedLengthRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["header_bytes"] = value
	}
}

// FixedLengthRecordReaderV2FooterBytes sets the optional footer_bytes attribute to value.
//
// value: Number of bytes in the footer, defaults to 0.
// If not specified, defaults to 0
func FixedLengthRecordReaderV2FooterBytes(value int64) FixedLengthRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["footer_bytes"] = value
	}
}

// FixedLengthRecordReaderV2HopBytes sets the optional hop_bytes attribute to value.
//
// value: Number of bytes to hop before each read. Default of 0 means using
// record_bytes.
// If not specified, defaults to 0
func FixedLengthRecordReaderV2HopBytes(value int64) FixedLengthRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["hop_bytes"] = value
	}
}

// FixedLengthRecordReaderV2Container sets the optional container attribute to value.
//
// value: If non-empty, this reader is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func FixedLengthRecordReaderV2Container(value string) FixedLengthRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// FixedLengthRecordReaderV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this reader is named in the given bucket
// with this shared_name. Otherwise, the node name is used instead.
// If not specified, defaults to ""
func FixedLengthRecordReaderV2SharedName(value string) FixedLengthRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// FixedLengthRecordReaderV2Encoding sets the optional encoding attribute to value.
//
// value: The type of encoding for the file. Currently ZLIB and GZIP
// are supported. Defaults to none.
// If not specified, defaults to ""
func FixedLengthRecordReaderV2Encoding(value string) FixedLengthRecordReaderV2Attr {
	return func(m optionalAttr) {
		m["encoding"] = value
	}
}

// A Reader that outputs fixed-length records from a file.
//
// Arguments:
//	record_bytes: Number of bytes in the record.
//
// Returns The handle to reference the Reader.
func FixedLengthRecordReaderV2(scope *Scope, record_bytes int64, optional ...FixedLengthRecordReaderV2Attr) (reader_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"record_bytes": record_bytes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FixedLengthRecordReaderV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
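
// The example below is an editor-added sketch, not machine-generated code. It
// shows how the byte-layout attributes above combine for a hypothetical file:
// header_bytes is skipped once, each read returns record_bytes bytes, and
// footer_bytes at the end of the file is ignored.
func exampleFixedLengthReader() tf.Output {
	s := NewScope()
	return FixedLengthRecordReaderV2(s, 16, // each record is 16 bytes
		FixedLengthRecordReaderV2HeaderBytes(4),
		FixedLengthRecordReaderV2FooterBytes(4),
		FixedLengthRecordReaderV2HopBytes(0)) // 0: advance by record_bytes
}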

// Saves input tensor slices to disk.
//
// This is like `Save` except that tensors can be listed in the saved file as being
// a slice of a larger tensor.  `shapes_and_slices` specifies the shape of the
// larger tensor and the slice that this tensor covers. `shapes_and_slices` must
// have as many elements as `tensor_names`.
//
// Elements of the `shapes_and_slices` input must either be:
//
// *  The empty string, in which case the corresponding tensor is
//    saved normally.
// *  A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
//    `dimI` are the dimensions of the larger tensor and `slice-spec`
//    specifies what part is covered by the tensor to save.
//
// `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
// where each `sliceI` is either:
//
// *  The string `-` meaning that the slice covers all indices of this dimension
// *  `start,length` where `start` and `length` are integers.  In that
//    case the slice covers `length` indices starting at `start`.
//
// See also `Save`.
//
// Arguments:
//	filename: Must have a single element. The name of the file to which we write the
// tensor.
//	tensor_names: Shape `[N]`. The names of the tensors to be saved.
//	shapes_and_slices: Shape `[N]`.  The shapes and slice specifications to use when
// saving the tensors.
//	data: `N` tensors to save.
//
// Returns the created operation.
func SaveSlices(scope *Scope, filename tf.Output, tensor_names tf.Output, shapes_and_slices tf.Output, data []tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SaveSlices",
		Input: []tf.Input{
			filename, tensor_names, shapes_and_slices, tf.OutputList(data),
		},
	}
	return scope.AddOperation(opspec)
}
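
// The example below is an editor-added sketch, not machine-generated code,
// illustrating the `shapes_and_slices` string format documented above. The
// variable name and file path are hypothetical: the saved tensor covers rows
// 0-4 of a 10x20 tensor, i.e. the spec "10 20 0,5:-".
func exampleSaveSlices(s *Scope, data tf.Output) *tf.Operation {
	filename := Const(s, "/tmp/slices.ckpt")
	names := Const(s, []string{"weights"})
	// Dims "10 20"; slice "0,5" (5 rows from row 0) of dim 0, "-" (all) of dim 1.
	specs := Const(s, []string{"10 20 0,5:-"})
	return SaveSlices(s, filename, names, specs, []tf.Output{data})
}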

// Saves the input tensors to disk.
//
// The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
// is written to `filename` with name `tensor_names[i]`.
//
// See also `SaveSlices`.
//
// Arguments:
//	filename: Must have a single element. The name of the file to which we write
// the tensor.
//	tensor_names: Shape `[N]`. The names of the tensors to be saved.
//	data: `N` tensors to save.
//
// Returns the created operation.
func Save(scope *Scope, filename tf.Output, tensor_names tf.Output, data []tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Save",
		Input: []tf.Input{
			filename, tensor_names, tf.OutputList(data),
		},
	}
	return scope.AddOperation(opspec)
}

// MergeV2CheckpointsAttr is an optional argument to MergeV2Checkpoints.
type MergeV2CheckpointsAttr func(optionalAttr)

// MergeV2CheckpointsDeleteOldDirs sets the optional delete_old_dirs attribute to value.
//
// value: see above.
// If not specified, defaults to true
func MergeV2CheckpointsDeleteOldDirs(value bool) MergeV2CheckpointsAttr {
	return func(m optionalAttr) {
		m["delete_old_dirs"] = value
	}
}

// V2 format specific: merges the metadata files of sharded checkpoints.  The
//
// result is one logical checkpoint, with one physical metadata file and renamed
// data files.
//
// Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
//
// If delete_old_dirs is true, attempts to recursively delete the dirname of each
// path in the input checkpoint_prefixes.  This is useful when those paths are
// non-user-facing temporary locations.
//
// Arguments:
//	checkpoint_prefixes: prefixes of V2 checkpoints to merge.
//	destination_prefix: scalar.  The desired final prefix.  Allowed to be the same
// as one of the checkpoint_prefixes.
//
// Returns the created operation.
func MergeV2Checkpoints(scope *Scope, checkpoint_prefixes tf.Output, destination_prefix tf.Output, optional ...MergeV2CheckpointsAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MergeV2Checkpoints",
		Input: []tf.Input{
			checkpoint_prefixes, destination_prefix,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// LoadTPUEmbeddingMomentumParametersAttr is an optional argument to LoadTPUEmbeddingMomentumParameters.
type LoadTPUEmbeddingMomentumParametersAttr func(optionalAttr)

// LoadTPUEmbeddingMomentumParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingMomentumParametersTableId(value int64) LoadTPUEmbeddingMomentumParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// LoadTPUEmbeddingMomentumParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingMomentumParametersTableName(value string) LoadTPUEmbeddingMomentumParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// LoadTPUEmbeddingMomentumParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingMomentumParametersConfig(value string) LoadTPUEmbeddingMomentumParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Load Momentum embedding parameters.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
//	parameters: Value of parameters used in the Momentum optimization algorithm.
//	momenta: Value of momenta used in the Momentum optimization algorithm.
//
//
//
// Returns the created operation.
func LoadTPUEmbeddingMomentumParameters(scope *Scope, parameters tf.Output, momenta tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingMomentumParametersAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadTPUEmbeddingMomentumParameters",
		Input: []tf.Input{
			parameters, momenta,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// ImageProjectiveTransformV3Attr is an optional argument to ImageProjectiveTransformV3.
type ImageProjectiveTransformV3Attr func(optionalAttr)

// ImageProjectiveTransformV3FillMode sets the optional fill_mode attribute to value.
//
// value: Fill mode, "REFLECT", "WRAP", "CONSTANT", or "NEAREST".
// If not specified, defaults to "CONSTANT"
func ImageProjectiveTransformV3FillMode(value string) ImageProjectiveTransformV3Attr {
	return func(m optionalAttr) {
		m["fill_mode"] = value
	}
}

// Applies the given transform to each of the images.
//
// If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps
// the *output* point `(x, y)` to a transformed *input* point
// `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
// `k = c0 x + c1 y + 1`. If the transformed point lies outside of the input
// image, the output pixel is set to fill_value.
//
// Arguments:
//	images: 4-D with shape `[batch, height, width, channels]`.
//	transforms: 2-D Tensor, `[batch, 8]` or `[1, 8]` matrix, where each row corresponds to a 3 x 3
// projective transformation matrix, with the last entry assumed to be 1. If there
// is one row, the same transformation will be applied to all images.
//	output_shape: 1-D Tensor [new_height, new_width].
//	fill_value: float, the value to be filled when fill_mode is "CONSTANT".
//	interpolation: Interpolation method, "NEAREST" or "BILINEAR".
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
func ImageProjectiveTransformV3(scope *Scope, images tf.Output, transforms tf.Output, output_shape tf.Output, fill_value tf.Output, interpolation string, optional ...ImageProjectiveTransformV3Attr) (transformed_images tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"interpolation": interpolation}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ImageProjectiveTransformV3",
		Input: []tf.Input{
			images, transforms, output_shape, fill_value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
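
// The example below is an editor-added sketch, not machine-generated code.
// Per the mapping documented above, a row [1, 0, dx, 0, 1, dy, 0, 0] sends the
// output point (x, y) to the input point (x+dx, y+dy); with dx = -2, dy = -3
// the image content shifts right by 2 and down by 3 pixels. The output size
// is hypothetical.
func exampleProjectiveTransform(s *Scope, images tf.Output) tf.Output {
	transforms := Const(s, [][]float32{{1, 0, -2, 0, 1, -3, 0, 0}})
	outputShape := Const(s, []int32{64, 64})
	fillValue := Const(s, float32(0))
	return ImageProjectiveTransformV3(s, images, transforms, outputShape, fillValue,
		"BILINEAR", ImageProjectiveTransformV3FillMode("CONSTANT"))
}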

// Returns the truth value of x AND y element-wise.
//
// *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func LogicalAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LogicalAnd",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// CollectiveBcastSendAttr is an optional argument to CollectiveBcastSend.
type CollectiveBcastSendAttr func(optionalAttr)

// CollectiveBcastSendCommunicationHint sets the optional communication_hint attribute to value.
// If not specified, defaults to "auto"
func CollectiveBcastSendCommunicationHint(value string) CollectiveBcastSendAttr {
	return func(m optionalAttr) {
		m["communication_hint"] = value
	}
}

// CollectiveBcastSendTimeoutSeconds sets the optional timeout_seconds attribute to value.
// If not specified, defaults to 0
func CollectiveBcastSendTimeoutSeconds(value float32) CollectiveBcastSendAttr {
	return func(m optionalAttr) {
		m["timeout_seconds"] = value
	}
}

// Broadcasts a tensor value to one or more other devices.
func CollectiveBcastSend(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, shape tf.Shape, optional ...CollectiveBcastSendAttr) (data tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"group_size": group_size, "group_key": group_key, "instance_key": instance_key, "shape": shape}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CollectiveBcastSend",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// CombinedNonMaxSuppressionAttr is an optional argument to CombinedNonMaxSuppression.
type CombinedNonMaxSuppressionAttr func(optionalAttr)

// CombinedNonMaxSuppressionPadPerClass sets the optional pad_per_class attribute to value.
//
// value: If false, the output nmsed boxes, scores and classes
// are padded/clipped to `max_total_size`. If true, the
// output nmsed boxes, scores and classes are padded to be of length
// `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in
// which case it is clipped to `max_total_size`. Defaults to false.
// If not specified, defaults to false
func CombinedNonMaxSuppressionPadPerClass(value bool) CombinedNonMaxSuppressionAttr {
	return func(m optionalAttr) {
		m["pad_per_class"] = value
	}
}

// CombinedNonMaxSuppressionClipBoxes sets the optional clip_boxes attribute to value.
//
// value: If true, assume the box coordinates are between [0, 1] and clip the output boxes
// if they fall beyond [0, 1]. If false, do not do clipping and output the box
// coordinates as they are.
// If not specified, defaults to true
func CombinedNonMaxSuppressionClipBoxes(value bool) CombinedNonMaxSuppressionAttr {
	return func(m optionalAttr) {
		m["clip_boxes"] = value
	}
}

// Greedily selects a subset of bounding boxes in descending order of score,
//
// This operation performs non_max_suppression on the inputs per batch, across
// all classes.
// Prunes away boxes that have high intersection-over-union (IOU) overlap
// with previously selected boxes.  Bounding boxes are supplied as
// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
// diagonal pair of box corners and the coordinates can be provided as normalized
// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
// is agnostic to where the origin is in the coordinate system. Also note that
// this algorithm is invariant to orthogonal transformations and translations
// of the coordinate system; thus translating or reflections of the coordinate
// system result in the same boxes being selected by the algorithm.
// The output of this operation is the final boxes, scores and classes tensor
// returned after performing non_max_suppression.
//
// Arguments:
//	boxes: A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 then
// the same boxes are used for all classes; otherwise, if `q` is equal to the number
// of classes, class-specific boxes are used.
//	scores: A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]`
// representing a single score corresponding to each box (each row of boxes).
//	max_output_size_per_class: A scalar integer tensor representing the maximum number of
// boxes to be selected by non max suppression per class
//	max_total_size: An int32 scalar representing the maximum number of boxes retained over all
// classes. Note that setting this value to a large number may result in an OOM error
// depending on the system workload.
//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
// boxes overlap too much with respect to IOU.
//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
// boxes based on score.
//
// Returns:
//	nmsed_boxes: A [batch_size, max_detections, 4] float32 tensor
// containing the non-max suppressed boxes.
//	nmsed_scores: A [batch_size, max_detections] float32 tensor
// containing the scores for the boxes.
//	nmsed_classes: A [batch_size, max_detections] float32 tensor
// containing the classes for the boxes.
//	valid_detections: A [batch_size] int32 tensor indicating the number of
// valid detections per batch item. Only the top num_detections[i] entries in
// nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the
// entries are zero paddings.
func CombinedNonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size_per_class tf.Output, max_total_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, optional ...CombinedNonMaxSuppressionAttr) (nmsed_boxes tf.Output, nmsed_scores tf.Output, nmsed_classes tf.Output, valid_detections tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CombinedNonMaxSuppression",
		Input: []tf.Input{
			boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// Greedily selects a subset of bounding boxes in descending order of score,
//
// pruning away boxes that have high intersection-over-union (IOU) overlap
// with previously selected boxes.  Bounding boxes are supplied as
// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
// diagonal pair of box corners and the coordinates can be provided as normalized
// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
// is agnostic to where the origin is in the coordinate system.  Note that this
// algorithm is invariant to orthogonal transformations and translations
// of the coordinate system; thus translating or reflections of the coordinate
// system result in the same boxes being selected by the algorithm.
//
// The output of this operation is a set of integers indexing into the input
// collection of bounding boxes representing the selected boxes.  The bounding
// box coordinates corresponding to the selected indices can then be obtained
// using the `tf.gather` operation.  For example:
//
//   selected_indices = tf.image.non_max_suppression_v2(
//       boxes, scores, max_output_size, iou_threshold)
//   selected_boxes = tf.gather(boxes, selected_indices)
//
// Arguments:
//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
// score corresponding to each box (each row of boxes).
//	max_output_size: A scalar integer tensor representing the maximum number of
// boxes to be selected by non max suppression.
//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
// boxes overlap too much with respect to IOU.
//
// Returns A 1-D integer tensor of shape `[M]` representing the selected
// indices from the boxes tensor, where `M <= max_output_size`.
func NonMaxSuppressionV2(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output) (selected_indices tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "NonMaxSuppressionV2",
		Input: []tf.Input{
			boxes, scores, max_output_size, iou_threshold,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
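
// The example below is an editor-added Go transliteration of the Python
// snippet in the comment above; it is not machine-generated code and uses the
// Gather wrapper defined elsewhere in this package to collect the selected
// boxes. The threshold values are illustrative.
func exampleNonMaxSuppression(s *Scope, boxes, scores tf.Output) tf.Output {
	maxOutputSize := Const(s, int32(10))
	iouThreshold := Const(s, float32(0.5))
	selected := NonMaxSuppressionV2(s, boxes, scores, maxOutputSize, iouThreshold)
	return Gather(s, boxes, selected)
}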

// Removes keys and their associated values from a table.
//
// The tensor `keys` must be of the same type as the keys of the table. Keys not
// already in the table are silently ignored.
//
// Arguments:
//	table_handle: Handle to the table.
//	keys: Any shape.  Keys of the elements to remove.
//
// Returns the created operation.
func LookupTableRemoveV2(scope *Scope, table_handle tf.Output, keys tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LookupTableRemoveV2",
		Input: []tf.Input{
			table_handle, keys,
		},
	}
	return scope.AddOperation(opspec)
}

// NonMaxSuppressionAttr is an optional argument to NonMaxSuppression.
type NonMaxSuppressionAttr func(optionalAttr)

// NonMaxSuppressionIouThreshold sets the optional iou_threshold attribute to value.
//
// value: A float representing the threshold for deciding whether boxes
// overlap too much with respect to IOU.
// If not specified, defaults to 0.5
func NonMaxSuppressionIouThreshold(value float32) NonMaxSuppressionAttr {
	return func(m optionalAttr) {
		m["iou_threshold"] = value
	}
}

// Greedily selects a subset of bounding boxes in descending order of score,
//
// pruning away boxes that have high intersection-over-union (IOU) overlap
// with previously selected boxes.  Bounding boxes are supplied as
// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
// diagonal pair of box corners and the coordinates can be provided as normalized
// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
// is agnostic to where the origin is in the coordinate system.  Note that this
// algorithm is invariant to orthogonal transformations and translations
// of the coordinate system; thus translating or reflections of the coordinate
// system result in the same boxes being selected by the algorithm.
// The output of this operation is a set of integers indexing into the input
// collection of bounding boxes representing the selected boxes.  The bounding
// box coordinates corresponding to the selected indices can then be obtained
// using the `tf.gather` operation.  For example:
//   selected_indices = tf.image.non_max_suppression(
//       boxes, scores, max_output_size, iou_threshold)
//   selected_boxes = tf.gather(boxes, selected_indices)
//
// Arguments:
//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
// score corresponding to each box (each row of boxes).
//	max_output_size: A scalar integer tensor representing the maximum number of
// boxes to be selected by non max suppression.
//
// Returns A 1-D integer tensor of shape `[M]` representing the selected
// indices from the boxes tensor, where `M <= max_output_size`.
func NonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, optional ...NonMaxSuppressionAttr) (selected_indices tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "NonMaxSuppression",
		Input: []tf.Input{
			boxes, scores, max_output_size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes hyperbolic tangent of `x` element-wise.
//
//   Given an input tensor, this function computes hyperbolic tangent of every
//   element in the tensor. Input range is `[-inf, inf]` and
//   output range is `[-1,1]`.
//
//   >>> x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")])
//   >>> tf.math.tanh(x)
//   <tf.Tensor: shape=(8,), dtype=float32, numpy=
//   array([-1.        , -0.99990916, -0.46211717,  0.7615942 ,  0.8336547 ,
//           0.9640276 ,  0.9950547 ,  1.        ], dtype=float32)>
//
func Tanh(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Tanh",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// OutfeedDequeueAttr is an optional argument to OutfeedDequeue.
type OutfeedDequeueAttr func(optionalAttr)

// OutfeedDequeueDeviceOrdinal sets the optional device_ordinal attribute to value.
//
// value: The TPU device to use. This should be -1 when the Op
// is running on a TPU device, and >= 0 when the Op is running on the CPU
// device.
// If not specified, defaults to -1
func OutfeedDequeueDeviceOrdinal(value int64) OutfeedDequeueAttr {
	return func(m optionalAttr) {
		m["device_ordinal"] = value
	}
}

// Retrieves a single tensor from the computation outfeed.
//
// This operation will block indefinitely until data is available.
//
// Arguments:
//	dtype: The type of elements in the tensor.
//	shape: The shape of the tensor.
//
// Returns A tensor that will be read from the device outfeed.
func OutfeedDequeue(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...OutfeedDequeueAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OutfeedDequeue",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// CropAndResizeGradImageAttr is an optional argument to CropAndResizeGradImage.
type CropAndResizeGradImageAttr func(optionalAttr)

// CropAndResizeGradImageMethod sets the optional method attribute to value.
//
// value: A string specifying the interpolation method. Only 'bilinear' is
// supported for now.
// If not specified, defaults to "bilinear"
func CropAndResizeGradImageMethod(value string) CropAndResizeGradImageAttr {
	return func(m optionalAttr) {
		m["method"] = value
	}
}

// Computes the gradient of the crop_and_resize op wrt the input image tensor.
//
// Arguments:
//	grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
//	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
// specifies the coordinates of a box in the `box_ind[i]` image and is specified
// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
// `[0, 1]` interval of normalized image height is mapped to
// `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
// which case the sampled crop is an up-down flipped version of the original
// image. The width dimension is treated similarly. Normalized coordinates
// outside the `[0, 1]` range are allowed, in which case we use
// `extrapolation_value` to extrapolate the input image values.
//	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
//	image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]`
// containing the original image size. Both `image_height` and `image_width` need
// to be positive.
//
//
// Returns A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
func CropAndResizeGradImage(scope *Scope, grads tf.Output, boxes tf.Output, box_ind tf.Output, image_size tf.Output, T tf.DataType, optional ...CropAndResizeGradImageAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"T": T}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CropAndResizeGradImage",
		Input: []tf.Input{
			grads, boxes, box_ind, image_size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ExtractGlimpseV2Attr is an optional argument to ExtractGlimpseV2.
type ExtractGlimpseV2Attr func(optionalAttr)

// ExtractGlimpseV2Centered sets the optional centered attribute to value.
//
// value: indicates if the offset coordinates are centered relative to
// the image, in which case the (0, 0) offset is relative to the center
// of the input images. If false, the (0, 0) offset corresponds to the
// upper left corner of the input images.
// If not specified, defaults to true
func ExtractGlimpseV2Centered(value bool) ExtractGlimpseV2Attr {
	return func(m optionalAttr) {
		m["centered"] = value
	}
}

// ExtractGlimpseV2Normalized sets the optional normalized attribute to value.
//
// value: indicates if the offset coordinates are normalized.
// If not specified, defaults to true
func ExtractGlimpseV2Normalized(value bool) ExtractGlimpseV2Attr {
	return func(m optionalAttr) {
		m["normalized"] = value
	}
}

// ExtractGlimpseV2UniformNoise sets the optional uniform_noise attribute to value.
//
// value: indicates if the noise should be generated using a
// uniform distribution or a Gaussian distribution.
// If not specified, defaults to true
func ExtractGlimpseV2UniformNoise(value bool) ExtractGlimpseV2Attr {
	return func(m optionalAttr) {
		m["uniform_noise"] = value
	}
}

// ExtractGlimpseV2Noise sets the optional noise attribute to value.
//
// value: indicates if the noise should be `uniform`, `gaussian`, or
// `zero`. The default is `uniform`, which means the noise type
// will be decided by `uniform_noise`.
// If not specified, defaults to "uniform"
func ExtractGlimpseV2Noise(value string) ExtractGlimpseV2Attr {
	return func(m optionalAttr) {
		m["noise"] = value
	}
}

// Extracts a glimpse from the input tensor.
//
// Returns a set of windows called glimpses extracted at location
// `offsets` from the input tensor. If the windows only partially
// overlap the inputs, the non-overlapping areas will be filled with
// random noise.
//
// The result is a 4-D tensor of shape `[batch_size, glimpse_height,
// glimpse_width, channels]`. The channels and batch dimensions are the
// same as that of the input tensor. The height and width of the output
// windows are specified in the `size` parameter.
//
// The arguments `normalized` and `centered` control how the windows are built:
//
// * If the coordinates are normalized but not centered, 0.0 and 1.0
//   correspond to the minimum and maximum of each height and width
//   dimension.
// * If the coordinates are both normalized and centered, they range from
//   -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
//   left corner, the lower right corner is located at (1.0, 1.0) and the
//   center is at (0, 0).
// * If the coordinates are not normalized they are interpreted as
//   numbers of pixels.
//
// Arguments:
//	input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
//	size: A 1-D tensor of 2 elements containing the size of the glimpses
// to extract.  The glimpse height must be specified first, followed
// by the glimpse width.
//	offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing
// the y, x locations of the center of each window.
//
// Returns A tensor representing the glimpses `[batch_size,
// glimpse_height, glimpse_width, channels]`.
func ExtractGlimpseV2(scope *Scope, input tf.Output, size tf.Output, offsets tf.Output, optional ...ExtractGlimpseV2Attr) (glimpse tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ExtractGlimpseV2",
		Input: []tf.Input{
			input, size, offsets,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
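
// The example below is an editor-added sketch, not machine-generated code.
// With the default centered and normalized offsets, a (0, 0) offset extracts
// a window around the image center; the glimpse size here is hypothetical.
func exampleExtractGlimpse(s *Scope, images tf.Output) tf.Output {
	size := Const(s, []int32{32, 32})        // glimpse height, then width
	offsets := Const(s, [][]float32{{0, 0}}) // one (y, x) pair per batch element
	return ExtractGlimpseV2(s, images, size, offsets,
		ExtractGlimpseV2Centered(true),
		ExtractGlimpseV2Normalized(true))
}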

// StatelessSampleDistortedBoundingBoxAttr is an optional argument to StatelessSampleDistortedBoundingBox.
type StatelessSampleDistortedBoundingBoxAttr func(optionalAttr)

// StatelessSampleDistortedBoundingBoxAspectRatioRange sets the optional aspect_ratio_range attribute to value.
//
// value: The cropped area of the image must have an aspect ratio =
// width / height within this range.
// If not specified, defaults to {f:0.75 f:1.33}
func StatelessSampleDistortedBoundingBoxAspectRatioRange(value []float32) StatelessSampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["aspect_ratio_range"] = value
	}
}

// StatelessSampleDistortedBoundingBoxAreaRange sets the optional area_range attribute to value.
//
// value: The cropped area of the image must contain a fraction of the
// supplied image within this range.
// If not specified, defaults to {f:0.05 f:1}
func StatelessSampleDistortedBoundingBoxAreaRange(value []float32) StatelessSampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["area_range"] = value
	}
}

// StatelessSampleDistortedBoundingBoxMaxAttempts sets the optional max_attempts attribute to value.
//
// value: Number of attempts at generating a cropped region of the image
// with the specified constraints. After `max_attempts` failures, return the entire
// image.
// If not specified, defaults to 100
func StatelessSampleDistortedBoundingBoxMaxAttempts(value int64) StatelessSampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["max_attempts"] = value
	}
}

// StatelessSampleDistortedBoundingBoxUseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
//
// value: Controls behavior if no bounding boxes are supplied.
// If true, assume an implicit bounding box covering the whole input. If false,
// raise an error.
// If not specified, defaults to false
func StatelessSampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(value bool) StatelessSampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["use_image_if_no_bounding_boxes"] = value
	}
}

// Generate a randomly distorted bounding box for an image deterministically.
//
// Bounding box annotations are often supplied in addition to ground-truth labels
// in image recognition or object localization tasks. A common technique for
// training such a system is to randomly distort an image while preserving its
// content, i.e. *data augmentation*. This Op, given the same `seed`,
// deterministically outputs a randomly distorted localization of an object, i.e.
// bounding box, given an `image_size`, `bounding_boxes` and a series of
// constraints.
//
// The output of this Op is a single bounding box that may be used to crop the
// original image. The output is returned as 3 tensors: `begin`, `size` and
// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
// what the bounding box looks like.
//
// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
// the height of the underlying image.
//
// The output of this Op is guaranteed to be the same given the same `seed` and is
// independent of how many times the function is called, and independent of global
// seed settings (e.g. `tf.random.set_seed`).
//
// Example usage:
//
// >>> image = np.array([[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]])
// >>> bbox = tf.constant(
// ...   [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
// >>> seed = (1, 2)
// >>> # Generate a single distorted bounding box.
// >>> bbox_begin, bbox_size, bbox_draw = (
// ...   tf.image.stateless_sample_distorted_bounding_box(
// ...     tf.shape(image), bounding_boxes=bbox, seed=seed))
// >>> # Employ the bounding box to distort the image.
// >>> tf.slice(image, bbox_begin, bbox_size)
// <tf.Tensor: shape=(2, 2, 1), dtype=int64, numpy=
// array([[[1],
//         [2]],
//        [[4],
//         [5]]])>
// >>> # Draw the bounding box in an image summary.
// >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
// >>> tf.image.draw_bounding_boxes(
// ...   tf.expand_dims(tf.cast(image, tf.float32),0), bbox_draw, colors)
// <tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy=
// array([[[[1.],
//          [1.],
//          [3.]],
//         [[1.],
//          [1.],
//          [6.]],
//         [[7.],
//          [8.],
//          [9.]]]], dtype=float32)>
//
// Note that if no bounding box information is available, setting
// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
// false and no bounding boxes are supplied, an error is raised.
//
// Arguments:
//	image_size: 1-D, containing `[height, width, channels]`.
//	bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
// associated with the image.
//	min_object_covered: The cropped area of the image must contain at least this
// fraction of any bounding box supplied. The value of this parameter should be
// non-negative. In the case of 0, the cropped area does not need to overlap
// any of the bounding boxes supplied.
//	seed: 1-D with shape `[2]`. The seed to the random number generator. Must have dtype
// `int32` or `int64`. (When using XLA, only `int32` is allowed.)
//
// Returns:
//	begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
// `tf.slice`.
//	size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
// `tf.slice`.
//	bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
// Provide as input to `tf.image.draw_bounding_boxes`.
func StatelessSampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, min_object_covered tf.Output, seed tf.Output, optional ...StatelessSampleDistortedBoundingBoxAttr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessSampleDistortedBoundingBox",
		Input: []tf.Input{
			image_size, bounding_boxes, min_object_covered, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
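
// The example below is an editor-added Go sketch mirroring the Python example
// above; it is not machine-generated code. It assumes the Shape and Slice
// wrappers from this package; the min_object_covered value is illustrative.
func exampleStatelessDistortedBox(s *Scope, image tf.Output) tf.Output {
	imageSize := Shape(s, image)
	bboxes := Const(s, [][][]float32{{{0.0, 0.0, 1.0, 1.0}}}) // shape [1, 1, 4]
	minCovered := Const(s, float32(0.1))
	seed := Const(s, []int32{1, 2})
	begin, size, _ := StatelessSampleDistortedBoundingBox(s, imageSize, bboxes, minCovered, seed)
	// begin/size feed straight into Slice to perform the crop.
	return Slice(s, image, begin, size)
}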

// AvgPoolAttr is an optional argument to AvgPool.
type AvgPoolAttr func(optionalAttr)

// AvgPoolDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, in_height, in_width, in_channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, in_channels, in_height, in_width].
// If not specified, defaults to "NHWC"
func AvgPoolDataFormat(value string) AvgPoolAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Performs average pooling on the input.
//
// Each entry in `output` is the mean of the corresponding size `ksize`
// window in `value`.
//
// Arguments:
//	value: 4-D with shape `[batch, height, width, channels]`.
//	ksize: The size of the sliding window for each dimension of `value`.
//	strides: The stride of the sliding window for each dimension of `value`.
//	padding: The type of padding algorithm to use.
//
// Returns The average pooled output tensor.
func AvgPool(scope *Scope, value tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AvgPool",
		Input: []tf.Input{
			value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
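
// The example below is an editor-added sketch, not machine-generated code:
// 2x2 mean pooling with stride 2 over an NHWC input, using SAME padding.
func exampleAvgPool(s *Scope, images tf.Output) tf.Output {
	return AvgPool(s, images,
		[]int64{1, 2, 2, 1}, // ksize: a 2x2 window over height and width
		[]int64{1, 2, 2, 1}, // strides: move the window by 2 in each spatial dim
		"SAME",
		AvgPoolDataFormat("NHWC"))
}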
20581
20582// DecodeImageAttr is an optional argument to DecodeImage.
20583type DecodeImageAttr func(optionalAttr)
20584
20585// DecodeImageChannels sets the optional channels attribute to value.
20586//
20587// value: Number of color channels for the decoded image.
20588// If not specified, defaults to 0
20589func DecodeImageChannels(value int64) DecodeImageAttr {
20590	return func(m optionalAttr) {
20591		m["channels"] = value
20592	}
20593}
20594
20595// DecodeImageDtype sets the optional dtype attribute to value.
20596//
20597// value: The desired DType of the returned Tensor.
20598// If not specified, defaults to DT_UINT8
20599func DecodeImageDtype(value tf.DataType) DecodeImageAttr {
20600	return func(m optionalAttr) {
20601		m["dtype"] = value
20602	}
20603}
20604
20605// DecodeImageExpandAnimations sets the optional expand_animations attribute to value.
20606//
20607// value: Controls the output shape of the returned op. If True, the returned op will
20608// produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all
// GIFs, whether animated or not. If False, the returned op will produce a 3-D
20610// tensor for all file types and will truncate animated GIFs to the first frame.
20611// If not specified, defaults to true
20612func DecodeImageExpandAnimations(value bool) DecodeImageAttr {
20613	return func(m optionalAttr) {
20614		m["expand_animations"] = value
20615	}
20616}
20617
20618// Function for decode_bmp, decode_gif, decode_jpeg, and decode_png.
20619//
20620// Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the
20621// appropriate operation to convert the input bytes string into a Tensor of type
20622// dtype.
20623//
20624// *NOTE*: decode_gif returns a 4-D array [num_frames, height, width, 3], as
20625// opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays
20626// [height, width, num_channels]. Make sure to take this into account when
20627// constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or
20628// PNG files. Alternately, set the expand_animations argument of this function to
20629// False, in which case the op will return 3-dimensional tensors and will truncate
20630// animated GIF files to the first frame.
20631//
20632// *NOTE*: If the first frame of an animated GIF does not occupy the entire
20633// canvas (maximum frame width x maximum frame height), then it fills the
20634// unoccupied areas (in the first frame) with zeros (black). For frames after the
// first frame that do not occupy the entire canvas, it uses the previous
20636// frame to fill the unoccupied areas.
20637//
20638// Arguments:
20639//	contents: 0-D. The encoded image bytes.
20640//
20641// Returns 3-D with shape `[height, width, channels]` or 4-D with shape
// `[frame, height, width, channels]`.
20643func DecodeImage(scope *Scope, contents tf.Output, optional ...DecodeImageAttr) (image tf.Output) {
20644	if scope.Err() != nil {
20645		return
20646	}
20647	attrs := map[string]interface{}{}
20648	for _, a := range optional {
20649		a(attrs)
20650	}
20651	opspec := tf.OpSpec{
20652		Type: "DecodeImage",
20653		Input: []tf.Input{
20654			contents,
20655		},
20656		Attrs: attrs,
20657	}
20658	op := scope.AddOperation(opspec)
20659	return op.Output(0)
20660}
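
// Illustrative usage sketch (not generated code): decoding encoded image
// bytes into a 3-D uint8 tensor, truncating animated GIFs to their first
// frame. The rawBytes parameter is a hypothetical placeholder for the
// encoded file contents.
func exampleDecodeImageUsage(rawBytes string) {
	s := NewScope()
	contents := Const(s, rawBytes) // 0-D string tensor holding the encoded bytes.
	image := DecodeImage(s, contents,
		DecodeImageChannels(3),
		DecodeImageExpandAnimations(false)) // always a 3-D [h, w, c] result
	_ = image // Feed into further image ops or fetch via a Session.
}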
20661
20662// AudioSummaryV2Attr is an optional argument to AudioSummaryV2.
20663type AudioSummaryV2Attr func(optionalAttr)
20664
20665// AudioSummaryV2MaxOutputs sets the optional max_outputs attribute to value.
20666//
20667// value: Max number of batch elements to generate audio for.
20668// If not specified, defaults to 3
20669//
20670// REQUIRES: value >= 1
20671func AudioSummaryV2MaxOutputs(value int64) AudioSummaryV2Attr {
20672	return func(m optionalAttr) {
20673		m["max_outputs"] = value
20674	}
20675}
20676
20677// Outputs a `Summary` protocol buffer with audio.
20678//
20679// The summary has up to `max_outputs` summary values containing audio. The
20680// audio is built from `tensor` which must be 3-D with shape `[batch_size,
20681// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
20682// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
20683//
20684// The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
20685// build the `tag` of the summary values:
20686//
20687// *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
20688// *  If `max_outputs` is greater than 1, the summary value tags are
20689//    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
20690//
20691// Arguments:
20692//	tag: Scalar. Used to build the `tag` attribute of the summary values.
20693//	tensor: 2-D of shape `[batch_size, frames]`.
20694//	sample_rate: The sample rate of the signal in hertz.
20695//
20696// Returns Scalar. Serialized `Summary` protocol buffer.
20697func AudioSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...AudioSummaryV2Attr) (summary tf.Output) {
20698	if scope.Err() != nil {
20699		return
20700	}
20701	attrs := map[string]interface{}{}
20702	for _, a := range optional {
20703		a(attrs)
20704	}
20705	opspec := tf.OpSpec{
20706		Type: "AudioSummaryV2",
20707		Input: []tf.Input{
20708			tag, tensor, sample_rate,
20709		},
20710		Attrs: attrs,
20711	}
20712	op := scope.AddOperation(opspec)
20713	return op.Output(0)
20714}
20715
20716// Draw bounding boxes on a batch of images.
20717//
20718// Outputs a copy of `images` but draws on top of the pixels zero or more bounding
20719// boxes specified by the locations in `boxes`. The coordinates of the each
20720// bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
20721// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
20722// height of the underlying image.
20723//
20724// For example, if an image is 100 x 200 pixels (height x width) and the bounding
20725// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
// the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
20727//
20728// Parts of the bounding box may fall outside the image.
20729//
20730// Arguments:
20731//	images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
20732//	boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
20733// boxes.
20734//	colors: 2-D. A list of RGBA colors to cycle through for the boxes.
20735//
20736// Returns 4-D with the same shape as `images`. The batch of input images with
20737// bounding boxes drawn on the images.
20738func DrawBoundingBoxesV2(scope *Scope, images tf.Output, boxes tf.Output, colors tf.Output) (output tf.Output) {
20739	if scope.Err() != nil {
20740		return
20741	}
20742	opspec := tf.OpSpec{
20743		Type: "DrawBoundingBoxesV2",
20744		Input: []tf.Input{
20745			images, boxes, colors,
20746		},
20747	}
20748	op := scope.AddOperation(opspec)
20749	return op.Output(0)
20750}
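
// Illustrative usage sketch (not generated code): drawing one red box over
// the central region of a single all-black 4x4 RGB image. Box coordinates
// are [y_min, x_min, y_max, x_max] fractions of the image size.
func exampleDrawBoundingBoxesV2Usage() {
	s := NewScope()
	// Build a [1, 4, 4, 3] batch of one zero-valued (black) image.
	img := make([][][][]float32, 1)
	img[0] = make([][][]float32, 4)
	for y := range img[0] {
		img[0][y] = make([][]float32, 4)
		for x := range img[0][y] {
			img[0][y][x] = []float32{0, 0, 0}
		}
	}
	images := Const(s, img)
	boxes := Const(s, [][][]float32{{{0.25, 0.25, 0.75, 0.75}}}) // [1, 1, 4]
	colors := Const(s, [][]float32{{1, 0, 0, 1}})                // RGBA red
	drawn := DrawBoundingBoxesV2(s, images, boxes, colors)
	_ = drawn // Same shape as images, with the box outline painted red.
}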
20751
20752// Adjust the contrast of one or more images.
20753//
20754// `images` is a tensor of at least 3 dimensions.  The last 3 dimensions are
20755// interpreted as `[height, width, channels]`.  The other dimensions only
// represent a collection of images, such as `[batch, height, width, channels]`.
20757//
20758// Contrast is adjusted independently for each channel of each image.
20759//
20760// For each channel, the Op first computes the mean of the image pixels in the
20761// channel and then adjusts each component of each pixel to
20762// `(x - mean) * contrast_factor + mean`.
20763//
20764// Arguments:
20765//	images: Images to adjust.  At least 3-D.
20766//	contrast_factor: A float multiplier for adjusting contrast.
20767//
20768// Returns The contrast-adjusted image or images.
20769func AdjustContrastv2(scope *Scope, images tf.Output, contrast_factor tf.Output) (output tf.Output) {
20770	if scope.Err() != nil {
20771		return
20772	}
20773	opspec := tf.OpSpec{
20774		Type: "AdjustContrastv2",
20775		Input: []tf.Input{
20776			images, contrast_factor,
20777		},
20778	}
20779	op := scope.AddOperation(opspec)
20780	return op.Output(0)
20781}
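
// Illustrative usage sketch (not generated code): doubling the contrast of a
// 1x1x2x1 image. For channel values {1, 3} the mean is 2, so
// (x - mean) * 2 + mean maps them to {0, 4}.
func exampleAdjustContrastv2Usage() {
	s := NewScope()
	images := Const(s, [][][][]float32{{{{1}, {3}}}}) // [batch=1, h=1, w=2, c=1]
	factor := Const(s, float32(2))
	adjusted := AdjustContrastv2(s, images, factor)
	_ = adjusted // Pixel values become {0, 4}.
}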
20782
20783// Returns the rank of a tensor.
20784//
20785// This operation returns an integer representing the rank of `input`.
20786//
20787// For example:
20788//
20789// ```
20790// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
20791// # shape of tensor 't' is [2, 2, 3]
20792// rank(t) ==> 3
20793// ```
20794//
20795// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
20796// of a tensor is the number of indices required to uniquely select each element
20797// of the tensor. Rank is also known as "order", "degree", or "ndims."
20798func Rank(scope *Scope, input tf.Output) (output tf.Output) {
20799	if scope.Err() != nil {
20800		return
20801	}
20802	opspec := tf.OpSpec{
20803		Type: "Rank",
20804		Input: []tf.Input{
20805			input,
20806		},
20807	}
20808	op := scope.AddOperation(opspec)
20809	return op.Output(0)
20810}
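
// Illustrative usage sketch (not generated code): the rank of the [2, 2, 3]
// tensor from the comment example above is 3.
func exampleRankUsage() {
	s := NewScope()
	t := Const(s, [][][]int32{
		{{1, 1, 1}, {2, 2, 2}},
		{{3, 3, 3}, {4, 4, 4}},
	})
	r := Rank(s, t)
	_ = r // 0-D int32 tensor holding 3.
}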
20811
20812// EncodeJpegAttr is an optional argument to EncodeJpeg.
20813type EncodeJpegAttr func(optionalAttr)
20814
20815// EncodeJpegFormat sets the optional format attribute to value.
20816//
20817// value: Per pixel image format.
20818// If not specified, defaults to ""
20819func EncodeJpegFormat(value string) EncodeJpegAttr {
20820	return func(m optionalAttr) {
20821		m["format"] = value
20822	}
20823}
20824
20825// EncodeJpegQuality sets the optional quality attribute to value.
20826//
20827// value: Quality of the compression from 0 to 100 (higher is better and slower).
20828// If not specified, defaults to 95
20829func EncodeJpegQuality(value int64) EncodeJpegAttr {
20830	return func(m optionalAttr) {
20831		m["quality"] = value
20832	}
20833}
20834
20835// EncodeJpegProgressive sets the optional progressive attribute to value.
20836//
20837// value: If True, create a JPEG that loads progressively (coarse to fine).
20838// If not specified, defaults to false
20839func EncodeJpegProgressive(value bool) EncodeJpegAttr {
20840	return func(m optionalAttr) {
20841		m["progressive"] = value
20842	}
20843}
20844
20845// EncodeJpegOptimizeSize sets the optional optimize_size attribute to value.
20846//
20847// value: If True, spend CPU/RAM to reduce size with no quality change.
20848// If not specified, defaults to false
20849func EncodeJpegOptimizeSize(value bool) EncodeJpegAttr {
20850	return func(m optionalAttr) {
20851		m["optimize_size"] = value
20852	}
20853}
20854
20855// EncodeJpegChromaDownsampling sets the optional chroma_downsampling attribute to value.
20856//
20857// value: See http://en.wikipedia.org/wiki/Chroma_subsampling.
20858// If not specified, defaults to true
20859func EncodeJpegChromaDownsampling(value bool) EncodeJpegAttr {
20860	return func(m optionalAttr) {
20861		m["chroma_downsampling"] = value
20862	}
20863}
20864
20865// EncodeJpegDensityUnit sets the optional density_unit attribute to value.
20866//
20867// value: Unit used to specify `x_density` and `y_density`:
20868// pixels per inch (`'in'`) or centimeter (`'cm'`).
20869// If not specified, defaults to "in"
20870func EncodeJpegDensityUnit(value string) EncodeJpegAttr {
20871	return func(m optionalAttr) {
20872		m["density_unit"] = value
20873	}
20874}
20875
20876// EncodeJpegXDensity sets the optional x_density attribute to value.
20877//
20878// value: Horizontal pixels per density unit.
20879// If not specified, defaults to 300
20880func EncodeJpegXDensity(value int64) EncodeJpegAttr {
20881	return func(m optionalAttr) {
20882		m["x_density"] = value
20883	}
20884}
20885
20886// EncodeJpegYDensity sets the optional y_density attribute to value.
20887//
20888// value: Vertical pixels per density unit.
20889// If not specified, defaults to 300
20890func EncodeJpegYDensity(value int64) EncodeJpegAttr {
20891	return func(m optionalAttr) {
20892		m["y_density"] = value
20893	}
20894}
20895
20896// EncodeJpegXmpMetadata sets the optional xmp_metadata attribute to value.
20897//
20898// value: If not empty, embed this XMP metadata in the image header.
20899// If not specified, defaults to ""
20900func EncodeJpegXmpMetadata(value string) EncodeJpegAttr {
20901	return func(m optionalAttr) {
20902		m["xmp_metadata"] = value
20903	}
20904}
20905
20906// JPEG-encode an image.
20907//
20908// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
20909//
20910// The attr `format` can be used to override the color format of the encoded
20911// output.  Values can be:
20912//
20913// *   `''`: Use a default format based on the number of channels in the image.
20914// *   `grayscale`: Output a grayscale JPEG image.  The `channels` dimension
20915//     of `image` must be 1.
20916// *   `rgb`: Output an RGB JPEG image. The `channels` dimension
20917//     of `image` must be 3.
20918//
20919// If `format` is not specified or is the empty string, a default format is picked
// based on the number of channels in `image`:
20921//
20922// *   1: Output a grayscale image.
20923// *   3: Output an RGB image.
20924//
20925// Arguments:
20926//	image: 3-D with shape `[height, width, channels]`.
20927//
20928// Returns 0-D. JPEG-encoded image.
20929func EncodeJpeg(scope *Scope, image tf.Output, optional ...EncodeJpegAttr) (contents tf.Output) {
20930	if scope.Err() != nil {
20931		return
20932	}
20933	attrs := map[string]interface{}{}
20934	for _, a := range optional {
20935		a(attrs)
20936	}
20937	opspec := tf.OpSpec{
20938		Type: "EncodeJpeg",
20939		Input: []tf.Input{
20940			image,
20941		},
20942		Attrs: attrs,
20943	}
20944	op := scope.AddOperation(opspec)
20945	return op.Output(0)
20946}
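
// Illustrative usage sketch (not generated code): JPEG-encoding a tiny uint8
// RGB image with a lower quality setting and progressive loading enabled.
func exampleEncodeJpegUsage() {
	s := NewScope()
	image := Const(s, [][][]uint8{{{255, 0, 0}}}) // 1x1 RGB image
	contents := EncodeJpeg(s, image,
		EncodeJpegQuality(75),
		EncodeJpegProgressive(true))
	_ = contents // 0-D string tensor holding the JPEG bytes.
}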
20947
20948// DecodeAndCropJpegAttr is an optional argument to DecodeAndCropJpeg.
20949type DecodeAndCropJpegAttr func(optionalAttr)
20950
20951// DecodeAndCropJpegChannels sets the optional channels attribute to value.
20952//
20953// value: Number of color channels for the decoded image.
20954// If not specified, defaults to 0
20955func DecodeAndCropJpegChannels(value int64) DecodeAndCropJpegAttr {
20956	return func(m optionalAttr) {
20957		m["channels"] = value
20958	}
20959}
20960
20961// DecodeAndCropJpegRatio sets the optional ratio attribute to value.
20962//
20963// value: Downscaling ratio.
20964// If not specified, defaults to 1
20965func DecodeAndCropJpegRatio(value int64) DecodeAndCropJpegAttr {
20966	return func(m optionalAttr) {
20967		m["ratio"] = value
20968	}
20969}
20970
20971// DecodeAndCropJpegFancyUpscaling sets the optional fancy_upscaling attribute to value.
20972//
// value: If true, use a slower but nicer upscaling of the
20974// chroma planes (yuv420/422 only).
20975// If not specified, defaults to true
20976func DecodeAndCropJpegFancyUpscaling(value bool) DecodeAndCropJpegAttr {
20977	return func(m optionalAttr) {
20978		m["fancy_upscaling"] = value
20979	}
20980}
20981
20982// DecodeAndCropJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value.
20983//
20984// value: If true try to recover an image from truncated input.
20985// If not specified, defaults to false
20986func DecodeAndCropJpegTryRecoverTruncated(value bool) DecodeAndCropJpegAttr {
20987	return func(m optionalAttr) {
20988		m["try_recover_truncated"] = value
20989	}
20990}
20991
20992// DecodeAndCropJpegAcceptableFraction sets the optional acceptable_fraction attribute to value.
20993//
20994// value: The minimum required fraction of lines before a truncated
20995// input is accepted.
20996// If not specified, defaults to 1
20997func DecodeAndCropJpegAcceptableFraction(value float32) DecodeAndCropJpegAttr {
20998	return func(m optionalAttr) {
20999		m["acceptable_fraction"] = value
21000	}
21001}
21002
21003// DecodeAndCropJpegDctMethod sets the optional dct_method attribute to value.
21004//
21005// value: string specifying a hint about the algorithm used for
21006// decompression.  Defaults to "" which maps to a system-specific
21007// default.  Currently valid values are ["INTEGER_FAST",
// "INTEGER_ACCURATE"].  The hint may be ignored (e.g., if the internal
// jpeg library is changed to a version that does not support that specific
// option).
21011// If not specified, defaults to ""
21012func DecodeAndCropJpegDctMethod(value string) DecodeAndCropJpegAttr {
21013	return func(m optionalAttr) {
21014		m["dct_method"] = value
21015	}
21016}
21017
// Decode and crop a JPEG-encoded image to a uint8 tensor.
21019//
21020// The attr `channels` indicates the desired number of color channels for the
21021// decoded image.
21022//
21023// Accepted values are:
21024//
21025// *   0: Use the number of channels in the JPEG-encoded image.
21026// *   1: output a grayscale image.
21027// *   3: output an RGB image.
21028//
21029// If needed, the JPEG-encoded image is transformed to match the requested number
21030// of color channels.
21031//
21032// The attr `ratio` allows downscaling the image by an integer factor during
21033// decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
21034// downscaling the image later.
//
// It is equivalent to a combination of decode and crop, but much faster because
// it only decodes the part of the JPEG image within the crop window.
21039//
21040// Arguments:
21041//	contents: 0-D.  The JPEG-encoded image.
21042//	crop_window: 1-D.  The crop window: [crop_y, crop_x, crop_height, crop_width].
21043//
// Returns 3-D with shape `[height, width, channels]`.
21045func DecodeAndCropJpeg(scope *Scope, contents tf.Output, crop_window tf.Output, optional ...DecodeAndCropJpegAttr) (image tf.Output) {
21046	if scope.Err() != nil {
21047		return
21048	}
21049	attrs := map[string]interface{}{}
21050	for _, a := range optional {
21051		a(attrs)
21052	}
21053	opspec := tf.OpSpec{
21054		Type: "DecodeAndCropJpeg",
21055		Input: []tf.Input{
21056			contents, crop_window,
21057		},
21058		Attrs: attrs,
21059	}
21060	op := scope.AddOperation(opspec)
21061	return op.Output(0)
21062}
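
// Illustrative usage sketch (not generated code): decoding only a 100x100
// window of a JPEG, which avoids decompressing the rest of the image. The
// jpegBytes parameter is a hypothetical placeholder for the encoded file.
func exampleDecodeAndCropJpegUsage(jpegBytes string) {
	s := NewScope()
	contents := Const(s, jpegBytes)
	window := Const(s, []int32{10, 20, 100, 100}) // [crop_y, crop_x, crop_height, crop_width]
	image := DecodeAndCropJpeg(s, contents, window,
		DecodeAndCropJpegChannels(3))
	_ = image // 3-D uint8 tensor of shape [100, 100, 3].
}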
21063
21064// ResizeNearestNeighborGradAttr is an optional argument to ResizeNearestNeighborGrad.
21065type ResizeNearestNeighborGradAttr func(optionalAttr)
21066
21067// ResizeNearestNeighborGradAlignCorners sets the optional align_corners attribute to value.
21068//
21069// value: If true, the centers of the 4 corner pixels of the input and grad tensors are
21070// aligned. Defaults to false.
21071// If not specified, defaults to false
21072func ResizeNearestNeighborGradAlignCorners(value bool) ResizeNearestNeighborGradAttr {
21073	return func(m optionalAttr) {
21074		m["align_corners"] = value
21075	}
21076}
21077
21078// ResizeNearestNeighborGradHalfPixelCenters sets the optional half_pixel_centers attribute to value.
21079// If not specified, defaults to false
21080func ResizeNearestNeighborGradHalfPixelCenters(value bool) ResizeNearestNeighborGradAttr {
21081	return func(m optionalAttr) {
21082		m["half_pixel_centers"] = value
21083	}
21084}
21085
21086// Computes the gradient of nearest neighbor interpolation.
21087//
21088// Arguments:
21089//	grads: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
21091// original input size.
21092//
21093// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
21094// with respect to the input image.
21095func ResizeNearestNeighborGrad(scope *Scope, grads tf.Output, size tf.Output, optional ...ResizeNearestNeighborGradAttr) (output tf.Output) {
21096	if scope.Err() != nil {
21097		return
21098	}
21099	attrs := map[string]interface{}{}
21100	for _, a := range optional {
21101		a(attrs)
21102	}
21103	opspec := tf.OpSpec{
21104		Type: "ResizeNearestNeighborGrad",
21105		Input: []tf.Input{
21106			grads, size,
21107		},
21108		Attrs: attrs,
21109	}
21110	op := scope.AddOperation(opspec)
21111	return op.Output(0)
21112}
21113
21114// Runs multiple additive regression ensemble predictors on input instances and
21115//
21116// computes the logits. It is designed to be used during prediction.
21117// It traverses all the trees and calculates the final score for each instance.
21118//
21119// Arguments:
21120//
21121//	bucketized_features: A list of rank 1 Tensors containing bucket id for each
21122// feature.
21123//	logits_dimension: scalar, dimension of the logits, to be used for partial logits
21124// shape.
21125//
21126// Returns Output rank 2 Tensor containing logits for each example.
21127func BoostedTreesPredict(scope *Scope, tree_ensemble_handle tf.Output, bucketized_features []tf.Output, logits_dimension int64) (logits tf.Output) {
21128	if scope.Err() != nil {
21129		return
21130	}
21131	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
21132	opspec := tf.OpSpec{
21133		Type: "BoostedTreesPredict",
21134		Input: []tf.Input{
21135			tree_ensemble_handle, tf.OutputList(bucketized_features),
21136		},
21137		Attrs: attrs,
21138	}
21139	op := scope.AddOperation(opspec)
21140	return op.Output(0)
21141}
21142
21143// MaxAttr is an optional argument to Max.
21144type MaxAttr func(optionalAttr)
21145
21146// MaxKeepDims sets the optional keep_dims attribute to value.
21147//
21148// value: If true, retain reduced dimensions with length 1.
21149// If not specified, defaults to false
21150func MaxKeepDims(value bool) MaxAttr {
21151	return func(m optionalAttr) {
21152		m["keep_dims"] = value
21153	}
21154}
21155
21156// Computes the maximum of elements across dimensions of a tensor.
21157//
21158// Reduces `input` along the dimensions given in `axis`. Unless
21159// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
21160// `axis`. If `keep_dims` is true, the reduced dimensions are
21161// retained with length 1.
21162//
21163// Arguments:
21164//	input: The tensor to reduce.
21165//	axis: The dimensions to reduce. Must be in the range
21166// `[-rank(input), rank(input))`.
21167//
21168// Returns The reduced tensor.
21169func Max(scope *Scope, input tf.Output, axis tf.Output, optional ...MaxAttr) (output tf.Output) {
21170	if scope.Err() != nil {
21171		return
21172	}
21173	attrs := map[string]interface{}{}
21174	for _, a := range optional {
21175		a(attrs)
21176	}
21177	opspec := tf.OpSpec{
21178		Type: "Max",
21179		Input: []tf.Input{
21180			input, axis,
21181		},
21182		Attrs: attrs,
21183	}
21184	op := scope.AddOperation(opspec)
21185	return op.Output(0)
21186}
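
// Illustrative usage sketch (not generated code): taking the maximum of a
// 2x3 matrix along axis 0 while keeping the reduced dimension.
func exampleMaxUsage() {
	s := NewScope()
	input := Const(s, [][]int32{{1, 5, 3}, {4, 2, 6}})
	axis := Const(s, []int32{0})
	m := Max(s, input, axis, MaxKeepDims(true))
	_ = m // 1x3 tensor holding [[4, 5, 6]].
}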
21187
21188// ResizeBilinearGradAttr is an optional argument to ResizeBilinearGrad.
21189type ResizeBilinearGradAttr func(optionalAttr)
21190
21191// ResizeBilinearGradAlignCorners sets the optional align_corners attribute to value.
21192//
21193// value: If true, the centers of the 4 corner pixels of the input and grad tensors are
21194// aligned. Defaults to false.
21195// If not specified, defaults to false
21196func ResizeBilinearGradAlignCorners(value bool) ResizeBilinearGradAttr {
21197	return func(m optionalAttr) {
21198		m["align_corners"] = value
21199	}
21200}
21201
21202// ResizeBilinearGradHalfPixelCenters sets the optional half_pixel_centers attribute to value.
21203// If not specified, defaults to false
21204func ResizeBilinearGradHalfPixelCenters(value bool) ResizeBilinearGradAttr {
21205	return func(m optionalAttr) {
21206		m["half_pixel_centers"] = value
21207	}
21208}
21209
21210// Computes the gradient of bilinear interpolation.
21211//
21212// Arguments:
21213//	grads: 4-D with shape `[batch, height, width, channels]`.
//	original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`.
21215// The image tensor that was resized.
21216//
21217// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`.
// Gradients with respect to the input image. The input image must have been
// of type float or double.
21220func ResizeBilinearGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBilinearGradAttr) (output tf.Output) {
21221	if scope.Err() != nil {
21222		return
21223	}
21224	attrs := map[string]interface{}{}
21225	for _, a := range optional {
21226		a(attrs)
21227	}
21228	opspec := tf.OpSpec{
21229		Type: "ResizeBilinearGrad",
21230		Input: []tf.Input{
21231			grads, original_image,
21232		},
21233		Attrs: attrs,
21234	}
21235	op := scope.AddOperation(opspec)
21236	return op.Output(0)
21237}
21238
21239// Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`.
21240//
21241// tensor: The tensor to put on the list.
21242// input_handle: The old list.
21243// output_handle: A list with the elements of the old list followed by tensor.
21244// element_dtype: the type of elements in the list.
21245// element_shape: a shape compatible with that of elements in the list.
21246func TensorListPushBack(scope *Scope, input_handle tf.Output, tensor tf.Output) (output_handle tf.Output) {
21247	if scope.Err() != nil {
21248		return
21249	}
21250	opspec := tf.OpSpec{
21251		Type: "TensorListPushBack",
21252		Input: []tf.Input{
21253			input_handle, tensor,
21254		},
21255	}
21256	op := scope.AddOperation(opspec)
21257	return op.Output(0)
21258}
21259
21260// UniqueWithCountsAttr is an optional argument to UniqueWithCounts.
21261type UniqueWithCountsAttr func(optionalAttr)
21262
21263// UniqueWithCountsOutIdx sets the optional out_idx attribute to value.
21264// If not specified, defaults to DT_INT32
21265func UniqueWithCountsOutIdx(value tf.DataType) UniqueWithCountsAttr {
21266	return func(m optionalAttr) {
21267		m["out_idx"] = value
21268	}
21269}
21270
21271// Finds unique elements in a 1-D tensor.
21272//
21273// This operation returns a tensor `y` containing all of the unique elements of `x`
21274// sorted in the same order that they occur in `x`. This operation also returns a
21275// tensor `idx` the same size as `x` that contains the index of each value of `x`
21276// in the unique output `y`. Finally, it returns a third tensor `count` that
21277// contains the count of each element of `y` in `x`. In other words:
21278//
21279// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
21280//
21281// For example:
21282//
21283// ```
21284// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
21285// y, idx, count = unique_with_counts(x)
21286// y ==> [1, 2, 4, 7, 8]
21287// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
21288// count ==> [2, 1, 3, 1, 2]
21289// ```
21290//
21291// Arguments:
21292//	x: 1-D.
21293//
21294// Returns:
21295//	y: 1-D.
21296//	idx: 1-D.
21297//	count: 1-D.
21298func UniqueWithCounts(scope *Scope, x tf.Output, optional ...UniqueWithCountsAttr) (y tf.Output, idx tf.Output, count tf.Output) {
21299	if scope.Err() != nil {
21300		return
21301	}
21302	attrs := map[string]interface{}{}
21303	for _, a := range optional {
21304		a(attrs)
21305	}
21306	opspec := tf.OpSpec{
21307		Type: "UniqueWithCounts",
21308		Input: []tf.Input{
21309			x,
21310		},
21311		Attrs: attrs,
21312	}
21313	op := scope.AddOperation(opspec)
21314	return op.Output(0), op.Output(1), op.Output(2)
21315}
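
// Illustrative usage sketch (not generated code): the comment example above,
// reproduced with the Go wrapper.
func exampleUniqueWithCountsUsage() {
	s := NewScope()
	x := Const(s, []int32{1, 1, 2, 4, 4, 4, 7, 8, 8})
	y, idx, count := UniqueWithCounts(s, x)
	// y     => [1, 2, 4, 7, 8]
	// idx   => [0, 0, 1, 2, 2, 2, 3, 4, 4]
	// count => [2, 1, 3, 1, 2]
	_, _, _ = y, idx, count
}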
21316
21317// ResizeBicubicGradAttr is an optional argument to ResizeBicubicGrad.
21318type ResizeBicubicGradAttr func(optionalAttr)
21319
21320// ResizeBicubicGradAlignCorners sets the optional align_corners attribute to value.
21321//
21322// value: If true, the centers of the 4 corner pixels of the input and grad tensors are
21323// aligned. Defaults to false.
21324// If not specified, defaults to false
21325func ResizeBicubicGradAlignCorners(value bool) ResizeBicubicGradAttr {
21326	return func(m optionalAttr) {
21327		m["align_corners"] = value
21328	}
21329}
21330
21331// ResizeBicubicGradHalfPixelCenters sets the optional half_pixel_centers attribute to value.
21332// If not specified, defaults to false
21333func ResizeBicubicGradHalfPixelCenters(value bool) ResizeBicubicGradAttr {
21334	return func(m optionalAttr) {
21335		m["half_pixel_centers"] = value
21336	}
21337}
21338
21339// Computes the gradient of bicubic interpolation.
21340//
21341// Arguments:
21342//	grads: 4-D with shape `[batch, height, width, channels]`.
//	original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`.
21344// The image tensor that was resized.
21345//
21346// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`.
// Gradients with respect to the input image. The input image must have been
// of type float or double.
21349func ResizeBicubicGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBicubicGradAttr) (output tf.Output) {
21350	if scope.Err() != nil {
21351		return
21352	}
21353	attrs := map[string]interface{}{}
21354	for _, a := range optional {
21355		a(attrs)
21356	}
21357	opspec := tf.OpSpec{
21358		Type: "ResizeBicubicGrad",
21359		Input: []tf.Input{
21360			grads, original_image,
21361		},
21362		Attrs: attrs,
21363	}
21364	op := scope.AddOperation(opspec)
21365	return op.Output(0)
21366}
21367
21368// Convert one or more images from HSV to RGB.
21369//
21370// Outputs a tensor of the same shape as the `images` tensor, containing the RGB
// value of the pixels. The output is only well defined if the values in `images`
21372// are in `[0,1]`.
21373//
21374// See `rgb_to_hsv` for a description of the HSV encoding.
21375//
21376// Arguments:
21377//	images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
21378//
21379// Returns `images` converted to RGB.
21380func HSVToRGB(scope *Scope, images tf.Output) (output tf.Output) {
21381	if scope.Err() != nil {
21382		return
21383	}
21384	opspec := tf.OpSpec{
21385		Type: "HSVToRGB",
21386		Input: []tf.Input{
21387			images,
21388		},
21389	}
21390	op := scope.AddOperation(opspec)
21391	return op.Output(0)
21392}
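
// Illustrative usage sketch (not generated code): pure red in HSV (hue 0,
// full saturation and value) converts to RGB (1, 0, 0).
func exampleHSVToRGBUsage() {
	s := NewScope()
	hsv := Const(s, []float32{0, 1, 1}) // 1-D; last dimension must be size 3.
	rgb := HSVToRGB(s, hsv)
	_ = rgb // [1, 0, 0]
}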
21393
21394// Computes the mean along sparse segments of a tensor.
21395//
21396// Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is
21397// missing, the `output` tensor at that position will be zeroed.
21398//
21399// Read
21400// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
21401// for an explanation of segments.
21402//
21403// Arguments:
21404//
21405//	indices: A 1-D tensor. Has same rank as `segment_ids`.
21406//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
21407//	num_segments: Should equal the number of distinct segment IDs.
21408//
21409// Returns Has same shape as data, except for dimension 0 which has size
21410// `num_segments`.
21411func SparseSegmentMeanWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
21412	if scope.Err() != nil {
21413		return
21414	}
21415	opspec := tf.OpSpec{
21416		Type: "SparseSegmentMeanWithNumSegments",
21417		Input: []tf.Input{
21418			data, indices, segment_ids, num_segments,
21419		},
21420	}
21421	op := scope.AddOperation(opspec)
21422	return op.Output(0)
21423}
21424
21425// ResizeBicubicAttr is an optional argument to ResizeBicubic.
21426type ResizeBicubicAttr func(optionalAttr)
21427
21428// ResizeBicubicAlignCorners sets the optional align_corners attribute to value.
21429//
21430// value: If true, the centers of the 4 corner pixels of the input and output tensors are
21431// aligned, preserving the values at the corner pixels. Defaults to false.
21432// If not specified, defaults to false
21433func ResizeBicubicAlignCorners(value bool) ResizeBicubicAttr {
21434	return func(m optionalAttr) {
21435		m["align_corners"] = value
21436	}
21437}
21438
21439// ResizeBicubicHalfPixelCenters sets the optional half_pixel_centers attribute to value.
21440// If not specified, defaults to false
21441func ResizeBicubicHalfPixelCenters(value bool) ResizeBicubicAttr {
21442	return func(m optionalAttr) {
21443		m["half_pixel_centers"] = value
21444	}
21445}
21446
21447// Resize `images` to `size` using bicubic interpolation.
21448//
21449// Input images can be of different types but output images are always float.
21450//
21451// Arguments:
21452//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
21454// new size for the images.
21455//
21456// Returns 4-D with shape
21457// `[batch, new_height, new_width, channels]`.
21458func ResizeBicubic(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBicubicAttr) (resized_images tf.Output) {
21459	if scope.Err() != nil {
21460		return
21461	}
21462	attrs := map[string]interface{}{}
21463	for _, a := range optional {
21464		a(attrs)
21465	}
21466	opspec := tf.OpSpec{
21467		Type: "ResizeBicubic",
21468		Input: []tf.Input{
21469			images, size,
21470		},
21471		Attrs: attrs,
21472	}
21473	op := scope.AddOperation(opspec)
21474	return op.Output(0)
21475}
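
// Illustrative usage sketch (not generated code): bicubically upscaling a
// 2x2 single-channel image to 4x4 with half-pixel centers enabled.
func exampleResizeBicubicUsage() {
	s := NewScope()
	images := Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}}) // [1, 2, 2, 1]
	size := Const(s, []int32{4, 4})                               // new_height, new_width
	resized := ResizeBicubic(s, images, size,
		ResizeBicubicHalfPixelCenters(true))
	_ = resized // [1, 4, 4, 1] float tensor.
}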
21476
21477// ResizeAreaAttr is an optional argument to ResizeArea.
21478type ResizeAreaAttr func(optionalAttr)
21479
21480// ResizeAreaAlignCorners sets the optional align_corners attribute to value.
21481//
21482// value: If true, the centers of the 4 corner pixels of the input and output tensors are
21483// aligned, preserving the values at the corner pixels. Defaults to false.
21484// If not specified, defaults to false
21485func ResizeAreaAlignCorners(value bool) ResizeAreaAttr {
21486	return func(m optionalAttr) {
21487		m["align_corners"] = value
21488	}
21489}
21490
21491// Resize `images` to `size` using area interpolation.
21492//
21493// Input images can be of different types but output images are always float.
21494//
21495// The range of pixel values for the output image might be slightly different
21496// from the range for the input image because of limited numerical precision.
21497// To guarantee an output range, for example `[0.0, 1.0]`, apply
21498// `tf.clip_by_value` to the output.
21499//
21500// Each output pixel is computed by first transforming the pixel's footprint into
21501// the input tensor and then averaging the pixels that intersect the footprint. An
21502// input pixel's contribution to the average is weighted by the fraction of its
21503// area that intersects the footprint.  This is the same as OpenCV's INTER_AREA.
21504//
21505// Arguments:
21506//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
21508// new size for the images.
21509//
21510// Returns 4-D with shape
21511// `[batch, new_height, new_width, channels]`.
21512func ResizeArea(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeAreaAttr) (resized_images tf.Output) {
21513	if scope.Err() != nil {
21514		return
21515	}
21516	attrs := map[string]interface{}{}
21517	for _, a := range optional {
21518		a(attrs)
21519	}
21520	opspec := tf.OpSpec{
21521		Type: "ResizeArea",
21522		Input: []tf.Input{
21523			images, size,
21524		},
21525		Attrs: attrs,
21526	}
21527	op := scope.AddOperation(opspec)
21528	return op.Output(0)
21529}
21530
// This op is used as a placeholder in If branch functions. It doesn't provide a
// valid output when run, so must either be removed (e.g. replaced with a
// function input) or guaranteed not to be used (e.g. if mirroring an
// intermediate output needed for the gradient computation of the other branch).
21535//
21536// Arguments:
21537//	dtype: The type of the output.
//	shape: The purported shape of the output. This is only used for shape inference;
// the output will not necessarily have this shape. Can be a partial shape.
21540//
// Returns "Fake" output value. This should not be consumed by another op.
21542func FakeParam(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
21543	if scope.Err() != nil {
21544		return
21545	}
21546	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
21547	opspec := tf.OpSpec{
21548		Type: "FakeParam",
21549
21550		Attrs: attrs,
21551	}
21552	op := scope.AddOperation(opspec)
21553	return op.Output(0)
21554}
21555
21556// Computes the power of one value to another.
21557//
21558// Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
21559// corresponding elements in `x` and `y`. For example:
21560//
21561// ```
// # tensor 'x' is [[2, 2], [3, 3]]
21563// # tensor 'y' is [[8, 16], [2, 3]]
21564// tf.pow(x, y) ==> [[256, 65536], [9, 27]]
21565// ```
21566func Pow(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
21567	if scope.Err() != nil {
21568		return
21569	}
21570	opspec := tf.OpSpec{
21571		Type: "Pow",
21572		Input: []tf.Input{
21573			x, y,
21574		},
21575	}
21576	op := scope.AddOperation(opspec)
21577	return op.Output(0)
21578}
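
// Illustrative usage sketch (not generated code): the comment example above,
// reproduced with the Go wrapper.
func examplePowUsage() {
	s := NewScope()
	x := Const(s, [][]int32{{2, 2}, {3, 3}})
	y := Const(s, [][]int32{{8, 16}, {2, 3}})
	z := Pow(s, x, y)
	_ = z // [[256, 65536], [9, 27]]
}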
21579
21580// Records the latency of producing `input_dataset` elements in a StatsAggregator.
21581func LatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
21582	if scope.Err() != nil {
21583		return
21584	}
21585	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
21586	opspec := tf.OpSpec{
21587		Type: "LatencyStatsDataset",
21588		Input: []tf.Input{
21589			input_dataset, tag,
21590		},
21591		Attrs: attrs,
21592	}
21593	op := scope.AddOperation(opspec)
21594	return op.Output(0)
21595}
21596
21597// Computes natural logarithm of x element-wise.
21598//
21599// I.e., \\(y = \log_e x\\).
21600//
21601// Example:
21602//
21603// ```python
21604// x = tf.constant([0, 0.5, 1, 5])
21605// tf.math.log(x) ==> [-inf, -0.6931472,  0. ,  1.609438]
21606// ```
21607func Log(scope *Scope, x tf.Output) (y tf.Output) {
21608	if scope.Err() != nil {
21609		return
21610	}
21611	opspec := tf.OpSpec{
21612		Type: "Log",
21613		Input: []tf.Input{
21614			x,
21615		},
21616	}
21617	op := scope.AddOperation(opspec)
21618	return op.Output(0)
21619}
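
// Illustrative usage sketch (not generated code): the comment example above,
// reproduced with the Go wrapper.
func exampleLogUsage() {
	s := NewScope()
	x := Const(s, []float32{0, 0.5, 1, 5})
	y := Log(s, x)
	_ = y // [-Inf, -0.6931472, 0, 1.609438]
}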
21620
21621// ShardDatasetAttr is an optional argument to ShardDataset.
21622type ShardDatasetAttr func(optionalAttr)
21623
21624// ShardDatasetRequireNonEmpty sets the optional require_non_empty attribute to value.
21625// If not specified, defaults to false
21626func ShardDatasetRequireNonEmpty(value bool) ShardDatasetAttr {
21627	return func(m optionalAttr) {
21628		m["require_non_empty"] = value
21629	}
21630}
21631
21632// Creates a `Dataset` that includes only 1/`num_shards` of this dataset.
21633//
21634// Arguments:
21635//
21636//	num_shards: An integer representing the number of shards operating in parallel.
21637//	index: An integer representing the current worker index.
21638//
21639//
21640func ShardDataset(scope *Scope, input_dataset tf.Output, num_shards tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShardDatasetAttr) (handle tf.Output) {
21641	if scope.Err() != nil {
21642		return
21643	}
21644	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
21645	for _, a := range optional {
21646		a(attrs)
21647	}
21648	opspec := tf.OpSpec{
21649		Type: "ShardDataset",
21650		Input: []tf.Input{
21651			input_dataset, num_shards, index,
21652		},
21653		Attrs: attrs,
21654	}
21655	op := scope.AddOperation(opspec)
21656	return op.Output(0)
21657}
21658
21659// Creates an all-zeros CSRSparseMatrix with shape `dense_shape`.
21660//
21661// Arguments:
21662//	dense_shape: The desired matrix shape.
21663//
21664//
21665// Returns An empty CSR matrix with shape `dense_shape`.
21666func SparseMatrixZeros(scope *Scope, dense_shape tf.Output, type_ tf.DataType) (sparse_matrix tf.Output) {
21667	if scope.Err() != nil {
21668		return
21669	}
21670	attrs := map[string]interface{}{"type": type_}
21671	opspec := tf.OpSpec{
21672		Type: "SparseMatrixZeros",
21673		Input: []tf.Input{
21674			dense_shape,
21675		},
21676		Attrs: attrs,
21677	}
21678	op := scope.AddOperation(opspec)
21679	return op.Output(0)
21680}
21681
21682// AddManySparseToTensorsMapAttr is an optional argument to AddManySparseToTensorsMap.
21683type AddManySparseToTensorsMapAttr func(optionalAttr)
21684
21685// AddManySparseToTensorsMapContainer sets the optional container attribute to value.
21686//
21687// value: The container name for the `SparseTensorsMap` created by this op.
21688// If not specified, defaults to ""
21689func AddManySparseToTensorsMapContainer(value string) AddManySparseToTensorsMapAttr {
21690	return func(m optionalAttr) {
21691		m["container"] = value
21692	}
21693}
21694
21695// AddManySparseToTensorsMapSharedName sets the optional shared_name attribute to value.
21696//
21697// value: The shared name for the `SparseTensorsMap` created by this op.
21698// If blank, the new Operation's unique name is used.
21699// If not specified, defaults to ""
21700func AddManySparseToTensorsMapSharedName(value string) AddManySparseToTensorsMapAttr {
21701	return func(m optionalAttr) {
21702		m["shared_name"] = value
21703	}
21704}
21705
21706// Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
21707//
21708// A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,
21709// `sparse_values`, and `sparse_shape`, where
21710//
21711// ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
21712//
21713// An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`
// having a first `sparse_indices` column taking values in `[0, N)`, where
21715// the minibatch size `N == sparse_shape[0]`.
21716//
21717// The input `SparseTensor` must have rank `R` greater than 1, and the first
21718// dimension is treated as the minibatch dimension.  Elements of the `SparseTensor`
21719// must be sorted in increasing order of this first dimension.  The stored
21720// `SparseTensor` objects pointed to by each row of the output `sparse_handles`
21721// will have rank `R-1`.
21722//
21723// The `SparseTensor` values can then be read out as part of a minibatch by passing
21724// the given keys as vector elements to `TakeManySparseFromTensorsMap`.  To ensure
21725// the correct `SparseTensorsMap` is accessed, ensure that the same
21726// `container` and `shared_name` are passed to that Op.  If no `shared_name`
21727// is provided here, instead use the *name* of the Operation created by calling
21728// `AddManySparseToTensorsMap` as the `shared_name` passed to
21729// `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.
21730//
21731// Arguments:
21732//	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
21733// `sparse_indices[:, 0]` must be ordered values in `[0, N)`.
21734//	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
21735//	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
21736// The minibatch size `N == sparse_shape[0]`.
21737//
21738// Returns 1-D.  The handles of the `SparseTensor` now stored in the
21739// `SparseTensorsMap`.  Shape: `[N]`.
21740func AddManySparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddManySparseToTensorsMapAttr) (sparse_handles tf.Output) {
21741	if scope.Err() != nil {
21742		return
21743	}
21744	attrs := map[string]interface{}{}
21745	for _, a := range optional {
21746		a(attrs)
21747	}
21748	opspec := tf.OpSpec{
21749		Type: "AddManySparseToTensorsMap",
21750		Input: []tf.Input{
21751			sparse_indices, sparse_values, sparse_shape,
21752		},
21753		Attrs: attrs,
21754	}
21755	op := scope.AddOperation(opspec)
21756	return op.Output(0)
21757}
21758
21759// SendAttr is an optional argument to Send.
21760type SendAttr func(optionalAttr)
21761
21762// SendClientTerminated sets the optional client_terminated attribute to value.
21763//
21764// value: If set to true, this indicates that the node was added
21765// to the graph as a result of a client-side feed or fetch of Tensor data,
21766// in which case the corresponding send or recv is expected to be managed
21767// locally by the caller.
21768// If not specified, defaults to false
21769func SendClientTerminated(value bool) SendAttr {
21770	return func(m optionalAttr) {
21771		m["client_terminated"] = value
21772	}
21773}
21774
21775// Sends the named tensor from send_device to recv_device.
21776//
21777// Arguments:
21778//	tensor: The tensor to send.
21779//	tensor_name: The name of the tensor to send.
21780//	send_device: The name of the device sending the tensor.
21781//	send_device_incarnation: The current incarnation of send_device.
21782//	recv_device: The name of the device receiving the tensor.
21783//
21784// Returns the created operation.
21785func Send(scope *Scope, tensor tf.Output, tensor_name string, send_device string, send_device_incarnation int64, recv_device string, optional ...SendAttr) (o *tf.Operation) {
21786	if scope.Err() != nil {
21787		return
21788	}
21789	attrs := map[string]interface{}{"tensor_name": tensor_name, "send_device": send_device, "send_device_incarnation": send_device_incarnation, "recv_device": recv_device}
21790	for _, a := range optional {
21791		a(attrs)
21792	}
21793	opspec := tf.OpSpec{
21794		Type: "Send",
21795		Input: []tf.Input{
21796			tensor,
21797		},
21798		Attrs: attrs,
21799	}
21800	return scope.AddOperation(opspec)
21801}
21802
21803// Elementwise computes the bitwise OR of `x` and `y`.
21804//
// The result has a bit set wherever that bit is set in `x`, `y`, or both. The
21806// computation is performed on the underlying representations of `x` and `y`.
21807//
21808// For example:
21809//
21810// ```python
21811// import tensorflow as tf
21812// from tensorflow.python.ops import bitwise_ops
21813// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
21814//               tf.uint8, tf.uint16, tf.uint32, tf.uint64]
21815//
21816// for dtype in dtype_list:
21817//   lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
21818//   rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
21819//   exp = tf.constant([5, 5, 7, 15], dtype=tf.float32)
21820//
21821//   res = bitwise_ops.bitwise_or(lhs, rhs)
21822//   tf.assert_equal(tf.cast(res,  tf.float32), exp)  # TRUE
21823// ```
21824//
21825func BitwiseOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
21826	if scope.Err() != nil {
21827		return
21828	}
21829	opspec := tf.OpSpec{
21830		Type: "BitwiseOr",
21831		Input: []tf.Input{
21832			x, y,
21833		},
21834	}
21835	op := scope.AddOperation(opspec)
21836	return op.Output(0)
21837}
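
// Illustrative usage sketch (not generated code): a Go version of the Python
// example above for a single dtype.
func exampleBitwiseOrUsage() {
	s := NewScope()
	lhs := Const(s, []int32{0, 5, 3, 14})
	rhs := Const(s, []int32{5, 0, 7, 11})
	z := BitwiseOr(s, lhs, rhs)
	_ = z // [5, 5, 7, 15]
}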
21838
21839// BatchMatMulV2Attr is an optional argument to BatchMatMulV2.
21840type BatchMatMulV2Attr func(optionalAttr)
21841
21842// BatchMatMulV2AdjX sets the optional adj_x attribute to value.
21843//
21844// value: If `True`, adjoint the slices of `x`. Defaults to `False`.
21845// If not specified, defaults to false
21846func BatchMatMulV2AdjX(value bool) BatchMatMulV2Attr {
21847	return func(m optionalAttr) {
21848		m["adj_x"] = value
21849	}
21850}
21851
21852// BatchMatMulV2AdjY sets the optional adj_y attribute to value.
21853//
21854// value: If `True`, adjoint the slices of `y`. Defaults to `False`.
21855// If not specified, defaults to false
21856func BatchMatMulV2AdjY(value bool) BatchMatMulV2Attr {
21857	return func(m optionalAttr) {
21858		m["adj_y"] = value
21859	}
21860}
21861
21862// Multiplies slices of two tensors in batches.
21863//
21864// Multiplies all slices of `Tensor` `x` and `y` (each slice can be
21865// viewed as an element of a batch), and arranges the individual results
21866// in a single output tensor of the same batch size. Each of the
21867// individual slices can optionally be adjointed (to adjoint a matrix
21868// means to transpose and conjugate it) before multiplication by setting
// the `adj_x` or `adj_y` flag to `True`; both default to `False`.
21870//
21871// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
21872// and `[..., r_y, c_y]`.
21873//
21874// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
21875//
21876//     r_o = c_x if adj_x else r_x
21877//     c_o = r_y if adj_y else c_y
21878//
21879// It is computed as:
21880//
21881//     output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
21882//
21883// *NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. More
21884// about broadcasting
21885// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
21886//
21887//
21888// Arguments:
21889//	x: 2-D or higher with shape `[..., r_x, c_x]`.
21890//	y: 2-D or higher with shape `[..., r_y, c_y]`.
21891//
// Returns 2-D or higher with shape `[..., r_o, c_o]`
21893func BatchMatMulV2(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMulV2Attr) (output tf.Output) {
21894	if scope.Err() != nil {
21895		return
21896	}
21897	attrs := map[string]interface{}{}
21898	for _, a := range optional {
21899		a(attrs)
21900	}
21901	opspec := tf.OpSpec{
21902		Type: "BatchMatMulV2",
21903		Input: []tf.Input{
21904			x, y,
21905		},
21906		Attrs: attrs,
21907	}
21908	op := scope.AddOperation(opspec)
21909	return op.Output(0)
21910}
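
// Illustrative usage sketch (not generated code): a batch of one 2x2 matrix
// multiply where each slice of x is adjointed first, i.e. this computes
// transpose(x[0]) * y[0] for real-valued inputs.
func exampleBatchMatMulV2Usage() {
	s := NewScope()
	x := Const(s, [][][]float32{{{1, 2}, {3, 4}}}) // [1, 2, 2]
	y := Const(s, [][][]float32{{{5, 6}, {7, 8}}}) // [1, 2, 2]
	z := BatchMatMulV2(s, x, y, BatchMatMulV2AdjX(true))
	_ = z // [1, 2, 2] tensor holding [[[26, 30], [38, 44]]]
}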
21911
21912// Generate a sharded filename. The filename is printf formatted as
21913//
21914//    %s-%05d-of-%05d, basename, shard, num_shards.
21915func ShardedFilename(scope *Scope, basename tf.Output, shard tf.Output, num_shards tf.Output) (filename tf.Output) {
21916	if scope.Err() != nil {
21917		return
21918	}
21919	opspec := tf.OpSpec{
21920		Type: "ShardedFilename",
21921		Input: []tf.Input{
21922			basename, shard, num_shards,
21923		},
21924	}
21925	op := scope.AddOperation(opspec)
21926	return op.Output(0)
21927}
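
// Illustrative usage sketch (not generated code): producing the filename
// "model-00002-of-00010" for shard 2 of 10.
func exampleShardedFilenameUsage() {
	s := NewScope()
	filename := ShardedFilename(s,
		Const(s, "model"),
		Const(s, int32(2)),
		Const(s, int32(10)))
	_ = filename // 0-D string tensor.
}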
21928
21929// Calculates the softmax of a CSRSparseMatrix.
21930//
21931// Calculate the softmax of the innermost dimensions of a SparseMatrix.
21932//
21933// Missing values are treated as `-inf` (i.e., logits of zero probability); and
21934// the output has the same sparsity structure as the input (though missing values
21935// in the output may now be treated as having probability zero).
21936//
21937// Arguments:
21938//	logits: A CSRSparseMatrix.
21939//
21940//
21941// Returns A CSRSparseMatrix.
21942func SparseMatrixSoftmax(scope *Scope, logits tf.Output, type_ tf.DataType) (softmax tf.Output) {
21943	if scope.Err() != nil {
21944		return
21945	}
21946	attrs := map[string]interface{}{"type": type_}
21947	opspec := tf.OpSpec{
21948		Type: "SparseMatrixSoftmax",
21949		Input: []tf.Input{
21950			logits,
21951		},
21952		Attrs: attrs,
21953	}
21954	op := scope.AddOperation(opspec)
21955	return op.Output(0)
21956}
21957
21958// Creates a tree ensemble model and returns a handle to it.
21959//
21960// Arguments:
21961//	tree_ensemble_handle: Handle to the tree ensemble resource to be created.
21962//	stamp_token: Token to use as the initial value of the resource stamp.
21963//	tree_ensemble_serialized: Serialized proto of the tree ensemble.
21964//
21965// Returns the created operation.
21966func BoostedTreesCreateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, stamp_token tf.Output, tree_ensemble_serialized tf.Output) (o *tf.Operation) {
21967	if scope.Err() != nil {
21968		return
21969	}
21970	opspec := tf.OpSpec{
21971		Type: "BoostedTreesCreateEnsemble",
21972		Input: []tf.Input{
21973			tree_ensemble_handle, stamp_token, tree_ensemble_serialized,
21974		},
21975	}
21976	return scope.AddOperation(opspec)
21977}
21978
21979// Encodes an `ExtensionType` value into a `variant` scalar Tensor.
21980//
21981// Returns a scalar variant tensor containing a single `CompositeTensorVariant`
21982// with the specified Tensor components and TypeSpec.
21983//
21984// Arguments:
21985//	components: The component tensors for the extension type value.
21986//	metadata: String serialization for the TypeSpec.  (Note: the encoding for the TypeSpec
21987// may change in future versions of TensorFlow.)
21988//
// Returns A `variant` Tensor containing the encoded value.
21990func CompositeTensorVariantFromComponents(scope *Scope, components []tf.Output, metadata string) (encoded tf.Output) {
21991	if scope.Err() != nil {
21992		return
21993	}
21994	attrs := map[string]interface{}{"metadata": metadata}
21995	opspec := tf.OpSpec{
21996		Type: "CompositeTensorVariantFromComponents",
21997		Input: []tf.Input{
21998			tf.OutputList(components),
21999		},
22000		Attrs: attrs,
22001	}
22002	op := scope.AddOperation(opspec)
22003	return op.Output(0)
22004}
22005
22006// DataServiceDatasetAttr is an optional argument to DataServiceDataset.
22007type DataServiceDatasetAttr func(optionalAttr)
22008
22009// DataServiceDatasetTaskRefreshIntervalHintMs sets the optional task_refresh_interval_hint_ms attribute to value.
22010// If not specified, defaults to -1
22011func DataServiceDatasetTaskRefreshIntervalHintMs(value int64) DataServiceDatasetAttr {
22012	return func(m optionalAttr) {
22013		m["task_refresh_interval_hint_ms"] = value
22014	}
22015}
22016
22017// DataServiceDatasetDataTransferProtocol sets the optional data_transfer_protocol attribute to value.
22018// If not specified, defaults to ""
22019func DataServiceDatasetDataTransferProtocol(value string) DataServiceDatasetAttr {
22020	return func(m optionalAttr) {
22021		m["data_transfer_protocol"] = value
22022	}
22023}
22024
22025// DataServiceDatasetTargetWorkers sets the optional target_workers attribute to value.
22026// If not specified, defaults to "AUTO"
22027func DataServiceDatasetTargetWorkers(value string) DataServiceDatasetAttr {
22028	return func(m optionalAttr) {
22029		m["target_workers"] = value
22030	}
22031}
22032
22033// Creates a dataset that reads data from the tf.data service.
22034func DataServiceDataset(scope *Scope, dataset_id tf.Output, processing_mode tf.Output, address tf.Output, protocol tf.Output, job_name tf.Output, max_outstanding_requests tf.Output, iteration_counter tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...DataServiceDatasetAttr) (handle tf.Output) {
22035	if scope.Err() != nil {
22036		return
22037	}
22038	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
22039	for _, a := range optional {
22040		a(attrs)
22041	}
22042	opspec := tf.OpSpec{
22043		Type: "DataServiceDataset",
22044		Input: []tf.Input{
22045			dataset_id, processing_mode, address, protocol, job_name, max_outstanding_requests, iteration_counter,
22046		},
22047		Attrs: attrs,
22048	}
22049	op := scope.AddOperation(opspec)
22050	return op.Output(0)
22051}
22052
22053// A dataset that splits the elements of its input into multiple elements.
22054func ExperimentalUnbatchDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
22055	if scope.Err() != nil {
22056		return
22057	}
22058	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
22059	opspec := tf.OpSpec{
22060		Type: "ExperimentalUnbatchDataset",
22061		Input: []tf.Input{
22062			input_dataset,
22063		},
22064		Attrs: attrs,
22065	}
22066	op := scope.AddOperation(opspec)
22067	return op.Output(0)
22068}
22069
22070// SparseReduceMaxSparseAttr is an optional argument to SparseReduceMaxSparse.
22071type SparseReduceMaxSparseAttr func(optionalAttr)
22072
22073// SparseReduceMaxSparseKeepDims sets the optional keep_dims attribute to value.
22074//
22075// value: If true, retain reduced dimensions with length 1.
22076// If not specified, defaults to false
22077func SparseReduceMaxSparseKeepDims(value bool) SparseReduceMaxSparseAttr {
22078	return func(m optionalAttr) {
22079		m["keep_dims"] = value
22080	}
22081}
22082
22083// Computes the max of elements across dimensions of a SparseTensor.
22084//
22085// This Op takes a SparseTensor and is the sparse counterpart to
22086// `tf.reduce_max()`.  In contrast to SparseReduceMax, this Op returns a
22087// SparseTensor.
22088//
22089// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
22090// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
22091// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
22092// with length 1.
22093//
22094// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
22095// with a single element is returned.  Additionally, the axes can be negative,
22096// which are interpreted according to the indexing rules in Python.
22097//
22098// Arguments:
22099//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
22100// SparseTensor, possibly not in canonical ordering.
22101//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
22102//	input_shape: 1-D.  Shape of the input SparseTensor.
22103//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
22104func SparseReduceMaxSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
22105	if scope.Err() != nil {
22106		return
22107	}
22108	attrs := map[string]interface{}{}
22109	for _, a := range optional {
22110		a(attrs)
22111	}
22112	opspec := tf.OpSpec{
22113		Type: "SparseReduceMaxSparse",
22114		Input: []tf.Input{
22115			input_indices, input_values, input_shape, reduction_axes,
22116		},
22117		Attrs: attrs,
22118	}
22119	op := scope.AddOperation(opspec)
22120	return op.Output(0), op.Output(1), op.Output(2)
22121}
22122
22123// Creates a dataset that caches elements from `input_dataset`.
22124//
22125// A CacheDataset will iterate over the input_dataset, and store tensors. If the
22126// cache already exists, the cache will be used. If the cache is inappropriate
22127// (e.g. cannot be opened, contains tensors of the wrong shape / size), an error
// will be returned when the dataset is used.
22129//
22130// Arguments:
22131//
22132//	filename: A path on the filesystem where we should cache the dataset. Note: this
22133// will be a directory.
22134//
22135//
22136func CacheDataset(scope *Scope, input_dataset tf.Output, filename tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
22137	if scope.Err() != nil {
22138		return
22139	}
22140	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
22141	opspec := tf.OpSpec{
22142		Type: "CacheDataset",
22143		Input: []tf.Input{
22144			input_dataset, filename,
22145		},
22146		Attrs: attrs,
22147	}
22148	op := scope.AddOperation(opspec)
22149	return op.Output(0)
22150}
22151
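// exampleCacheDataset is an illustrative sketch, not generated code: it caches
// a small range dataset on disk. The use of RangeDataset and the cache
// directory path are assumptions for demonstration only.
func exampleCacheDataset() tf.Output {
	s := NewScope()
	types := []tf.DataType{tf.Int64}
	shapes := []tf.Shape{tf.ScalarShape()}
	ds := RangeDataset(s, Const(s, int64(0)), Const(s, int64(10)), Const(s, int64(1)), types, shapes)
	// Later iterations read back from the cache directory instead of
	// re-running the upstream pipeline.
	return CacheDataset(s, ds, Const(s, "/tmp/range_cache"), types, shapes)
}
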
22152// ThreadPoolHandleAttr is an optional argument to ThreadPoolHandle.
22153type ThreadPoolHandleAttr func(optionalAttr)
22154
22155// ThreadPoolHandleMaxIntraOpParallelism sets the optional max_intra_op_parallelism attribute to value.
22156//
22157// value: The maximum degree of parallelism to use within operations that execute on this
22158// threadpool.
22159// If not specified, defaults to 1
22160func ThreadPoolHandleMaxIntraOpParallelism(value int64) ThreadPoolHandleAttr {
22161	return func(m optionalAttr) {
22162		m["max_intra_op_parallelism"] = value
22163	}
22164}
22165
22166// ThreadPoolHandleContainer sets the optional container attribute to value.
22167// If not specified, defaults to ""
22168func ThreadPoolHandleContainer(value string) ThreadPoolHandleAttr {
22169	return func(m optionalAttr) {
22170		m["container"] = value
22171	}
22172}
22173
22174// ThreadPoolHandleSharedName sets the optional shared_name attribute to value.
22175// If not specified, defaults to ""
22176func ThreadPoolHandleSharedName(value string) ThreadPoolHandleAttr {
22177	return func(m optionalAttr) {
22178		m["shared_name"] = value
22179	}
22180}
22181
22182// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
22183//
22184// Arguments:
22185//	num_threads: The number of threads in the thread pool.
22186//	display_name: A human-readable name for the threads that may be visible in some
22187// visualizations.
22189//
22190// Returns A resource that can be consumed by one or more ExperimentalThreadPoolDataset
22191// ops.
22192func ThreadPoolHandle(scope *Scope, num_threads int64, display_name string, optional ...ThreadPoolHandleAttr) (handle tf.Output) {
22193	if scope.Err() != nil {
22194		return
22195	}
22196	attrs := map[string]interface{}{"num_threads": num_threads, "display_name": display_name}
22197	for _, a := range optional {
22198		a(attrs)
22199	}
22200	opspec := tf.OpSpec{
22201		Type: "ThreadPoolHandle",
22202
22203		Attrs: attrs,
22204	}
22205	op := scope.AddOperation(opspec)
22206	return op.Output(0)
22207}
22208
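// exampleThreadPoolHandle is an illustrative sketch, not generated code: it
// creates a 4-thread pool resource that ExperimentalThreadPoolDataset ops can
// consume. The helper name and attribute values are assumptions for
// demonstration only.
func exampleThreadPoolHandle() tf.Output {
	s := NewScope()
	return ThreadPoolHandle(s, 4, "example_pool",
		ThreadPoolHandleMaxIntraOpParallelism(2))
}
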
22209// Gets the next output from the given iterator as an Optional variant.
22210func IteratorGetNextAsOptional(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (optional tf.Output) {
22211	if scope.Err() != nil {
22212		return
22213	}
22214	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
22215	opspec := tf.OpSpec{
22216		Type: "IteratorGetNextAsOptional",
22217		Input: []tf.Input{
22218			iterator,
22219		},
22220		Attrs: attrs,
22221	}
22222	op := scope.AddOperation(opspec)
22223	return op.Output(0)
22224}
22225
22226// Computes the number of elements in the given table.
22227//
22228// Arguments:
22229//	table_handle: Handle to the table.
22230//
22231// Returns Scalar that contains number of elements in the table.
22232func LookupTableSizeV2(scope *Scope, table_handle tf.Output) (size tf.Output) {
22233	if scope.Err() != nil {
22234		return
22235	}
22236	opspec := tf.OpSpec{
22237		Type: "LookupTableSizeV2",
22238		Input: []tf.Input{
22239			table_handle,
22240		},
22241	}
22242	op := scope.AddOperation(opspec)
22243	return op.Output(0)
22244}
22245
22246// Creates a dataset that executes a SQL query and emits rows of the result set.
22247//
22248// Arguments:
22249//	driver_name: The database type. Currently, the only supported type is 'sqlite'.
22250//	data_source_name: A connection string to connect to the database.
22251//	query: A SQL query to execute.
22252//
22253//
22254func ExperimentalSqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output, query tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
22255	if scope.Err() != nil {
22256		return
22257	}
22258	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
22259	opspec := tf.OpSpec{
22260		Type: "ExperimentalSqlDataset",
22261		Input: []tf.Input{
22262			driver_name, data_source_name, query,
22263		},
22264		Attrs: attrs,
22265	}
22266	op := scope.AddOperation(opspec)
22267	return op.Output(0)
22268}
22269
22270// RegisterDatasetAttr is an optional argument to RegisterDataset.
22271type RegisterDatasetAttr func(optionalAttr)
22272
22273// RegisterDatasetElementSpec sets the optional element_spec attribute to value.
22274// If not specified, defaults to ""
22275func RegisterDatasetElementSpec(value string) RegisterDatasetAttr {
22276	return func(m optionalAttr) {
22277		m["element_spec"] = value
22278	}
22279}
22280
22281// Registers a dataset with the tf.data service.
22282func RegisterDataset(scope *Scope, dataset tf.Output, address tf.Output, protocol tf.Output, external_state_policy int64, optional ...RegisterDatasetAttr) (dataset_id tf.Output) {
22283	if scope.Err() != nil {
22284		return
22285	}
22286	attrs := map[string]interface{}{"external_state_policy": external_state_policy}
22287	for _, a := range optional {
22288		a(attrs)
22289	}
22290	opspec := tf.OpSpec{
22291		Type: "RegisterDataset",
22292		Input: []tf.Input{
22293			dataset, address, protocol,
22294		},
22295		Attrs: attrs,
22296	}
22297	op := scope.AddOperation(opspec)
22298	return op.Output(0)
22299}
22300
22301// AutoShardDatasetAttr is an optional argument to AutoShardDataset.
22302type AutoShardDatasetAttr func(optionalAttr)
22303
22304// AutoShardDatasetAutoShardPolicy sets the optional auto_shard_policy attribute to value.
22305// If not specified, defaults to 0
22306func AutoShardDatasetAutoShardPolicy(value int64) AutoShardDatasetAttr {
22307	return func(m optionalAttr) {
22308		m["auto_shard_policy"] = value
22309	}
22310}
22311
22312// AutoShardDatasetNumReplicas sets the optional num_replicas attribute to value.
22313// If not specified, defaults to 0
22314func AutoShardDatasetNumReplicas(value int64) AutoShardDatasetAttr {
22315	return func(m optionalAttr) {
22316		m["num_replicas"] = value
22317	}
22318}
22319
22320// Creates a dataset that shards the input dataset.
22321//
22322// Creates a dataset that shards the input dataset by num_workers, returning a
22323// sharded dataset for the index-th worker. This attempts to automatically shard
22324// a dataset by examining the Dataset graph and inserting a shard op before the
22325// inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset).
22326//
22327// This dataset will throw a NotFound error if we cannot shard the dataset
22328// automatically.
22329//
22330// Arguments:
22331//	input_dataset: A variant tensor representing the input dataset.
22332//	num_workers: A scalar representing the number of workers to distribute this dataset across.
22333//	index: A scalar representing the index of the current worker out of num_workers.
22334//
22335//
22336func AutoShardDataset(scope *Scope, input_dataset tf.Output, num_workers tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...AutoShardDatasetAttr) (handle tf.Output) {
22337	if scope.Err() != nil {
22338		return
22339	}
22340	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
22341	for _, a := range optional {
22342		a(attrs)
22343	}
22344	opspec := tf.OpSpec{
22345		Type: "AutoShardDataset",
22346		Input: []tf.Input{
22347			input_dataset, num_workers, index,
22348		},
22349		Attrs: attrs,
22350	}
22351	op := scope.AddOperation(opspec)
22352	return op.Output(0)
22353}
22354
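// exampleAutoShardDataset is an illustrative sketch, not generated code: it
// shards a range dataset across 4 workers and keeps worker 0's shard. The
// helper name and constants are assumptions for demonstration only.
func exampleAutoShardDataset() tf.Output {
	s := NewScope()
	types := []tf.DataType{tf.Int64}
	shapes := []tf.Shape{tf.ScalarShape()}
	ds := RangeDataset(s, Const(s, int64(0)), Const(s, int64(100)), Const(s, int64(1)), types, shapes)
	// num_workers = 4, index = 0: this worker sees roughly a quarter of the data.
	return AutoShardDataset(s, ds, Const(s, int64(4)), Const(s, int64(0)), types, shapes,
		AutoShardDatasetNumReplicas(4))
}
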
22355// LoadTPUEmbeddingProximalAdagradParametersAttr is an optional argument to LoadTPUEmbeddingProximalAdagradParameters.
22356type LoadTPUEmbeddingProximalAdagradParametersAttr func(optionalAttr)
22357
22358// LoadTPUEmbeddingProximalAdagradParametersTableId sets the optional table_id attribute to value.
22359// If not specified, defaults to -1
22360func LoadTPUEmbeddingProximalAdagradParametersTableId(value int64) LoadTPUEmbeddingProximalAdagradParametersAttr {
22361	return func(m optionalAttr) {
22362		m["table_id"] = value
22363	}
22364}
22365
22366// LoadTPUEmbeddingProximalAdagradParametersTableName sets the optional table_name attribute to value.
22367// If not specified, defaults to ""
22368func LoadTPUEmbeddingProximalAdagradParametersTableName(value string) LoadTPUEmbeddingProximalAdagradParametersAttr {
22369	return func(m optionalAttr) {
22370		m["table_name"] = value
22371	}
22372}
22373
22374// LoadTPUEmbeddingProximalAdagradParametersConfig sets the optional config attribute to value.
22375// If not specified, defaults to ""
22376func LoadTPUEmbeddingProximalAdagradParametersConfig(value string) LoadTPUEmbeddingProximalAdagradParametersAttr {
22377	return func(m optionalAttr) {
22378		m["config"] = value
22379	}
22380}
22381
22382// Load proximal Adagrad embedding parameters.
22383//
22384// An op that loads optimization parameters into HBM for embedding. Must be
22385// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
22386// embedding table configuration. For example, this op is used to install
22387// parameters that are loaded from a checkpoint before a training loop is
22388// executed.
22389//
22390// Arguments:
22391//	parameters: Value of parameters used in the proximal Adagrad optimization algorithm.
22392//	accumulators: Value of accumulators used in the proximal Adagrad optimization algorithm.
22393//
22394//
22395//
22396// Returns the created operation.
22397func LoadTPUEmbeddingProximalAdagradParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingProximalAdagradParametersAttr) (o *tf.Operation) {
22398	if scope.Err() != nil {
22399		return
22400	}
22401	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
22402	for _, a := range optional {
22403		a(attrs)
22404	}
22405	opspec := tf.OpSpec{
22406		Type: "LoadTPUEmbeddingProximalAdagradParameters",
22407		Input: []tf.Input{
22408			parameters, accumulators,
22409		},
22410		Attrs: attrs,
22411	}
22412	return scope.AddOperation(opspec)
22413}
22414
22415// Multiplies sparse updates into the variable referenced by `resource`.
22416//
22417// This operation computes
22418//
22419//     # Scalar indices
22420//     ref[indices, ...] *= updates[...]
22421//
22422//     # Vector indices (for each i)
22423//     ref[indices[i], ...] *= updates[i, ...]
22424//
22425//     # High rank indices (for each i, ..., j)
22426//     ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
22427//
22428// Duplicate entries are handled correctly: if multiple `indices` reference
22429// the same location, their contributions multiply.
22430//
22431// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
22432//
22433// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
22434// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
22435// </div>
22436//
22437// Arguments:
22438//	resource: Should be from a `Variable` node.
22439//	indices: A tensor of indices into the first dimension of `ref`.
22440//	updates: A tensor of updated values to add to `ref`.
22441//
22442// Returns the created operation.
22443func ResourceScatterMul(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
22444	if scope.Err() != nil {
22445		return
22446	}
22447	opspec := tf.OpSpec{
22448		Type: "ResourceScatterMul",
22449		Input: []tf.Input{
22450			resource, indices, updates,
22451		},
22452	}
22453	return scope.AddOperation(opspec)
22454}
22455
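// exampleResourceScatterMul is an illustrative sketch, not generated code: it
// multiplies two rows of a resource variable in place. The helper name is an
// assumption; when executing the graph, run the assign before the scatter.
func exampleResourceScatterMul() *tf.Operation {
	s := NewScope()
	v := VarHandleOp(s, tf.Float, tf.MakeShape(4))
	_ = AssignVariableOp(s, v, Const(s, []float32{1, 2, 3, 4}))
	// ref[0] *= 10 and ref[2] *= 100 -> [10, 2, 300, 4] once both ops have run.
	return ResourceScatterMul(s, v, Const(s, []int32{0, 2}), Const(s, []float32{10, 100}))
}
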
22456// Creates a dataset that passes a sliding window over `input_dataset`.
22457//
22458// Arguments:
22459//
22460//	window_size: A scalar representing the number of elements in the
22461// sliding window.
22462//	window_shift: A scalar representing the steps moving the sliding window
22463// forward in one iteration. It must be positive.
22464//	window_stride: A scalar representing the stride of the input elements of the sliding window.
22465// It must be positive.
22466//
22467//
22468func ExperimentalSlidingWindowDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output, window_shift tf.Output, window_stride tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
22469	if scope.Err() != nil {
22470		return
22471	}
22472	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
22473	opspec := tf.OpSpec{
22474		Type: "ExperimentalSlidingWindowDataset",
22475		Input: []tf.Input{
22476			input_dataset, window_size, window_shift, window_stride,
22477		},
22478		Attrs: attrs,
22479	}
22480	op := scope.AddOperation(opspec)
22481	return op.Output(0)
22482}
22483
22484// RealAttr is an optional argument to Real.
22485type RealAttr func(optionalAttr)
22486
22487// RealTout sets the optional Tout attribute to value.
22488// If not specified, defaults to DT_FLOAT
22489func RealTout(value tf.DataType) RealAttr {
22490	return func(m optionalAttr) {
22491		m["Tout"] = value
22492	}
22493}
22494
22495// Returns the real part of a complex number.
22496//
22497// Given a tensor `input` of complex numbers, this operation returns a tensor of
22498// type `float` that is the real part of each element in `input`. All elements in
22499// `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
22500// part returned by this operation and *b* is the imaginary part.
22501//
22502// For example:
22503//
22504// ```
22505// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
22506// tf.real(input) ==> [-2.25, 3.25]
22507// ```
22508func Real(scope *Scope, input tf.Output, optional ...RealAttr) (output tf.Output) {
22509	if scope.Err() != nil {
22510		return
22511	}
22512	attrs := map[string]interface{}{}
22513	for _, a := range optional {
22514		a(attrs)
22515	}
22516	opspec := tf.OpSpec{
22517		Type: "Real",
22518		Input: []tf.Input{
22519			input,
22520		},
22521		Attrs: attrs,
22522	}
22523	op := scope.AddOperation(opspec)
22524	return op.Output(0)
22525}
22526
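// exampleReal is an illustrative sketch, not generated code: it extracts the
// real parts of a complex constant, mirroring the example above. The helper
// name is an assumption for demonstration only.
func exampleReal() tf.Output {
	s := NewScope()
	c := Const(s, []complex64{complex(-2.25, 4.75), complex(3.25, 5.75)})
	// The default Tout of DT_FLOAT matches complex64 input -> [-2.25, 3.25].
	return Real(s, c)
}
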
22527// Deprecated. Use TensorArraySizeV3
22528//
22529// DEPRECATED at GraphDef version 26: Use TensorArraySizeV3
22530func TensorArraySizeV2(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output) {
22531	if scope.Err() != nil {
22532		return
22533	}
22534	opspec := tf.OpSpec{
22535		Type: "TensorArraySizeV2",
22536		Input: []tf.Input{
22537			handle, flow_in,
22538		},
22539	}
22540	op := scope.AddOperation(opspec)
22541	return op.Output(0)
22542}
22543
22544// ArgMaxAttr is an optional argument to ArgMax.
22545type ArgMaxAttr func(optionalAttr)
22546
22547// ArgMaxOutputType sets the optional output_type attribute to value.
22548// If not specified, defaults to DT_INT64
22549func ArgMaxOutputType(value tf.DataType) ArgMaxAttr {
22550	return func(m optionalAttr) {
22551		m["output_type"] = value
22552	}
22553}
22554
22555// Returns the index with the largest value across dimensions of a tensor.
22556//
22557// Note that in case of ties the identity of the return value is not guaranteed.
22558//
22559// Usage:
22560//   ```python
22561//   import tensorflow as tf
22562//   a = [1, 10, 26.9, 2.8, 166.32, 62.3]
22563//   b = tf.math.argmax(input = a)
22564//   c = tf.keras.backend.eval(b)
22565//   # c = 4
22566//   # here a[4] = 166.32 which is the largest element of a across axis 0
22567//   ```
22568//
22569// Arguments:
22570//
22571//	dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
22572// Describes which dimension of the input Tensor to reduce across. For vectors,
22573// use dimension = 0.
22574func ArgMax(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMaxAttr) (output tf.Output) {
22575	if scope.Err() != nil {
22576		return
22577	}
22578	attrs := map[string]interface{}{}
22579	for _, a := range optional {
22580		a(attrs)
22581	}
22582	opspec := tf.OpSpec{
22583		Type: "ArgMax",
22584		Input: []tf.Input{
22585			input, dimension,
22586		},
22587		Attrs: attrs,
22588	}
22589	op := scope.AddOperation(opspec)
22590	return op.Output(0)
22591}
22592
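// exampleArgMax is an illustrative sketch, not generated code: it reproduces
// the Python usage above with Go constants. The helper name is an assumption
// for demonstration only.
func exampleArgMax() tf.Output {
	s := NewScope()
	a := Const(s, []float32{1, 10, 26.9, 2.8, 166.32, 62.3})
	// Reduce across axis 0; the largest element, 166.32, is at index 4.
	return ArgMax(s, a, Const(s, int32(0)), ArgMaxOutputType(tf.Int32))
}
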
22593// Reshapes a tensor.
22594//
22595// Given `tensor`, this operation returns a tensor that has the same values
22596// as `tensor` with shape `shape`.
22597//
22598// If one component of 1-D tensor `shape` is the special value -1, the size of that
22599// dimension is computed so that the total size remains constant.  In particular, a
22600// `shape` of `[-1]` flattens into 1-D.  At most one component of `shape` may be
22601// unknown.
22602//
22603// The `shape` must be 1-D and the operation returns a tensor with shape
22604// `shape` filled with the values of `tensor`. In this case, the number of elements
22605// implied by `shape` must be the same as the number of elements in `tensor`.
22606//
22607// It is an error if `shape` is not 1-D.
22608//
22609// For example:
22610//
22611// ```
22612// # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
22613// # tensor 't' has shape [9]
22614// reshape(t, [3, 3]) ==> [[1, 2, 3],
22615//                         [4, 5, 6],
22616//                         [7, 8, 9]]
22617//
22618// # tensor 't' is [[[1, 1], [2, 2]],
22619// #                [[3, 3], [4, 4]]]
22620// # tensor 't' has shape [2, 2, 2]
22621// reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
22622//                         [3, 3, 4, 4]]
22623//
22624// # tensor 't' is [[[1, 1, 1],
22625// #                 [2, 2, 2]],
22626// #                [[3, 3, 3],
22627// #                 [4, 4, 4]],
22628// #                [[5, 5, 5],
22629// #                 [6, 6, 6]]]
22630// # tensor 't' has shape [3, 2, 3]
22631// # pass '[-1]' to flatten 't'
22632// reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
22633//
22634// # -1 can also be used to infer the shape
22635//
22636// # -1 is inferred to be 9:
22637// reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
22638//                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
22639// # -1 is inferred to be 2:
22640// reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
22641//                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
22642// # -1 is inferred to be 3:
22643// reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
22644//                               [2, 2, 2],
22645//                               [3, 3, 3]],
22646//                              [[4, 4, 4],
22647//                               [5, 5, 5],
22648//                               [6, 6, 6]]]
22649//
22650// # tensor 't' is [7]
22651// # shape `[]` reshapes to a scalar
22652// reshape(t, []) ==> 7
22653// ```
22654//
22655// Arguments:
22656//
22657//	shape: Defines the shape of the output tensor.
22658func Reshape(scope *Scope, tensor tf.Output, shape tf.Output) (output tf.Output) {
22659	if scope.Err() != nil {
22660		return
22661	}
22662	opspec := tf.OpSpec{
22663		Type: "Reshape",
22664		Input: []tf.Input{
22665			tensor, shape,
22666		},
22667	}
22668	op := scope.AddOperation(opspec)
22669	return op.Output(0)
22670}
22671
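// exampleReshape is an illustrative sketch, not generated code: it mirrors the
// first example above, reshaping a 9-element vector into a 3x3 matrix. The
// helper name is an assumption for demonstration only.
func exampleReshape() tf.Output {
	s := NewScope()
	t := Const(s, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9}) // shape [9]
	return Reshape(s, t, Const(s, []int32{3, 3}))     // -> [[1 2 3] [4 5 6] [7 8 9]]
}
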
22672// SnapshotDatasetAttr is an optional argument to SnapshotDataset.
22673type SnapshotDatasetAttr func(optionalAttr)
22674
22675// SnapshotDatasetCompression sets the optional compression attribute to value.
22676// If not specified, defaults to ""
22677func SnapshotDatasetCompression(value string) SnapshotDatasetAttr {
22678	return func(m optionalAttr) {
22679		m["compression"] = value
22680	}
22681}
22682
22683// SnapshotDatasetReaderPathPrefix sets the optional reader_path_prefix attribute to value.
22684// If not specified, defaults to ""
22685func SnapshotDatasetReaderPathPrefix(value string) SnapshotDatasetAttr {
22686	return func(m optionalAttr) {
22687		m["reader_path_prefix"] = value
22688	}
22689}
22690
22691// SnapshotDatasetWriterPathPrefix sets the optional writer_path_prefix attribute to value.
22692// If not specified, defaults to ""
22693func SnapshotDatasetWriterPathPrefix(value string) SnapshotDatasetAttr {
22694	return func(m optionalAttr) {
22695		m["writer_path_prefix"] = value
22696	}
22697}
22698
22699// SnapshotDatasetShardSizeBytes sets the optional shard_size_bytes attribute to value.
22700// If not specified, defaults to 10737418240
22701func SnapshotDatasetShardSizeBytes(value int64) SnapshotDatasetAttr {
22702	return func(m optionalAttr) {
22703		m["shard_size_bytes"] = value
22704	}
22705}
22706
22707// SnapshotDatasetPendingSnapshotExpirySeconds sets the optional pending_snapshot_expiry_seconds attribute to value.
22708// If not specified, defaults to 86400
22709func SnapshotDatasetPendingSnapshotExpirySeconds(value int64) SnapshotDatasetAttr {
22710	return func(m optionalAttr) {
22711		m["pending_snapshot_expiry_seconds"] = value
22712	}
22713}
22714
22715// SnapshotDatasetNumReaderThreads sets the optional num_reader_threads attribute to value.
22716// If not specified, defaults to 1
22717func SnapshotDatasetNumReaderThreads(value int64) SnapshotDatasetAttr {
22718	return func(m optionalAttr) {
22719		m["num_reader_threads"] = value
22720	}
22721}
22722
22723// SnapshotDatasetReaderBufferSize sets the optional reader_buffer_size attribute to value.
22724// If not specified, defaults to 1
22725func SnapshotDatasetReaderBufferSize(value int64) SnapshotDatasetAttr {
22726	return func(m optionalAttr) {
22727		m["reader_buffer_size"] = value
22728	}
22729}
22730
22731// SnapshotDatasetNumWriterThreads sets the optional num_writer_threads attribute to value.
22732// If not specified, defaults to 1
22733func SnapshotDatasetNumWriterThreads(value int64) SnapshotDatasetAttr {
22734	return func(m optionalAttr) {
22735		m["num_writer_threads"] = value
22736	}
22737}
22738
22739// SnapshotDatasetWriterBufferSize sets the optional writer_buffer_size attribute to value.
22740// If not specified, defaults to 1
22741func SnapshotDatasetWriterBufferSize(value int64) SnapshotDatasetAttr {
22742	return func(m optionalAttr) {
22743		m["writer_buffer_size"] = value
22744	}
22745}
22746
22747// SnapshotDatasetShuffleOnRead sets the optional shuffle_on_read attribute to value.
22748// If not specified, defaults to false
22749func SnapshotDatasetShuffleOnRead(value bool) SnapshotDatasetAttr {
22750	return func(m optionalAttr) {
22751		m["shuffle_on_read"] = value
22752	}
22753}
22754
22755// SnapshotDatasetSeed sets the optional seed attribute to value.
22756// If not specified, defaults to 0
22757func SnapshotDatasetSeed(value int64) SnapshotDatasetAttr {
22758	return func(m optionalAttr) {
22759		m["seed"] = value
22760	}
22761}
22762
22763// SnapshotDatasetSeed2 sets the optional seed2 attribute to value.
22764// If not specified, defaults to 0
22765func SnapshotDatasetSeed2(value int64) SnapshotDatasetAttr {
22766	return func(m optionalAttr) {
22767		m["seed2"] = value
22768	}
22769}
22770
22771// SnapshotDatasetMode sets the optional mode attribute to value.
22772// If not specified, defaults to "auto"
22773func SnapshotDatasetMode(value string) SnapshotDatasetAttr {
22774	return func(m optionalAttr) {
22775		m["mode"] = value
22776	}
22777}
22778
22779// SnapshotDatasetSnapshotName sets the optional snapshot_name attribute to value.
22780// If not specified, defaults to ""
22781func SnapshotDatasetSnapshotName(value string) SnapshotDatasetAttr {
22782	return func(m optionalAttr) {
22783		m["snapshot_name"] = value
22784	}
22785}
22786
22787// Creates a dataset that will write to / read from a snapshot.
22788//
22789// This dataset attempts to determine whether a valid snapshot exists at
22790// `path`, and reads from the snapshot in lieu of using `input_dataset`.
22791// If not, it will run the preprocessing pipeline as usual, and write out a
22792// snapshot of the data processed for future use.
22793//
22794// Arguments:
22795//	input_dataset: A variant tensor representing the input dataset.
22796//	path: The path we should write snapshots to / read snapshots from.
22797//
22798//
22799func SnapshotDataset(scope *Scope, input_dataset tf.Output, path tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...SnapshotDatasetAttr) (handle tf.Output) {
22800	if scope.Err() != nil {
22801		return
22802	}
22803	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
22804	for _, a := range optional {
22805		a(attrs)
22806	}
22807	opspec := tf.OpSpec{
22808		Type: "SnapshotDataset",
22809		Input: []tf.Input{
22810			input_dataset, path,
22811		},
22812		Attrs: attrs,
22813	}
22814	op := scope.AddOperation(opspec)
22815	return op.Output(0)
22816}
22817
22818// RebatchDatasetAttr is an optional argument to RebatchDataset.
22819type RebatchDatasetAttr func(optionalAttr)
22820
22821// RebatchDatasetUseFallback sets the optional use_fallback attribute to value.
22822// If not specified, defaults to true
22823func RebatchDatasetUseFallback(value bool) RebatchDatasetAttr {
22824	return func(m optionalAttr) {
22825		m["use_fallback"] = value
22826	}
22827}
22828
22829// Creates a dataset that changes the batch size.
22830//
22831// Creates a dataset that changes the batch size of the dataset to the current
22832// batch size divided by `num_replicas`.
22833//
22834// Arguments:
22835//	input_dataset: A variant tensor representing the input dataset.
22836//	num_replicas: A scalar representing the number of replicas to distribute this batch across. As
22837// a result of this transformation the current batch size would end up being
22838// divided by this parameter.
22839//
22840//
22841func RebatchDataset(scope *Scope, input_dataset tf.Output, num_replicas tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...RebatchDatasetAttr) (handle tf.Output) {
22842	if scope.Err() != nil {
22843		return
22844	}
22845	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
22846	for _, a := range optional {
22847		a(attrs)
22848	}
22849	opspec := tf.OpSpec{
22850		Type: "RebatchDataset",
22851		Input: []tf.Input{
22852			input_dataset, num_replicas,
22853		},
22854		Attrs: attrs,
22855	}
22856	op := scope.AddOperation(opspec)
22857	return op.Output(0)
22858}
22859
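// exampleRebatchDataset is an illustrative sketch, not generated code: it
// splits each batch of an already-batched dataset across 2 replicas, so the
// per-replica batch size is the global batch size divided by 2. The scope and
// input parameters are assumptions for demonstration only.
func exampleRebatchDataset(s *Scope, batched tf.Output) tf.Output {
	types := []tf.DataType{tf.Int64}
	shapes := []tf.Shape{tf.MakeShape(-1)} // leading batch dimension is unknown
	return RebatchDataset(s, batched, Const(s, int64(2)), types, shapes)
}
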
22860// Creates a dataset that contains the unique elements of `input_dataset`.
22861func UniqueDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
22862	if scope.Err() != nil {
22863		return
22864	}
22865	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
22866	opspec := tf.OpSpec{
22867		Type: "UniqueDataset",
22868		Input: []tf.Input{
22869			input_dataset,
22870		},
22871		Attrs: attrs,
22872	}
22873	op := scope.AddOperation(opspec)
22874	return op.Output(0)
22875}
22876
22877// FractionalAvgPoolAttr is an optional argument to FractionalAvgPool.
22878type FractionalAvgPoolAttr func(optionalAttr)
22879
22880// FractionalAvgPoolPseudoRandom sets the optional pseudo_random attribute to value.
22881//
22882// value: When set to True, generates the pooling sequence in a
22883// pseudorandom fashion, otherwise, in a random fashion. See the paper [Benjamin
22884// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
22885// difference between pseudorandom and random.
22886// If not specified, defaults to false
22887func FractionalAvgPoolPseudoRandom(value bool) FractionalAvgPoolAttr {
22888	return func(m optionalAttr) {
22889		m["pseudo_random"] = value
22890	}
22891}
22892
22893// FractionalAvgPoolOverlapping sets the optional overlapping attribute to value.
22894//
22895// value: When set to True, it means when pooling, the values at the boundary
22896// of adjacent pooling cells are used by both cells. For example:
22897//
22898// `index  0  1  2  3  4`
22899//
22900// `value  20 5  16 3  7`
22901//
22902// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
22903// The result would be [41/3, 26/3] for fractional avg pooling.
22904// If not specified, defaults to false
22905func FractionalAvgPoolOverlapping(value bool) FractionalAvgPoolAttr {
22906	return func(m optionalAttr) {
22907		m["overlapping"] = value
22908	}
22909}
22910
22911// FractionalAvgPoolDeterministic sets the optional deterministic attribute to value.
22912//
22913// value: When set to True, a fixed pooling region will be used when
22914// iterating over a FractionalAvgPool node in the computation graph. Mainly used
22915// in unit test to make FractionalAvgPool deterministic.
22916// If not specified, defaults to false
22917func FractionalAvgPoolDeterministic(value bool) FractionalAvgPoolAttr {
22918	return func(m optionalAttr) {
22919		m["deterministic"] = value
22920	}
22921}
22922
22923// FractionalAvgPoolSeed sets the optional seed attribute to value.
22924//
22925// value: If either seed or seed2 are set to be non-zero, the random number
22926// generator is seeded by the given seed.  Otherwise, it is seeded by a
22927// random seed.
22928// If not specified, defaults to 0
22929func FractionalAvgPoolSeed(value int64) FractionalAvgPoolAttr {
22930	return func(m optionalAttr) {
22931		m["seed"] = value
22932	}
22933}
22934
22935// FractionalAvgPoolSeed2 sets the optional seed2 attribute to value.
22936//
22937// value: A second seed to avoid seed collision.
22938// If not specified, defaults to 0
22939func FractionalAvgPoolSeed2(value int64) FractionalAvgPoolAttr {
22940	return func(m optionalAttr) {
22941		m["seed2"] = value
22942	}
22943}
22944
22945// Performs fractional average pooling on the input.
22946//
22947// Fractional average pooling is similar to Fractional max pooling in the pooling
22948// region generation step. The only difference is that after pooling regions are
22949// generated, a mean operation is performed instead of a max operation in each
22950// pooling region.
22951//
22952// Arguments:
22953//	value: 4-D with shape `[batch, height, width, channels]`.
22954//	pooling_ratio: Pooling ratio for each dimension of `value`, currently only
22955// supports row and col dimension and should be >= 1.0. For example, a valid
22956// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
22957// must be 1.0 because we don't allow pooling on batch and channels
22958// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
22959// respectively.
22960//
22961// Returns:
22962//	output: output tensor after fractional avg pooling.
22963//	row_pooling_sequence: row pooling sequence, needed to calculate gradient.
22964//	col_pooling_sequence: column pooling sequence, needed to calculate gradient.
22965func FractionalAvgPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalAvgPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
22966	if scope.Err() != nil {
22967		return
22968	}
22969	attrs := map[string]interface{}{"pooling_ratio": pooling_ratio}
22970	for _, a := range optional {
22971		a(attrs)
22972	}
22973	opspec := tf.OpSpec{
22974		Type: "FractionalAvgPool",
22975		Input: []tf.Input{
22976			value,
22977		},
22978		Attrs: attrs,
22979	}
22980	op := scope.AddOperation(opspec)
22981	return op.Output(0), op.Output(1), op.Output(2)
22982}
22983
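// exampleFractionalAvgPool is an illustrative sketch, not generated code: it
// pools a 4x4 single-channel image using the ratio from the docs above. The
// helper name and placeholder shape are assumptions for demonstration only.
func exampleFractionalAvgPool() (tf.Output, tf.Output, tf.Output) {
	s := NewScope()
	img := Placeholder(s, tf.Float, PlaceholderShape(tf.MakeShape(1, 4, 4, 1)))
	// First and last ratios must be 1.0: no pooling over batch or channels.
	return FractionalAvgPool(s, img, []float32{1.0, 1.44, 1.73, 1.0},
		FractionalAvgPoolDeterministic(true))
}
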
22984// Creates a Dataset that returns pseudorandom numbers.
22985//
22986// Creates a Dataset that returns a stream of uniformly distributed
22987// pseudorandom 64-bit signed integers.
22988//
22989// In the TensorFlow Python API, you can instantiate this dataset via the
22990// class `tf.data.experimental.RandomDataset`.
22991//
22992// Instances of this dataset are also created as a result of the
22993// `hoist_random_uniform` static optimization. Whether this optimization is
22994// performed is determined by the `experimental_optimization.hoist_random_uniform`
22995// option of `tf.data.Options`.
22996//
22997// Arguments:
22998//	seed: A scalar seed for the random number generator. If either seed or
22999// seed2 is set to be non-zero, the random number generator is seeded
23000// by the given seed.  Otherwise, a random seed is used.
23001//	seed2: A second scalar seed to avoid seed collision.
23002//
23003//
23004func RandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
23005	if scope.Err() != nil {
23006		return
23007	}
23008	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
23009	opspec := tf.OpSpec{
23010		Type: "RandomDataset",
23011		Input: []tf.Input{
23012			seed, seed2,
23013		},
23014		Attrs: attrs,
23015	}
23016	op := scope.AddOperation(opspec)
23017	return op.Output(0)
23018}
23019
23020// ExperimentalIgnoreErrorsDatasetAttr is an optional argument to ExperimentalIgnoreErrorsDataset.
23021type ExperimentalIgnoreErrorsDatasetAttr func(optionalAttr)
23022
23023// ExperimentalIgnoreErrorsDatasetLogWarning sets the optional log_warning attribute to value.
23024// If not specified, defaults to false
23025func ExperimentalIgnoreErrorsDatasetLogWarning(value bool) ExperimentalIgnoreErrorsDatasetAttr {
23026	return func(m optionalAttr) {
23027		m["log_warning"] = value
23028	}
23029}
23030
23031// Creates a dataset that contains the elements of `input_dataset` ignoring errors.
23032func ExperimentalIgnoreErrorsDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalIgnoreErrorsDatasetAttr) (handle tf.Output) {
23033	if scope.Err() != nil {
23034		return
23035	}
23036	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
23037	for _, a := range optional {
23038		a(attrs)
23039	}
23040	opspec := tf.OpSpec{
23041		Type: "ExperimentalIgnoreErrorsDataset",
23042		Input: []tf.Input{
23043			input_dataset,
23044		},
23045		Attrs: attrs,
23046	}
23047	op := scope.AddOperation(opspec)
23048	return op.Output(0)
23049}
23050
23051// Creates a Dataset that returns pseudorandom numbers.
23052//
23053// Arguments:
23054//	seed: A scalar seed for the random number generator. If either seed or
23055// seed2 is set to be non-zero, the random number generator is seeded
23056// by the given seed.  Otherwise, a random seed is used.
23057//	seed2: A second scalar seed to avoid seed collision.
23058//
23059//
23060func ExperimentalRandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
23061	if scope.Err() != nil {
23062		return
23063	}
23064	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
23065	opspec := tf.OpSpec{
23066		Type: "ExperimentalRandomDataset",
23067		Input: []tf.Input{
23068			seed, seed2,
23069		},
23070		Attrs: attrs,
23071	}
23072	op := scope.AddOperation(opspec)
23073	return op.Output(0)
23074}
23075
23076// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
23077//
23078// Arguments:
23079//
23080//	num_threads: Identifies the number of threads to use for the private threadpool.
23081//
23082//
23083func PrivateThreadPoolDataset(scope *Scope, input_dataset tf.Output, num_threads tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
23084	if scope.Err() != nil {
23085		return
23086	}
23087	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
23088	opspec := tf.OpSpec{
23089		Type: "PrivateThreadPoolDataset",
23090		Input: []tf.Input{
23091			input_dataset, num_threads,
23092		},
23093		Attrs: attrs,
23094	}
23095	op := scope.AddOperation(opspec)
23096	return op.Output(0)
23097}
23098
23099// Returns a batched matrix tensor with new batched diagonal values.
23100//
23101// Given `input` and `diagonal`, this operation returns a tensor with the
23102// same shape and values as `input`, except for the main diagonal of the
23103// innermost matrices.  These will be overwritten by the values in `diagonal`.
23104//
23105// The output is computed as follows:
23106//
23107// Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
23108// `k` dimensions `[I, J, K, ..., min(M, N)]`.  Then the output is a
23109// tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
23110//
23111//   * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
23112//   * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
23113//
23114// Arguments:
23115//	input: Rank `k+1`, where `k >= 1`.
23116//	diagonal: Rank `k`, where `k >= 1`.
23117//
23118// Returns Rank `k+1`, with `output.shape = input.shape`.
23119func MatrixSetDiag(scope *Scope, input tf.Output, diagonal tf.Output) (output tf.Output) {
23120	if scope.Err() != nil {
23121		return
23122	}
23123	opspec := tf.OpSpec{
23124		Type: "MatrixSetDiag",
23125		Input: []tf.Input{
23126			input, diagonal,
23127		},
23128	}
23129	op := scope.AddOperation(opspec)
23130	return op.Output(0)
23131}
23132
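// exampleMatrixSetDiag is an illustrative sketch, not generated code: it
// overwrites the main diagonal of a single 3x3 matrix (rank k+1 with k = 1).
// The helper name is an assumption for demonstration only.
func exampleMatrixSetDiag() tf.Output {
	s := NewScope()
	input := Const(s, [][]float32{{7, 7, 7}, {7, 7, 7}, {7, 7, 7}})
	// output[m][n] = diagonal[n] when m == n, else input[m][n]:
	// -> [[1 7 7] [7 2 7] [7 7 3]]
	return MatrixSetDiag(s, input, Const(s, []float32{1, 2, 3}))
}
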
23133// ParseExampleDatasetV2Attr is an optional argument to ParseExampleDatasetV2.
23134type ParseExampleDatasetV2Attr func(optionalAttr)
23135
23136// ParseExampleDatasetV2Deterministic sets the optional deterministic attribute to value.
23137//
23138// value: A string indicating the op-level determinism to use. Deterministic controls
23139// whether the dataset is allowed to return elements out of order if the next
23140// element to be returned isn't available, but a later element is. Options are
23141// "true", "false", and "default". "default" indicates that determinism should be
23142// decided by the `experimental_deterministic` parameter of `tf.data.Options`.
23143// If not specified, defaults to "default"
23144func ParseExampleDatasetV2Deterministic(value string) ParseExampleDatasetV2Attr {
23145	return func(m optionalAttr) {
23146		m["deterministic"] = value
23147	}
23148}
23149
23150// ParseExampleDatasetV2RaggedKeys sets the optional ragged_keys attribute to value.
23151// If not specified, defaults to {}
23152//
23153// REQUIRES: len(value) >= 0
23154func ParseExampleDatasetV2RaggedKeys(value []string) ParseExampleDatasetV2Attr {
23155	return func(m optionalAttr) {
23156		m["ragged_keys"] = value
23157	}
23158}
23159
23160// ParseExampleDatasetV2RaggedValueTypes sets the optional ragged_value_types attribute to value.
23161// If not specified, defaults to {}
23162//
23163// REQUIRES: len(value) >= 0
23164func ParseExampleDatasetV2RaggedValueTypes(value []tf.DataType) ParseExampleDatasetV2Attr {
23165	return func(m optionalAttr) {
23166		m["ragged_value_types"] = value
23167	}
23168}
23169
23170// ParseExampleDatasetV2RaggedSplitTypes sets the optional ragged_split_types attribute to value.
23171// If not specified, defaults to {}
23172//
23173// REQUIRES: len(value) >= 0
23174func ParseExampleDatasetV2RaggedSplitTypes(value []tf.DataType) ParseExampleDatasetV2Attr {
23175	return func(m optionalAttr) {
23176		m["ragged_split_types"] = value
23177	}
23178}
23179
23180// Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.
23181//
23182// Arguments:
23183//
23184//
23185//	dense_defaults: A dict mapping string keys to `Tensor`s.
23186// The keys of the dict must match the dense_keys of the feature.
23187//	sparse_keys: A list of string keys in the examples features.
23188// The results for these keys will be returned as `SparseTensor` objects.
23189//	dense_keys: A list of Ndense string Tensors (scalars).
23190// The keys expected in the Examples features associated with dense values.
23191//	sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
23192// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
23193// and `tf.string` (`BytesList`) are supported.
23194//	dense_shapes: List of tuples with the same length as `dense_keys`.
23195// The shape of the data for each dense feature referenced by `dense_keys`.
23196// Required for any input tensors identified by `dense_keys`.  Must be
23197// either fully defined, or may contain an unknown first dimension.
23198// An unknown first dimension means the feature is treated as having
23199// a variable number of blocks, and the output shape along this dimension
23200// is considered unknown at graph build time.  Padding is applied for
23201// minibatch elements smaller than the maximum number of blocks for the
23202// given feature along this dimension.
23203//	output_types: The type list for the return values.
23204//	output_shapes: The list of shapes being produced.
23205func ParseExampleDatasetV2(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, dense_defaults []tf.Output, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ParseExampleDatasetV2Attr) (handle tf.Output) {
23206	if scope.Err() != nil {
23207		return
23208	}
23209	attrs := map[string]interface{}{"sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes, "output_types": output_types, "output_shapes": output_shapes}
23210	for _, a := range optional {
23211		a(attrs)
23212	}
23213	opspec := tf.OpSpec{
23214		Type: "ParseExampleDatasetV2",
23215		Input: []tf.Input{
23216			input_dataset, num_parallel_calls, tf.OutputList(dense_defaults),
23217		},
23218		Attrs: attrs,
23219	}
23220	op := scope.AddOperation(opspec)
23221	return op.Output(0)
23222}
23223
23224// RestoreSliceAttr is an optional argument to RestoreSlice.
23225type RestoreSliceAttr func(optionalAttr)
23226
23227// RestoreSlicePreferredShard sets the optional preferred_shard attribute to value.
23228//
23229// value: Index of file to open first if multiple files match
23230// `file_pattern`. See the documentation for `Restore`.
23231// If not specified, defaults to -1
23232func RestoreSlicePreferredShard(value int64) RestoreSliceAttr {
23233	return func(m optionalAttr) {
23234		m["preferred_shard"] = value
23235	}
23236}
23237
23238// Restores a tensor from checkpoint files.
23239//
23240// This is like `Restore` except that the restored tensor can be listed as filling
23241// only a slice of a larger tensor.  `shape_and_slice` specifies the shape of the
23242// larger tensor and the slice that the restored tensor covers.
23243//
23244// The `shape_and_slice` input has the same format as the
23245// elements of the `shapes_and_slices` input of the `SaveSlices` op.
23246//
23247// Arguments:
23248//	file_pattern: Must have a single element. The pattern of the files from
23249// which we read the tensor.
23250//	tensor_name: Must have a single element. The name of the tensor to be
23251// restored.
23252//	shape_and_slice: Scalar. The shape and slice specification to use when
23253// restoring a tensor.
23254//	dt: The type of the tensor to be restored.
23255//
23256// Returns The restored tensor.
23257func RestoreSlice(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, shape_and_slice tf.Output, dt tf.DataType, optional ...RestoreSliceAttr) (tensor tf.Output) {
23258	if scope.Err() != nil {
23259		return
23260	}
23261	attrs := map[string]interface{}{"dt": dt}
23262	for _, a := range optional {
23263		a(attrs)
23264	}
23265	opspec := tf.OpSpec{
23266		Type: "RestoreSlice",
23267		Input: []tf.Input{
23268			file_pattern, tensor_name, shape_and_slice,
23269		},
23270		Attrs: attrs,
23271	}
23272	op := scope.AddOperation(opspec)
23273	return op.Output(0)
23274}
23275
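// exampleRestoreSlice is an illustrative sketch, not generated code: the
// checkpoint pattern, tensor name, and slice spec are assumptions for
// demonstration only. The spec "4 10 0,2:-" names a [4, 10] tensor and
// restores rows 0..1 with all columns, following the SaveSlices format.
func exampleRestoreSlice() tf.Output {
	s := NewScope()
	return RestoreSlice(s, Const(s, "/tmp/ckpt-*"), Const(s, "weights"),
		Const(s, "4 10 0,2:-"), tf.Float)
}
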
23276// ParameterizedTruncatedNormalAttr is an optional argument to ParameterizedTruncatedNormal.
23277type ParameterizedTruncatedNormalAttr func(optionalAttr)
23278
23279// ParameterizedTruncatedNormalSeed sets the optional seed attribute to value.
23280//
23281// value: If either `seed` or `seed2` are set to be non-zero, the random number
23282// generator is seeded by the given seed.  Otherwise, it is seeded by a
23283// random seed.
23284// If not specified, defaults to 0
23285func ParameterizedTruncatedNormalSeed(value int64) ParameterizedTruncatedNormalAttr {
23286	return func(m optionalAttr) {
23287		m["seed"] = value
23288	}
23289}
23290
23291// ParameterizedTruncatedNormalSeed2 sets the optional seed2 attribute to value.
23292//
23293// value: A second seed to avoid seed collision.
23294// If not specified, defaults to 0
23295func ParameterizedTruncatedNormalSeed2(value int64) ParameterizedTruncatedNormalAttr {
23296	return func(m optionalAttr) {
23297		m["seed2"] = value
23298	}
23299}
23300
23301// Outputs random values from a truncated normal distribution.
23302//
23303// The parameters may each be a scalar which applies to the entire output, or a
23304// vector of length shape[0] which stores the parameters for each batch.
23305//
23306// Arguments:
23307//	shape: The shape of the output tensor. Batches are indexed by the 0th dimension.
23308//	means: The mean parameter of each batch.
23309//	stdevs: The standard deviation parameter of each batch. Must be greater than 0.
23310//	minvals: The minimum cutoff. May be -infinity.
23311//	maxvals: The maximum cutoff. May be +infinity, and must be more than the minval
23312// for each batch.
23313//
23314// Returns A matrix of shape num_batches x samples_per_batch, filled with random
23315// truncated normal values using the parameters for each row.
23316func ParameterizedTruncatedNormal(scope *Scope, shape tf.Output, means tf.Output, stdevs tf.Output, minvals tf.Output, maxvals tf.Output, optional ...ParameterizedTruncatedNormalAttr) (output tf.Output) {
23317	if scope.Err() != nil {
23318		return
23319	}
23320	attrs := map[string]interface{}{}
23321	for _, a := range optional {
23322		a(attrs)
23323	}
23324	opspec := tf.OpSpec{
23325		Type: "ParameterizedTruncatedNormal",
23326		Input: []tf.Input{
23327			shape, means, stdevs, minvals, maxvals,
23328		},
23329		Attrs: attrs,
23330	}
23331	op := scope.AddOperation(opspec)
23332	return op.Output(0)
23333}
23334
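// exampleParameterizedTruncatedNormal is an illustrative sketch, not generated
// code: it draws 3 samples for each of 2 batches with per-batch parameters.
// The helper name and constants are assumptions for demonstration only.
func exampleParameterizedTruncatedNormal() tf.Output {
	s := NewScope()
	shape := Const(s, []int32{2, 3}) // 2 batches, 3 samples per batch
	means := Const(s, []float32{0, 5})
	stdevs := Const(s, []float32{1, 2})
	minvals := Const(s, []float32{-1, 3})
	maxvals := Const(s, []float32{1, 7})
	return ParameterizedTruncatedNormal(s, shape, means, stdevs, minvals, maxvals,
		ParameterizedTruncatedNormalSeed(42))
}
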
23335// Computes the sign and the log of the absolute value of the determinant
23336// of one or more square matrices.
23338//
23339// The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions
23340// form square matrices. The outputs are two tensors containing the signs and
23341// absolute values of the log determinants for all N input submatrices
23342// `[..., :, :]` such that `determinant = sign*exp(log_abs_determinant)`.
23343// The `log_abs_determinant` is computed as `det(P)*sum(log(diag(LU)))` where `LU`
23344// is the `LU` decomposition of the input and `P` is the corresponding
23345// permutation matrix.
23346//
23347// Arguments:
23348//	input: Shape is `[N, M, M]`.
23349//
23350// Returns:
23351//	sign: The signs of the log determinants of the inputs. Shape is `[N]`.
23352//	log_abs_determinant: The logs of the absolute values of the determinants
23353// of the N input matrices.  Shape is `[N]`.
23354func LogMatrixDeterminant(scope *Scope, input tf.Output) (sign tf.Output, log_abs_determinant tf.Output) {
23355	if scope.Err() != nil {
23356		return
23357	}
23358	opspec := tf.OpSpec{
23359		Type: "LogMatrixDeterminant",
23360		Input: []tf.Input{
23361			input,
23362		},
23363	}
23364	op := scope.AddOperation(opspec)
23365	return op.Output(0), op.Output(1)
23366}
23367
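// exampleLogMatrixDeterminant is an illustrative sketch, not generated code:
// it recovers det = sign * exp(log_abs_determinant) for one 2x2 matrix. The
// helper name is an assumption for demonstration only.
func exampleLogMatrixDeterminant() (tf.Output, tf.Output) {
	s := NewScope()
	m := Const(s, [][][]float32{{{2, 0}, {0, 3}}}) // one 2x2 matrix, det = 6
	// sign -> [1], log_abs_determinant -> [ln 6], so 1 * exp(ln 6) = 6.
	return LogMatrixDeterminant(s, m)
}
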
23368// SkipgramAttr is an optional argument to Skipgram.
23369type SkipgramAttr func(optionalAttr)
23370
23371// SkipgramWindowSize sets the optional window_size attribute to value.
23372//
23373// value: The number of words to predict to the left and right of the target.
23374// If not specified, defaults to 5
23375func SkipgramWindowSize(value int64) SkipgramAttr {
23376	return func(m optionalAttr) {
23377		m["window_size"] = value
23378	}
23379}
23380
23381// SkipgramMinCount sets the optional min_count attribute to value.
23382//
23383// value: The minimum number of word occurrences for it to be included in the
23384// vocabulary.
23385// If not specified, defaults to 5
23386func SkipgramMinCount(value int64) SkipgramAttr {
23387	return func(m optionalAttr) {
23388		m["min_count"] = value
23389	}
23390}
23391
23392// SkipgramSubsample sets the optional subsample attribute to value.
23393//
23394// value: Threshold for word occurrence. Words that appear with higher
23395// frequency will be randomly down-sampled. Set to 0 to disable.
23396// If not specified, defaults to 0.001
23397func SkipgramSubsample(value float32) SkipgramAttr {
23398	return func(m optionalAttr) {
23399		m["subsample"] = value
23400	}
23401}
23402
23403// Parses a text file and creates a batch of examples.
23404//
23405// DEPRECATED at GraphDef version 19: Moving word2vec into tensorflow_models/tutorials and deprecating its ops here as a result
23406//
23407// Arguments:
23408//	filename: The corpus's text file name.
23409//	batch_size: The size of produced batch.
23410//
23411// Returns:
23412//	vocab_word: A vector of words in the corpus.
23413//	vocab_freq: Frequencies of words. Sorted in non-ascending order.
23414//	words_per_epoch: Number of words per epoch in the data file.
23415//	current_epoch: The current epoch number.
23416//	total_words_processed: The total number of words processed so far.
23417//	examples: A vector of word ids.
23418//	labels: A vector of word ids.
23419func Skipgram(scope *Scope, filename string, batch_size int64, optional ...SkipgramAttr) (vocab_word tf.Output, vocab_freq tf.Output, words_per_epoch tf.Output, current_epoch tf.Output, total_words_processed tf.Output, examples tf.Output, labels tf.Output) {
23420	if scope.Err() != nil {
23421		return
23422	}
23423	attrs := map[string]interface{}{"filename": filename, "batch_size": batch_size}
23424	for _, a := range optional {
23425		a(attrs)
23426	}
23427	opspec := tf.OpSpec{
23428		Type: "Skipgram",
23429
23430		Attrs: attrs,
23431	}
23432	op := scope.AddOperation(opspec)
23433	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
23434}
23435
23436// Records the latency of producing `input_dataset` elements in a StatsAggregator.
23437func ExperimentalLatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
23438	if scope.Err() != nil {
23439		return
23440	}
23441	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
23442	opspec := tf.OpSpec{
23443		Type: "ExperimentalLatencyStatsDataset",
23444		Input: []tf.Input{
23445			input_dataset, tag,
23446		},
23447		Attrs: attrs,
23448	}
23449	op := scope.AddOperation(opspec)
23450	return op.Output(0)
23451}
23452
23453// AddSparseToTensorsMapAttr is an optional argument to AddSparseToTensorsMap.
23454type AddSparseToTensorsMapAttr func(optionalAttr)
23455
23456// AddSparseToTensorsMapContainer sets the optional container attribute to value.
23457//
23458// value: The container name for the `SparseTensorsMap` created by this op.
23459// If not specified, defaults to ""
23460func AddSparseToTensorsMapContainer(value string) AddSparseToTensorsMapAttr {
23461	return func(m optionalAttr) {
23462		m["container"] = value
23463	}
23464}
23465
23466// AddSparseToTensorsMapSharedName sets the optional shared_name attribute to value.
23467//
23468// value: The shared name for the `SparseTensorsMap` created by this op.
23469// If blank, the new Operation's unique name is used.
23470// If not specified, defaults to ""
23471func AddSparseToTensorsMapSharedName(value string) AddSparseToTensorsMapAttr {
23472	return func(m optionalAttr) {
23473		m["shared_name"] = value
23474	}
23475}
23476
23477// Adds a `SparseTensor` to a `SparseTensorsMap` and returns its handle.
23478//
23479// A `SparseTensor` is represented by three tensors: `sparse_indices`,
23480// `sparse_values`, and `sparse_shape`.
23481//
23482// This operator takes the given `SparseTensor` and adds it to a container
23483// object (a `SparseTensorsMap`).  A unique key within this container is generated
23484// in the form of an `int64`, and this is the value that is returned.
23485//
23486// The `SparseTensor` can then be read out as part of a minibatch by passing
23487// the key as a vector element to `TakeManySparseFromTensorsMap`.  To ensure
23488// the correct `SparseTensorsMap` is accessed, ensure that the same
23489// `container` and `shared_name` are passed to that Op.  If no `shared_name`
23490// is provided here, instead use the *name* of the Operation created by calling
23491// `AddSparseToTensorsMap` as the `shared_name` passed to
23492// `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.
23493//
23494// Arguments:
23495//	sparse_indices: 2-D.  The `indices` of the `SparseTensor`.
23496//	sparse_values: 1-D.  The `values` of the `SparseTensor`.
23497//	sparse_shape: 1-D.  The `shape` of the `SparseTensor`.
23498//
23499// Returns 0-D.  The handle of the `SparseTensor` now stored in the
23500// `SparseTensorsMap`.
23501func AddSparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddSparseToTensorsMapAttr) (sparse_handle tf.Output) {
23502	if scope.Err() != nil {
23503		return
23504	}
23505	attrs := map[string]interface{}{}
23506	for _, a := range optional {
23507		a(attrs)
23508	}
23509	opspec := tf.OpSpec{
23510		Type: "AddSparseToTensorsMap",
23511		Input: []tf.Input{
23512			sparse_indices, sparse_values, sparse_shape,
23513		},
23514		Attrs: attrs,
23515	}
23516	op := scope.AddOperation(opspec)
23517	return op.Output(0)
23518}
23519
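// exampleAddSparseToTensorsMap is an illustrative sketch, not generated code:
// it stores a 2x3 SparseTensor under a fixed shared_name so that a colocated
// TakeManySparseFromTensorsMap can read it back. The helper name and the
// shared_name are assumptions for demonstration only.
func exampleAddSparseToTensorsMap() tf.Output {
	s := NewScope()
	indices := Const(s, [][]int64{{0, 0}, {1, 2}})
	values := Const(s, []float32{4.5, 9.0})
	shape := Const(s, []int64{2, 3})
	return AddSparseToTensorsMap(s, indices, values, shape,
		AddSparseToTensorsMapSharedName("example_map"))
}
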
23520// Transforms a vector of tf.Example protos (as strings) into typed tensors.
23521//
23522// Arguments:
23523//	serialized: A scalar or vector containing binary serialized Example protos.
23524//	names: A tensor containing the names of the serialized protos.
23525// Corresponds 1:1 with the `serialized` tensor.
23526// May contain, for example, table key (descriptive) names for the
23527// corresponding serialized protos.  These are purely useful for debugging
23528// purposes, and the presence of values here has no effect on the output.
23529// May also be an empty vector if no names are available.
23530// If non-empty, this tensor must have the same shape as "serialized".
23531//	sparse_keys: Vector of strings.
23532// The keys expected in the Examples' features associated with sparse values.
23533//	dense_keys: Vector of strings.
23534// The keys expected in the Examples' features associated with dense values.
23535//	ragged_keys: Vector of strings.
23536// The keys expected in the Examples' features associated with ragged values.
23537//	dense_defaults: A list of Tensors (some may be empty).  Corresponds 1:1 with `dense_keys`.
23538// dense_defaults[j] provides default values
23539// when the example's feature_map lacks dense_key[j].  If an empty Tensor is
23540// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
23541// The input type is inferred from dense_defaults[j], even when it's empty.
23542// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
23543// then the shape of dense_defaults[j] must match that of dense_shapes[j].
23544// If dense_shapes[j] has an undefined major dimension (variable strides dense
23545// feature), dense_defaults[j] must contain a single element:
23546// the padding element.
23547//	num_sparse: The number of sparse keys.
23548//	sparse_types: A list of `num_sparse` types; the data types of data in each Feature
23549// given in sparse_keys.
23550// Currently ParseExample supports DT_FLOAT (FloatList),
23551// DT_INT64 (Int64List), and DT_STRING (BytesList).
23552//	ragged_value_types: A list of `num_ragged` types; the data types of data in each Feature
23553// given in ragged_keys (where `num_ragged = sparse_keys.size()`).
23554// Currently ParseExample supports DT_FLOAT (FloatList),
23555// DT_INT64 (Int64List), and DT_STRING (BytesList).
23556//	ragged_split_types: A list of `num_ragged` types; the data types of row_splits in each Feature
23557// given in ragged_keys (where `num_ragged = sparse_keys.size()`).
// May be DT_INT32 or DT_INT64.
//	dense_shapes: A list of `num_dense` shapes; the shapes of data in each Feature
// given in dense_keys (where `num_dense = dense_keys.size()`).
// The number of elements in the Feature corresponding to dense_key[j]
// must always equal dense_shapes[j].NumEntries().
// If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output
// Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):
// The dense outputs are just the inputs row-stacked by batch.
// This works for dense_shapes[j] = (-1, D1, ..., DN).  In this case
// the shape of the output Tensor dense_values[j] will be
// (|serialized|, M, D1, ..., DN), where M is the maximum number of blocks
// of elements of length D1 * ... * DN, across all minibatch entries
// in the input.  Any minibatch entry with less than M blocks of elements of
// length D1 * ... * DN will be padded with the corresponding default_value
// scalar element along the second dimension.
func ParseExampleV2(scope *Scope, serialized tf.Output, names tf.Output, sparse_keys tf.Output, dense_keys tf.Output, ragged_keys tf.Output, dense_defaults []tf.Output, num_sparse int64, sparse_types []tf.DataType, ragged_value_types []tf.DataType, ragged_split_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output, ragged_values []tf.Output, ragged_row_splits []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_sparse": num_sparse, "sparse_types": sparse_types, "ragged_value_types": ragged_value_types, "ragged_split_types": ragged_split_types, "dense_shapes": dense_shapes}
	opspec := tf.OpSpec{
		Type: "ParseExampleV2",
		Input: []tf.Input{
			serialized, names, sparse_keys, dense_keys, ragged_keys, tf.OutputList(dense_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
		scope.UpdateErr("ParseExampleV2", err)
		return
	}
	if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
		scope.UpdateErr("ParseExampleV2", err)
		return
	}
	if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
		scope.UpdateErr("ParseExampleV2", err)
		return
	}
	if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
		scope.UpdateErr("ParseExampleV2", err)
		return
	}
	if ragged_values, idx, err = makeOutputList(op, idx, "ragged_values"); err != nil {
		scope.UpdateErr("ParseExampleV2", err)
		return
	}
	if ragged_row_splits, idx, err = makeOutputList(op, idx, "ragged_row_splits"); err != nil {
		scope.UpdateErr("ParseExampleV2", err)
		return
	}
	return sparse_indices, sparse_values, sparse_shapes, dense_values, ragged_values, ragged_row_splits
}
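
// A hand-written wiring sketch (illustrative only; the feature names "tags"
// and "id" and the helper name are assumptions, not part of the generated
// API): parsing one sparse string feature and one required dense int64
// feature from a batch of serialized Examples.
func exampleParseExampleV2(scope *Scope, serialized tf.Output) (sparseValues, denseValues []tf.Output) {
	names := Const(scope, []string{})            // no debug names
	sparseKeys := Const(scope, []string{"tags"}) // one sparse feature
	denseKeys := Const(scope, []string{"id"})    // one dense feature
	raggedKeys := Const(scope, []string{})       // no ragged features
	// An empty default tensor marks the dense feature "id" as required;
	// its dtype (int64) is still inferred from this tensor.
	denseDefaults := []tf.Output{Const(scope, []int64{})}
	_, sparseValues, _, denseValues, _, _ = ParseExampleV2(scope,
		serialized, names, sparseKeys, denseKeys, raggedKeys, denseDefaults,
		1,                            // num_sparse
		[]tf.DataType{tf.String},     // sparse_types
		[]tf.DataType{},              // ragged_value_types
		[]tf.DataType{},              // ragged_split_types
		[]tf.Shape{tf.ScalarShape()}) // dense_shapes
	return sparseValues, denseValues
}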

// Computes the exponential linear function.
//
// The ELU function is defined as:
//
//  * $ e ^ x - 1 $ if $ x < 0 $
//  * $ x $ if $ x >= 0 $
//
// Examples:
//
// >>> tf.nn.elu(1.0)
// <tf.Tensor: shape=(), dtype=float32, numpy=1.0>
// >>> tf.nn.elu(0.0)
// <tf.Tensor: shape=(), dtype=float32, numpy=0.0>
// >>> tf.nn.elu(-1000.0)
// <tf.Tensor: shape=(), dtype=float32, numpy=-1.0>
//
// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
// ](http://arxiv.org/abs/1511.07289)
func Elu(scope *Scope, features tf.Output) (activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Elu",
		Input: []tf.Input{
			features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
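
// A hand-written usage sketch (illustrative, not machine generated): for the
// input [-1000, 0, 1] the definition above yields approximately [-1, 0, 1].
func exampleElu(scope *Scope) tf.Output {
	x := Const(scope, []float32{-1000, 0, 1})
	// Negative inputs saturate toward exp(x)-1 -> -1; non-negative inputs
	// pass through unchanged.
	return Elu(scope, x)
}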

// Records the bytes size of each element of `input_dataset` in a StatsAggregator.
func ExperimentalBytesProducedStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ExperimentalBytesProducedStatsDataset",
		Input: []tf.Input{
			input_dataset, tag,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the name of the device on which `resource` has been placed.
func ExperimentalIteratorGetDevice(scope *Scope, resource tf.Output) (device tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ExperimentalIteratorGetDevice",
		Input: []tf.Input{
			resource,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Adjust the saturation of one or more images.
//
// `images` is a tensor of at least 3 dimensions.  The last dimension is
// interpreted as channels, and must be three.
//
// The input image is considered in the RGB colorspace. Conceptually, the RGB
// colors are first mapped into HSV. A scale is then applied to all the
// saturation values, and the result is then remapped back to the RGB colorspace.
//
// Arguments:
//	images: Images to adjust.  At least 3-D.
//	scale: A float scale applied to the saturation.
//
// Returns The saturation-adjusted image or images.
func AdjustSaturation(scope *Scope, images tf.Output, scale tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AdjustSaturation",
		Input: []tf.Input{
			images, scale,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
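
// A brief hand-written usage sketch (illustrative only): halving the
// saturation of a batch of RGB images.
func exampleAdjustSaturation(scope *Scope, images tf.Output) tf.Output {
	// A scale below 1 desaturates; a scale above 1 increases saturation.
	return AdjustSaturation(scope, images, Const(scope, float32(0.5)))
}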

// Returns the name of the device on which `resource` has been placed.
func IteratorGetDevice(scope *Scope, resource tf.Output) (device tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IteratorGetDevice",
		Input: []tf.Input{
			resource,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that emits the key-value pairs in one or more LMDB files.
//
// The Lightning Memory-Mapped Database Manager, or LMDB, is an embedded binary
// key-value database. This dataset can read the contents of LMDB database files,
// the names of which generally have the `.mdb` suffix.
//
// Each output element consists of a key-value pair represented as a pair of
// scalar string `Tensor`s, where the first `Tensor` contains the key and the
// second `Tensor` contains the value.
//
// LMDB uses different file formats on big- and little-endian machines.
// `LMDBDataset` can only read files in the format of the host machine.
//
// Arguments:
//	filenames: A scalar or a vector containing the name(s) of the binary file(s) to be
// read.
//
//
func LMDBDataset(scope *Scope, filenames tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "LMDBDataset",
		Input: []tf.Input{
			filenames,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
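
// A hand-written sketch (the file name is hypothetical): since each LMDB
// element is a (key, value) pair of scalar strings, the dataset has two
// scalar string components.
func exampleLMDBDataset(scope *Scope) tf.Output {
	filenames := Const(scope, []string{"data.mdb"}) // illustrative path
	return LMDBDataset(scope, filenames,
		[]tf.DataType{tf.String, tf.String},
		[]tf.Shape{tf.ScalarShape(), tf.ScalarShape()})
}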

// MatrixInverseAttr is an optional argument to MatrixInverse.
type MatrixInverseAttr func(optionalAttr)

// MatrixInverseAdjoint sets the optional adjoint attribute to value.
// If not specified, defaults to false
func MatrixInverseAdjoint(value bool) MatrixInverseAttr {
	return func(m optionalAttr) {
		m["adjoint"] = value
	}
}

// Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes).
//
// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
// form square matrices. The output is a tensor of the same shape as the input
// containing the inverse for all input submatrices `[..., :, :]`.
//
// The op uses LU decomposition with partial pivoting to compute the inverses.
//
// If a matrix is not invertible there is no guarantee what the op does. It
// may detect the condition and raise an exception or it may simply return a
// garbage result.
//
// Arguments:
//	input: Shape is `[..., M, M]`.
//
// Returns Shape is `[..., M, M]`.
//
// @compatibility(numpy)
// Equivalent to np.linalg.inv
// @end_compatibility
func MatrixInverse(scope *Scope, input tf.Output, optional ...MatrixInverseAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MatrixInverse",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
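
// A small hand-written example (illustrative only) of the functional
// optional-attribute pattern used throughout this package: inverting the
// conjugate transpose of a batch of matrices.
func exampleMatrixInverse(scope *Scope, batch tf.Output) tf.Output {
	// Passing MatrixInverseAdjoint(true) sets the "adjoint" attr, so the op
	// returns inv(adjoint(x)) for each input submatrix.
	return MatrixInverse(scope, batch, MatrixInverseAdjoint(true))
}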

// Computes acos of x element-wise.
//
//   Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each element of the tensor. If `y = tf.math.cos(x)` then `x = tf.math.acos(y)`.
//
//   Input range is `[-1, 1]` and the output has a range of `[0, pi]`.
//
func Acos(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Acos",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that batches input elements into a SparseTensor.
//
// Arguments:
//	input_dataset: A handle to an input dataset. Must have a single component.
//	batch_size: A scalar representing the number of elements to accumulate in a
// batch.
//	row_shape: A vector representing the dense shape of each row in the produced
// SparseTensor. The shape may be partially specified, using `-1` to indicate
// that a particular dimension should use the maximum size of all batch elements.
//
//
func ExperimentalDenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, row_shape tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ExperimentalDenseToSparseBatchDataset",
		Input: []tf.Input{
			input_dataset, batch_size, row_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
//
// Arguments:
//
//	thread_pool: A resource produced by the ThreadPoolHandle op.
//
//
func ExperimentalThreadPoolDataset(scope *Scope, input_dataset tf.Output, thread_pool tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ExperimentalThreadPoolDataset",
		Input: []tf.Input{
			input_dataset, thread_pool,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
// ResourceSparseApplyCenteredRMSPropAttr is an optional argument to ResourceSparseApplyCenteredRMSProp.
type ResourceSparseApplyCenteredRMSPropAttr func(optionalAttr)

// ResourceSparseApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var, mg, ms, and mom tensors is
// protected by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceSparseApplyCenteredRMSPropUseLocking(value bool) ResourceSparseApplyCenteredRMSPropAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the centered RMSProp algorithm.
//
// The centered RMSProp algorithm uses an estimate of the centered second moment
// (i.e., the variance) for normalization, as opposed to regular RMSProp, which
// uses the (uncentered) second moment. This often helps with training, but is
// slightly more expensive in terms of computation and memory.
//
// Note that in the dense implementation of this algorithm, mg, ms, and mom will
// update even if the grad is zero, but in this sparse implementation, mg, ms,
// and mom will not update in iterations during which the grad is zero.
//
// mean_square = decay * mean_square + (1-decay) * gradient ** 2
// mean_grad = decay * mean_grad + (1-decay) * gradient
// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
//
// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
// var <- var - mom
//
// Arguments:
//	var_: Should be from a Variable().
//	mg: Should be from a Variable().
//	ms: Should be from a Variable().
//	mom: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	rho: Decay rate. Must be a scalar.
//	momentum: Momentum scale. Must be a scalar.
//	epsilon: Ridge term. Must be a scalar.
//	grad: The gradient.
//	indices: A vector of indices into the first dimension of var, ms and mom.
//
// Returns the created operation.
func ResourceSparseApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyCenteredRMSPropAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceSparseApplyCenteredRMSProp",
		Input: []tf.Input{
			var_, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Writes the given dataset to the given file using the TFRecord format.
//
// Arguments:
//	input_dataset: A variant tensor representing the dataset to write.
//	filename: A scalar string tensor representing the filename to use.
//	compression_type: A scalar string tensor containing either (i) the empty string (no
// compression), (ii) "ZLIB", or (iii) "GZIP".
//
// Returns the created operation.
func DatasetToTFRecord(scope *Scope, input_dataset tf.Output, filename tf.Output, compression_type tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DatasetToTFRecord",
		Input: []tf.Input{
			input_dataset, filename, compression_type,
		},
	}
	return scope.AddOperation(opspec)
}

// Creates a dataset from the given `graph_def` (a serialized GraphDef).
//
// Arguments:
//	graph_def: The graph representation of the dataset (as serialized GraphDef).
//
// Returns A variant tensor representing the dataset.
func DatasetFromGraph(scope *Scope, graph_def tf.Output) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DatasetFromGraph",
		Input: []tf.Input{
			graph_def,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Interleave the values from the `data` tensors into a single tensor.
//
// Builds a merged tensor such that
//
// ```python
//     merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
// ```
//
// For example, if each `indices[m]` is scalar or vector, we have
//
// ```python
//     # Scalar indices:
//     merged[indices[m], ...] = data[m][...]
//
//     # Vector indices:
//     merged[indices[m][i], ...] = data[m][i, ...]
// ```
//
// Each `data[i].shape` must start with the corresponding `indices[i].shape`,
// and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
// must have `data[i].shape = indices[i].shape + constant`.  In terms of this
// `constant`, the output shape is
//
//     merged.shape = [max(indices)] + constant
//
// Values are merged in order, so if an index appears in both `indices[m][i]` and
// `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
// merged result. If you do not need this guarantee, ParallelDynamicStitch might
// perform better on some devices.
//
// For example:
//
// ```python
//     indices[0] = 6
//     indices[1] = [4, 1]
//     indices[2] = [[5, 2], [0, 3]]
//     data[0] = [61, 62]
//     data[1] = [[41, 42], [11, 12]]
//     data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
//     merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
//               [51, 52], [61, 62]]
// ```
//
// This method can be used to merge partitions created by `dynamic_partition`
// as illustrated in the following example:
//
// ```python
//     # Apply function (increments x_i) on elements for which a certain condition
//     # applies (x_i != -1 in this example).
//     x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
//     condition_mask=tf.not_equal(x,tf.constant(-1.))
//     partitioned_data = tf.dynamic_partition(
//         x, tf.cast(condition_mask, tf.int32) , 2)
//     partitioned_data[1] = partitioned_data[1] + 1.0
//     condition_indices = tf.dynamic_partition(
//         tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
//     x = tf.dynamic_stitch(condition_indices, partitioned_data)
//     # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
//     # unchanged.
// ```
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
// </div>
func DynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DynamicStitch",
		Input: []tf.Input{
			tf.OutputList(indices), tf.OutputList(data),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
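
// A hand-written Go version (illustrative) of the scalar/vector example in
// the documentation above: stitching three partitions back into one tensor.
func exampleDynamicStitch(scope *Scope) tf.Output {
	// Indices must be int32; data tensors must share one element type.
	indices := []tf.Output{
		Const(scope, int32(6)),
		Const(scope, []int32{4, 1}),
		Const(scope, [][]int32{{5, 2}, {0, 3}}),
	}
	data := []tf.Output{
		Const(scope, []int32{61, 62}),
		Const(scope, [][]int32{{41, 42}, {11, 12}}),
		Const(scope, [][][]int32{{{51, 52}, {21, 22}}, {{1, 2}, {31, 32}}}),
	}
	// The maximum index is 6, so merged has shape [7, 2].
	return DynamicStitch(scope, indices, data)
}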

// Replaces the contents of the table with the specified keys and values.
//
// The tensor `keys` must be of the same type as the keys of the table.
// The tensor `values` must be of the type of the table values.
//
// Arguments:
//	table_handle: Handle to the table.
//	keys: Any shape.  Keys to look up.
//	values: Values to associate with keys.
//
// Returns the created operation.
func LookupTableImportV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LookupTableImportV2",
		Input: []tf.Input{
			table_handle, keys, values,
		},
	}
	return scope.AddOperation(opspec)
}
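
// A hand-written sketch (assumes the generated HashTableV2 wrapper from this
// package; the key/value literals are illustrative): bulk-loading a
// string-to-int64 table.
func exampleLookupTableImportV2(scope *Scope) *tf.Operation {
	table := HashTableV2(scope, tf.String, tf.Int64)
	keys := Const(scope, []string{"apple", "banana"})
	values := Const(scope, []int64{1, 2})
	// Replaces the whole table contents with the given key/value pairs.
	return LookupTableImportV2(scope, table, keys, values)
}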

// Convert the quantized 'input' tensor into a lower-precision 'output', using the
//
// actual distribution of the values to maximize the usage of the lower bit depth
// and adjusting the output min and max ranges accordingly.
//
// [input_min, input_max] are scalar floats that specify the range for the float
// interpretation of the 'input' data. For example, if input_min is -1.0f and
// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
//
// This operator tries to squeeze as much precision as possible into an output with
// a lower bit depth by calculating the actual min and max values found in the
// data. For example, maybe that quint16 input has no values lower than 16,384 and
// none higher than 49,152. That means only half the range is actually needed, all
// the float interpretations are between -0.5f and 0.5f, so if we want to compress
// the data into a quint8 output, we can use that range rather than the theoretical
// -1.0f to 1.0f that is suggested by the input min and max.
//
// In practice, this is most useful for taking output from operations like
// QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
// may have large potential output ranges, but in practice have a distribution of
// input values that only uses a small fraction of the possible range. By feeding
// that output into this operator, we can reduce it from 32 bits down to 8 with
// minimal loss of accuracy.
//
// Arguments:
//
//	input_min: The float value that the minimum quantized input value represents.
//	input_max: The float value that the maximum quantized input value represents.
//	out_type: The type of the output. Should be a lower bit depth than Tinput.
//
// Returns:
//	output
//	output_min: The float value that the minimum quantized output value represents.
//	output_max: The float value that the maximum quantized output value represents.
func QuantizeDownAndShrinkRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type}
	opspec := tf.OpSpec{
		Type: "QuantizeDownAndShrinkRange",
		Input: []tf.Input{
			input, input_min, input_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
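
// A hand-written sketch (illustrative; assumes a quantized input such as the
// output of QuantizedMatMul, with its accompanying min/max tensors):
// shrinking higher bit-depth quantized data down to quint8 using the
// observed value range.
func exampleQuantizeDownAndShrinkRange(scope *Scope, acc, accMin, accMax tf.Output) (tf.Output, tf.Output, tf.Output) {
	// The op rescales the data so the quint8 output covers only the range
	// actually present in `acc`, returning the new min/max alongside it.
	return QuantizeDownAndShrinkRange(scope, acc, accMin, accMax, tf.Quint8)
}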

// Tensor contraction according to Einstein summation convention.
//
// Implements generalized Tensor contraction and reduction. Each input Tensor must
// have a corresponding input subscript appearing in the comma-separated left-hand
// side of the equation. The right-hand side of the equation consists of the
// output subscript. The input subscripts and the output subscript should consist
// of zero or more named axis labels and at most one ellipsis (`...`).
//
// The named axis labels may be any single character other than those having
// special meaning, namely `,.->`. The behavior of this Op is undefined if it
// receives an ill-formatted equation; since the validation is done at
// graph-building time, we omit format validation checks at runtime.
//
// Note: This Op is *not* intended to be called by the user; instead users should
// call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`.
//
// Operations are applied to the input(s) according to the following rules:
//
//  (a) Generalized Diagonals: For input dimensions corresponding to axis labels
//      appearing more than once in the same input subscript, we take the
//      generalized (`k`-dimensional) diagonal.
//      For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the
//      generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`,
//      `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`.
//
//  (b) Reduction: Axes corresponding to labels appearing only in one input
//      subscript but not in the output subscript are summed over prior to Tensor
//      contraction.
//      For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are
//      the reduction axis labels.
//
//  (c) Batch Dimensions: Axes corresponding to labels appearing in each of the
//      input subscripts and also in the output subscript make up the batch
//      dimensions in Tensor contraction. Unnamed axis labels corresponding to
//      ellipsis (`...`) also correspond to batch dimensions.
//      For example, for the equation denoting batch matrix multiplication,
//      `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension.
//
//  (d) Contraction: In case of binary einsum, axes corresponding to labels
//      appearing in two different inputs (and not in the output) are contracted
//      against each other.
//      Considering the batch matrix multiplication equation again
//      (`bij,bjk->bik`), the contracted axis label is `j`.
//
//  (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis
//      labels, the opposite operation of (a) is applied. For example, in the
//      equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]`
//      is all zeros, except for the (generalized) diagonal which is populated
//      with values from the input.
//      Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is
//      provided to enable computing the symbolic gradient of `tf.einsum`.
//
// The output subscripts must contain only labels appearing in at least one of the
// input subscripts. Furthermore, all dimensions mapping to the same axis label
// must be equal.
//
// Any of the input and output subscripts may contain at most a single ellipsis
// (`...`). These ellipses are mapped against dimensions not corresponding to any
// named axis label. If two inputs contain ellipses, then they are broadcast
// according to standard NumPy broadcasting
// [rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
//
// The broadcast dimensions are placed in the corresponding location of the
// ellipsis in the output subscript. If the broadcast dimensions are non-empty
// and the output subscripts do not contain ellipses, then an InvalidArgument error
// is raised.
//
// @compatibility(numpy)
// Similar to [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html).
//
// Comparison with `numpy.einsum`:
//
//  * This Op only supports unary and binary forms of `numpy.einsum`.
//  * This Op does not support the implicit form (i.e. equations without `->`).
//  * This Op also supports repeated indices in the output subscript, which is not
//    supported by `numpy.einsum`.
// @end_compatibility
//
// Arguments:
//	inputs: List of 1 or 2 Tensors.
//	equation: String describing the Einstein Summation operation; in the format of np.einsum.
//
// Returns Output Tensor with shape depending upon `equation`.
func Einsum(scope *Scope, inputs []tf.Output, equation string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"equation": equation}
	opspec := tf.OpSpec{
		Type: "Einsum",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
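
// A hand-written example (illustrative): batch matrix multiplication via the
// equation from rules (c) and (d) above.
func exampleEinsum(scope *Scope, a, b tf.Output) tf.Output {
	// For a of shape [B, I, J] and b of shape [B, J, K] the result has
	// shape [B, I, K]; `b` is the batch label and `j` is contracted.
	return Einsum(scope, []tf.Output{a, b}, "bij,bjk->bik")
}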

// Outputs a tensor containing the reduction across all input tensors.
//
// Outputs a tensor containing the reduction across all input tensors passed to ops
// within the same `shared_name`.
//
// The graph should be constructed so that if one op runs with shared_name value `c`,
// then `num_devices` ops will run with shared_name value `c`.  Failure to do so
// will cause the graph execution to fail to complete.
//
// input: the input to the reduction.
// data: the value of the reduction across all `num_devices` devices.
// reduction: the reduction operation to perform.
// num_devices: The number of devices participating in this reduction.
// shared_name: Identifier that is shared between ops of the same reduction.
func NcclAllReduce(scope *Scope, input tf.Output, reduction string, num_devices int64, shared_name string) (data tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"reduction": reduction, "num_devices": num_devices, "shared_name": shared_name}
	opspec := tf.OpSpec{
		Type: "NcclAllReduce",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
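
// A hand-written sketch (illustrative) of one of the `num_devices`
// cooperating all-reduce ops. In a real graph each participating device adds
// one such op with the same shared_name, typically via a device-scoped Scope.
func exampleNcclAllReduce(scope *Scope, grad tf.Output) tf.Output {
	// All ops created with shared_name "allreduce0" and num_devices 2
	// reduce their inputs together; "sum" adds them elementwise.
	return NcclAllReduce(scope, grad, "sum", 2, "allreduce0")
}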

// MinAttr is an optional argument to Min.
type MinAttr func(optionalAttr)

// MinKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func MinKeepDims(value bool) MinAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the minimum of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Min(scope *Scope, input tf.Output, axis tf.Output, optional ...MinAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Min",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
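
// A hand-written example (illustrative) of the reduction described above:
// the column-wise minimum of a matrix, keeping the reduced dimension.
func exampleMin(scope *Scope, m tf.Output) tf.Output {
	axis := Const(scope, []int32{0}) // reduce over rows
	// With MinKeepDims(true) a [R, C] input yields shape [1, C] rather
	// than [C].
	return Min(scope, m, axis, MinKeepDims(true))
}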

// Get the value of the tensor specified by its handle.
//
// Arguments:
//	handle: The handle for a tensor stored in the session state.
//	dtype: The type of the output value.
//
// Returns The tensor for the given handle.
func GetSessionTensor(scope *Scope, handle tf.Output, dtype tf.DataType) (value tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	opspec := tf.OpSpec{
		Type: "GetSessionTensor",
		Input: []tf.Input{
			handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ParseSequenceExampleAttr is an optional argument to ParseSequenceExample.
type ParseSequenceExampleAttr func(optionalAttr)

// ParseSequenceExampleNcontextSparse sets the optional Ncontext_sparse attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleNcontextSparse(value int64) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["Ncontext_sparse"] = value
	}
}

// ParseSequenceExampleNcontextDense sets the optional Ncontext_dense attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleNcontextDense(value int64) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["Ncontext_dense"] = value
	}
}

// ParseSequenceExampleNfeatureListSparse sets the optional Nfeature_list_sparse attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleNfeatureListSparse(value int64) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["Nfeature_list_sparse"] = value
	}
}

// ParseSequenceExampleNfeatureListDense sets the optional Nfeature_list_dense attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func ParseSequenceExampleNfeatureListDense(value int64) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["Nfeature_list_dense"] = value
	}
}

// ParseSequenceExampleContextSparseTypes sets the optional context_sparse_types attribute to value.
//
// value: A list of Ncontext_sparse types; the data types of data in
// each context Feature given in context_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleContextSparseTypes(value []tf.DataType) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["context_sparse_types"] = value
	}
}

// ParseSequenceExampleFeatureListDenseTypes sets the optional feature_list_dense_types attribute to value.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_dense_types"] = value
	}
}

// ParseSequenceExampleContextDenseShapes sets the optional context_dense_shapes attribute to value.
//
// value: A list of Ncontext_dense shapes; the shapes of data in
// each context Feature given in context_dense_keys.
// The number of elements in the Feature corresponding to context_dense_key[j]
// must always equal context_dense_shapes[j].NumEntries().
// The shape of context_dense_values[j] will match context_dense_shapes[j].
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleContextDenseShapes(value []tf.Shape) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["context_dense_shapes"] = value
	}
}

// ParseSequenceExampleFeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
//
// value: A list of Nfeature_list_sparse types; the data types
// of data in each FeatureList given in feature_list_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_sparse_types"] = value
	}
}

// ParseSequenceExampleFeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
//
// value: A list of Nfeature_list_dense shapes; the shapes of
// data in each FeatureList given in feature_list_dense_keys.
// The shape of each Feature in the FeatureList corresponding to
// feature_list_dense_key[j] must always equal
// feature_list_dense_shapes[j].NumEntries().
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_dense_shapes"] = value
	}
}

// Transforms a vector of brain.SequenceExample protos (as strings) into typed tensors.
//
// Arguments:
//	serialized: A vector containing binary serialized SequenceExample protos.
//	debug_name: A vector containing the names of the serialized protos.
// May contain, for example, table key (descriptive) name for the
// corresponding serialized proto.  This is purely useful for debugging
// purposes, and the presence of values here has no effect on the output.
// May also be an empty vector if no name is available.
//	context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
// context_dense_defaults[j] provides default values
// when the SequenceExample's context map lacks context_dense_key[j].
// If an empty Tensor is provided for context_dense_defaults[j],
// then the Feature context_dense_keys[j] is required.
// The input type is inferred from context_dense_defaults[j], even when it's
// empty.  If context_dense_defaults[j] is not empty, its shape must match
// context_dense_shapes[j].
//	feature_list_dense_missing_assumed_empty: A vector listing the
// FeatureList keys which may be missing from the SequenceExamples.  If the
// associated FeatureList is missing, it is treated as empty.  By default,
// any FeatureList not listed in this vector must exist in the SequenceExamples.
//	context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars).
// The keys expected in the Examples' features associated with context_sparse
// values.
//	context_dense_keys: A list of Ncontext_dense string Tensors (scalars).
// The keys expected in the SequenceExamples' context features associated with
// dense values.
//	feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors
// (scalars).  The keys expected in the FeatureLists associated with sparse
// values.
//	feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars).
// The keys expected in the SequenceExamples' feature_lists associated
// with lists of dense values.
func ParseSequenceExample(scope *Scope, serialized tf.Output, debug_name tf.Output, context_dense_defaults []tf.Output, feature_list_dense_missing_assumed_empty []string, context_sparse_keys []string, context_dense_keys []string, feature_list_sparse_keys []string, feature_list_dense_keys []string, optional ...ParseSequenceExampleAttr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output, feature_list_dense_lengths []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"feature_list_dense_missing_assumed_empty": feature_list_dense_missing_assumed_empty, "context_sparse_keys": context_sparse_keys, "context_dense_keys": context_dense_keys, "feature_list_sparse_keys": feature_list_sparse_keys, "feature_list_dense_keys": feature_list_dense_keys}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ParseSequenceExample",
		Input: []tf.Input{
			serialized, debug_name, tf.OutputList(context_dense_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if context_sparse_indices, idx, err = makeOutputList(op, idx, "context_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if context_sparse_values, idx, err = makeOutputList(op, idx, "context_sparse_values"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if context_sparse_shapes, idx, err = makeOutputList(op, idx, "context_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if context_dense_values, idx, err = makeOutputList(op, idx, "context_dense_values"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_sparse_indices, idx, err = makeOutputList(op, idx, "feature_list_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_sparse_values, idx, err = makeOutputList(op, idx, "feature_list_sparse_values"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_sparse_shapes, idx, err = makeOutputList(op, idx, "feature_list_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_dense_values, idx, err = makeOutputList(op, idx, "feature_list_dense_values"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	if feature_list_dense_lengths, idx, err = makeOutputList(op, idx, "feature_list_dense_lengths"); err != nil {
		scope.UpdateErr("ParseSequenceExample", err)
		return
	}
	return context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values, feature_list_dense_lengths
}

// TPUPartitionedOutputAttr is an optional argument to TPUPartitionedOutput.
type TPUPartitionedOutputAttr func(optionalAttr)

// TPUPartitionedOutputPartitionDim sets the optional partition_dim attribute to value.
//
// value: An integer describing which dimension is partitioned.
// If not specified, defaults to 0
func TPUPartitionedOutputPartitionDim(value int64) TPUPartitionedOutputAttr {
	return func(m optionalAttr) {
		m["partition_dim"] = value
	}
}

// An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned
//
// outputs outside the XLA computation.
//
// Arguments:
//	inputs: A tensor which represents the full shape of partitioned tensors.
//
//
// Returns A list of partitioned outputs, which must have the same shape.
func TPUPartitionedOutput(scope *Scope, inputs tf.Output, num_splits int64, optional ...TPUPartitionedOutputAttr) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_splits": num_splits}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "TPUPartitionedOutput",
		Input: []tf.Input{
			inputs,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("TPUPartitionedOutput", err)
		return
	}
	return output
}

// The shape of the elements of the given list, as a tensor.
//
//   input_handle: the list
//   element_shape: the shape of elements of the list
func TensorListElementShape(scope *Scope, input_handle tf.Output, shape_type tf.DataType) (element_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shape_type": shape_type}
	opspec := tf.OpSpec{
		Type: "TensorListElementShape",
		Input: []tf.Input{
			input_handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes hyperbolic sine of x element-wise.
//
//   Given an input tensor, this function computes hyperbolic sine of every
//   element in the tensor. Input range is `[-inf,inf]` and output range
//   is `[-inf,inf]`.
//
//   ```python
//   x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
//   tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf]
//   ```
func Sinh(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Sinh",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates and returns an empty tensor list.
//
// All list elements must be tensors of dtype element_dtype and shape compatible
// with element_shape.
//
// handle: an empty tensor list.
// element_dtype: the type of elements in the list.
// element_shape: a shape compatible with that of elements in the list.
func EmptyTensorList(scope *Scope, element_shape tf.Output, max_num_elements tf.Output, element_dtype tf.DataType) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"element_dtype": element_dtype}
	opspec := tf.OpSpec{
		Type: "EmptyTensorList",
		Input: []tf.Input{
			element_shape, max_num_elements,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
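
// A hand-written sketch (illustrative; TensorListPushBack is another
// generated wrapper in this package): creating an unbounded list of float32
// vectors of unknown length and pushing one element onto it.
func exampleEmptyTensorList(scope *Scope) tf.Output {
	elementShape := Const(scope, []int32{-1}) // 1-D elements, unknown length
	maxNumElements := Const(scope, int32(-1)) // -1 means unbounded
	list := EmptyTensorList(scope, elementShape, maxNumElements, tf.Float)
	return TensorListPushBack(scope, list, Const(scope, []float32{1, 2, 3}))
}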

// A transformation that asserts which transformations happen next.
//
// This transformation checks whether the camel-case names (i.e. "FlatMap", not
// "flat_map") of the transformations following this transformation match the list
// of names in the `transformations` argument. If there is a mismatch, the
// transformation raises an exception.
//
// The check occurs when iterating over the contents of the dataset, which
// means that the check happens *after* any static optimizations are applied
// to the dataset graph.
//
// Arguments:
//	input_dataset: A variant tensor representing the input dataset.
// `AssertNextDataset` passes through the outputs of its input dataset.
//	transformations: A `tf.string` vector `tf.Tensor` identifying the transformations that are
// expected to happen next.
//
//
func AssertNextDataset(scope *Scope, input_dataset tf.Output, transformations tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "AssertNextDataset",
		Input: []tf.Input{
			input_dataset, transformations,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// EncodeProtoAttr is an optional argument to EncodeProto.
type EncodeProtoAttr func(optionalAttr)

// EncodeProtoDescriptorSource sets the optional descriptor_source attribute to value.
// If not specified, defaults to "local://"
func EncodeProtoDescriptorSource(value string) EncodeProtoAttr {
	return func(m optionalAttr) {
		m["descriptor_source"] = value
	}
}

// The op serializes protobuf messages provided in the input tensors.
//
// The types of the tensors in `values` must match the schema for the fields
// specified in `field_names`. All the tensors in `values` must have a common
// shape prefix, *batch_shape*.
//
// The `sizes` tensor specifies repeat counts for each field.  The repeat count
// (last dimension) of each tensor in `values` must be greater than or equal
// to the corresponding repeat count in `sizes`.
//
// A `message_type` name must be provided to give context for the field names.
// The actual message descriptor can be looked up either in the linked-in
// descriptor pool or a filename provided by the caller using the
// `descriptor_source` attribute.
//
// For the most part, the mapping between Proto field types and TensorFlow dtypes
// is straightforward. However, there are a few special cases:
//
// - A proto field that contains a submessage or group can only be converted
// to `DT_STRING` (the serialized submessage). This is to reduce the complexity
// of the API. The resulting string can be used as input to another instance of
// the decode_proto op.
//
// - TensorFlow lacks support for unsigned integers. The ops represent uint64
// types as a `DT_INT64` with the same twos-complement bit pattern (the obvious
// way). Unsigned int32 values can be represented exactly by specifying type
// `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in
// the `output_types` attribute.
//
// The `descriptor_source` attribute selects the source of protocol
// descriptors to consult when looking up `message_type`. This may be:
//
// - An empty string or "local://", in which case protocol descriptors are
// created for C++ (not Python) proto definitions linked to the binary.
//
// - A file, in which case protocol descriptors are created from the file,
// which is expected to contain a `FileDescriptorSet` serialized as a string.
// NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out`
// and `--include_imports` options to the protocol compiler `protoc`.
//
// - A "bytes://<bytes>", in which case protocol descriptors are created from `<bytes>`,
// which is expected to be a `FileDescriptorSet` serialized as a string.
//
// Arguments:
//	sizes: Tensor of int32 with shape `[batch_shape, len(field_names)]`.
//	values: List of tensors containing values for the corresponding field.
//	field_names: List of strings containing proto field names.
//	message_type: Name of the proto message type to encode.
//
// Returns Tensor of serialized protos with shape `batch_shape`.
func EncodeProto(scope *Scope, sizes tf.Output, values []tf.Output, field_names []string, message_type string, optional ...EncodeProtoAttr) (bytes tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"field_names": field_names, "message_type": message_type}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "EncodeProto",
		Input: []tf.Input{
			sizes, tf.OutputList(values),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
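
// A hand-written sketch: the message type "example.Metric" and field name
// "value" are hypothetical and must exist in the linked-in descriptor pool
// (or in a file named via EncodeProtoDescriptorSource).
func exampleEncodeProto(scope *Scope, values tf.Output) tf.Output {
	// For a batch of 2 messages with one value each, sizes has shape
	// [batch, len(field_names)] = [2, 1].
	sizes := Const(scope, [][]int32{{1}, {1}})
	return EncodeProto(scope, sizes, []tf.Output{values},
		[]string{"value"}, "example.Metric")
}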

// SparseCountSparseOutputAttr is an optional argument to SparseCountSparseOutput.
type SparseCountSparseOutputAttr func(optionalAttr)

// SparseCountSparseOutputMinlength sets the optional minlength attribute to value.
//
// value: Minimum value to count. Can be set to -1 for no minimum.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func SparseCountSparseOutputMinlength(value int64) SparseCountSparseOutputAttr {
	return func(m optionalAttr) {
		m["minlength"] = value
	}
}

// SparseCountSparseOutputMaxlength sets the optional maxlength attribute to value.
//
// value: Maximum value to count. Can be set to -1 for no maximum.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func SparseCountSparseOutputMaxlength(value int64) SparseCountSparseOutputAttr {
	return func(m optionalAttr) {
		m["maxlength"] = value
	}
}

// Performs sparse-output bin counting for a sparse tensor input.
//
//   Counts the number of times each value occurs in the input.
//
// Arguments:
//	indices: Tensor containing the indices of the sparse tensor to count.
//	values: Tensor containing values of the sparse tensor to count.
//	dense_shape: Tensor containing the dense shape of the sparse tensor to count.
//	weights: A Tensor of the same shape as indices containing per-index weight values.
// May also be the empty tensor if no weights are used.
//	binary_output: Whether to output the number of occurrences of each value or 1.
//
// Returns:
//	output_indices: Indices tensor for the resulting sparse tensor object.
//	output_values: Values tensor for the resulting sparse tensor object.
//	output_dense_shape: Shape tensor for the resulting sparse tensor object.
func SparseCountSparseOutput(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, weights tf.Output, binary_output bool, optional ...SparseCountSparseOutputAttr) (output_indices tf.Output, output_values tf.Output, output_dense_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"binary_output": binary_output}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseCountSparseOutput",
		Input: []tf.Input{
			indices, values, dense_shape, weights,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// DebugNumericSummaryV2Attr is an optional argument to DebugNumericSummaryV2.
type DebugNumericSummaryV2Attr func(optionalAttr)

// DebugNumericSummaryV2OutputDtype sets the optional output_dtype attribute to value.
//
// value: Optional. The type of the output. Can be float32 or float64 (default: float32).
// If not specified, defaults to DT_FLOAT
func DebugNumericSummaryV2OutputDtype(value tf.DataType) DebugNumericSummaryV2Attr {
	return func(m optionalAttr) {
		m["output_dtype"] = value
	}
}

// DebugNumericSummaryV2TensorDebugMode sets the optional tensor_debug_mode attribute to value.
//
// value: Tensor debug mode: the mode in which the input tensor is summarized
//   by the op. See the TensorDebugMode enum in
//   tensorflow/core/protobuf/debug_event.proto for details.
//
// Supported values:
//   2 (CURT_HEALTH): Output a float32/64 tensor of shape [2]. The 1st
//   element is the tensor_id, if provided, and -1 otherwise. The 2nd
//   element is a bit which is set to 1 if the input tensor has an
//   infinity or nan value, or zero otherwise.
//
//   3 (CONCISE_HEALTH): Output a float32/64 tensor of shape [5]. The 1st
//   element is the tensor_id, if provided, and -1 otherwise. The
//   remaining four slots are the total number of elements, -infs,
//   +infs, and nans in the input tensor respectively.
//
24873//   4 (FULL_HEALTH): Output a float32/64 tensor of shape [11]. The 1st
24874//   element is the tensor_id, if provided, and -1 otherwise. The 2nd
24875//   element is the device_id, if provided, and -1 otherwise. The 3rd
24876//   element holds the datatype value of the input tensor as according
24877//   to the enumerated type in tensorflow/core/framework/types.proto.
24878//   The remaining elements hold the total number of elements, -infs,
24879//   +infs, nans, negative finite numbers, zeros, and positive finite
24880//   numbers in the input tensor respectively.
24881//
24882//   5 (SHAPE): Output a float32/64 tensor of shape [10]. The 1st
24883//   element is the tensor_id, if provided, and -1 otherwise. The 2nd
24884//   element holds the datatype value of the input tensor as according
24885//   to the enumerated type in tensorflow/core/framework/types.proto.
24886//   The 3rd element holds the rank of the tensor. The 4th element holds
24887//   the number of elements within the tensor. Finally the remaining 6
24888//   elements hold the shape of the tensor. If the rank of the tensor
24889//   is lower than 6, the shape is right padded with zeros. If the rank
24890//   is greater than 6, the head of the shape is truncated.
24891//
24892//   6 (FULL_NUMERICS): Output a float32/64 tensor of shape [22]. The 1st
24893//   element is the tensor_id, if provided, and -1 otherwise. The 2nd
24894//   element is the device_id, if provided, and -1 otherwise. The 3rd
//   element holds the datatype value of the input tensor according
//   to the enumerated type in tensorflow/core/framework/types.proto.
24897//   The 4th element holds the rank of the tensor. The 5th to 11th
24898//   elements hold the shape of the tensor. If the rank of the tensor
24899//   is lower than 6, the shape is right padded with zeros. If the rank
24900//   is greater than 6, the head of the shape is truncated. The 12th to
24901//   18th elements hold the number of elements, -infs, +infs, nans,
24902//   denormal floats, negative finite numbers, zeros, and positive
24903//   finite numbers in the input tensor respectively. The final four
24904//   elements hold the min value, max value, mean, and variance of the
24905//   input tensor.
24906//
//   8 (REDUCE_INF_NAN_THREE_SLOTS): Output a float32/64 tensor of shape
//   [3]. The 1st element is -inf if any element of the input tensor is
//   -inf, or zero otherwise. The 2nd element is +inf if any element of
//   the input tensor is +inf, or zero otherwise. The 3rd element is
//   nan if any element of the input tensor is nan, or zero otherwise.
24912// If not specified, defaults to -1
24913func DebugNumericSummaryV2TensorDebugMode(value int64) DebugNumericSummaryV2Attr {
24914	return func(m optionalAttr) {
24915		m["tensor_debug_mode"] = value
24916	}
24917}
24918
24919// DebugNumericSummaryV2TensorId sets the optional tensor_id attribute to value.
24920//
24921// value: Optional. An integer identifier for the tensor being summarized by this op.
24922// If not specified, defaults to -1
24923func DebugNumericSummaryV2TensorId(value int64) DebugNumericSummaryV2Attr {
24924	return func(m optionalAttr) {
24925		m["tensor_id"] = value
24926	}
24927}
24928
24929// Debug Numeric Summary V2 Op.
24930//
24931// Computes a numeric summary of the input tensor. The shape of the output
24932// depends on the tensor_debug_mode attribute.
24933// This op is used internally by TensorFlow Debugger (tfdbg) v2.
24934//
24935// Arguments:
24936//	input: Input tensor, to be summarized by the op.
24937func DebugNumericSummaryV2(scope *Scope, input tf.Output, optional ...DebugNumericSummaryV2Attr) (output tf.Output) {
24938	if scope.Err() != nil {
24939		return
24940	}
24941	attrs := map[string]interface{}{}
24942	for _, a := range optional {
24943		a(attrs)
24944	}
24945	opspec := tf.OpSpec{
24946		Type: "DebugNumericSummaryV2",
24947		Input: []tf.Input{
24948			input,
24949		},
24950		Attrs: attrs,
24951	}
24952	op := scope.AddOperation(opspec)
24953	return op.Output(0)
24954}
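
// A minimal graph-construction sketch for DebugNumericSummaryV2, assuming a
// scope built with op.NewScope(); `s` and `x` are illustrative names, not part
// of the generated API. With tensor_debug_mode 2 (CURT_HEALTH), `summary` has
// shape [2]: the tensor_id and an inf/nan indicator bit.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{1, 2, 3})
// summary := op.DebugNumericSummaryV2(s, x,
// 	op.DebugNumericSummaryV2TensorDebugMode(2),
// 	op.DebugNumericSummaryV2TensorId(1))
// ```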
24955
24956// QueueCloseV2Attr is an optional argument to QueueCloseV2.
24957type QueueCloseV2Attr func(optionalAttr)
24958
24959// QueueCloseV2CancelPendingEnqueues sets the optional cancel_pending_enqueues attribute to value.
24960//
24961// value: If true, all pending enqueue requests that are
24962// blocked on the given queue will be canceled.
24963// If not specified, defaults to false
24964func QueueCloseV2CancelPendingEnqueues(value bool) QueueCloseV2Attr {
24965	return func(m optionalAttr) {
24966		m["cancel_pending_enqueues"] = value
24967	}
24968}
24969
24970// Closes the given queue.
24971//
24972// This operation signals that no more elements will be enqueued in the
24973// given queue. Subsequent Enqueue(Many) operations will fail.
24974// Subsequent Dequeue(Many) operations will continue to succeed if
24975// sufficient elements remain in the queue. Subsequent Dequeue(Many)
24976// operations that would block will fail immediately.
24977//
24978// Arguments:
24979//	handle: The handle to a queue.
24980//
24981// Returns the created operation.
24982func QueueCloseV2(scope *Scope, handle tf.Output, optional ...QueueCloseV2Attr) (o *tf.Operation) {
24983	if scope.Err() != nil {
24984		return
24985	}
24986	attrs := map[string]interface{}{}
24987	for _, a := range optional {
24988		a(attrs)
24989	}
24990	opspec := tf.OpSpec{
24991		Type: "QueueCloseV2",
24992		Input: []tf.Input{
24993			handle,
24994		},
24995		Attrs: attrs,
24996	}
24997	return scope.AddOperation(opspec)
24998}
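
// A minimal sketch of closing a queue, assuming the handle comes from a queue
// op such as FIFOQueueV2 elsewhere in this package; `s` and `q` are
// illustrative. The returned *tf.Operation is passed as a target to
// Session.Run rather than fetched as a tensor.
//
// ```go
// s := op.NewScope()
// q := op.FIFOQueueV2(s, []tf.DataType{tf.Float})
// closeOp := op.QueueCloseV2(s, q,
// 	op.QueueCloseV2CancelPendingEnqueues(true))
// ```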
24999
25000// DebugIdentityV2Attr is an optional argument to DebugIdentityV2.
25001type DebugIdentityV2Attr func(optionalAttr)
25002
25003// DebugIdentityV2TfdbgContextId sets the optional tfdbg_context_id attribute to value.
25004//
25005// value: A tfdbg-generated ID for the context that the op belongs to,
25006//   e.g., a concrete compiled tf.function.
25007// If not specified, defaults to ""
25008func DebugIdentityV2TfdbgContextId(value string) DebugIdentityV2Attr {
25009	return func(m optionalAttr) {
25010		m["tfdbg_context_id"] = value
25011	}
25012}
25013
25014// DebugIdentityV2OpName sets the optional op_name attribute to value.
25015//
25016// value: Optional. Name of the op that the debug op is concerned with.
25017//   Used only for single-tensor trace.
25018// If not specified, defaults to ""
25019func DebugIdentityV2OpName(value string) DebugIdentityV2Attr {
25020	return func(m optionalAttr) {
25021		m["op_name"] = value
25022	}
25023}
25024
25025// DebugIdentityV2OutputSlot sets the optional output_slot attribute to value.
25026//
25027// value: Optional. Output slot index of the tensor that the debug op
25028//   is concerned with. Used only for single-tensor trace.
25029// If not specified, defaults to -1
25030func DebugIdentityV2OutputSlot(value int64) DebugIdentityV2Attr {
25031	return func(m optionalAttr) {
25032		m["output_slot"] = value
25033	}
25034}
25035
25036// DebugIdentityV2TensorDebugMode sets the optional tensor_debug_mode attribute to value.
25037//
25038// value: TensorDebugMode enum value. See debug_event.proto for details.
25039// If not specified, defaults to -1
25040func DebugIdentityV2TensorDebugMode(value int64) DebugIdentityV2Attr {
25041	return func(m optionalAttr) {
25042		m["tensor_debug_mode"] = value
25043	}
25044}
25045
25046// DebugIdentityV2DebugUrls sets the optional debug_urls attribute to value.
25047//
25048// value: List of URLs to debug targets, e.g., file:///foo/tfdbg_dump.
25049// If not specified, defaults to {}
25050func DebugIdentityV2DebugUrls(value []string) DebugIdentityV2Attr {
25051	return func(m optionalAttr) {
25052		m["debug_urls"] = value
25053	}
25054}
25055
25056// DebugIdentityV2CircularBufferSize sets the optional circular_buffer_size attribute to value.
25057// If not specified, defaults to 1000
25058func DebugIdentityV2CircularBufferSize(value int64) DebugIdentityV2Attr {
25059	return func(m optionalAttr) {
25060		m["circular_buffer_size"] = value
25061	}
25062}
25063
25064// DebugIdentityV2TfdbgRunId sets the optional tfdbg_run_id attribute to value.
25065// If not specified, defaults to ""
25066func DebugIdentityV2TfdbgRunId(value string) DebugIdentityV2Attr {
25067	return func(m optionalAttr) {
25068		m["tfdbg_run_id"] = value
25069	}
25070}
25071
25072// Debug Identity V2 Op.
25073//
25074// Provides an identity mapping from input to output, while writing the content of
25075// the input tensor by calling DebugEventsWriter.
25076//
25077// The semantics of the input tensor depends on tensor_debug_mode. In typical
25078// usage, the input tensor comes directly from the user computation only when
// tensor_debug_mode is FULL_TENSOR (see protobuf/debug_event.proto for a
// list of all the possible values of tensor_debug_mode). For the other debug modes,
25081// the input tensor should be produced by an additional op or subgraph that
25082// computes summary information about one or more tensors.
25083//
25084// Arguments:
25085//	input: Input tensor, non-Reference type
25086func DebugIdentityV2(scope *Scope, input tf.Output, optional ...DebugIdentityV2Attr) (output tf.Output) {
25087	if scope.Err() != nil {
25088		return
25089	}
25090	attrs := map[string]interface{}{}
25091	for _, a := range optional {
25092		a(attrs)
25093	}
25094	opspec := tf.OpSpec{
25095		Type: "DebugIdentityV2",
25096		Input: []tf.Input{
25097			input,
25098		},
25099		Attrs: attrs,
25100	}
25101	op := scope.AddOperation(opspec)
25102	return op.Output(0)
25103}
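
// A minimal sketch wiring DebugIdentityV2 into a graph, assuming a scope from
// op.NewScope(); the context id and dump path below are illustrative values.
// `y` forwards `x` unchanged while the tensor content is written to the debug
// target.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{1, 2, 3})
// y := op.DebugIdentityV2(s, x,
// 	op.DebugIdentityV2TfdbgContextId("ctx-0"),
// 	op.DebugIdentityV2DebugUrls([]string{"file:///tmp/tfdbg_dump"}))
// ```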
25104
25105// Outputs a `Summary` protocol buffer with a tensor and per-plugin data.
25106//
25107// Arguments:
25108//	tag: A string attached to this summary. Used for organization in TensorBoard.
25109//	tensor: A tensor to serialize.
25110//	serialized_summary_metadata: A serialized SummaryMetadata proto. Contains plugin
25111// data.
25112func TensorSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, serialized_summary_metadata tf.Output) (summary tf.Output) {
25113	if scope.Err() != nil {
25114		return
25115	}
25116	opspec := tf.OpSpec{
25117		Type: "TensorSummaryV2",
25118		Input: []tf.Input{
25119			tag, tensor, serialized_summary_metadata,
25120		},
25121	}
25122	op := scope.AddOperation(opspec)
25123	return op.Output(0)
25124}
25125
25126// Scatters tensor at indices in an input list.
25127//
25128// Each member of the TensorList corresponds to one row of the input tensor,
25129// specified by the given index (see `tf.gather`).
25130//
25131// input_handle: The list to scatter into.
25132// tensor: The input tensor.
25133// indices: The indices used to index into the list.
25134// output_handle: The TensorList.
25135func TensorListScatterIntoExistingList(scope *Scope, input_handle tf.Output, tensor tf.Output, indices tf.Output) (output_handle tf.Output) {
25136	if scope.Err() != nil {
25137		return
25138	}
25139	opspec := tf.OpSpec{
25140		Type: "TensorListScatterIntoExistingList",
25141		Input: []tf.Input{
25142			input_handle, tensor, indices,
25143		},
25144	}
25145	op := scope.AddOperation(opspec)
25146	return op.Output(0)
25147}
25148
25149// DebugNanCountAttr is an optional argument to DebugNanCount.
25150type DebugNanCountAttr func(optionalAttr)
25151
25152// DebugNanCountDeviceName sets the optional device_name attribute to value.
25153// If not specified, defaults to ""
25154func DebugNanCountDeviceName(value string) DebugNanCountAttr {
25155	return func(m optionalAttr) {
25156		m["device_name"] = value
25157	}
25158}
25159
25160// DebugNanCountTensorName sets the optional tensor_name attribute to value.
25161//
25162// value: Name of the input tensor.
25163// If not specified, defaults to ""
25164func DebugNanCountTensorName(value string) DebugNanCountAttr {
25165	return func(m optionalAttr) {
25166		m["tensor_name"] = value
25167	}
25168}
25169
25170// DebugNanCountDebugUrls sets the optional debug_urls attribute to value.
25171//
25172// value: List of URLs to debug targets, e.g.,
//   file:///foo/tfdbg_dump, grpc://localhost:11011.
25174// If not specified, defaults to {}
25175func DebugNanCountDebugUrls(value []string) DebugNanCountAttr {
25176	return func(m optionalAttr) {
25177		m["debug_urls"] = value
25178	}
25179}
25180
25181// DebugNanCountGatedGrpc sets the optional gated_grpc attribute to value.
25182//
// value: Whether this op will be gated. If any of the debug_urls of this
25184//   debug node is of the grpc:// scheme, when the value of this attribute is set
25185//   to True, the data will not actually be sent via the grpc stream unless this
25186//   debug op has been enabled at the debug_url. If all of the debug_urls of this
25187//   debug node are of the grpc:// scheme and the debug op is enabled at none of
25188//   them, the output will be an empty Tensor.
25189// If not specified, defaults to false
25190func DebugNanCountGatedGrpc(value bool) DebugNanCountAttr {
25191	return func(m optionalAttr) {
25192		m["gated_grpc"] = value
25193	}
25194}
25195
25196// Debug NaN Value Counter Op.
25197//
25198// Counts number of NaNs in the input tensor, for debugging.
25199//
25200// Arguments:
25201//	input: Input tensor, non-Reference type.
25202func DebugNanCount(scope *Scope, input tf.Output, optional ...DebugNanCountAttr) (output tf.Output) {
25203	if scope.Err() != nil {
25204		return
25205	}
25206	attrs := map[string]interface{}{}
25207	for _, a := range optional {
25208		a(attrs)
25209	}
25210	opspec := tf.OpSpec{
25211		Type: "DebugNanCount",
25212		Input: []tf.Input{
25213			input,
25214		},
25215		Attrs: attrs,
25216	}
25217	op := scope.AddOperation(opspec)
25218	return op.Output(0)
25219}
25220
25221// DebugIdentityAttr is an optional argument to DebugIdentity.
25222type DebugIdentityAttr func(optionalAttr)
25223
25224// DebugIdentityDeviceName sets the optional device_name attribute to value.
25225//
25226// value: Name of the device on which the tensor resides.
25227// If not specified, defaults to ""
25228func DebugIdentityDeviceName(value string) DebugIdentityAttr {
25229	return func(m optionalAttr) {
25230		m["device_name"] = value
25231	}
25232}
25233
25234// DebugIdentityTensorName sets the optional tensor_name attribute to value.
25235//
25236// value: Name of the input tensor.
25237// If not specified, defaults to ""
25238func DebugIdentityTensorName(value string) DebugIdentityAttr {
25239	return func(m optionalAttr) {
25240		m["tensor_name"] = value
25241	}
25242}
25243
25244// DebugIdentityDebugUrls sets the optional debug_urls attribute to value.
25245//
25246// value: List of URLs to debug targets, e.g.,
//   file:///foo/tfdbg_dump, grpc://localhost:11011
25248// If not specified, defaults to {}
25249func DebugIdentityDebugUrls(value []string) DebugIdentityAttr {
25250	return func(m optionalAttr) {
25251		m["debug_urls"] = value
25252	}
25253}
25254
25255// DebugIdentityGatedGrpc sets the optional gated_grpc attribute to value.
25256//
25257// value: Whether this op will be gated. If any of the debug_urls of this
25258//   debug node is of the grpc:// scheme, when the value of this attribute is set
25259//   to True, the data will not actually be sent via the grpc stream unless this
25260//   debug op has been enabled at the debug_url. If all of the debug_urls of this
25261//   debug node are of the grpc:// scheme and the debug op is enabled at none of
25262//   them, the output will be an empty Tensor.
25263// If not specified, defaults to false
25264func DebugIdentityGatedGrpc(value bool) DebugIdentityAttr {
25265	return func(m optionalAttr) {
25266		m["gated_grpc"] = value
25267	}
25268}
25269
// Provides an identity mapping of the non-Ref type input tensor for debugging.
25273//
25274// Arguments:
25275//	input: Input tensor, non-Reference type
25276func DebugIdentity(scope *Scope, input tf.Output, optional ...DebugIdentityAttr) (output tf.Output) {
25277	if scope.Err() != nil {
25278		return
25279	}
25280	attrs := map[string]interface{}{}
25281	for _, a := range optional {
25282		a(attrs)
25283	}
25284	opspec := tf.OpSpec{
25285		Type: "DebugIdentity",
25286		Input: []tf.Input{
25287			input,
25288		},
25289		Attrs: attrs,
25290	}
25291	op := scope.AddOperation(opspec)
25292	return op.Output(0)
25293}
25294
25295// Sparse addition of two CSR matrices, C = alpha * A + beta * B.
25296//
25297// The gradients of SparseMatrixAdd outputs with respect to alpha and beta are not
25298// currently defined (TensorFlow will return zeros for these entries).
25299//
25300// Arguments:
25301//	a: A CSRSparseMatrix.
25302//	b: A CSRSparseMatrix.
25303//	alpha: A constant scalar.
25304//	beta: A constant scalar.
25305//
25306// Returns A CSRSparseMatrix.
25307func SparseMatrixAdd(scope *Scope, a tf.Output, b tf.Output, alpha tf.Output, beta tf.Output) (c tf.Output) {
25308	if scope.Err() != nil {
25309		return
25310	}
25311	opspec := tf.OpSpec{
25312		Type: "SparseMatrixAdd",
25313		Input: []tf.Input{
25314			a, b, alpha, beta,
25315		},
25316	}
25317	op := scope.AddOperation(opspec)
25318	return op.Output(0)
25319}
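
// A minimal sketch of the linear combination C = alpha*A + beta*B, assuming
// `a` and `b` are CSRSparseMatrix variants built elsewhere (for example with
// SparseTensorToCSRSparseMatrix); all names are illustrative.
//
// ```go
// s := op.NewScope()
// alpha := op.Const(s, float32(1.0))
// beta := op.Const(s, float32(-2.0))
// c := op.SparseMatrixAdd(s, a, b, alpha, beta) // c = A - 2*B
// ```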
25320
25321// CopyHostAttr is an optional argument to CopyHost.
25322type CopyHostAttr func(optionalAttr)
25323
25324// CopyHostTensorName sets the optional tensor_name attribute to value.
25325//
25326// value: The name of the input tensor.
25327// If not specified, defaults to ""
25328func CopyHostTensorName(value string) CopyHostAttr {
25329	return func(m optionalAttr) {
25330		m["tensor_name"] = value
25331	}
25332}
25333
25334// CopyHostDebugOpsSpec sets the optional debug_ops_spec attribute to value.
25335//
25336// value: A list of debug op spec (op, url, gated_grpc) for attached debug
25337// ops. Each element of the list has the format
25338// <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented
25339// as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
25340// "DebugIdentity;file:///tmp/tfdbg_1;0".
25341// If not specified, defaults to {}
25342func CopyHostDebugOpsSpec(value []string) CopyHostAttr {
25343	return func(m optionalAttr) {
25344		m["debug_ops_spec"] = value
25345	}
25346}
25347
25348// Copy a tensor to host.
25349//
25350// Performs CPU-to-CPU deep-copying of tensor.
// N.B.: If all the downstream attached debug ops are disabled given the current
25352// gRPC gating status, the output will simply forward the input tensor without
25353// deep-copying. See the documentation of Debug* ops for more details.
25354//
// Unlike the Copy Op, this op has a HostMemory constraint on its input or output.
25356//
25357// Arguments:
25358//	input: Input tensor.
25359func CopyHost(scope *Scope, input tf.Output, optional ...CopyHostAttr) (output tf.Output) {
25360	if scope.Err() != nil {
25361		return
25362	}
25363	attrs := map[string]interface{}{}
25364	for _, a := range optional {
25365		a(attrs)
25366	}
25367	opspec := tf.OpSpec{
25368		Type: "CopyHost",
25369		Input: []tf.Input{
25370			input,
25371		},
25372		Attrs: attrs,
25373	}
25374	op := scope.AddOperation(opspec)
25375	return op.Output(0)
25376}
25377
25378// Generates fingerprint values.
25379//
25380// Generates fingerprint values of `data`.
25381//
25382// Fingerprint op considers the first dimension of `data` as the batch dimension,
25383// and `output[i]` contains the fingerprint value generated from contents in
25384// `data[i, ...]` for all `i`.
25385//
25386// Fingerprint op writes fingerprint values as byte arrays. For example, the
25387// default method `farmhash64` generates a 64-bit fingerprint value at a time.
// This 8-byte value is written out as a `uint8` array of size 8, in little-endian
25389// order.
25390//
25391// For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4),
25392// and that the fingerprint method is `farmhash64`. In this case, the output shape
25393// is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of
25394// each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in
// `data[0, :, :]` and similarly `output[1, :]` is generated from the other 12 integers
25396// in `data[1, :, :]`.
25397//
25398// Note that this op fingerprints the raw underlying buffer, and it does not
25399// fingerprint Tensor's metadata such as data type and/or shape. For example, the
25400// fingerprint values are invariant under reshapes and bitcasts as long as the
// batch dimension remains the same:
25402//
25403// ```
25404// Fingerprint(data) == Fingerprint(Reshape(data, ...))
25405// Fingerprint(data) == Fingerprint(Bitcast(data, ...))
25406// ```
25407//
25408// For string data, one should expect `Fingerprint(data) !=
25409// Fingerprint(ReduceJoin(data))` in general.
25410//
25411// Arguments:
25412//	data: Must have rank 1 or higher.
25413//	method: Fingerprint method used by this op. Currently available method is
25414// `farmhash::fingerprint64`.
25415//
25416// Returns A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals to
25417// `data`'s first dimension, and the second dimension size depends on the
25418// fingerprint algorithm.
25419func Fingerprint(scope *Scope, data tf.Output, method tf.Output) (fingerprint tf.Output) {
25420	if scope.Err() != nil {
25421		return
25422	}
25423	opspec := tf.OpSpec{
25424		Type: "Fingerprint",
25425		Input: []tf.Input{
25426			data, method,
25427		},
25428	}
25429	op := scope.AddOperation(opspec)
25430	return op.Output(0)
25431}
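
// A minimal sketch matching the shapes described above, assuming a scope from
// op.NewScope(); the 2x4 input is illustrative. With method "farmhash64",
// `fp` has shape (2, 8) and dtype uint8: one 8-byte fingerprint per batch
// element.
//
// ```go
// s := op.NewScope()
// data := op.Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}})
// method := op.Const(s, "farmhash64")
// fp := op.Fingerprint(s, data, method)
// ```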
25432
25433// CopyAttr is an optional argument to Copy.
25434type CopyAttr func(optionalAttr)
25435
25436// CopyTensorName sets the optional tensor_name attribute to value.
25437//
25438// value: The name of the input tensor.
25439// If not specified, defaults to ""
25440func CopyTensorName(value string) CopyAttr {
25441	return func(m optionalAttr) {
25442		m["tensor_name"] = value
25443	}
25444}
25445
25446// CopyDebugOpsSpec sets the optional debug_ops_spec attribute to value.
25447//
25448// value: A list of debug op spec (op, url, gated_grpc) for attached debug
25449// ops. Each element of the list has the format
25450// <debug_op>;<grpc_url>;<gated_grpc>, wherein gated_grpc is boolean represented
25451// as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
25452// "DebugIdentity;file:///tmp/tfdbg_1;0".
25453// If not specified, defaults to {}
25454func CopyDebugOpsSpec(value []string) CopyAttr {
25455	return func(m optionalAttr) {
25456		m["debug_ops_spec"] = value
25457	}
25458}
25459
25460// Copy a tensor from CPU-to-CPU or GPU-to-GPU.
25461//
25462// Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the
25463// device on which the tensor is allocated.
// N.B.: If all the downstream attached debug ops are disabled given the current
25465// gRPC gating status, the output will simply forward the input tensor without
25466// deep-copying. See the documentation of Debug* ops for more details.
25467//
// Unlike the CopyHost Op, this op does not have a HostMemory constraint on its
25469// input or output.
25470//
25471// Arguments:
25472//	input: Input tensor.
25473func Copy(scope *Scope, input tf.Output, optional ...CopyAttr) (output tf.Output) {
25474	if scope.Err() != nil {
25475		return
25476	}
25477	attrs := map[string]interface{}{}
25478	for _, a := range optional {
25479		a(attrs)
25480	}
25481	opspec := tf.OpSpec{
25482		Type: "Copy",
25483		Input: []tf.Input{
25484			input,
25485		},
25486		Attrs: attrs,
25487	}
25488	op := scope.AddOperation(opspec)
25489	return op.Output(0)
25490}
25491
25492// Gets the next output from the given iterator.
25493//
// This operation is a synchronous version of IteratorGetNext. It should only be used
25495// in situations where the iterator does not block the calling thread, or where
25496// the calling thread is not a member of the thread pool used to execute parallel
25497// operations (e.g. in eager mode).
25498func IteratorGetNextSync(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
25499	if scope.Err() != nil {
25500		return
25501	}
25502	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
25503	opspec := tf.OpSpec{
25504		Type: "IteratorGetNextSync",
25505		Input: []tf.Input{
25506			iterator,
25507		},
25508		Attrs: attrs,
25509	}
25510	op := scope.AddOperation(opspec)
25511	if scope.Err() != nil {
25512		return
25513	}
25514	var idx int
25515	var err error
25516	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
25517		scope.UpdateErr("IteratorGetNextSync", err)
25518		return
25519	}
25520	return components
25521}
25522
25523// Creates a dataset by attaching tf.data.Options to `input_dataset`.
25524//
25525// Arguments:
25526//	input_dataset: A variant tensor representing the input dataset.
25527//	serialized_options: A `tf.string` scalar `tf.Tensor` of serialized `tf.data.Options` protocol buffer.
25528//
25529//
25530func OptionsDataset(scope *Scope, input_dataset tf.Output, serialized_options string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
25531	if scope.Err() != nil {
25532		return
25533	}
25534	attrs := map[string]interface{}{"serialized_options": serialized_options, "output_types": output_types, "output_shapes": output_shapes}
25535	opspec := tf.OpSpec{
25536		Type: "OptionsDataset",
25537		Input: []tf.Input{
25538			input_dataset,
25539		},
25540		Attrs: attrs,
25541	}
25542	op := scope.AddOperation(opspec)
25543	return op.Output(0)
25544}
25545
25546// ExtractJpegShapeAttr is an optional argument to ExtractJpegShape.
25547type ExtractJpegShapeAttr func(optionalAttr)
25548
25549// ExtractJpegShapeOutputType sets the optional output_type attribute to value.
25550//
25551// value: (Optional) The output type of the operation (int32 or int64).
25552// Defaults to int32.
25553// If not specified, defaults to DT_INT32
25554func ExtractJpegShapeOutputType(value tf.DataType) ExtractJpegShapeAttr {
25555	return func(m optionalAttr) {
25556		m["output_type"] = value
25557	}
25558}
25559
25560// Extract the shape information of a JPEG-encoded image.
25561//
25562// This op only parses the image header, so it is much faster than DecodeJpeg.
25563//
25564// Arguments:
25565//	contents: 0-D. The JPEG-encoded image.
25566//
25567// Returns 1-D. The image shape with format [height, width, channels].
25568func ExtractJpegShape(scope *Scope, contents tf.Output, optional ...ExtractJpegShapeAttr) (image_shape tf.Output) {
25569	if scope.Err() != nil {
25570		return
25571	}
25572	attrs := map[string]interface{}{}
25573	for _, a := range optional {
25574		a(attrs)
25575	}
25576	opspec := tf.OpSpec{
25577		Type: "ExtractJpegShape",
25578		Input: []tf.Input{
25579			contents,
25580		},
25581		Attrs: attrs,
25582	}
25583	op := scope.AddOperation(opspec)
25584	return op.Output(0)
25585}
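
// A minimal sketch, assuming the file contents are produced by ReadFile from
// this package; the filename is illustrative. `shape` is a length-3 vector
// [height, width, channels].
//
// ```go
// s := op.NewScope()
// contents := op.ReadFile(s, op.Const(s, "image.jpg"))
// shape := op.ExtractJpegShape(s, contents,
// 	op.ExtractJpegShapeOutputType(tf.Int64))
// ```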
25586
25587// Creates a TensorArray for storing the gradients of values in the given handle.
25588//
25589// If the given TensorArray gradient already exists, returns a reference to it.
25590//
25591// Locks the size of the original TensorArray by disabling its dynamic size flag.
25592//
25593// **A note about the input flow_in:**
25594//
25595// The handle flow_in forces the execution of the gradient lookup to occur
25596// only after certain other operations have occurred.  For example, when
25597// the forward TensorArray is dynamically sized, writes to this TensorArray
25598// may resize the object.  The gradient TensorArray is statically sized based
25599// on the size of the forward TensorArray when this operation executes.
25600// Furthermore, the size of the forward TensorArray is frozen by this call.
25601// As a result, the flow is used to ensure that the call to generate the gradient
25602// TensorArray only happens after all writes are executed.
25603//
25604// In the case of dynamically sized TensorArrays, gradient computation should
25605// only be performed on read operations that have themselves been chained via
25606// flow to occur only after all writes have executed. That way the final size
25607// of the forward TensorArray is known when this operation is called.
25608//
25609// **A note about the source attribute:**
25610//
25611// TensorArray gradient calls use an accumulator TensorArray object.  If
25612// multiple gradients are calculated and run in the same session, the multiple
25613// gradient nodes may accidentally flow through the same accumulator TensorArray.
25614// This double counts and generally breaks the TensorArray gradient flow.
25615//
25616// The solution is to identify which gradient call this particular
25617// TensorArray gradient is being called in.  This is performed by identifying
25618// a unique string (e.g. "gradients", "gradients_1", ...) from the input
25619// gradient Tensor's name.  This string is used as a suffix when creating
25620// the TensorArray gradient object here (the attribute `source`).
25621//
25622// The attribute `source` is added as a suffix to the forward TensorArray's
25623// name when performing the creation / lookup, so that each separate gradient
25624// calculation gets its own TensorArray accumulator.
25625//
25626// Arguments:
25627//	handle: The handle to the forward TensorArray.
25628//	flow_in: A float scalar that enforces proper chaining of operations.
25629//	source: The gradient source string, used to decide which gradient TensorArray
25630// to return.
25631func TensorArrayGradV3(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output) {
25632	if scope.Err() != nil {
25633		return
25634	}
25635	attrs := map[string]interface{}{"source": source}
25636	opspec := tf.OpSpec{
25637		Type: "TensorArrayGradV3",
25638		Input: []tf.Input{
25639			handle, flow_in,
25640		},
25641		Attrs: attrs,
25642	}
25643	op := scope.AddOperation(opspec)
25644	return op.Output(0), op.Output(1)
25645}
25646
25647// Produces a string handle for the given MultiDeviceIterator.
25648//
25649// Arguments:
25650//	multi_device_iterator: A MultiDeviceIterator resource.
25651//
25652// Returns A string representing the resource.
25653func MultiDeviceIteratorToStringHandle(scope *Scope, multi_device_iterator tf.Output) (string_handle tf.Output) {
25654	if scope.Err() != nil {
25655		return
25656	}
25657	opspec := tf.OpSpec{
25658		Type: "MultiDeviceIteratorToStringHandle",
25659		Input: []tf.Input{
25660			multi_device_iterator,
25661		},
25662	}
25663	op := scope.AddOperation(opspec)
25664	return op.Output(0)
25665}
25666
25667// QuantizeAndDequantizeV4Attr is an optional argument to QuantizeAndDequantizeV4.
25668type QuantizeAndDequantizeV4Attr func(optionalAttr)
25669
25670// QuantizeAndDequantizeV4SignedInput sets the optional signed_input attribute to value.
25671//
25672// value: Whether the quantization is signed or unsigned. (actually this parameter should
// have been called `signed_output`)
25674// If not specified, defaults to true
25675func QuantizeAndDequantizeV4SignedInput(value bool) QuantizeAndDequantizeV4Attr {
25676	return func(m optionalAttr) {
25677		m["signed_input"] = value
25678	}
25679}
25680
25681// QuantizeAndDequantizeV4NumBits sets the optional num_bits attribute to value.
25682//
25683// value: The bitwidth of the quantization.
25684// If not specified, defaults to 8
25685func QuantizeAndDequantizeV4NumBits(value int64) QuantizeAndDequantizeV4Attr {
25686	return func(m optionalAttr) {
25687		m["num_bits"] = value
25688	}
25689}
25690
25691// QuantizeAndDequantizeV4RangeGiven sets the optional range_given attribute to value.
25692//
25693// value: Whether the range is given or should be determined from the `input` tensor.
25694// If not specified, defaults to false
25695func QuantizeAndDequantizeV4RangeGiven(value bool) QuantizeAndDequantizeV4Attr {
25696	return func(m optionalAttr) {
25697		m["range_given"] = value
25698	}
25699}
25700
25701// QuantizeAndDequantizeV4RoundMode sets the optional round_mode attribute to value.
25702//
25703// value: The 'round_mode' attribute controls which rounding tie-breaking algorithm is
25704// used when rounding float values to their quantized equivalents. The following
25705// rounding modes are currently supported:
25706//
25707// *   HALF_TO_EVEN: this is the default round_mode.
// *   HALF_UP: round half towards positive infinity. In this mode 7.5 rounds up to 8 and -7.5
25709//     rounds up to -7.
25710//
25711// If not specified, defaults to "HALF_TO_EVEN"
25712func QuantizeAndDequantizeV4RoundMode(value string) QuantizeAndDequantizeV4Attr {
25713	return func(m optionalAttr) {
25714		m["round_mode"] = value
25715	}
25716}
25717
25718// QuantizeAndDequantizeV4NarrowRange sets the optional narrow_range attribute to value.
25719//
25720// value: If True, then the absolute value of the quantized minimum value is the same as
25721// the quantized maximum value, instead of 1 greater.
25722// i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
25723// If not specified, defaults to false
25724func QuantizeAndDequantizeV4NarrowRange(value bool) QuantizeAndDequantizeV4Attr {
25725	return func(m optionalAttr) {
25726		m["narrow_range"] = value
25727	}
25728}
25729
25730// QuantizeAndDequantizeV4Axis sets the optional axis attribute to value.
25731//
25732// value: If specified, this axis is treated as a channel or slice axis, and a separate
25733// quantization range is used for each channel or slice along this axis.
25734// If not specified, defaults to -1
25735func QuantizeAndDequantizeV4Axis(value int64) QuantizeAndDequantizeV4Attr {
25736	return func(m optionalAttr) {
25737		m["axis"] = value
25738	}
25739}
25740
25741// Quantizes then dequantizes a tensor.
25742//
25743// This is almost identical to QuantizeAndDequantizeV2, except that it returns a
25744// gradient of 1 for inputs that are within the quantization range, or 0 otherwise.
25745//
25746// Arguments:
25747//	input: Tensor to quantize and then dequantize.
25748//	input_min: If `range_given == True`, this specifies the minimum input value that needs to
25749// be represented, otherwise it is determined from the min value of the `input`
25750// tensor.
25751//	input_max: If `range_given == True`, this specifies the maximum input value that needs to
25752// be represented, otherwise it is determined from the max value of the `input`
25753// tensor.
25754func QuantizeAndDequantizeV4(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV4Attr) (output tf.Output) {
25755	if scope.Err() != nil {
25756		return
25757	}
25758	attrs := map[string]interface{}{}
25759	for _, a := range optional {
25760		a(attrs)
25761	}
25762	opspec := tf.OpSpec{
25763		Type: "QuantizeAndDequantizeV4",
25764		Input: []tf.Input{
25765			input, input_min, input_max,
25766		},
25767		Attrs: attrs,
25768	}
25769	op := scope.AddOperation(opspec)
25770	return op.Output(0)
25771}
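
// A minimal sketch of an 8-bit fake-quantization round trip with an explicit
// range, assuming a scope from op.NewScope(); the values are illustrative.
// Since range_given is true, inMin/inMax define the representable range.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{-1.0, -0.5, 0.0, 0.3})
// inMin := op.Const(s, float32(-1.0))
// inMax := op.Const(s, float32(1.0))
// y := op.QuantizeAndDequantizeV4(s, x, inMin, inMax,
// 	op.QuantizeAndDequantizeV4NumBits(8),
// 	op.QuantizeAndDequantizeV4RangeGiven(true))
// ```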
25772
25773// Gets next element for the provided shard number.
25774//
25775// Arguments:
25776//	multi_device_iterator: A MultiDeviceIterator resource.
25777//	shard_num: Integer representing which shard to fetch data for.
25778//	incarnation_id: Which incarnation of the MultiDeviceIterator is running.
25779//	output_types: The type list for the return values.
25780//	output_shapes: The list of shapes being produced.
25781//
25782// Returns Result of the get_next on the dataset.
25783func MultiDeviceIteratorGetNextFromShard(scope *Scope, multi_device_iterator tf.Output, shard_num tf.Output, incarnation_id tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
25784	if scope.Err() != nil {
25785		return
25786	}
25787	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
25788	opspec := tf.OpSpec{
25789		Type: "MultiDeviceIteratorGetNextFromShard",
25790		Input: []tf.Input{
25791			multi_device_iterator, shard_num, incarnation_id,
25792		},
25793		Attrs: attrs,
25794	}
25795	op := scope.AddOperation(opspec)
25796	if scope.Err() != nil {
25797		return
25798	}
25799	var idx int
25800	var err error
25801	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
25802		scope.UpdateErr("MultiDeviceIteratorGetNextFromShard", err)
25803		return
25804	}
25805	return components
25806}
25807
25808// BoostedTreesCalculateBestFeatureSplitAttr is an optional argument to BoostedTreesCalculateBestFeatureSplit.
25809type BoostedTreesCalculateBestFeatureSplitAttr func(optionalAttr)
25810
25811// BoostedTreesCalculateBestFeatureSplitSplitType sets the optional split_type attribute to value.
25812//
25813// value: A string indicating if this Op should perform inequality split or equality split.
25814// If not specified, defaults to "inequality"
25815func BoostedTreesCalculateBestFeatureSplitSplitType(value string) BoostedTreesCalculateBestFeatureSplitAttr {
25816	return func(m optionalAttr) {
25817		m["split_type"] = value
25818	}
25819}
25820
25821// Calculates gains for each feature and returns the best possible split information for the feature.
25822//
25823// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
25824//
25825// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
25826//
25827// In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
25828//
// The output shapes are compatible in a way that the first dimension of all tensors is the same and equal to the number of possible split nodes for each feature.
25830//
25831// Arguments:
//	node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as in `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).
25833//	stats_summary: A Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature.
25834// The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
25835//	l1: l1 regularization factor on leaf weights, per instance based.
25836//	l2: l2 regularization factor on leaf weights, per instance based.
25837//	tree_complexity: adjustment to the gain, per leaf based.
//	min_node_weight: minimum average of hessians in a node required before the node is considered for splitting.
25839//	logits_dimension: The dimension of logit, i.e., number of classes.
25840//
25841// Returns:
//	node_ids: A Rank 1 tensor indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.
//	gains: A Rank 1 tensor indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
//	feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimension. See above for details like shapes and sizes.
//	thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
//	left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
//	right_node_contribs: A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
//	split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing. See above for details like shapes and sizes.
25849// Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
25850func BoostedTreesCalculateBestFeatureSplit(scope *Scope, node_id_range tf.Output, stats_summary tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, logits_dimension int64, optional ...BoostedTreesCalculateBestFeatureSplitAttr) (node_ids tf.Output, gains tf.Output, feature_dimensions tf.Output, thresholds tf.Output, left_node_contribs tf.Output, right_node_contribs tf.Output, split_with_default_directions tf.Output) {
25851	if scope.Err() != nil {
25852		return
25853	}
25854	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
25855	for _, a := range optional {
25856		a(attrs)
25857	}
25858	opspec := tf.OpSpec{
25859		Type: "BoostedTreesCalculateBestFeatureSplit",
25860		Input: []tf.Input{
25861			node_id_range, stats_summary, l1, l2, tree_complexity, min_node_weight,
25862		},
25863		Attrs: attrs,
25864	}
25865	op := scope.AddOperation(opspec)
25866	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
25867}
25868
25869// Wraps the XLA DynamicUpdateSlice operator, documented at
25870//
25871//  https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice
25872// .
25873//
25874// XlaDynamicUpdateSlice generates a result which is the value of the `input`
25875// operand, with a slice update overwritten at `indices`. The shape of `update`
// determines the shape of the sub-array of the result which is updated. The
// `indices` operand must have rank 1, with dimension size equal to the rank of `input`.
25878//
25879// Handling of out-of-bounds slice indices is implementation-defined.
25880//
25881// Arguments:
25882//	input: A `Tensor` of type T.
25883//	update: A `Tensor` of type T. Same rank as `input`.
25884//	indices: A vector of indices into `input`. Must have length equal to the rank of
25885// `input`.
25886//
25887// Returns A `Tensor` of type T.
25888func XlaDynamicUpdateSlice(scope *Scope, input tf.Output, update tf.Output, indices tf.Output) (output tf.Output) {
25889	if scope.Err() != nil {
25890		return
25891	}
25892	opspec := tf.OpSpec{
25893		Type: "XlaDynamicUpdateSlice",
25894		Input: []tf.Input{
25895			input, update, indices,
25896		},
25897	}
25898	op := scope.AddOperation(opspec)
25899	return op.Output(0)
25900}
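
// A minimal worked sketch, assuming a scope from op.NewScope(). The rank-1
// `indices` has length 2 (the rank of `input`), so the 1x2 `update` is
// written at row 0, column 1, giving [[0, 1, 2], [0, 0, 0]].
//
// ```go
// s := op.NewScope()
// input := op.Const(s, [][]float32{{0, 0, 0}, {0, 0, 0}})
// update := op.Const(s, [][]float32{{1, 2}})
// indices := op.Const(s, []int32{0, 1})
// out := op.XlaDynamicUpdateSlice(s, input, update, indices)
// ```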
25901
25902// Computes gradients for SparseSegmentMean.
25903//
25904// Returns tensor "output" with same shape as grad, except for dimension 0 whose
25905// value is output_dim0.
25906//
25907// Arguments:
25908//	grad: gradient propagated to the SparseSegmentMean op.
25909//	indices: indices passed to the corresponding SparseSegmentMean op.
25910//	segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.
25911//	output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.
25912func SparseSegmentMeanGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
25913	if scope.Err() != nil {
25914		return
25915	}
25916	opspec := tf.OpSpec{
25917		Type: "SparseSegmentMeanGrad",
25918		Input: []tf.Input{
25919			grad, indices, segment_ids, output_dim0,
25920		},
25921	}
25922	op := scope.AddOperation(opspec)
25923	return op.Output(0)
25924}
25925
25926// SvdAttr is an optional argument to Svd.
25927type SvdAttr func(optionalAttr)
25928
25929// SvdComputeUv sets the optional compute_uv attribute to value.
25930//
25931// value: If true, left and right singular vectors will be
25932// computed and returned in `u` and `v`, respectively.
// If false, `u` and `v` are not set and should never be referenced.
25934// If not specified, defaults to true
25935func SvdComputeUv(value bool) SvdAttr {
25936	return func(m optionalAttr) {
25937		m["compute_uv"] = value
25938	}
25939}
25940
25941// SvdFullMatrices sets the optional full_matrices attribute to value.
25942//
25943// value: If true, compute full-sized `u` and `v`. If false
25944// (the default), compute only the leading `P` singular vectors.
25945// Ignored if `compute_uv` is `False`.
25946// If not specified, defaults to false
25947func SvdFullMatrices(value bool) SvdAttr {
25948	return func(m optionalAttr) {
25949		m["full_matrices"] = value
25950	}
25951}
25952
25953// Computes the singular value decompositions of one or more matrices.
25954//
25955// Computes the SVD of each inner matrix in `input` such that
25956// `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
25957//
25958// ```python
25959// # a is a tensor containing a batch of matrices.
25960// # s is a tensor of singular values for each matrix.
25961// # u is the tensor containing the left singular vectors for each matrix.
25962// # v is the tensor containing the right singular vectors for each matrix.
25963// s, u, v = svd(a)
25964// s, _, _ = svd(a, compute_uv=False)
25965// ```
25966//
25967// Arguments:
25968//	input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
25969// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
25970//
25971// Returns:
25972//	s: Singular values. Shape is `[..., P]`.
25973//	u: Left singular vectors. If `full_matrices` is `False` then shape is
25974// `[..., M, P]`; if `full_matrices` is `True` then shape is
25975// `[..., M, M]`. Undefined if `compute_uv` is `False`.
//	v: Right singular vectors. If `full_matrices` is `False` then shape is
25977// `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
25978// Undefined if `compute_uv` is false.
25979func Svd(scope *Scope, input tf.Output, optional ...SvdAttr) (s tf.Output, u tf.Output, v tf.Output) {
25980	if scope.Err() != nil {
25981		return
25982	}
25983	attrs := map[string]interface{}{}
25984	for _, a := range optional {
25985		a(attrs)
25986	}
25987	opspec := tf.OpSpec{
25988		Type: "Svd",
25989		Input: []tf.Input{
25990			input,
25991		},
25992		Attrs: attrs,
25993	}
25994	op := scope.AddOperation(opspec)
25995	return op.Output(0), op.Output(1), op.Output(2)
25996}
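
// A Go counterpart of the Python example above, as a sketch assuming a scope
// from op.NewScope(); the 2x2 matrix is illustrative. The scope gives the two
// calls distinct op names.
//
// ```go
// s := op.NewScope()
// a := op.Const(s, [][]float32{{1, 2}, {3, 4}})
// sv, u, v := op.Svd(s, a)                             // singular values and vectors
// svOnly, _, _ := op.Svd(s, a, op.SvdComputeUv(false)) // values only
// ```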
25997
25998// Determine the script codes of a given tensor of Unicode integer code points.
25999//
26000// This operation converts Unicode code points to script codes corresponding to
26001// each code point. Script codes correspond to International Components for
26002// Unicode (ICU) UScriptCode values.
26003//
26004// See
26005// [ICU project docs](http://icu-project.org/apiref/icu4c/uscript_8h.html)
26006// for more details on script codes.
26007//
26008// For an example, see the unicode strings guide on [unicode scripts]
26009// (https://www.tensorflow.org/tutorials/load_data/unicode#representing_unicode).
26010//
26011// Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will
26012// match input shape.
26013//
26014// Examples:
26015//
26016// >>> tf.strings.unicode_script([1, 31, 38])
26017// <tf.Tensor: shape=(3,), dtype=int32, numpy=array([0, 0, 0], dtype=int32)>
26018//
26019// Arguments:
26020//	input: A Tensor of int32 Unicode code points.
26021//
26022// Returns A Tensor of int32 script codes corresponding to each input code point.
26023func UnicodeScript(scope *Scope, input tf.Output) (output tf.Output) {
26024	if scope.Err() != nil {
26025		return
26026	}
26027	opspec := tf.OpSpec{
26028		Type: "UnicodeScript",
26029		Input: []tf.Input{
26030			input,
26031		},
26032	}
26033	op := scope.AddOperation(opspec)
26034	return op.Output(0)
26035}
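
// A Go counterpart of the example above, as a sketch assuming a scope from
// op.NewScope(). Fetching `script` in a session would yield [0, 0, 0].
//
// ```go
// s := op.NewScope()
// cp := op.Const(s, []int32{1, 31, 38})
// script := op.UnicodeScript(s, cp)
// ```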
26036
26037// Returns the value stored in an Optional variant or raises an error if none exists.
26038func OptionalGetValue(scope *Scope, optional tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
26039	if scope.Err() != nil {
26040		return
26041	}
26042	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
26043	opspec := tf.OpSpec{
26044		Type: "OptionalGetValue",
26045		Input: []tf.Input{
26046			optional,
26047		},
26048		Attrs: attrs,
26049	}
26050	op := scope.AddOperation(opspec)
26051	if scope.Err() != nil {
26052		return
26053	}
26054	var idx int
26055	var err error
26056	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
26057		scope.UpdateErr("OptionalGetValue", err)
26058		return
26059	}
26060	return components
26061}
26062
26063// DecodeCSVAttr is an optional argument to DecodeCSV.
26064type DecodeCSVAttr func(optionalAttr)
26065
26066// DecodeCSVFieldDelim sets the optional field_delim attribute to value.
26067//
26068// value: char delimiter to separate fields in a record.
26069// If not specified, defaults to ","
26070func DecodeCSVFieldDelim(value string) DecodeCSVAttr {
26071	return func(m optionalAttr) {
26072		m["field_delim"] = value
26073	}
26074}
26075
26076// DecodeCSVUseQuoteDelim sets the optional use_quote_delim attribute to value.
26077//
26078// value: If false, treats double quotation marks as regular
26079// characters inside of the string fields (ignoring RFC 4180, Section 2,
26080// Bullet 5).
26081// If not specified, defaults to true
26082func DecodeCSVUseQuoteDelim(value bool) DecodeCSVAttr {
26083	return func(m optionalAttr) {
26084		m["use_quote_delim"] = value
26085	}
26086}
26087
26088// DecodeCSVNaValue sets the optional na_value attribute to value.
26089//
26090// value: Additional string to recognize as NA/NaN.
26091// If not specified, defaults to ""
26092func DecodeCSVNaValue(value string) DecodeCSVAttr {
26093	return func(m optionalAttr) {
26094		m["na_value"] = value
26095	}
26096}
26097
26098// DecodeCSVSelectCols sets the optional select_cols attribute to value.
26099// If not specified, defaults to {}
26100func DecodeCSVSelectCols(value []int64) DecodeCSVAttr {
26101	return func(m optionalAttr) {
26102		m["select_cols"] = value
26103	}
26104}
26105
26106// Convert CSV records to tensors. Each column maps to one tensor.
26107//
26108// RFC 4180 format is expected for the CSV records.
26109// (https://tools.ietf.org/html/rfc4180)
// Note that we allow leading and trailing spaces in int or float fields.
26111//
26112// Arguments:
//	records: Each string is a record/row in the CSV and all records should have
26114// the same format.
26115//	record_defaults: One tensor per column of the input record, with either a
26116// scalar default value for that column or an empty vector if the column is
26117// required.
26118//
26119// Returns Each tensor will have the same shape as records.
26120func DecodeCSV(scope *Scope, records tf.Output, record_defaults []tf.Output, optional ...DecodeCSVAttr) (output []tf.Output) {
26121	if scope.Err() != nil {
26122		return
26123	}
26124	attrs := map[string]interface{}{}
26125	for _, a := range optional {
26126		a(attrs)
26127	}
26128	opspec := tf.OpSpec{
26129		Type: "DecodeCSV",
26130		Input: []tf.Input{
26131			records, tf.OutputList(record_defaults),
26132		},
26133		Attrs: attrs,
26134	}
26135	op := scope.AddOperation(opspec)
26136	if scope.Err() != nil {
26137		return
26138	}
26139	var idx int
26140	var err error
26141	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
26142		scope.UpdateErr("DecodeCSV", err)
26143		return
26144	}
26145	return output
26146}
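
// A minimal sketch decoding two records into three typed columns, assuming a
// scope from op.NewScope(); the records and defaults below are illustrative.
// Each record_defaults entry is a single-element tensor that fixes the
// column's type and default value.
//
// ```go
// s := op.NewScope()
// records := op.Const(s, []string{"1,2.5,hello", "4,0.1,world"})
// defaults := []tf.Output{
// 	op.Const(s, []int32{0}),   // column 0: int32
// 	op.Const(s, []float32{0}), // column 1: float32
// 	op.Const(s, []string{""}), // column 2: string
// }
// cols := op.DecodeCSV(s, records, defaults)
// ```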
26147
26148// An op used by XLA SPMD partitioner to switch from automatic partitioning to
26149//
26150// manual partitioning. It annotates the input (full-shape, to be automatically
26151// partitioned) with the same sharding used by manual partitioning, and outputs a
26152// shard-shaped tensor to be consumed by later manually-partitioned ops. If the
26153// shape is not evenly partitionable, the padding region will be masked with 0s.
26154func XlaSpmdFullToShardShape(scope *Scope, input tf.Output, manual_sharding string) (output tf.Output) {
26155	if scope.Err() != nil {
26156		return
26157	}
26158	attrs := map[string]interface{}{"manual_sharding": manual_sharding}
26159	opspec := tf.OpSpec{
26160		Type: "XlaSpmdFullToShardShape",
26161		Input: []tf.Input{
26162			input,
26163		},
26164		Attrs: attrs,
26165	}
26166	op := scope.AddOperation(opspec)
26167	return op.Output(0)
26168}
26169
26170// RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr is an optional argument to RetrieveTPUEmbeddingStochasticGradientDescentParameters.
26171type RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr func(optionalAttr)
26172
26173// RetrieveTPUEmbeddingStochasticGradientDescentParametersTableId sets the optional table_id attribute to value.
26174// If not specified, defaults to -1
26175func RetrieveTPUEmbeddingStochasticGradientDescentParametersTableId(value int64) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr {
26176	return func(m optionalAttr) {
26177		m["table_id"] = value
26178	}
26179}
26180
26181// RetrieveTPUEmbeddingStochasticGradientDescentParametersTableName sets the optional table_name attribute to value.
26182// If not specified, defaults to ""
26183func RetrieveTPUEmbeddingStochasticGradientDescentParametersTableName(value string) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr {
26184	return func(m optionalAttr) {
26185		m["table_name"] = value
26186	}
26187}
26188
26189// RetrieveTPUEmbeddingStochasticGradientDescentParametersConfig sets the optional config attribute to value.
26190// If not specified, defaults to ""
26191func RetrieveTPUEmbeddingStochasticGradientDescentParametersConfig(value string) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr {
26192	return func(m optionalAttr) {
26193		m["config"] = value
26194	}
26195}
26196
26197// Retrieve SGD embedding parameters.
26198//
26199// An op that retrieves optimization parameters from embedding to host
26200// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
26201// the correct embedding table configuration. For example, this op is
26202// used to retrieve updated parameters before saving a checkpoint.
26203//
// Returns Parameters updated by the stochastic gradient descent optimization algorithm.
26205func RetrieveTPUEmbeddingStochasticGradientDescentParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr) (parameters tf.Output) {
26206	if scope.Err() != nil {
26207		return
26208	}
26209	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
26210	for _, a := range optional {
26211		a(attrs)
26212	}
26213	opspec := tf.OpSpec{
26214		Type: "RetrieveTPUEmbeddingStochasticGradientDescentParameters",
26215
26216		Attrs: attrs,
26217	}
26218	op := scope.AddOperation(opspec)
26219	return op.Output(0)
26220}
26221
26222// Creates an Optional variant with no value.
26223func OptionalNone(scope *Scope) (optional tf.Output) {
26224	if scope.Err() != nil {
26225		return
26226	}
26227	opspec := tf.OpSpec{
26228		Type: "OptionalNone",
26229	}
26230	op := scope.AddOperation(opspec)
26231	return op.Output(0)
26232}
26233
26234// Retrieve multiple values from the computation outfeed. Device ordinal is a
26235// tensor allowing dynamic outfeed.
26236//
26237// This operation will block indefinitely until data is available. Output `i`
26238// corresponds to XLA tuple element `i`.
26239//
26240// Arguments:
26241//	device_ordinal: An int scalar tensor, representing the TPU device to use. This should be -1 when
26242// the Op is running on a TPU device, and >= 0 when the Op is running on the CPU
26243// device.
26244//	dtypes: The element types of each element in `outputs`.
26245//	shapes: The shapes of each tensor in `outputs`.
26246//
26247// Returns A list of tensors that will be read from the outfeed.
26248func OutfeedDequeueTupleV2(scope *Scope, device_ordinal tf.Output, dtypes []tf.DataType, shapes []tf.Shape) (outputs []tf.Output) {
26249	if scope.Err() != nil {
26250		return
26251	}
26252	attrs := map[string]interface{}{"dtypes": dtypes, "shapes": shapes}
26253	opspec := tf.OpSpec{
26254		Type: "OutfeedDequeueTupleV2",
26255		Input: []tf.Input{
26256			device_ordinal,
26257		},
26258		Attrs: attrs,
26259	}
26260	op := scope.AddOperation(opspec)
26261	if scope.Err() != nil {
26262		return
26263	}
26264	var idx int
26265	var err error
26266	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
26267		scope.UpdateErr("OutfeedDequeueTupleV2", err)
26268		return
26269	}
26270	return outputs
26271}
26272
26273// QrAttr is an optional argument to Qr.
26274type QrAttr func(optionalAttr)
26275
26276// QrFullMatrices sets the optional full_matrices attribute to value.
26277//
26278// value: If true, compute full-sized `q` and `r`. If false
26279// (the default), compute only the leading `P` columns of `q`.
26280// If not specified, defaults to false
26281func QrFullMatrices(value bool) QrAttr {
26282	return func(m optionalAttr) {
26283		m["full_matrices"] = value
26284	}
26285}
26286
26287// Computes the QR decompositions of one or more matrices.
26288//
26289// Computes the QR decomposition of each inner matrix in `tensor` such that
// `tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
26291//
26292// Currently, the gradient for the QR decomposition is well-defined only when
26293// the first `P` columns of the inner matrix are linearly independent, where
// `P` is the minimum of `M` and `N`, the 2 inner-most dimensions of `tensor`.
26295//
26296// ```python
26297// # a is a tensor.
26298// # q is a tensor of orthonormal matrices.
26299// # r is a tensor of upper triangular matrices.
26300// q, r = qr(a)
26301// q_full, r_full = qr(a, full_matrices=True)
26302// ```
26303//
26304// Arguments:
26305//	input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
26306// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
26307//
26308// Returns:
26309//	q: Orthonormal basis for range of `a`. If `full_matrices` is `False` then
26310// shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
26311// `[..., M, M]`.
26312//	r: Triangular factor. If `full_matrices` is `False` then shape is
26313// `[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.
26314func Qr(scope *Scope, input tf.Output, optional ...QrAttr) (q tf.Output, r tf.Output) {
26315	if scope.Err() != nil {
26316		return
26317	}
26318	attrs := map[string]interface{}{}
26319	for _, a := range optional {
26320		a(attrs)
26321	}
26322	opspec := tf.OpSpec{
26323		Type: "Qr",
26324		Input: []tf.Input{
26325			input,
26326		},
26327		Attrs: attrs,
26328	}
26329	op := scope.AddOperation(opspec)
26330	return op.Output(0), op.Output(1)
26331}
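
// Example: a minimal graph-construction sketch for Qr. The shape, the
// Placeholder/Const helpers, and the variable names are illustrative
// assumptions; only Qr and QrFullMatrices come from the wrappers above.
//
// ```go
// s := op.NewScope()
// a := op.Placeholder(s, tf.Float, op.PlaceholderShape(tf.MakeShape(4, 3)))
// q, r := op.Qr(s, a)                                   // q: [4, 3], r: [3, 3]
// qFull, rFull := op.Qr(s, a, op.QrFullMatrices(true))  // q: [4, 4], r: [4, 3]
// _, _, _, _ = q, r, qFull, rFull
// ```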

// Constructs an Optional variant from a tuple of tensors.
func OptionalFromValue(scope *Scope, components []tf.Output) (optional tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "OptionalFromValue",
		Input: []tf.Input{
			tf.OutputList(components),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// OptimizeDatasetV2Attr is an optional argument to OptimizeDatasetV2.
type OptimizeDatasetV2Attr func(optionalAttr)

// OptimizeDatasetV2OptimizationConfigs sets the optional optimization_configs attribute to value.
// If not specified, defaults to {}
func OptimizeDatasetV2OptimizationConfigs(value []string) OptimizeDatasetV2Attr {
	return func(m optionalAttr) {
		m["optimization_configs"] = value
	}
}

// Creates a dataset by applying related optimizations to `input_dataset`.
//
// Arguments:
//	input_dataset: A variant tensor representing the input dataset.
//	optimizations_enabled: A `tf.string` vector `tf.Tensor` identifying user enabled optimizations.
//	optimizations_disabled: A `tf.string` vector `tf.Tensor` identifying user disabled optimizations.
//	optimizations_default: A `tf.string` vector `tf.Tensor` identifying optimizations that are enabled by default.
//
//
func OptimizeDatasetV2(scope *Scope, input_dataset tf.Output, optimizations_enabled tf.Output, optimizations_disabled tf.Output, optimizations_default tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...OptimizeDatasetV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OptimizeDatasetV2",
		Input: []tf.Input{
			input_dataset, optimizations_enabled, optimizations_disabled, optimizations_default,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// OptimizeDatasetAttr is an optional argument to OptimizeDataset.
type OptimizeDatasetAttr func(optionalAttr)

// OptimizeDatasetOptimizationConfigs sets the optional optimization_configs attribute to value.
// If not specified, defaults to {}
func OptimizeDatasetOptimizationConfigs(value []string) OptimizeDatasetAttr {
	return func(m optionalAttr) {
		m["optimization_configs"] = value
	}
}

// Creates a dataset by applying optimizations to `input_dataset`.
//
// Arguments:
//	input_dataset: A variant tensor representing the input dataset.
//	optimizations: A `tf.string` vector `tf.Tensor` identifying optimizations to use.
//
//
func OptimizeDataset(scope *Scope, input_dataset tf.Output, optimizations tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...OptimizeDatasetAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OptimizeDataset",
		Input: []tf.Input{
			input_dataset, optimizations,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Restores tensors from a V2 checkpoint.
//
// For backward compatibility with the V1 format, this Op currently allows
// restoring from a V1 checkpoint as well:
//   - This Op first attempts to find the V2 index file pointed to by "prefix", and
//     if found, proceeds to read it as a V2 checkpoint;
//   - Otherwise the V1 read path is invoked.
// Relying on this behavior is not recommended, as the ability to fall back to read
// V1 might be deprecated and eventually removed.
//
// By default, restores the named tensors in full.  If the caller wishes to restore
// specific slices of stored tensors, "shape_and_slices" should be non-empty
// strings and correspondingly well-formed.
//
// Callers must ensure all the named tensors are indeed stored in the checkpoint.
//
// Arguments:
//	prefix: Must have a single element.  The prefix of a V2 checkpoint.
//	tensor_names: shape {N}.  The names of the tensors to be restored.
//	shape_and_slices: shape {N}.  The slice specs of the tensors to be restored.
// Empty strings indicate that they are non-partitioned tensors.
//	dtypes: shape {N}.  The list of expected dtypes for the tensors.  Must match
// those stored in the checkpoint.
//
// Returns shape {N}.  The restored tensors, whose shapes are read from the
// checkpoint directly.
func RestoreV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, dtypes []tf.DataType) (tensors []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	opspec := tf.OpSpec{
		Type: "RestoreV2",
		Input: []tf.Input{
			prefix, tensor_names, shape_and_slices,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if tensors, idx, err = makeOutputList(op, idx, "tensors"); err != nil {
		scope.UpdateErr("RestoreV2", err)
		return
	}
	return tensors
}
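
// Example: a sketch restoring two tensors in full from a V2 checkpoint. The
// checkpoint prefix and tensor names are placeholder values; only RestoreV2
// itself is defined above.
//
// ```go
// s := op.NewScope()
// prefix := op.Const(s, "/tmp/model.ckpt")
// names := op.Const(s, []string{"w", "b"})
// slices := op.Const(s, []string{"", ""}) // empty specs restore full tensors
// tensors := op.RestoreV2(s, prefix, names, slices, []tf.DataType{tf.Float, tf.Float})
// _ = tensors
// ```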

// DatasetToGraphV2Attr is an optional argument to DatasetToGraphV2.
type DatasetToGraphV2Attr func(optionalAttr)

// DatasetToGraphV2ExternalStatePolicy sets the optional external_state_policy attribute to value.
// If not specified, defaults to 0
func DatasetToGraphV2ExternalStatePolicy(value int64) DatasetToGraphV2Attr {
	return func(m optionalAttr) {
		m["external_state_policy"] = value
	}
}

// DatasetToGraphV2StripDeviceAssignment sets the optional strip_device_assignment attribute to value.
// If not specified, defaults to false
func DatasetToGraphV2StripDeviceAssignment(value bool) DatasetToGraphV2Attr {
	return func(m optionalAttr) {
		m["strip_device_assignment"] = value
	}
}

// Returns a serialized GraphDef representing `input_dataset`.
//
// Returns a graph representation for `input_dataset`.
//
// Arguments:
//	input_dataset: A variant tensor representing the dataset to return the graph representation for.
//
// Returns The graph representation of the dataset (as serialized GraphDef).
func DatasetToGraphV2(scope *Scope, input_dataset tf.Output, optional ...DatasetToGraphV2Attr) (graph tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DatasetToGraphV2",
		Input: []tf.Input{
			input_dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// DatasetToGraphAttr is an optional argument to DatasetToGraph.
type DatasetToGraphAttr func(optionalAttr)

// DatasetToGraphStatefulWhitelist sets the optional stateful_whitelist attribute to value.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func DatasetToGraphStatefulWhitelist(value []string) DatasetToGraphAttr {
	return func(m optionalAttr) {
		m["stateful_whitelist"] = value
	}
}

// DatasetToGraphAllowStateful sets the optional allow_stateful attribute to value.
// If not specified, defaults to false
func DatasetToGraphAllowStateful(value bool) DatasetToGraphAttr {
	return func(m optionalAttr) {
		m["allow_stateful"] = value
	}
}

// DatasetToGraphStripDeviceAssignment sets the optional strip_device_assignment attribute to value.
// If not specified, defaults to false
func DatasetToGraphStripDeviceAssignment(value bool) DatasetToGraphAttr {
	return func(m optionalAttr) {
		m["strip_device_assignment"] = value
	}
}

// Returns a serialized GraphDef representing `input_dataset`.
//
// Returns a graph representation for `input_dataset`.
//
// Arguments:
//	input_dataset: A variant tensor representing the dataset to return the graph representation for.
//
// Returns The graph representation of the dataset (as serialized GraphDef).
func DatasetToGraph(scope *Scope, input_dataset tf.Output, optional ...DatasetToGraphAttr) (graph tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DatasetToGraph",
		Input: []tf.Input{
			input_dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ExtractGlimpseAttr is an optional argument to ExtractGlimpse.
type ExtractGlimpseAttr func(optionalAttr)

// ExtractGlimpseCentered sets the optional centered attribute to value.
//
// value: indicates if the offset coordinates are centered relative to
// the image, in which case the (0, 0) offset is relative to the center
// of the input images. If false, the (0, 0) offset corresponds to the
// upper left corner of the input images.
// If not specified, defaults to true
func ExtractGlimpseCentered(value bool) ExtractGlimpseAttr {
	return func(m optionalAttr) {
		m["centered"] = value
	}
}

// ExtractGlimpseNormalized sets the optional normalized attribute to value.
//
// value: indicates if the offset coordinates are normalized.
// If not specified, defaults to true
func ExtractGlimpseNormalized(value bool) ExtractGlimpseAttr {
	return func(m optionalAttr) {
		m["normalized"] = value
	}
}

// ExtractGlimpseUniformNoise sets the optional uniform_noise attribute to value.
//
// value: indicates if the noise should be generated using a
// uniform distribution or a Gaussian distribution.
// If not specified, defaults to true
func ExtractGlimpseUniformNoise(value bool) ExtractGlimpseAttr {
	return func(m optionalAttr) {
		m["uniform_noise"] = value
	}
}

// ExtractGlimpseNoise sets the optional noise attribute to value.
//
// value: indicates if the noise should be `uniform`, `gaussian`, or
// `zero`. The default is `uniform`, which means the noise type
// will be decided by `uniform_noise`.
// If not specified, defaults to "uniform"
func ExtractGlimpseNoise(value string) ExtractGlimpseAttr {
	return func(m optionalAttr) {
		m["noise"] = value
	}
}

// Extracts a glimpse from the input tensor.
//
// Returns a set of windows called glimpses extracted at location
// `offsets` from the input tensor. If a window only partially
// overlaps the input, the non-overlapping areas will be filled with
// random noise.
//
// The result is a 4-D tensor of shape `[batch_size, glimpse_height,
// glimpse_width, channels]`. The channels and batch dimensions are the
// same as that of the input tensor. The height and width of the output
// windows are specified in the `size` parameter.
//
// The arguments `normalized` and `centered` control how the windows are built:
//
// * If the coordinates are normalized but not centered, 0.0 and 1.0
//   correspond to the minimum and maximum of each height and width
//   dimension.
// * If the coordinates are both normalized and centered, they range from
//   -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
//   left corner, the lower right corner is located at (1.0, 1.0) and the
//   center is at (0, 0).
// * If the coordinates are not normalized they are interpreted as
//   numbers of pixels.
//
// Arguments:
//	input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
//	size: A 1-D tensor of 2 elements containing the size of the glimpses
// to extract.  The glimpse height must be specified first, followed
// by the glimpse width.
//	offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing
// the y, x locations of the center of each window.
//
// Returns A tensor representing the glimpses `[batch_size,
// glimpse_height, glimpse_width, channels]`.
func ExtractGlimpse(scope *Scope, input tf.Output, size tf.Output, offsets tf.Output, optional ...ExtractGlimpseAttr) (glimpse tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ExtractGlimpse",
		Input: []tf.Input{
			input, size, offsets,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
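
// Example: a sketch extracting one 8x8 glimpse per batch element at the image
// center, relying on the default centered, normalized coordinates. The shapes
// and offset values are illustrative assumptions.
//
// ```go
// s := op.NewScope()
// img := op.Placeholder(s, tf.Float, op.PlaceholderShape(tf.MakeShape(2, 32, 32, 3)))
// size := op.Const(s, []int32{8, 8})
// offsets := op.Const(s, [][]float32{{0, 0}, {0, 0}}) // (0, 0) is the center
// glimpse := op.ExtractGlimpse(s, img, size, offsets)
// _ = glimpse
// ```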

// Writes the given dataset to the given file using the TFRecord format.
//
// Arguments:
//	input_dataset: A variant tensor representing the dataset to write.
//	filename: A scalar string tensor representing the filename to use.
//	compression_type: A scalar string tensor containing either (i) the empty string (no
// compression), (ii) "ZLIB", or (iii) "GZIP".
//
// Returns the created operation.
func ExperimentalDatasetToTFRecord(scope *Scope, input_dataset tf.Output, filename tf.Output, compression_type tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ExperimentalDatasetToTFRecord",
		Input: []tf.Input{
			input_dataset, filename, compression_type,
		},
	}
	return scope.AddOperation(opspec)
}

// GenerateVocabRemappingAttr is an optional argument to GenerateVocabRemapping.
type GenerateVocabRemappingAttr func(optionalAttr)

// GenerateVocabRemappingOldVocabSize sets the optional old_vocab_size attribute to value.
//
// value: Number of entries in the old vocab file to consider.  If -1,
// use the entire old vocabulary.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func GenerateVocabRemappingOldVocabSize(value int64) GenerateVocabRemappingAttr {
	return func(m optionalAttr) {
		m["old_vocab_size"] = value
	}
}

// Given a path to new and old vocabulary files, returns a remapping Tensor of
//
// length `num_new_vocab`, where `remapping[i]` contains the row number in the old
// vocabulary that corresponds to row `i` in the new vocabulary (starting at line
// `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`
// in the new vocabulary is not in the old vocabulary.  The old vocabulary is
// constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the
// default value of -1.
//
// `new_vocab_offset` enables
// use in the partitioned variable case, and should generally be set through
// examining partitioning info.  The format of the files should be a text file,
// with each line containing a single entity within the vocabulary.
//
// For example, with `new_vocab_file` a text file containing each of the following
// elements on a single line: `[f0, f1, f2, f3]`, `old_vocab_file = [f1, f0, f3]`,
// `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be
// `[0, -1, 2]`.
//
// The op also returns a count of how many entries in the new vocabulary
// were present in the old vocabulary, which is used to calculate the number of
// values to initialize in a weight matrix remapping.
//
// This functionality can be used to remap both row vocabularies (typically,
// features) and column vocabularies (typically, classes) from TensorFlow
// checkpoints.  Note that the partitioning logic relies on contiguous vocabularies
// corresponding to div-partitioned variables.  Moreover, the underlying remapping
// uses an IndexTable (as opposed to an inexact CuckooTable), so client code should
// use the corresponding index_table_from_file() as the FeatureColumn framework
// does (as opposed to tf.feature_to_id(), which uses a CuckooTable).
//
// Arguments:
//	new_vocab_file: Path to the new vocab file.
//	old_vocab_file: Path to the old vocab file.
//	new_vocab_offset: How many entries into the new vocab file to start reading.
//	num_new_vocab: Number of entries in the new vocab file to remap.
//
// Returns:
//	remapping: A Tensor of length num_new_vocab where the element at index i
// is equal to the old ID that maps to the new ID i.  This element is -1 for any
// new ID that is not found in the old vocabulary.
//	num_present: Number of new vocab entries found in old vocab.
func GenerateVocabRemapping(scope *Scope, new_vocab_file tf.Output, old_vocab_file tf.Output, new_vocab_offset int64, num_new_vocab int64, optional ...GenerateVocabRemappingAttr) (remapping tf.Output, num_present tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"new_vocab_offset": new_vocab_offset, "num_new_vocab": num_new_vocab}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "GenerateVocabRemapping",
		Input: []tf.Input{
			new_vocab_file, old_vocab_file,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
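
// Example: a sketch mirroring the worked example above. The file names are
// placeholders; with a new vocab of [f0, f1, f2, f3] and an old vocab of
// [f1, f0, f3], remapping would evaluate to [0, -1, 2].
//
// ```go
// s := op.NewScope()
// newVocab := op.Const(s, "new_vocab.txt") // placeholder path
// oldVocab := op.Const(s, "old_vocab.txt") // placeholder path
// remapping, numPresent := op.GenerateVocabRemapping(s, newVocab, oldVocab, 1, 3)
// _, _ = remapping, numPresent
// ```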

// Batch normalization.
//
// DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
//
// This op is deprecated. Prefer `tf.nn.batch_normalization`.
//
// Arguments:
//	t: A 4D input Tensor.
//	m: A 1D mean Tensor with size matching the last dimension of t.
// This is the first output from tf.nn.moments,
// or a saved moving average thereof.
//	v: A 1D variance Tensor with size matching the last dimension of t.
// This is the second output from tf.nn.moments,
// or a saved moving average thereof.
//	beta: A 1D beta Tensor with size matching the last dimension of t.
// An offset to be added to the normalized tensor.
//	gamma: A 1D gamma Tensor with size matching the last dimension of t.
// If "scale_after_normalization" is true, this tensor will be multiplied
// with the normalized tensor.
//	variance_epsilon: A small float number to avoid dividing by 0.
//	scale_after_normalization: A bool indicating whether the resulting tensor
// needs to be multiplied with gamma.
func BatchNormWithGlobalNormalization(scope *Scope, t tf.Output, m tf.Output, v tf.Output, beta tf.Output, gamma tf.Output, variance_epsilon float32, scale_after_normalization bool) (result tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
	opspec := tf.OpSpec{
		Type: "BatchNormWithGlobalNormalization",
		Input: []tf.Input{
			t, m, v, beta, gamma,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// FakeQuantWithMinMaxArgsAttr is an optional argument to FakeQuantWithMinMaxArgs.
type FakeQuantWithMinMaxArgsAttr func(optionalAttr)

// FakeQuantWithMinMaxArgsMin sets the optional min attribute to value.
// If not specified, defaults to -6
func FakeQuantWithMinMaxArgsMin(value float32) FakeQuantWithMinMaxArgsAttr {
	return func(m optionalAttr) {
		m["min"] = value
	}
}

// FakeQuantWithMinMaxArgsMax sets the optional max attribute to value.
// If not specified, defaults to 6
func FakeQuantWithMinMaxArgsMax(value float32) FakeQuantWithMinMaxArgsAttr {
	return func(m optionalAttr) {
		m["max"] = value
	}
}

// FakeQuantWithMinMaxArgsNumBits sets the optional num_bits attribute to value.
// If not specified, defaults to 8
func FakeQuantWithMinMaxArgsNumBits(value int64) FakeQuantWithMinMaxArgsAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// FakeQuantWithMinMaxArgsNarrowRange sets the optional narrow_range attribute to value.
// If not specified, defaults to false
func FakeQuantWithMinMaxArgsNarrowRange(value bool) FakeQuantWithMinMaxArgsAttr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.
//
// Attributes
//
// *   `[min; max]` define the clamping range for the `inputs` data.
// *   `inputs` values are quantized into the quantization range (
// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
// when it is true) and then de-quantized and output as floats in `[min; max]`
// interval.
// *   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
//
// Before quantization, `min` and `max` values are adjusted with the following
// logic.
// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
// the behavior can be unexpected:
//
// *   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
// *   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
// *   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
//
// Quantization is called fake since the output is still in floating point.
func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsAttr) (outputs tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxArgs",
		Input: []tf.Input{
			inputs,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
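
// Example: the range adjustment documented above for the common
// `min <= 0 <= max` case, written out as a plain helper (a sketch assuming
// the standard library `math` package; not an API in this file).
//
// ```go
// func adjustRange(min, max float32, numBits int) (minAdj, maxAdj float32) {
// 	levels := float32(int(1)<<uint(numBits)) - 1 // 2^num_bits - 1
// 	scale := (max - min) / levels
// 	minAdj = scale * float32(math.Round(float64(min/scale)))
// 	maxAdj = max + minAdj - min
// 	return minAdj, maxAdj
// }
// // adjustRange(-6, 6, 8) nudges the bounds so 0 is exactly representable.
// ```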

// Computes the static batch size of a dataset sans partial batches.
func ComputeBatchSize(scope *Scope, input_dataset tf.Output) (batch_size tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ComputeBatchSize",
		Input: []tf.Input{
			input_dataset,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RaggedCountSparseOutputAttr is an optional argument to RaggedCountSparseOutput.
type RaggedCountSparseOutputAttr func(optionalAttr)

// RaggedCountSparseOutputMinlength sets the optional minlength attribute to value.
//
// value: Minimum value to count. Can be set to -1 for no minimum.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RaggedCountSparseOutputMinlength(value int64) RaggedCountSparseOutputAttr {
	return func(m optionalAttr) {
		m["minlength"] = value
	}
}

// RaggedCountSparseOutputMaxlength sets the optional maxlength attribute to value.
//
// value: Maximum value to count. Can be set to -1 for no maximum.
// If not specified, defaults to -1
//
// REQUIRES: value >= -1
func RaggedCountSparseOutputMaxlength(value int64) RaggedCountSparseOutputAttr {
	return func(m optionalAttr) {
		m["maxlength"] = value
	}
}

// Performs sparse-output bin counting for a ragged tensor input.
//
//   Counts the number of times each value occurs in the input.
//
// Arguments:
//	splits: Tensor containing the row splits of the ragged tensor to count.
//	values: Tensor containing values of the sparse tensor to count.
//	weights: A Tensor of the same shape as indices containing per-index weight values.
// May also be the empty tensor if no weights are used.
//	binary_output: Whether to output the number of occurrences of each value or 1.
//
// Returns:
//	output_indices: Indices tensor for the resulting sparse tensor object.
//	output_values: Values tensor for the resulting sparse tensor object.
//	output_dense_shape: Shape tensor for the resulting sparse tensor object.
func RaggedCountSparseOutput(scope *Scope, splits tf.Output, values tf.Output, weights tf.Output, binary_output bool, optional ...RaggedCountSparseOutputAttr) (output_indices tf.Output, output_values tf.Output, output_dense_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"binary_output": binary_output}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RaggedCountSparseOutput",
		Input: []tf.Input{
			splits, values, weights,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Gets the next output from the given iterator.
func IteratorGetNext(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "IteratorGetNext",
		Input: []tf.Input{
			iterator,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
		scope.UpdateErr("IteratorGetNext", err)
		return
	}
	return components
}

// FusedBatchNormAttr is an optional argument to FusedBatchNorm.
type FusedBatchNormAttr func(optionalAttr)

// FusedBatchNormEpsilon sets the optional epsilon attribute to value.
//
// value: A small float number added to the variance of x.
// If not specified, defaults to 0.0001
func FusedBatchNormEpsilon(value float32) FusedBatchNormAttr {
	return func(m optionalAttr) {
		m["epsilon"] = value
	}
}

// FusedBatchNormExponentialAvgFactor sets the optional exponential_avg_factor attribute to value.
// If not specified, defaults to 1
func FusedBatchNormExponentialAvgFactor(value float32) FusedBatchNormAttr {
	return func(m optionalAttr) {
		m["exponential_avg_factor"] = value
	}
}

// FusedBatchNormDataFormat sets the optional data_format attribute to value.
//
// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
// If not specified, defaults to "NHWC"
func FusedBatchNormDataFormat(value string) FusedBatchNormAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// FusedBatchNormIsTraining sets the optional is_training attribute to value.
//
// value: A bool value to indicate the operation is for training (default)
// or inference.
// If not specified, defaults to true
func FusedBatchNormIsTraining(value bool) FusedBatchNormAttr {
	return func(m optionalAttr) {
		m["is_training"] = value
	}
}

// Batch normalization.
//
// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
// The size of 1D Tensors matches the dimension C of the 4D Tensors.
//
// Arguments:
//	x: A 4D Tensor for input data.
//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
//	offset: A 1D Tensor for offset, to shift to the normalized x.
//	mean: A 1D Tensor for population mean. Used for inference only;
// must be empty for training.
//	variance: A 1D Tensor for population variance. Used for inference only;
// must be empty for training.
//
// Returns:
//	y: A 4D Tensor for output data.
//	batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
// to compute the running mean.
//	batch_variance: A 1D Tensor for the computed batch variance, to be used by
// TensorFlow to compute the running variance.
//	reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
// in the gradient computation.
//	reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
// in the cuDNN case), to be reused in the gradient computation.
func FusedBatchNorm(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormAttr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FusedBatchNorm",
		Input: []tf.Input{
			x, scale, offset, mean, variance,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}
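
// Example: a training-mode sketch for FusedBatchNorm over NHWC input with
// C = 3 channels. The shapes are illustrative assumptions; mean and variance
// are empty because is_training defaults to true.
//
// ```go
// s := op.NewScope()
// x := op.Placeholder(s, tf.Float, op.PlaceholderShape(tf.MakeShape(8, 16, 16, 3)))
// scale := op.Const(s, []float32{1, 1, 1})
// offset := op.Const(s, []float32{0, 0, 0})
// empty := op.Const(s, []float32{})
// y, batchMean, batchVar, _, _ := op.FusedBatchNorm(s, x, scale, offset, empty, empty)
// _, _, _ = y, batchMean, batchVar
// ```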

// SparseMatMulAttr is an optional argument to SparseMatMul.
type SparseMatMulAttr func(optionalAttr)

// SparseMatMulTransposeA sets the optional transpose_a attribute to value.
// If not specified, defaults to false
func SparseMatMulTransposeA(value bool) SparseMatMulAttr {
	return func(m optionalAttr) {
		m["transpose_a"] = value
	}
}

// SparseMatMulTransposeB sets the optional transpose_b attribute to value.
// If not specified, defaults to false
func SparseMatMulTransposeB(value bool) SparseMatMulAttr {
	return func(m optionalAttr) {
		m["transpose_b"] = value
	}
}

// SparseMatMulAIsSparse sets the optional a_is_sparse attribute to value.
// If not specified, defaults to false
func SparseMatMulAIsSparse(value bool) SparseMatMulAttr {
	return func(m optionalAttr) {
		m["a_is_sparse"] = value
	}
}

// SparseMatMulBIsSparse sets the optional b_is_sparse attribute to value.
// If not specified, defaults to false
func SparseMatMulBIsSparse(value bool) SparseMatMulAttr {
	return func(m optionalAttr) {
		m["b_is_sparse"] = value
	}
}

// Multiply matrix "a" by matrix "b".
//
// The inputs must be two-dimensional matrices and the inner dimension of "a" must
// match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not
// `SparseTensor`s.  This op is optimized for the case where at least one of "a" or
// "b" is sparse, in the sense that they have a large proportion of zero values.
// The breakeven for using this versus a dense matrix multiply on one platform was
// 30% zero values in the sparse matrix.
//
// The gradient computation of this operation will only take advantage of sparsity
// in the input gradient when that gradient comes from a Relu.
func SparseMatMul(scope *Scope, a tf.Output, b tf.Output, optional ...SparseMatMulAttr) (product tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseMatMul",
		Input: []tf.Input{
			a, b,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Resizes the list.
//
//
// input_handle: the input list
// size: size of the output list
//
func TensorListResize(scope *Scope, input_handle tf.Output, size tf.Output) (output_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorListResize",
		Input: []tf.Input{
			input_handle, size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Makes a new iterator from the given `dataset` and stores it in `iterator`.
//
// This operation may be executed multiple times. Each execution will reset the
// iterator in `iterator` to the first element of `dataset`.
//
// Returns the created operation.
func MakeIterator(scope *Scope, dataset tf.Output, iterator tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MakeIterator",
		Input: []tf.Input{
			dataset, iterator,
		},
	}
	return scope.AddOperation(opspec)
}
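
// Example: a sketch wiring a dataset into an anonymous iterator and reading
// one element, combining TextLineDataset, AnonymousIteratorV2, MakeIterator,
// and IteratorGetNext from this file. The file name and buffer size are
// illustrative assumptions.
//
// ```go
// s := op.NewScope()
// files := op.Const(s, []string{"data.txt"})
// ds := op.TextLineDataset(s, files, op.Const(s, ""), op.Const(s, int64(1024)))
// types, shapes := []tf.DataType{tf.String}, []tf.Shape{tf.ScalarShape()}
// it, _ := op.AnonymousIteratorV2(s, types, shapes)
// init := op.MakeIterator(s, ds, it) // run once to (re)position the iterator
// next := op.IteratorGetNext(s, it, types, shapes)
// _, _ = init, next
// ```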

// IgnoreErrorsDatasetAttr is an optional argument to IgnoreErrorsDataset.
type IgnoreErrorsDatasetAttr func(optionalAttr)

// IgnoreErrorsDatasetLogWarning sets the optional log_warning attribute to value.
// If not specified, defaults to false
func IgnoreErrorsDatasetLogWarning(value bool) IgnoreErrorsDatasetAttr {
	return func(m optionalAttr) {
		m["log_warning"] = value
	}
}

// Creates a dataset that contains the elements of `input_dataset` ignoring errors.
func IgnoreErrorsDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...IgnoreErrorsDatasetAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "IgnoreErrorsDataset",
		Input: []tf.Input{
			input_dataset,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Deletes a multi device iterator resource.
//
// Arguments:
//	multi_device_iterator: A handle to the multi device iterator to delete.
//	iterators: A list of iterator handles (unused). This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted.
//	deleter: A variant deleter.
//
// Returns the created operation.
func DeleteMultiDeviceIterator(scope *Scope, multi_device_iterator tf.Output, iterators []tf.Output, deleter tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DeleteMultiDeviceIterator",
		Input: []tf.Input{
			multi_device_iterator, tf.OutputList(iterators), deleter,
		},
	}
	return scope.AddOperation(opspec)
}

// Deletes an iterator resource.
//
// Arguments:
//	handle: A handle to the iterator to delete.
//	deleter: A variant deleter.
//
// Returns the created operation.
func DeleteIterator(scope *Scope, handle tf.Output, deleter tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DeleteIterator",
		Input: []tf.Input{
			handle, deleter,
		},
	}
	return scope.AddOperation(opspec)
}

// Computes the gradient for the sqrt of `x` wrt its input.
//
// Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
// is the corresponding input gradient.
func SqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SqrtGrad",
		Input: []tf.Input{
			y, dy,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Splits a tensor into `num_split` tensors along one dimension.
//
// Arguments:
//	value: The tensor to split.
//	size_splits: list containing the sizes of each output tensor along the split
// dimension. Must sum to the dimension of value along split_dim.
// Can contain one -1 indicating that dimension is to be inferred.
//	axis: 0-D.  The dimension along which to split.  Must be in the range
// `[-rank(value), rank(value))`.
//
//
// Returns Tensors whose shape matches that of `value`
// except along `axis`, where their sizes are
// `size_splits[i]`.
func SplitV(scope *Scope, value tf.Output, size_splits tf.Output, axis tf.Output, num_split int64) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_split": num_split}
	opspec := tf.OpSpec{
		Type: "SplitV",
		Input: []tf.Input{
			value, size_splits, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("SplitV", err)
		return
	}
	return output
}
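
// Example: a sketch splitting a length-9 vector into pieces of size 2, 3,
// and 4 along axis 0; the -1 entry is inferred. Values are illustrative.
//
// ```go
// s := op.NewScope()
// value := op.Const(s, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9})
// sizes := op.Const(s, []int64{2, 3, -1}) // -1 is inferred as 4
// axis := op.Const(s, int32(0))
// parts := op.SplitV(s, value, sizes, axis, 3) // shapes [2], [3], [4]
// _ = parts
// ```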

// A container for an iterator resource.
//
// Returns:
//	handle: A handle to the iterator that can be passed to a "MakeIterator" or
// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
// resource sharing by name, and does not keep a reference to the resource
// container.
//	deleter: A variant deleter that should be passed into the op that deletes the iterator.
func AnonymousIteratorV2(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output, deleter tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "AnonymousIteratorV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Creates a dataset that emits the lines of one or more text files.
//
// Arguments:
//	filenames: A scalar or a vector containing the name(s) of the file(s) to be
// read.
//	compression_type: A scalar containing either (i) the empty string (no
// compression), (ii) "ZLIB", or (iii) "GZIP".
//	buffer_size: A scalar containing the number of bytes to buffer.
func TextLineDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TextLineDataset",
		Input: []tf.Input{
			filenames, compression_type, buffer_size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// DepthwiseConv2dNativeBackpropFilterAttr is an optional argument to DepthwiseConv2dNativeBackpropFilter.
type DepthwiseConv2dNativeBackpropFilterAttr func(optionalAttr)

// DepthwiseConv2dNativeBackpropFilterExplicitPaddings sets the optional explicit_paddings attribute to value.
// If not specified, defaults to {}
func DepthwiseConv2dNativeBackpropFilterExplicitPaddings(value []int64) DepthwiseConv2dNativeBackpropFilterAttr {
	return func(m optionalAttr) {
		m["explicit_paddings"] = value
	}
}

// DepthwiseConv2dNativeBackpropFilterDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
//     [batch, height, width, channels].
// Alternatively, the format could be "NCHW", the data storage order of:
//     [batch, channels, height, width].
// If not specified, defaults to "NHWC"
func DepthwiseConv2dNativeBackpropFilterDataFormat(value string) DepthwiseConv2dNativeBackpropFilterAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// DepthwiseConv2dNativeBackpropFilterDilations sets the optional dilations attribute to value.
//
// value: 1-D tensor of length 4.  The dilation factor for each dimension of
// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
// element on that dimension. The dimension order is determined by the value of
// `data_format`, see above for details. Dilations in the batch and depth
// dimensions must be 1.
// If not specified, defaults to {i:1 i:1 i:1 i:1}
func DepthwiseConv2dNativeBackpropFilterDilations(value []int64) DepthwiseConv2dNativeBackpropFilterAttr {
	return func(m optionalAttr) {
		m["dilations"] = value
	}
}

// Computes the gradients of depthwise convolution with respect to the filter.
//
// Arguments:
//	input: 4-D with shape based on `data_format`.  For example, if
// `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
// in_width, in_channels]` tensor.
//	filter_sizes: An integer vector representing the tensor shape of `filter`,
// where `filter` is a 4-D
// `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
//	out_backprop: 4-D with shape based on `data_format`.
// For example, if `data_format` is 'NHWC' then
// out_backprop shape is `[batch, out_height, out_width, out_channels]`.
// Gradients w.r.t. the output of the convolution.
//	strides: The stride of the sliding window for each dimension of the input
// of the convolution.
//	padding: The type of padding algorithm to use.
//
// Returns 4-D with shape
// `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
// the `filter` input of the convolution.
func DepthwiseConv2dNativeBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropFilterAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DepthwiseConv2dNativeBackpropFilter",
		Input: []tf.Input{
			input, filter_sizes, out_backprop,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// CropAndResizeAttr is an optional argument to CropAndResize.
type CropAndResizeAttr func(optionalAttr)

// CropAndResizeMethod sets the optional method attribute to value.
//
// value: A string specifying the sampling method for resizing. It can be either
// `"bilinear"` or `"nearest"` and defaults to `"bilinear"`. Currently two sampling
// methods are supported: Bilinear and Nearest Neighbor.
// If not specified, defaults to "bilinear"
func CropAndResizeMethod(value string) CropAndResizeAttr {
	return func(m optionalAttr) {
		m["method"] = value
	}
}

// CropAndResizeExtrapolationValue sets the optional extrapolation_value attribute to value.
//
// value: Value used for extrapolation, when applicable.
// If not specified, defaults to 0
func CropAndResizeExtrapolationValue(value float32) CropAndResizeAttr {
	return func(m optionalAttr) {
		m["extrapolation_value"] = value
	}
}

// Extracts crops from the input image tensor and resizes them.
//
// Extracts crops from the input image tensor and resizes them using bilinear
// sampling or nearest neighbor sampling (possibly with aspect ratio change) to a
// common output size specified by `crop_size`. This is more general than the
// `crop_to_bounding_box` op which extracts a fixed size slice from the input image
// and does not allow resizing or aspect ratio change.
//
// Returns a tensor with `crops` from the input `image` at positions defined at the
// bounding box locations in `boxes`. The cropped boxes are all resized (with
// bilinear or nearest neighbor interpolation) to a fixed
// `size = [crop_height, crop_width]`. The result is a 4-D tensor
// `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned.
// In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical
// results to using `tf.image.resize_bilinear()` or
// `tf.image.resize_nearest_neighbor()` (depending on the `method` argument) with
// `align_corners=True`.
//
// Arguments:
//	image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
// Both `image_height` and `image_width` need to be positive.
//	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
// specifies the coordinates of a box in the `box_ind[i]` image and is specified
// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
// `[0, 1]` interval of normalized image height is mapped to
// `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
// which case the sampled crop is an up-down flipped version of the original
// image. The width dimension is treated similarly. Normalized coordinates
// outside the `[0, 1]` range are allowed, in which case we use
// `extrapolation_value` to extrapolate the input image values.
//	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
//	crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
// cropped image patches are resized to this size. The aspect ratio of the image
// content is not preserved. Both `crop_height` and `crop_width` need to be
// positive.
//
// Returns A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
func CropAndResize(scope *Scope, image tf.Output, boxes tf.Output, box_ind tf.Output, crop_size tf.Output, optional ...CropAndResizeAttr) (crops tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CropAndResize",
		Input: []tf.Input{
			image, boxes, box_ind, crop_size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
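
// Example: a sketch cropping the full extent of two images and resizing the
// crops to 24x24 with the default bilinear method. Shapes are illustrative
// assumptions.
//
// ```go
// s := op.NewScope()
// image := op.Placeholder(s, tf.Float, op.PlaceholderShape(tf.MakeShape(2, 64, 64, 3)))
// boxes := op.Const(s, [][]float32{{0, 0, 1, 1}, {0, 0, 1, 1}})
// boxInd := op.Const(s, []int32{0, 1})
// cropSize := op.Const(s, []int32{24, 24})
// crops := op.CropAndResize(s, image, boxes, boxInd, cropSize)
// _ = crops // shape [2, 24, 24, 3]
// ```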

// RandomUniformAttr is an optional argument to RandomUniform.
type RandomUniformAttr func(optionalAttr)

// RandomUniformSeed sets the optional seed attribute to value.
//
// value: If either `seed` or `seed2` are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func RandomUniformSeed(value int64) RandomUniformAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// RandomUniformSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func RandomUniformSeed2(value int64) RandomUniformAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Outputs random values from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[0, 1)`. The
// lower bound 0 is included in the range, while the upper bound 1 is excluded.
//
// Arguments:
//	shape: The shape of the output tensor.
//	dtype: The type of the output.
//
// Returns A tensor of the specified shape filled with uniform random values.
func RandomUniform(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomUniformAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RandomUniform",
		Input: []tf.Input{
			shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
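
// Example: a sketch drawing a deterministic [2, 3] float32 sample by fixing
// both seeds; the seed values are arbitrary.
//
// ```go
// s := op.NewScope()
// shape := op.Const(s, []int32{2, 3})
// sample := op.RandomUniform(s, shape, tf.Float, op.RandomUniformSeed(1), op.RandomUniformSeed2(2))
// _ = sample
// ```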

// A container for a multi device iterator resource.
//
// Returns:
//	handle: A handle to a multi device iterator that can be passed to a
// "MultiDeviceIteratorGetNextFromShard" op. In contrast to MultiDeviceIterator,
// AnonymousMultiDeviceIterator prevents resource sharing by name, and does not
// keep a reference to the resource container.
//	deleter: A variant deleter that should be passed into the op that deletes the iterator.
func AnonymousMultiDeviceIterator(scope *Scope, devices []string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output, deleter tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"devices": devices, "output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "AnonymousMultiDeviceIterator",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Provides the time since epoch in seconds.
//
// Returns the timestamp as a `float64` for seconds since the Unix epoch.
//
// Note: the timestamp is computed when the op is executed, not when it is added
// to the graph.
func Timestamp(scope *Scope) (ts tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Timestamp",
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QuantizedMulAttr is an optional argument to QuantizedMul.
type QuantizedMulAttr func(optionalAttr)

// QuantizedMulToutput sets the optional Toutput attribute to value.
// If not specified, defaults to DT_QINT32
func QuantizedMulToutput(value tf.DataType) QuantizedMulAttr {
	return func(m optionalAttr) {
		m["Toutput"] = value
	}
}

// Returns x * y element-wise, working on quantized buffers.
//
// Arguments:
//
//
//	min_x: The float value that the lowest quantized `x` value represents.
//	max_x: The float value that the highest quantized `x` value represents.
//	min_y: The float value that the lowest quantized `y` value represents.
//	max_y: The float value that the highest quantized `y` value represents.
//
// Returns:
//	z
//	min_z: The float value that the lowest quantized output value represents.
//	max_z: The float value that the highest quantized output value represents.
//
// *NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about
// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func QuantizedMul(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedMulAttr) (z tf.Output, min_z tf.Output, max_z tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedMul",
		Input: []tf.Input{
			x, y, min_x, max_x, min_y, max_y,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// ShuffleAndRepeatDatasetAttr is an optional argument to ShuffleAndRepeatDataset.
type ShuffleAndRepeatDatasetAttr func(optionalAttr)

// ShuffleAndRepeatDatasetReshuffleEachIteration sets the optional reshuffle_each_iteration attribute to value.
// If not specified, defaults to true
func ShuffleAndRepeatDatasetReshuffleEachIteration(value bool) ShuffleAndRepeatDatasetAttr {
	return func(m optionalAttr) {
		m["reshuffle_each_iteration"] = value
	}
}

// Creates a dataset that shuffles and repeats elements from `input_dataset`
//
// pseudorandomly.
//
// Arguments:
//
//	buffer_size: The number of output elements to buffer in an iterator over
// this dataset. Compare with the `min_after_dequeue` attr when creating a
// `RandomShuffleQueue`.
//	seed: A scalar seed for the random number generator. If either `seed` or
// `seed2` is set to be non-zero, the random number generator is seeded
// by the given seed.  Otherwise, a random seed is used.
//	seed2: A second scalar seed to avoid seed collision.
//	count: A scalar representing the number of times the underlying dataset
// should be repeated. The default is `-1`, which results in infinite repetition.
//
//
func ShuffleAndRepeatDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShuffleAndRepeatDatasetAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ShuffleAndRepeatDataset",
		Input: []tf.Input{
			input_dataset, buffer_size, seed, seed2, count,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
27713
27714// Creates a TensorList by indexing into a Tensor.
27715//
27716// Each member of the TensorList corresponds to one row of the input tensor,
27717// specified by the given index (see `tf.gather`).
27718//
27719// tensor: The input tensor.
27720// indices: The indices used to index into the list.
// element_shape: The shape of the elements in the list (may be less fully
//   specified than the shape of the tensor).
27723// num_elements: The size of the output list. Must be large enough to accommodate
27724//   the largest index in indices. If -1, the list is just large enough to include
27725//   the largest index in indices.
27726// output_handle: The TensorList.
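//
// A minimal usage sketch (editorial illustration, not machine generated; the
// tensor values are arbitrary assumptions):
//
// ```go
// s := op.NewScope()
// tensor := op.Const(s.SubScope("tensor"), [][]float32{{1, 2}, {3, 4}})
// indices := op.Const(s.SubScope("indices"), []int32{1, 0}) // row 0 -> element 1, row 1 -> element 0
// elementShape := op.Const(s.SubScope("element_shape"), []int32{2})
// numElements := op.Const(s.SubScope("num_elements"), int32(-1)) // infer size from indices
// list := op.TensorListScatterV2(s, tensor, indices, elementShape, numElements)
// _ = list
// ```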
27727func TensorListScatterV2(scope *Scope, tensor tf.Output, indices tf.Output, element_shape tf.Output, num_elements tf.Output) (output_handle tf.Output) {
27728	if scope.Err() != nil {
27729		return
27730	}
27731	opspec := tf.OpSpec{
27732		Type: "TensorListScatterV2",
27733		Input: []tf.Input{
27734			tensor, indices, element_shape, num_elements,
27735		},
27736	}
27737	op := scope.AddOperation(opspec)
27738	return op.Output(0)
27739}
27740
27741// ResizeBilinearAttr is an optional argument to ResizeBilinear.
27742type ResizeBilinearAttr func(optionalAttr)
27743
27744// ResizeBilinearAlignCorners sets the optional align_corners attribute to value.
27745//
27746// value: If true, the centers of the 4 corner pixels of the input and output tensors are
27747// aligned, preserving the values at the corner pixels. Defaults to false.
27748// If not specified, defaults to false
27749func ResizeBilinearAlignCorners(value bool) ResizeBilinearAttr {
27750	return func(m optionalAttr) {
27751		m["align_corners"] = value
27752	}
27753}
27754
27755// ResizeBilinearHalfPixelCenters sets the optional half_pixel_centers attribute to value.
27756// If not specified, defaults to false
27757func ResizeBilinearHalfPixelCenters(value bool) ResizeBilinearAttr {
27758	return func(m optionalAttr) {
27759		m["half_pixel_centers"] = value
27760	}
27761}
27762
27763// Resize `images` to `size` using bilinear interpolation.
27764//
27765// Input images can be of different types but output images are always float.
27766//
27767// Arguments:
27768//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
27770// new size for the images.
27771//
27772// Returns 4-D with shape
27773// `[batch, new_height, new_width, channels]`.
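//
// A minimal usage sketch (editorial illustration, not machine generated; the
// 1x2x2x1 image below is an arbitrary assumption):
//
// ```go
// s := op.NewScope()
// images := op.Const(s.SubScope("images"),
// 	[][][][]float32{{{{1}, {2}}, {{3}, {4}}}}) // shape [1, 2, 2, 1]
// size := op.Const(s.SubScope("size"), []int32{4, 4})
// resized := op.ResizeBilinear(s, images, size,
// 	op.ResizeBilinearHalfPixelCenters(true))
// _ = resized
// ```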
27774func ResizeBilinear(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBilinearAttr) (resized_images tf.Output) {
27775	if scope.Err() != nil {
27776		return
27777	}
27778	attrs := map[string]interface{}{}
27779	for _, a := range optional {
27780		a(attrs)
27781	}
27782	opspec := tf.OpSpec{
27783		Type: "ResizeBilinear",
27784		Input: []tf.Input{
27785			images, size,
27786		},
27787		Attrs: attrs,
27788	}
27789	op := scope.AddOperation(opspec)
27790	return op.Output(0)
27791}
27792
27793// Generates sparse cross from a list of sparse and dense tensors.
27794//
27795// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
27796// representing features of one feature column. It outputs a 2D `SparseTensor` with
27797// the batchwise crosses of these features.
27798//
27799// For example, if the inputs are
27800//
27801//     inputs[0]: SparseTensor with shape = [2, 2]
27802//     [0, 0]: "a"
27803//     [1, 0]: "b"
27804//     [1, 1]: "c"
27805//
27806//     inputs[1]: SparseTensor with shape = [2, 1]
27807//     [0, 0]: "d"
27808//     [1, 0]: "e"
27809//
27810//     inputs[2]: Tensor [["f"], ["g"]]
27811//
27812// then the output will be
27813//
27814//     shape = [2, 2]
27815//     [0, 0]: "a_X_d_X_f"
27816//     [1, 0]: "b_X_e_X_g"
27817//     [1, 1]: "c_X_e_X_g"
27818//
27819// if hashed_output=true then the output will be
27820//
27821//     shape = [2, 2]
27822//     [0, 0]: FingerprintCat64(
27823//                 Fingerprint64("f"), FingerprintCat64(
27824//                     Fingerprint64("d"), Fingerprint64("a")))
27825//     [1, 0]: FingerprintCat64(
27826//                 Fingerprint64("g"), FingerprintCat64(
27827//                     Fingerprint64("e"), Fingerprint64("b")))
27828//     [1, 1]: FingerprintCat64(
27829//                 Fingerprint64("g"), FingerprintCat64(
27830//                     Fingerprint64("e"), Fingerprint64("c")))
27831//
27832// Arguments:
27833//	indices: 2-D.  Indices of each input `SparseTensor`.
//	values: 1-D.  Values of each `SparseTensor`.
//	shapes: 1-D.  Shapes of each `SparseTensor`.
//	dense_inputs: 2-D.  Columns represented by dense `Tensor`.
//	sep: string used when joining a list of string inputs; it can be used as a separator later.
27838//
27839// Returns:
27840//	output_indices: 2-D.  Indices of the concatenated `SparseTensor`.
27841//	output_values: 1-D.  Non-empty values of the concatenated or hashed
27842// `SparseTensor`.
27843//	output_shape: 1-D.  Shape of the concatenated `SparseTensor`.
27844func SparseCrossV2(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, sep tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
27845	if scope.Err() != nil {
27846		return
27847	}
27848	opspec := tf.OpSpec{
27849		Type: "SparseCrossV2",
27850		Input: []tf.Input{
27851			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes), tf.OutputList(dense_inputs), sep,
27852		},
27853	}
27854	op := scope.AddOperation(opspec)
27855	return op.Output(0), op.Output(1), op.Output(2)
27856}
27857
27858// BlockLSTMV2Attr is an optional argument to BlockLSTMV2.
27859type BlockLSTMV2Attr func(optionalAttr)
27860
27861// BlockLSTMV2CellClip sets the optional cell_clip attribute to value.
27862//
27863// value: Value to clip the 'cs' value to.
27864// If not specified, defaults to 0
27865func BlockLSTMV2CellClip(value float32) BlockLSTMV2Attr {
27866	return func(m optionalAttr) {
27867		m["cell_clip"] = value
27868	}
27869}
27870
27871// BlockLSTMV2UsePeephole sets the optional use_peephole attribute to value.
27872//
27873// value: Whether to use peephole weights.
27874// If not specified, defaults to false
27875func BlockLSTMV2UsePeephole(value bool) BlockLSTMV2Attr {
27876	return func(m optionalAttr) {
27877		m["use_peephole"] = value
27878	}
27879}
27880
27881// Computes the LSTM cell forward propagation for all the time steps.
27882//
27883// This is equivalent to applying LSTMBlockCell in a loop, like so:
27884//
27885// ```python
27886// for x1 in unpack(x):
27887//   i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
27888//     x1, cs_prev, h_prev, w, wci, wcf, wco, b)
27889//   cs_prev = cs1
27890//   h_prev = h1
27891//   i.append(i1)
27892//   cs.append(cs1)
27893//   f.append(f1)
27894//   o.append(o1)
27895//   ci.append(ci1)
27896//   co.append(co1)
27897//   h.append(h1)
// return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
// ```
//
// Note that unlike LSTMBlockCell (and BlockLSTM), which use the ICFO gate layout,
// this op uses IFCO. So in order for the preceding snippet to be equivalent,
// all gate-related outputs should be reordered.
27904//
27905// Arguments:
27906//	seq_len_max: Maximum time length actually used by this input. Outputs are padded
27907// with zeros beyond this length.
27908//	x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
27909//	cs_prev: Value of the initial cell state.
27910//	h_prev: Initial output of cell (to be used for peephole).
27911//	w: The weight matrix.
27912//	wci: The weight matrix for input gate peephole connection.
27913//	wcf: The weight matrix for forget gate peephole connection.
27914//	wco: The weight matrix for output gate peephole connection.
27915//	b: The bias vector.
27916//
27917// Returns:
27918//	i: The input gate over the whole time sequence.
27919//	cs: The cell state before the tanh over the whole time sequence.
27920//	f: The forget gate over the whole time sequence.
27921//	o: The output gate over the whole time sequence.
27922//	ci: The cell input over the whole time sequence.
27923//	co: The cell after the tanh over the whole time sequence.
27924//	h: The output h vector over the whole time sequence.
27925func BlockLSTMV2(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, optional ...BlockLSTMV2Attr) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output) {
27926	if scope.Err() != nil {
27927		return
27928	}
27929	attrs := map[string]interface{}{}
27930	for _, a := range optional {
27931		a(attrs)
27932	}
27933	opspec := tf.OpSpec{
27934		Type: "BlockLSTMV2",
27935		Input: []tf.Input{
27936			seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b,
27937		},
27938		Attrs: attrs,
27939	}
27940	op := scope.AddOperation(opspec)
27941	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
27942}
27943
27944// Extract `patches` from `images` and put them in the "depth" output dimension.
27945//
27946// Arguments:
27947//	images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
27948//	ksizes: The size of the sliding window for each dimension of `images`.
27949//	strides: How far the centers of two consecutive patches are in
27950// the images. Must be: `[1, stride_rows, stride_cols, 1]`.
27951//	rates: Must be: `[1, rate_rows, rate_cols, 1]`. This is the
27952// input stride, specifying how far two consecutive patch samples are in the
27953// input. Equivalent to extracting patches with
27954// `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by
27955// subsampling them spatially by a factor of `rates`. This is equivalent to
27956// `rate` in dilated (a.k.a. Atrous) convolutions.
27957//	padding: The type of padding algorithm to use.
27958//
27959// Returns 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
27960// ksize_cols * depth]` containing image patches with size
27961// `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
27962// `out_rows` and `out_cols` are the dimensions of the output patches.
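//
// A minimal usage sketch (editorial illustration, not machine generated; it
// assumes a scope `s` and a 4-D float32 tensor `images` already in the graph):
//
// ```go
// patches := op.ExtractImagePatches(s, images,
// 	[]int64{1, 3, 3, 1}, // ksizes: 3x3 patches
// 	[]int64{1, 1, 1, 1}, // strides: sample a patch at every pixel
// 	[]int64{1, 1, 1, 1}, // rates: no dilation
// 	"SAME")
// _ = patches
// ```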
27963func ExtractImagePatches(scope *Scope, images tf.Output, ksizes []int64, strides []int64, rates []int64, padding string) (patches tf.Output) {
27964	if scope.Err() != nil {
27965		return
27966	}
27967	attrs := map[string]interface{}{"ksizes": ksizes, "strides": strides, "rates": rates, "padding": padding}
27968	opspec := tf.OpSpec{
27969		Type: "ExtractImagePatches",
27970		Input: []tf.Input{
27971			images,
27972		},
27973		Attrs: attrs,
27974	}
27975	op := scope.AddOperation(opspec)
27976	return op.Output(0)
27977}
27978
27979// Forwards the value of an available tensor from `inputs` to `output`.
27980//
27981// `Merge` waits for at least one of the tensors in `inputs` to become available.
27982// It is usually combined with `Switch` to implement branching.
27983//
27984// `Merge` forwards the first tensor to become available to `output`, and sets
27985// `value_index` to its index in `inputs`.
27986//
27987// Arguments:
27988//	inputs: The input tensors, exactly one of which will become available.
27989//
27990// Returns:
27991//	output: Will be set to the available input tensor.
27992//	value_index: The index of the chosen input tensor in `inputs`.
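//
// A minimal wiring sketch (editorial illustration, not machine generated; in
// real graphs `Merge` is normally paired with `Switch` inside a control-flow
// construct rather than fed two constants):
//
// ```go
// s := op.NewScope()
// a := op.Const(s.SubScope("a"), int32(1))
// b := op.Const(s.SubScope("b"), int32(2))
// out, idx := op.Merge(s, []tf.Output{a, b})
// _, _ = out, idx
// ```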
27993func Merge(scope *Scope, inputs []tf.Output) (output tf.Output, value_index tf.Output) {
27994	if scope.Err() != nil {
27995		return
27996	}
27997	opspec := tf.OpSpec{
27998		Type: "Merge",
27999		Input: []tf.Input{
28000			tf.OutputList(inputs),
28001		},
28002	}
28003	op := scope.AddOperation(opspec)
28004	return op.Output(0), op.Output(1)
28005}
28006
28007// PaddedBatchDatasetV2Attr is an optional argument to PaddedBatchDatasetV2.
28008type PaddedBatchDatasetV2Attr func(optionalAttr)
28009
28010// PaddedBatchDatasetV2ParallelCopy sets the optional parallel_copy attribute to value.
28011// If not specified, defaults to false
28012func PaddedBatchDatasetV2ParallelCopy(value bool) PaddedBatchDatasetV2Attr {
28013	return func(m optionalAttr) {
28014		m["parallel_copy"] = value
28015	}
28016}
28017
28018// Creates a dataset that batches and pads `batch_size` elements from the input.
28019//
28020// Arguments:
28021//
28022//	batch_size: A scalar representing the number of elements to accumulate in a
28023// batch.
28024//	padded_shapes: A list of int64 tensors representing the desired padded shapes
28025// of the corresponding output components. These shapes may be partially
28026// specified, using `-1` to indicate that a particular dimension should be
28027// padded to the maximum size of all batch elements.
28028//	padding_values: A list of scalars containing the padding value to use for
28029// each of the outputs.
//	drop_remainder: A scalar representing whether the last batch should be
// dropped if its size is smaller than desired.
28032//
28033func PaddedBatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, drop_remainder tf.Output, output_shapes []tf.Shape, optional ...PaddedBatchDatasetV2Attr) (handle tf.Output) {
28034	if scope.Err() != nil {
28035		return
28036	}
28037	attrs := map[string]interface{}{"output_shapes": output_shapes}
28038	for _, a := range optional {
28039		a(attrs)
28040	}
28041	opspec := tf.OpSpec{
28042		Type: "PaddedBatchDatasetV2",
28043		Input: []tf.Input{
28044			input_dataset, batch_size, tf.OutputList(padded_shapes), tf.OutputList(padding_values), drop_remainder,
28045		},
28046		Attrs: attrs,
28047	}
28048	op := scope.AddOperation(opspec)
28049	return op.Output(0)
28050}
28051
28052// CudnnRNNAttr is an optional argument to CudnnRNN.
28053type CudnnRNNAttr func(optionalAttr)
28054
28055// CudnnRNNRnnMode sets the optional rnn_mode attribute to value.
28056// If not specified, defaults to "lstm"
28057func CudnnRNNRnnMode(value string) CudnnRNNAttr {
28058	return func(m optionalAttr) {
28059		m["rnn_mode"] = value
28060	}
28061}
28062
28063// CudnnRNNInputMode sets the optional input_mode attribute to value.
28064// If not specified, defaults to "linear_input"
28065func CudnnRNNInputMode(value string) CudnnRNNAttr {
28066	return func(m optionalAttr) {
28067		m["input_mode"] = value
28068	}
28069}
28070
28071// CudnnRNNDirection sets the optional direction attribute to value.
28072// If not specified, defaults to "unidirectional"
28073func CudnnRNNDirection(value string) CudnnRNNAttr {
28074	return func(m optionalAttr) {
28075		m["direction"] = value
28076	}
28077}
28078
28079// CudnnRNNDropout sets the optional dropout attribute to value.
28080// If not specified, defaults to 0
28081func CudnnRNNDropout(value float32) CudnnRNNAttr {
28082	return func(m optionalAttr) {
28083		m["dropout"] = value
28084	}
28085}
28086
28087// CudnnRNNSeed sets the optional seed attribute to value.
28088// If not specified, defaults to 0
28089func CudnnRNNSeed(value int64) CudnnRNNAttr {
28090	return func(m optionalAttr) {
28091		m["seed"] = value
28092	}
28093}
28094
28095// CudnnRNNSeed2 sets the optional seed2 attribute to value.
28096// If not specified, defaults to 0
28097func CudnnRNNSeed2(value int64) CudnnRNNAttr {
28098	return func(m optionalAttr) {
28099		m["seed2"] = value
28100	}
28101}
28102
28103// CudnnRNNIsTraining sets the optional is_training attribute to value.
28104// If not specified, defaults to true
28105func CudnnRNNIsTraining(value bool) CudnnRNNAttr {
28106	return func(m optionalAttr) {
28107		m["is_training"] = value
28108	}
28109}
28110
// An RNN backed by cuDNN.
28112//
28113// Computes the RNN from the input and initial states, with respect to the params
28114// buffer.
28115//
28116// rnn_mode: Indicates the type of the RNN model.
28117// input_mode: Indicate whether there is a linear projection between the input and
28118//   the actual computation before the first layer. 'skip_input' is only allowed
28119//   when input_size == num_units; 'auto_select' implies 'skip_input' when
28120//   input_size == num_units; otherwise, it implies 'linear_input'.
28121// direction: Indicates whether a bidirectional model will be used. Should be
28122//   "unidirectional" or "bidirectional".
28123// dropout: Dropout probability. When set to 0., dropout is disabled.
28124// seed: The 1st part of a seed to initialize dropout.
28125// seed2: The 2nd part of a seed to initialize dropout.
28126// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
28127// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
28128//     num_units].
28129// input_c: For LSTM, a 3-D tensor with the shape of
28130//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
// params: A 1-D tensor that contains the weights and biases in an opaque layout.
//     The size must be created through CudnnRNNParamsSize, and initialized
//     separately. Note that they might not be compatible across different
//     generations, so it is a good idea to save and restore them in a
//     portable, canonical form.
// output: A 3-D tensor with the shape of [seq_length, batch_size,
//     dir * num_units].
// output_h: The same shape as input_h.
// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
// is_training: Indicates whether this operation is used for inference or
//   training.
// reserve_space: An opaque tensor that can be used in backprop calculation. It
//   is only produced if is_training is true.
28143func CudnnRNN(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, optional ...CudnnRNNAttr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output) {
28144	if scope.Err() != nil {
28145		return
28146	}
28147	attrs := map[string]interface{}{}
28148	for _, a := range optional {
28149		a(attrs)
28150	}
28151	opspec := tf.OpSpec{
28152		Type: "CudnnRNN",
28153		Input: []tf.Input{
28154			input, input_h, input_c, params,
28155		},
28156		Attrs: attrs,
28157	}
28158	op := scope.AddOperation(opspec)
28159	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
28160}
28161
28162// Creates a dataset that batches `batch_size` elements from `input_dataset`.
28163//
28164// Arguments:
28165//
28166//	batch_size: A scalar representing the number of elements to accumulate in a
28167// batch.
28168//
28169//
28170func BatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
28171	if scope.Err() != nil {
28172		return
28173	}
28174	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
28175	opspec := tf.OpSpec{
28176		Type: "BatchDataset",
28177		Input: []tf.Input{
28178			input_dataset, batch_size,
28179		},
28180		Attrs: attrs,
28181	}
28182	op := scope.AddOperation(opspec)
28183	return op.Output(0)
28184}
28185
28186// ExperimentalRebatchDatasetAttr is an optional argument to ExperimentalRebatchDataset.
28187type ExperimentalRebatchDatasetAttr func(optionalAttr)
28188
28189// ExperimentalRebatchDatasetUseFallback sets the optional use_fallback attribute to value.
28190// If not specified, defaults to true
28191func ExperimentalRebatchDatasetUseFallback(value bool) ExperimentalRebatchDatasetAttr {
28192	return func(m optionalAttr) {
28193		m["use_fallback"] = value
28194	}
28195}
28196
28197// Creates a dataset that changes the batch size.
28198//
// This dataset changes the batch size of its input to `current_batch_size //
// num_replicas` (Python-style integer division).
28201//
28202// Arguments:
28203//	input_dataset: A variant tensor representing the input dataset.
//	num_replicas: A scalar representing the number of replicas to distribute this batch across. As
// a result of this transformation, the current batch size would end up being
// divided by this parameter.
28207//
28208//
28209func ExperimentalRebatchDataset(scope *Scope, input_dataset tf.Output, num_replicas tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalRebatchDatasetAttr) (handle tf.Output) {
28210	if scope.Err() != nil {
28211		return
28212	}
28213	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
28214	for _, a := range optional {
28215		a(attrs)
28216	}
28217	opspec := tf.OpSpec{
28218		Type: "ExperimentalRebatchDataset",
28219		Input: []tf.Input{
28220			input_dataset, num_replicas,
28221		},
28222		Attrs: attrs,
28223	}
28224	op := scope.AddOperation(opspec)
28225	return op.Output(0)
28226}
28227
28228// DecodeCompressedAttr is an optional argument to DecodeCompressed.
28229type DecodeCompressedAttr func(optionalAttr)
28230
28231// DecodeCompressedCompressionType sets the optional compression_type attribute to value.
28232//
28233// value: A scalar containing either (i) the empty string (no
28234// compression), (ii) "ZLIB", or (iii) "GZIP".
28235// If not specified, defaults to ""
28236func DecodeCompressedCompressionType(value string) DecodeCompressedAttr {
28237	return func(m optionalAttr) {
28238		m["compression_type"] = value
28239	}
28240}
28241
28242// Decompress strings.
28243//
28244// This op decompresses each element of the `bytes` input `Tensor`, which
28245// is assumed to be compressed using the given `compression_type`.
28246//
28247// The `output` is a string `Tensor` of the same shape as `bytes`,
28248// each element containing the decompressed data from the corresponding
28249// element in `bytes`.
28250//
28251// Arguments:
//	bytes: A string Tensor holding compressed contents.
//
// Returns A Tensor with the same shape as the input `bytes`, containing the
// decompressed contents of the corresponding elements.
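//
// A minimal usage sketch (editorial illustration, not machine generated; it
// assumes a scope `s` and a string tensor `compressed` holding GZIP data):
//
// ```go
// decoded := op.DecodeCompressed(s, compressed,
// 	op.DecodeCompressedCompressionType("GZIP"))
// _ = decoded
// ```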
28256func DecodeCompressed(scope *Scope, bytes tf.Output, optional ...DecodeCompressedAttr) (output tf.Output) {
28257	if scope.Err() != nil {
28258		return
28259	}
28260	attrs := map[string]interface{}{}
28261	for _, a := range optional {
28262		a(attrs)
28263	}
28264	opspec := tf.OpSpec{
28265		Type: "DecodeCompressed",
28266		Input: []tf.Input{
28267			bytes,
28268		},
28269		Attrs: attrs,
28270	}
28271	op := scope.AddOperation(opspec)
28272	return op.Output(0)
28273}
28274
28275// SetSizeAttr is an optional argument to SetSize.
28276type SetSizeAttr func(optionalAttr)
28277
28278// SetSizeValidateIndices sets the optional validate_indices attribute to value.
28279// If not specified, defaults to true
28280func SetSizeValidateIndices(value bool) SetSizeAttr {
28281	return func(m optionalAttr) {
28282		m["validate_indices"] = value
28283	}
28284}
28285
28286// Number of unique elements along last dimension of input `set`.
28287//
28288// Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
// and `set_shape`. The last dimension contains values in a set; duplicates are
28290// allowed but ignored.
28291//
28292// If `validate_indices` is `True`, this op validates the order and range of `set`
28293// indices.
28294//
28295// Arguments:
28296//	set_indices: 2D `Tensor`, indices of a `SparseTensor`.
28297//	set_values: 1D `Tensor`, values of a `SparseTensor`.
28298//	set_shape: 1D `Tensor`, shape of a `SparseTensor`.
28299//
28300// Returns For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st
28301// `n-1` dimensions as `set`. Each value is the number of unique elements in
28302// the corresponding `[0...n-1]` dimension of `set`.
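//
// A minimal usage sketch (editorial illustration, not machine generated; the
// sparse input below encodes the two rows {1, 2} and {2}):
//
// ```go
// s := op.NewScope()
// setIndices := op.Const(s.SubScope("indices"), [][]int64{{0, 0}, {0, 1}, {1, 0}})
// setValues := op.Const(s.SubScope("values"), []int64{1, 2, 2})
// setShape := op.Const(s.SubScope("shape"), []int64{2, 2})
// size := op.SetSize(s, setIndices, setValues, setShape) // evaluates to [2, 1]
// _ = size
// ```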
28303func SetSize(scope *Scope, set_indices tf.Output, set_values tf.Output, set_shape tf.Output, optional ...SetSizeAttr) (size tf.Output) {
28304	if scope.Err() != nil {
28305		return
28306	}
28307	attrs := map[string]interface{}{}
28308	for _, a := range optional {
28309		a(attrs)
28310	}
28311	opspec := tf.OpSpec{
28312		Type: "SetSize",
28313		Input: []tf.Input{
28314			set_indices, set_values, set_shape,
28315		},
28316		Attrs: attrs,
28317	}
28318	op := scope.AddOperation(opspec)
28319	return op.Output(0)
28320}
28321
28322//   Combines (nests of) input elements into a dataset of (nests of) windows.
28323//
28324//   A "window" is a finite dataset of flat elements of size `size` (or possibly
28325//   fewer if there are not enough input elements to fill the window and
28326//   `drop_remainder` evaluates to false).
28327//
28328//   The `shift` argument determines the number of input elements by which
28329//   the window moves on each iteration.  The first element in the `k`th window
28330//   will be element
28331//
28332//   ```
28333//   1 + (k-1) * shift
28334//   ```
28335//
28336//   of the input dataset. In particular, the first element of the first window
28337//   will always be the first element of the input dataset.
28338//
28339//   If the `stride` parameter is greater than 1, then each window will skip
28340//   `(stride - 1)` input elements between each element that appears in the
28341//   window. Output windows will still contain `size` elements regardless of
28342//   the value of `stride`.
28343//
//   In short, the `shift` argument controls how far the window advances on
//   each iteration, while the `stride` argument controls the spacing between
//   the input elements that land in a window.
28346//
//   For example, letting `{...}` represent a Dataset:
28348//
28349//   - `tf.data.Dataset.range(7).window(2)` produces
28350//     `{{0, 1}, {2, 3}, {4, 5}, {6}}`
28351//   - `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces
28352//     `{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}`
28353//   - `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces
28354//     `{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}`
28355//
28356//   Note that when the `window` transformation is applied to a dataset of
28357//   nested elements, it produces a dataset of nested windows.
28358//
28359//   For example:
28360//
28361//   - `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)`
28362//     produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}`
28363//   - `tf.data.Dataset.from_tensor_slices({"a": range(4)}).window(2)`
28364//     produces `{{"a": {0, 1}}, {"a": {2, 3}}}`
28365//
28366// Arguments:
28367//
28368//	size: An integer scalar, representing the number of elements
28369// of the input dataset to combine into a window. Must be positive.
28370//	shift: An integer scalar, representing the number of input elements
28371// by which the window moves in each iteration.  Defaults to `size`.
28372// Must be positive.
28373//	stride: An integer scalar, representing the stride of the input elements
28374// in the sliding window. Must be positive. The default value of 1 means
28375// "retain every input element".
//	drop_remainder: A Boolean scalar, representing whether the last window should be
// dropped if its size is smaller than `size`.
28378//
28379//
28380func WindowDataset(scope *Scope, input_dataset tf.Output, size tf.Output, shift tf.Output, stride tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
28381	if scope.Err() != nil {
28382		return
28383	}
28384	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
28385	opspec := tf.OpSpec{
28386		Type: "WindowDataset",
28387		Input: []tf.Input{
28388			input_dataset, size, shift, stride, drop_remainder,
28389		},
28390		Attrs: attrs,
28391	}
28392	op := scope.AddOperation(opspec)
28393	return op.Output(0)
28394}
28395
// Runs multiple additive regression ensemble predictors on input instances.
//
// This op computes the update to cached logits. It is designed to be used during
// training.
28399// It traverses the trees starting from cached tree id and cached node id and
28400// calculates the updates to be pushed to the cache.
28401//
28402// Arguments:
28403//
28404//	cached_tree_ids: Rank 1 Tensor containing cached tree ids which is the starting
28405// tree of prediction.
28406//	cached_node_ids: Rank 1 Tensor containing cached node id which is the starting
28407// node of prediction.
28408//	bucketized_features: A list of rank 1 Tensors containing bucket id for each
28409// feature.
28410//	logits_dimension: scalar, dimension of the logits, to be used for partial logits
28411// shape.
28412//
28413// Returns:
28414//	partial_logits: Rank 2 Tensor containing logits update (with respect to cached
28415// values stored) for each example.
28416//	tree_ids: Rank 1 Tensor containing new tree ids for each example.
28417//	node_ids: Rank 1 Tensor containing new node ids in the new tree_ids.
28418func BoostedTreesTrainingPredict(scope *Scope, tree_ensemble_handle tf.Output, cached_tree_ids tf.Output, cached_node_ids tf.Output, bucketized_features []tf.Output, logits_dimension int64) (partial_logits tf.Output, tree_ids tf.Output, node_ids tf.Output) {
28419	if scope.Err() != nil {
28420		return
28421	}
28422	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
28423	opspec := tf.OpSpec{
28424		Type: "BoostedTreesTrainingPredict",
28425		Input: []tf.Input{
28426			tree_ensemble_handle, cached_tree_ids, cached_node_ids, tf.OutputList(bucketized_features),
28427		},
28428		Attrs: attrs,
28429	}
28430	op := scope.AddOperation(opspec)
28431	return op.Output(0), op.Output(1), op.Output(2)
28432}
28433
28434// Forwards the input to the output.
28435//
28436// This operator represents the loop termination condition used by the
28437// "pivot" switches of a loop.
28438//
28439// Arguments:
28440//	input: A boolean scalar, representing the branch predicate of the Switch op.
28441//
28442// Returns The same tensor as `input`.
28443func LoopCond(scope *Scope, input tf.Output) (output tf.Output) {
28444	if scope.Err() != nil {
28445		return
28446	}
28447	opspec := tf.OpSpec{
28448		Type: "LoopCond",
28449		Input: []tf.Input{
28450			input,
28451		},
28452	}
28453	op := scope.AddOperation(opspec)
28454	return op.Output(0)
28455}
28456
28457// Computes the gradient for the inverse of `x` wrt its input.
28458//
28459// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
28460// is the corresponding input gradient.
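//
// For example (editorial note): if `x = 2` then `y = 1/x = 0.5`, so an
// upstream gradient `dy = 1` gives `grad = -1 * 0.5 * 0.5 = -0.25`.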
28461func ReciprocalGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
28462	if scope.Err() != nil {
28463		return
28464	}
28465	opspec := tf.OpSpec{
28466		Type: "ReciprocalGrad",
28467		Input: []tf.Input{
28468			y, dy,
28469		},
28470	}
28471	op := scope.AddOperation(opspec)
28472	return op.Output(0)
28473}
28474
28475// Reshapes a quantized tensor as per the Reshape op.
//
28479// Arguments:
28480//
28481//	shape: Defines the shape of the output tensor.
28482//	input_min: The minimum value of the input.
28483//	input_max: The maximum value of the input.
28484//
28485// Returns:
28486//	output
28487//	output_min: This value is copied from input_min.
28488//	output_max: This value is copied from input_max.
28489func QuantizedReshape(scope *Scope, tensor tf.Output, shape tf.Output, input_min tf.Output, input_max tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
28490	if scope.Err() != nil {
28491		return
28492	}
28493	opspec := tf.OpSpec{
28494		Type: "QuantizedReshape",
28495		Input: []tf.Input{
28496			tensor, shape, input_min, input_max,
28497		},
28498	}
28499	op := scope.AddOperation(opspec)
28500	return op.Output(0), op.Output(1), op.Output(2)
28501}
28502
28503// Creates a dataset that skips `count` elements from the `input_dataset`.
28504//
28505// Arguments:
28506//
28507//	count: A scalar representing the number of elements from the `input_dataset`
28508// that should be skipped.  If count is -1, skips everything.
28509//
28510//
28511func SkipDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
28512	if scope.Err() != nil {
28513		return
28514	}
28515	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
28516	opspec := tf.OpSpec{
28517		Type: "SkipDataset",
28518		Input: []tf.Input{
28519			input_dataset, count,
28520		},
28521		Attrs: attrs,
28522	}
28523	op := scope.AddOperation(opspec)
28524	return op.Output(0)
28525}
28526
28527// A placeholder op for a value that will be fed into the computation.
28528//
28529// DEPRECATED at GraphDef version 23: Placeholder now behaves the same as PlaceholderV2.
28530//
28531// N.B. This operation will fail with an error if it is executed. It is
28532// intended as a way to represent a value that will always be fed, and to
28533// provide attrs that enable the fed value to be checked at runtime.
28534//
28535// Arguments:
28536//	dtype: The type of elements in the tensor.
28537//	shape: The shape of the tensor. The shape can be any partially-specified
28538// shape.  To be unconstrained, pass in a shape with unknown rank.
28539//
28540// Returns A placeholder tensor that must be replaced using the feed mechanism.
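//
// A minimal end-to-end sketch (editorial illustration, not machine generated)
// showing how a fed tensor replaces the placeholder at run time; error
// handling is elided:
//
// ```go
// s := op.NewScope()
// ph := op.PlaceholderV2(s, tf.Float, tf.MakeShape(2))
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// feed, _ := tf.NewTensor([]float32{1, 2})
// out, _ := sess.Run(map[tf.Output]*tf.Tensor{ph: feed}, []tf.Output{ph}, nil)
// _ = out
// ```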
28541func PlaceholderV2(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
28542	if scope.Err() != nil {
28543		return
28544	}
28545	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
28546	opspec := tf.OpSpec{
28547		Type: "PlaceholderV2",
28548
28549		Attrs: attrs,
28550	}
28551	op := scope.AddOperation(opspec)
28552	return op.Output(0)
28553}
28554
28555// RetrieveTPUEmbeddingMDLAdagradLightParametersAttr is an optional argument to RetrieveTPUEmbeddingMDLAdagradLightParameters.
28556type RetrieveTPUEmbeddingMDLAdagradLightParametersAttr func(optionalAttr)
28557
28558// RetrieveTPUEmbeddingMDLAdagradLightParametersTableId sets the optional table_id attribute to value.
28559// If not specified, defaults to -1
28560func RetrieveTPUEmbeddingMDLAdagradLightParametersTableId(value int64) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr {
28561	return func(m optionalAttr) {
28562		m["table_id"] = value
28563	}
28564}
28565
28566// RetrieveTPUEmbeddingMDLAdagradLightParametersTableName sets the optional table_name attribute to value.
28567// If not specified, defaults to ""
28568func RetrieveTPUEmbeddingMDLAdagradLightParametersTableName(value string) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr {
28569	return func(m optionalAttr) {
28570		m["table_name"] = value
28571	}
28572}
28573
28574// RetrieveTPUEmbeddingMDLAdagradLightParametersConfig sets the optional config attribute to value.
28575// If not specified, defaults to ""
28576func RetrieveTPUEmbeddingMDLAdagradLightParametersConfig(value string) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr {
28577	return func(m optionalAttr) {
28578		m["config"] = value
28579	}
28580}
28581
28582// Retrieve MDL Adagrad Light embedding parameters.
28583//
// An op that retrieves optimization parameters from the embedding tables to host
28585// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
28586// the correct embedding table configuration. For example, this op is
28587// used to retrieve updated parameters before saving a checkpoint.
28588//
28589// Returns:
//	parameters: The embedding parameters updated by the MDL Adagrad Light optimization algorithm.
//	accumulators: The accumulators updated by the MDL Adagrad Light optimization algorithm.
//	weights: The weights updated by the MDL Adagrad Light optimization algorithm.
//	benefits: The benefits updated by the MDL Adagrad Light optimization algorithm.
28594func RetrieveTPUEmbeddingMDLAdagradLightParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingMDLAdagradLightParametersAttr) (parameters tf.Output, accumulators tf.Output, weights tf.Output, benefits tf.Output) {
28595	if scope.Err() != nil {
28596		return
28597	}
28598	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
28599	for _, a := range optional {
28600		a(attrs)
28601	}
28602	opspec := tf.OpSpec{
28603		Type: "RetrieveTPUEmbeddingMDLAdagradLightParameters",
28604
28605		Attrs: attrs,
28606	}
28607	op := scope.AddOperation(opspec)
28608	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
28609}
28610
28611// Creates a dataset that emits each dim-0 slice of `components` once.
28612func TensorSliceDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
28613	if scope.Err() != nil {
28614		return
28615	}
28616	attrs := map[string]interface{}{"output_shapes": output_shapes}
28617	opspec := tf.OpSpec{
28618		Type: "TensorSliceDataset",
28619		Input: []tf.Input{
28620			tf.OutputList(components),
28621		},
28622		Attrs: attrs,
28623	}
28624	op := scope.AddOperation(opspec)
28625	return op.Output(0)
28626}
28627
28628// RandomShuffleQueueV2Attr is an optional argument to RandomShuffleQueueV2.
28629type RandomShuffleQueueV2Attr func(optionalAttr)
28630
28631// RandomShuffleQueueV2Shapes sets the optional shapes attribute to value.
28632//
28633// value: The shape of each component in a value. The length of this attr must
28634// be either 0 or the same as the length of component_types. If the length of
28635// this attr is 0, the shapes of queue elements are not constrained, and
28636// only one element may be dequeued at a time.
28637// If not specified, defaults to {}
28638//
28639// REQUIRES: len(value) >= 0
28640func RandomShuffleQueueV2Shapes(value []tf.Shape) RandomShuffleQueueV2Attr {
28641	return func(m optionalAttr) {
28642		m["shapes"] = value
28643	}
28644}
28645
28646// RandomShuffleQueueV2Capacity sets the optional capacity attribute to value.
28647//
28648// value: The upper bound on the number of elements in this queue.
28649// Negative numbers mean no limit.
28650// If not specified, defaults to -1
28651func RandomShuffleQueueV2Capacity(value int64) RandomShuffleQueueV2Attr {
28652	return func(m optionalAttr) {
28653		m["capacity"] = value
28654	}
28655}
28656
28657// RandomShuffleQueueV2MinAfterDequeue sets the optional min_after_dequeue attribute to value.
28658//
28659// value: Dequeue will block unless there would be this
28660// many elements after the dequeue or the queue is closed. This
28661// ensures a minimum level of mixing of elements.
28662// If not specified, defaults to 0
28663func RandomShuffleQueueV2MinAfterDequeue(value int64) RandomShuffleQueueV2Attr {
28664	return func(m optionalAttr) {
28665		m["min_after_dequeue"] = value
28666	}
28667}
28668
28669// RandomShuffleQueueV2Seed sets the optional seed attribute to value.
28670//
28671// value: If either seed or seed2 is set to be non-zero, the random number
28672// generator is seeded by the given seed.  Otherwise, a random seed is used.
28673// If not specified, defaults to 0
28674func RandomShuffleQueueV2Seed(value int64) RandomShuffleQueueV2Attr {
28675	return func(m optionalAttr) {
28676		m["seed"] = value
28677	}
28678}
28679
28680// RandomShuffleQueueV2Seed2 sets the optional seed2 attribute to value.
28681//
28682// value: A second seed to avoid seed collision.
28683// If not specified, defaults to 0
28684func RandomShuffleQueueV2Seed2(value int64) RandomShuffleQueueV2Attr {
28685	return func(m optionalAttr) {
28686		m["seed2"] = value
28687	}
28688}
28689
28690// RandomShuffleQueueV2Container sets the optional container attribute to value.
28691//
28692// value: If non-empty, this queue is placed in the given container.
28693// Otherwise, a default container is used.
28694// If not specified, defaults to ""
28695func RandomShuffleQueueV2Container(value string) RandomShuffleQueueV2Attr {
28696	return func(m optionalAttr) {
28697		m["container"] = value
28698	}
28699}
28700
28701// RandomShuffleQueueV2SharedName sets the optional shared_name attribute to value.
28702//
28703// value: If non-empty, this queue will be shared under the given name
28704// across multiple sessions.
28705// If not specified, defaults to ""
28706func RandomShuffleQueueV2SharedName(value string) RandomShuffleQueueV2Attr {
28707	return func(m optionalAttr) {
28708		m["shared_name"] = value
28709	}
28710}
28711
28712// A queue that randomizes the order of elements.
28713//
28714// Arguments:
28715//	component_types: The type of each component in a value.
28716//
28717// Returns The handle to the queue.
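//
// A minimal usage sketch (editorial illustration, not machine generated; the
// attribute values are arbitrary assumptions) showing the functional-options
// pattern used for optional attributes throughout this package:
//
// ```go
// s := op.NewScope()
// q := op.RandomShuffleQueueV2(s, []tf.DataType{tf.Float},
// 	op.RandomShuffleQueueV2Capacity(1000),
// 	op.RandomShuffleQueueV2MinAfterDequeue(100),
// 	op.RandomShuffleQueueV2Seed(7))
// _ = q
// ```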
28718func RandomShuffleQueueV2(scope *Scope, component_types []tf.DataType, optional ...RandomShuffleQueueV2Attr) (handle tf.Output) {
28719	if scope.Err() != nil {
28720		return
28721	}
28722	attrs := map[string]interface{}{"component_types": component_types}
28723	for _, a := range optional {
28724		a(attrs)
28725	}
28726	opspec := tf.OpSpec{
28727		Type: "RandomShuffleQueueV2",
28728
28729		Attrs: attrs,
28730	}
28731	op := scope.AddOperation(opspec)
28732	return op.Output(0)
28733}
28734
28735// Creates a dataset that splits a SparseTensor into elements row-wise.
28736func SparseTensorSliceDataset(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output) (handle tf.Output) {
28737	if scope.Err() != nil {
28738		return
28739	}
28740	opspec := tf.OpSpec{
28741		Type: "SparseTensorSliceDataset",
28742		Input: []tf.Input{
28743			indices, values, dense_shape,
28744		},
28745	}
28746	op := scope.AddOperation(opspec)
28747	return op.Output(0)
28748}
28749
28750// Creates a dataset that emits `components` as a tuple of tensors once.
28751func TensorDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
28752	if scope.Err() != nil {
28753		return
28754	}
28755	attrs := map[string]interface{}{"output_shapes": output_shapes}
28756	opspec := tf.OpSpec{
28757		Type: "TensorDataset",
28758		Input: []tf.Input{
28759			tf.OutputList(components),
28760		},
28761		Attrs: attrs,
28762	}
28763	op := scope.AddOperation(opspec)
28764	return op.Output(0)
28765}
28766
// Deprecated; use the Python implementation tf.linalg.matrix_exponential instead.
28768//
28769// DEPRECATED at GraphDef version 27: Use Python implementation tf.linalg.matrix_exponential instead.
28770func MatrixExponential(scope *Scope, input tf.Output) (output tf.Output) {
28771	if scope.Err() != nil {
28772		return
28773	}
28774	opspec := tf.OpSpec{
28775		Type: "MatrixExponential",
28776		Input: []tf.Input{
28777			input,
28778		},
28779	}
28780	op := scope.AddOperation(opspec)
28781	return op.Output(0)
28782}
28783
28784// Gather slices from `params` into a Tensor with shape specified by `indices`.
28785//
28786// `indices` is a K-dimensional integer tensor, best thought of as a
28787// (K-1)-dimensional tensor of indices into `params`, where each element defines a
28788// slice of `params`:
28789//
//     output[i_0, ..., i_{K-2}] = params[indices[i_0, ..., i_{K-2}]]
28791//
28792// Whereas in `tf.gather` `indices` defines slices into the `axis`
28793// dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
28794// first `N` dimensions of `params`, where `N = indices.shape[-1]`.
28795//
28796// The last dimension of `indices` can be at most the rank of
28797// `params`:
28798//
28799//     indices.shape[-1] <= params.rank
28800//
28801// The last dimension of `indices` corresponds to elements
28802// (if `indices.shape[-1] == params.rank`) or slices
28803// (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
28804// of `params`.  The output tensor has shape
28805//
28806//     indices.shape[:-1] + params.shape[indices.shape[-1]:]
28807//
// Note that on CPU, if an out-of-bound index is found, an error is returned.
// On GPU, if an out-of-bound index is found, a 0 is stored in the
// corresponding output value.
//
// Some examples follow.
28813//
28814// Simple indexing into a matrix:
28815//
28816// ```python
28817//     indices = [[0, 0], [1, 1]]
28818//     params = [['a', 'b'], ['c', 'd']]
28819//     output = ['a', 'd']
28820// ```
28821//
28822// Slice indexing into a matrix:
28823//
28824// ```python
28825//     indices = [[1], [0]]
28826//     params = [['a', 'b'], ['c', 'd']]
28827//     output = [['c', 'd'], ['a', 'b']]
28828// ```
28829//
28830// Indexing into a 3-tensor:
28831//
28832// ```python
28833//     indices = [[1]]
28834//     params = [[['a0', 'b0'], ['c0', 'd0']],
28835//               [['a1', 'b1'], ['c1', 'd1']]]
28836//     output = [[['a1', 'b1'], ['c1', 'd1']]]
28837//
28838//
28839//     indices = [[0, 1], [1, 0]]
28840//     params = [[['a0', 'b0'], ['c0', 'd0']],
28841//               [['a1', 'b1'], ['c1', 'd1']]]
28842//     output = [['c0', 'd0'], ['a1', 'b1']]
28843//
28844//
28845//     indices = [[0, 0, 1], [1, 0, 1]]
28846//     params = [[['a0', 'b0'], ['c0', 'd0']],
28847//               [['a1', 'b1'], ['c1', 'd1']]]
28848//     output = ['b0', 'b1']
28849// ```
28850//
28851// Batched indexing into a matrix:
28852//
28853// ```python
28854//     indices = [[[0, 0]], [[0, 1]]]
28855//     params = [['a', 'b'], ['c', 'd']]
28856//     output = [['a'], ['b']]
28857// ```
28858//
28859// Batched slice indexing into a matrix:
28860//
28861// ```python
28862//     indices = [[[1]], [[0]]]
28863//     params = [['a', 'b'], ['c', 'd']]
28864//     output = [[['c', 'd']], [['a', 'b']]]
28865// ```
28866//
28867// Batched indexing into a 3-tensor:
28868//
28869// ```python
28870//     indices = [[[1]], [[0]]]
28871//     params = [[['a0', 'b0'], ['c0', 'd0']],
28872//               [['a1', 'b1'], ['c1', 'd1']]]
28873//     output = [[[['a1', 'b1'], ['c1', 'd1']]],
28874//               [[['a0', 'b0'], ['c0', 'd0']]]]
28875//
28876//     indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
28877//     params = [[['a0', 'b0'], ['c0', 'd0']],
28878//               [['a1', 'b1'], ['c1', 'd1']]]
28879//     output = [[['c0', 'd0'], ['a1', 'b1']],
28880//               [['a0', 'b0'], ['c1', 'd1']]]
28881//
28882//
28883//     indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
28884//     params = [[['a0', 'b0'], ['c0', 'd0']],
28885//               [['a1', 'b1'], ['c1', 'd1']]]
28886//     output = [['b0', 'b1'], ['d0', 'c1']]
28887// ```
28888//
28889// See also `tf.gather` and `tf.batch_gather`.
28890//
28891// Arguments:
28892//	params: The tensor from which to gather values.
28893//	indices: Index tensor.
28894//
28895// Returns Values from `params` gathered from indices given by `indices`, with
28896// shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.
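//
// A minimal end-to-end sketch in Go (editorial illustration, not machine
// generated), mirroring the first Python example above; error handling is
// elided:
//
// ```go
// s := op.NewScope()
// params := op.Const(s.SubScope("params"), [][]string{{"a", "b"}, {"c", "d"}})
// indices := op.Const(s.SubScope("indices"), [][]int32{{0, 0}, {1, 1}})
// out := op.GatherNd(s, params, indices)
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// res, _ := sess.Run(nil, []tf.Output{out}, nil)
// _ = res // res[0].Value() is []string{"a", "d"}
// ```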
28897func GatherNd(scope *Scope, params tf.Output, indices tf.Output) (output tf.Output) {
28898	if scope.Err() != nil {
28899		return
28900	}
28901	opspec := tf.OpSpec{
28902		Type: "GatherNd",
28903		Input: []tf.Input{
28904			params, indices,
28905		},
28906	}
28907	op := scope.AddOperation(opspec)
28908	return op.Output(0)
28909}
28910
28911// MaxPoolAttr is an optional argument to MaxPool.
28912type MaxPoolAttr func(optionalAttr)
28913
28914// MaxPoolExplicitPaddings sets the optional explicit_paddings attribute to value.
28915// If not specified, defaults to {}
28916func MaxPoolExplicitPaddings(value []int64) MaxPoolAttr {
28917	return func(m optionalAttr) {
28918		m["explicit_paddings"] = value
28919	}
28920}
28921
28922// MaxPoolDataFormat sets the optional data_format attribute to value.
28923//
28924// value: Specify the data format of the input and output data. With the
28925// default format "NHWC", the data is stored in the order of:
28926//     [batch, in_height, in_width, in_channels].
28927// Alternatively, the format could be "NCHW", the data storage order of:
28928//     [batch, in_channels, in_height, in_width].
28929// If not specified, defaults to "NHWC"
28930func MaxPoolDataFormat(value string) MaxPoolAttr {
28931	return func(m optionalAttr) {
28932		m["data_format"] = value
28933	}
28934}
28935
28936// Performs max pooling on the input.
28937//
28938// Arguments:
28939//	input: 4-D input to pool over.
28940//	ksize: The size of the window for each dimension of the input tensor.
28941//	strides: The stride of the sliding window for each dimension of the
28942// input tensor.
28943//	padding: The type of padding algorithm to use.
28944//
28945// Returns The max pooled output tensor.
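//
// A minimal usage sketch (editorial illustration, not machine generated; it
// assumes a scope `s` and a 4-D NHWC float32 tensor `input` already in the
// graph):
//
// ```go
// pooled := op.MaxPool(s, input,
// 	[]int64{1, 2, 2, 1}, // ksize: 2x2 window
// 	[]int64{1, 2, 2, 1}, // strides: non-overlapping windows
// 	"VALID",
// 	op.MaxPoolDataFormat("NHWC"))
// _ = pooled
// ```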
28946func MaxPool(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolAttr) (output tf.Output) {
28947	if scope.Err() != nil {
28948		return
28949	}
28950	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
28951	for _, a := range optional {
28952		a(attrs)
28953	}
28954	opspec := tf.OpSpec{
28955		Type: "MaxPool",
28956		Input: []tf.Input{
28957			input,
28958		},
28959		Attrs: attrs,
28960	}
28961	op := scope.AddOperation(opspec)
28962	return op.Output(0)
28963}
28964
28965// OrderedMapClearAttr is an optional argument to OrderedMapClear.
28966type OrderedMapClearAttr func(optionalAttr)
28967
28968// OrderedMapClearCapacity sets the optional capacity attribute to value.
28969// If not specified, defaults to 0
28970//
28971// REQUIRES: value >= 0
28972func OrderedMapClearCapacity(value int64) OrderedMapClearAttr {
28973	return func(m optionalAttr) {
28974		m["capacity"] = value
28975	}
28976}
28977
28978// OrderedMapClearMemoryLimit sets the optional memory_limit attribute to value.
28979// If not specified, defaults to 0
28980//
28981// REQUIRES: value >= 0
28982func OrderedMapClearMemoryLimit(value int64) OrderedMapClearAttr {
28983	return func(m optionalAttr) {
28984		m["memory_limit"] = value
28985	}
28986}
28987
28988// OrderedMapClearContainer sets the optional container attribute to value.
28989// If not specified, defaults to ""
28990func OrderedMapClearContainer(value string) OrderedMapClearAttr {
28991	return func(m optionalAttr) {
28992		m["container"] = value
28993	}
28994}
28995
28996// OrderedMapClearSharedName sets the optional shared_name attribute to value.
28997// If not specified, defaults to ""
28998func OrderedMapClearSharedName(value string) OrderedMapClearAttr {
28999	return func(m optionalAttr) {
29000		m["shared_name"] = value
29001	}
29002}
29003
29004// Op removes all elements in the underlying container.
29005//
29006// Returns the created operation.
29007func OrderedMapClear(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapClearAttr) (o *tf.Operation) {
29008	if scope.Err() != nil {
29009		return
29010	}
29011	attrs := map[string]interface{}{"dtypes": dtypes}
29012	for _, a := range optional {
29013		a(attrs)
29014	}
29015	opspec := tf.OpSpec{
29016		Type: "OrderedMapClear",
29017
29018		Attrs: attrs,
29019	}
29020	return scope.AddOperation(opspec)
29021}
29022
29023// OrderedMapSizeAttr is an optional argument to OrderedMapSize.
29024type OrderedMapSizeAttr func(optionalAttr)
29025
29026// OrderedMapSizeCapacity sets the optional capacity attribute to value.
29027// If not specified, defaults to 0
29028//
29029// REQUIRES: value >= 0
29030func OrderedMapSizeCapacity(value int64) OrderedMapSizeAttr {
29031	return func(m optionalAttr) {
29032		m["capacity"] = value
29033	}
29034}
29035
29036// OrderedMapSizeMemoryLimit sets the optional memory_limit attribute to value.
29037// If not specified, defaults to 0
29038//
29039// REQUIRES: value >= 0
29040func OrderedMapSizeMemoryLimit(value int64) OrderedMapSizeAttr {
29041	return func(m optionalAttr) {
29042		m["memory_limit"] = value
29043	}
29044}
29045
29046// OrderedMapSizeContainer sets the optional container attribute to value.
29047// If not specified, defaults to ""
29048func OrderedMapSizeContainer(value string) OrderedMapSizeAttr {
29049	return func(m optionalAttr) {
29050		m["container"] = value
29051	}
29052}
29053
29054// OrderedMapSizeSharedName sets the optional shared_name attribute to value.
29055// If not specified, defaults to ""
29056func OrderedMapSizeSharedName(value string) OrderedMapSizeAttr {
29057	return func(m optionalAttr) {
29058		m["shared_name"] = value
29059	}
29060}
29061
29062// Op returns the number of elements in the underlying container.
29063func OrderedMapSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapSizeAttr) (size tf.Output) {
29064	if scope.Err() != nil {
29065		return
29066	}
29067	attrs := map[string]interface{}{"dtypes": dtypes}
29068	for _, a := range optional {
29069		a(attrs)
29070	}
29071	opspec := tf.OpSpec{
29072		Type: "OrderedMapSize",
29073
29074		Attrs: attrs,
29075	}
29076	op := scope.AddOperation(opspec)
29077	return op.Output(0)
29078}
29079
29080// Delete the TensorArray from its resource container.
29081//
29082// This enables the user to close and release the resource in the middle
29083// of a step/run.
29084//
29085// Arguments:
29086//	handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
29087//
29088// Returns the created operation.
29089func TensorArrayCloseV3(scope *Scope, handle tf.Output) (o *tf.Operation) {
29090	if scope.Err() != nil {
29091		return
29092	}
29093	opspec := tf.OpSpec{
29094		Type: "TensorArrayCloseV3",
29095		Input: []tf.Input{
29096			handle,
29097		},
29098	}
29099	return scope.AddOperation(opspec)
29100}
29101
29102// Saves tensors in V2 checkpoint format.
29103//
29104// By default, saves the named tensors in full.  If the caller wishes to save
29105// specific slices of full tensors, "shape_and_slices" should be non-empty strings
29106// and correspondingly well-formed.
29107//
29108// Arguments:
29109//	prefix: Must have a single element. The prefix of the V2 checkpoint to which we
29110// write the tensors.
29111//	tensor_names: shape {N}. The names of the tensors to be saved.
29112//	shape_and_slices: shape {N}.  The slice specs of the tensors to be saved.
29113// Empty strings indicate that they are non-partitioned tensors.
29114//	tensors: `N` tensors to save.
29115//
29116// Returns the created operation.
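//
// A minimal usage sketch (editorial illustration, not machine generated; the
// checkpoint prefix and tensor contents are arbitrary assumptions):
//
// ```go
// s := op.NewScope()
// prefix := op.Const(s.SubScope("prefix"), "/tmp/ckpt/model")
// names := op.Const(s.SubScope("names"), []string{"w"})
// slices := op.Const(s.SubScope("slices"), []string{""}) // "" = save in full
// w := op.Const(s.SubScope("w"), []float32{1, 2, 3})
// save := op.SaveV2(s, prefix, names, slices, []tf.Output{w})
// _ = save // execute via Session.Run(nil, nil, []*tf.Operation{save})
// ```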
29117func SaveV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, tensors []tf.Output) (o *tf.Operation) {
29118	if scope.Err() != nil {
29119		return
29120	}
29121	opspec := tf.OpSpec{
29122		Type: "SaveV2",
29123		Input: []tf.Input{
29124			prefix, tensor_names, shape_and_slices, tf.OutputList(tensors),
29125		},
29126	}
29127	return scope.AddOperation(opspec)
29128}
29129
29130// Returns the TopK unique values in the array in sorted order.
29131//
29132// The running time is proportional to the product of K and the input
29133// size. Sorting the whole array is more efficient for sufficiently large
29134// values of K. The median-of-medians algorithm is probably faster, but
29135// difficult to implement efficiently in XLA. If there are fewer than K
// unique numbers (not NaNs), the results are padded with negative
29137// infinity. NaNs are never returned. Subnormal numbers are flushed to
29138// zero. If an element appears at multiple indices, the highest index is
29139// returned. If a TopK element never appears in the input due to padding
29140// values, the indices are padded with negative one. If a padding value
29141// appears in the input and padding is needed, the highest index of the
29142// padding value will be returned. The semantics are not the same as
29143// kth_order_statistic.
29144func TopKUnique(scope *Scope, input tf.Output, k int64) (topk tf.Output, topk_indices tf.Output) {
29145	if scope.Err() != nil {
29146		return
29147	}
29148	attrs := map[string]interface{}{"k": k}
29149	opspec := tf.OpSpec{
29150		Type: "TopKUnique",
29151		Input: []tf.Input{
29152			input,
29153		},
29154		Attrs: attrs,
29155	}
29156	op := scope.AddOperation(opspec)
29157	return op.Output(0), op.Output(1)
29158}
29159
29160// TPUReplicateMetadataAttr is an optional argument to TPUReplicateMetadata.
29161type TPUReplicateMetadataAttr func(optionalAttr)
29162
29163// TPUReplicateMetadataNumCoresPerReplica sets the optional num_cores_per_replica attribute to value.
29164//
29165// value: Number of cores per replica. Used for model parallelism.
29166// If not specified, defaults to 1
29167func TPUReplicateMetadataNumCoresPerReplica(value int64) TPUReplicateMetadataAttr {
29168	return func(m optionalAttr) {
29169		m["num_cores_per_replica"] = value
29170	}
29171}
29172
29173// TPUReplicateMetadataTopology sets the optional topology attribute to value.
29174//
29175// value: TopologyProto indicating the topology of the TPU pod slice.
29176// If not specified, defaults to ""
29177func TPUReplicateMetadataTopology(value string) TPUReplicateMetadataAttr {
29178	return func(m optionalAttr) {
29179		m["topology"] = value
29180	}
29181}
29182
29183// TPUReplicateMetadataUseTpu sets the optional use_tpu attribute to value.
29184//
29185// value: Whether to place the computation on the TPU.
29186// If not specified, defaults to true
29187func TPUReplicateMetadataUseTpu(value bool) TPUReplicateMetadataAttr {
29188	return func(m optionalAttr) {
29189		m["use_tpu"] = value
29190	}
29191}
29192
29193// TPUReplicateMetadataDeviceAssignment sets the optional device_assignment attribute to value.
29194//
29195// value: The assignment of devices for the computation.
29196// If not specified, defaults to {}
29197func TPUReplicateMetadataDeviceAssignment(value []int64) TPUReplicateMetadataAttr {
29198	return func(m optionalAttr) {
29199		m["device_assignment"] = value
29200	}
29201}
29202
29203// TPUReplicateMetadataComputationShape sets the optional computation_shape attribute to value.
29204//
29205// value: DEPRECATED. Use num_cores_per_replica instead.
29206// If not specified, defaults to {}
29207func TPUReplicateMetadataComputationShape(value []int64) TPUReplicateMetadataAttr {
29208	return func(m optionalAttr) {
29209		m["computation_shape"] = value
29210	}
29211}
29212
29213// TPUReplicateMetadataHostComputeCore sets the optional host_compute_core attribute to value.
29214// If not specified, defaults to {}
29215func TPUReplicateMetadataHostComputeCore(value []string) TPUReplicateMetadataAttr {
29216	return func(m optionalAttr) {
29217		m["host_compute_core"] = value
29218	}
29219}
29220
29221// TPUReplicateMetadataPaddingMap sets the optional padding_map attribute to value.
29222// If not specified, defaults to {}
29223func TPUReplicateMetadataPaddingMap(value []string) TPUReplicateMetadataAttr {
29224	return func(m optionalAttr) {
29225		m["padding_map"] = value
29226	}
29227}
29228
29229// TPUReplicateMetadataStepMarkerLocation sets the optional step_marker_location attribute to value.
29230// If not specified, defaults to "STEP_MARK_AT_ENTRY"
29231func TPUReplicateMetadataStepMarkerLocation(value string) TPUReplicateMetadataAttr {
29232	return func(m optionalAttr) {
29233		m["step_marker_location"] = value
29234	}
29235}
29236
29237// TPUReplicateMetadataAllowSoftPlacement sets the optional allow_soft_placement attribute to value.
29238// If not specified, defaults to false
29239func TPUReplicateMetadataAllowSoftPlacement(value bool) TPUReplicateMetadataAttr {
29240	return func(m optionalAttr) {
29241		m["allow_soft_placement"] = value
29242	}
29243}
29244
29245// TPUReplicateMetadataUseSpmdForXlaPartitioning sets the optional use_spmd_for_xla_partitioning attribute to value.
29246// If not specified, defaults to false
29247func TPUReplicateMetadataUseSpmdForXlaPartitioning(value bool) TPUReplicateMetadataAttr {
29248	return func(m optionalAttr) {
29249		m["use_spmd_for_xla_partitioning"] = value
29250	}
29251}
29252
29253// Metadata indicating how the TPU computation should be replicated.
29254//
29255// This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph.
29256//
29257// Arguments:
29258//	num_replicas: Number of replicas of the computation
29259//
29260// Returns the created operation.
29261func TPUReplicateMetadata(scope *Scope, num_replicas int64, optional ...TPUReplicateMetadataAttr) (o *tf.Operation) {
29262	if scope.Err() != nil {
29263		return
29264	}
29265	attrs := map[string]interface{}{"num_replicas": num_replicas}
29266	for _, a := range optional {
29267		a(attrs)
29268	}
29269	opspec := tf.OpSpec{
29270		Type: "TPUReplicateMetadata",
29271
29272		Attrs: attrs,
29273	}
29274	return scope.AddOperation(opspec)
29275}
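
// A sketch of the functional-options pattern used by the optional attributes
// (hypothetical values): each *Attr helper writes one attribute into the map
// before the operation is added to the graph.
//
// ```go
// s := op.NewScope()
// meta := op.TPUReplicateMetadata(s, 8,
// 	op.TPUReplicateMetadataNumCoresPerReplica(2),
// 	op.TPUReplicateMetadataUseTpu(true),
// )
// _ = meta
// ```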
29276
29277// OrderedMapStageAttr is an optional argument to OrderedMapStage.
29278type OrderedMapStageAttr func(optionalAttr)
29279
29280// OrderedMapStageCapacity sets the optional capacity attribute to value.
29281//
29282// value: Maximum number of elements in the Staging Area. If > 0, inserts
29283// on the container will block when the capacity is reached.
29284// If not specified, defaults to 0
29285//
29286// REQUIRES: value >= 0
29287func OrderedMapStageCapacity(value int64) OrderedMapStageAttr {
29288	return func(m optionalAttr) {
29289		m["capacity"] = value
29290	}
29291}
29292
29293// OrderedMapStageMemoryLimit sets the optional memory_limit attribute to value.
29294// If not specified, defaults to 0
29295//
29296// REQUIRES: value >= 0
29297func OrderedMapStageMemoryLimit(value int64) OrderedMapStageAttr {
29298	return func(m optionalAttr) {
29299		m["memory_limit"] = value
29300	}
29301}
29302
29303// OrderedMapStageContainer sets the optional container attribute to value.
29304//
29305// value: If non-empty, this queue is placed in the given container. Otherwise,
29306// a default container is used.
29307// If not specified, defaults to ""
29308func OrderedMapStageContainer(value string) OrderedMapStageAttr {
29309	return func(m optionalAttr) {
29310		m["container"] = value
29311	}
29312}
29313
29314// OrderedMapStageSharedName sets the optional shared_name attribute to value.
29315//
29316// value: It is necessary to match this name to the matching Unstage Op.
29317// If not specified, defaults to ""
29318func OrderedMapStageSharedName(value string) OrderedMapStageAttr {
29319	return func(m optionalAttr) {
29320		m["shared_name"] = value
29321	}
29322}
29323
29324// Stage (key, values) in the underlying container which behaves like an ordered
29325//
29326// associative container.  Elements are ordered by key.
29327//
29328// Arguments:
29329//	key: int64
29330//
29331//	values: A list of tensors.
29332//	dtypes: A list of data types that inserted values should adhere to.
29333//
29334//
29335// Returns the created operation.
29336func OrderedMapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...OrderedMapStageAttr) (o *tf.Operation) {
29337	if scope.Err() != nil {
29338		return
29339	}
29340	attrs := map[string]interface{}{"dtypes": dtypes}
29341	for _, a := range optional {
29342		a(attrs)
29343	}
29344	opspec := tf.OpSpec{
29345		Type: "OrderedMapStage",
29346		Input: []tf.Input{
29347			key, indices, tf.OutputList(values),
29348		},
29349		Attrs: attrs,
29350	}
29351	return scope.AddOperation(opspec)
29352}
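
// A usage sketch (the key/index semantics here are an assumption; values are
// illustrative): stage one float tensor under key 1, to be retrieved by a
// matching unstage op that uses the same shared_name.
//
// ```go
// s := op.NewScope()
// key := op.Const(s, int64(1))
// indices := op.Const(s, []int32{0})
// val := op.Const(s, []float32{1, 2, 3})
// stage := op.OrderedMapStage(s, key, indices, []tf.Output{val},
// 	[]tf.DataType{tf.Float}, op.OrderedMapStageSharedName("omap"))
// _ = stage
// ```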
29353
29354// RandomGammaAttr is an optional argument to RandomGamma.
29355type RandomGammaAttr func(optionalAttr)
29356
29357// RandomGammaSeed sets the optional seed attribute to value.
29358//
29359// value: If either `seed` or `seed2` are set to be non-zero, the random number
29360// generator is seeded by the given seed.  Otherwise, it is seeded by a
29361// random seed.
29362// If not specified, defaults to 0
29363func RandomGammaSeed(value int64) RandomGammaAttr {
29364	return func(m optionalAttr) {
29365		m["seed"] = value
29366	}
29367}
29368
29369// RandomGammaSeed2 sets the optional seed2 attribute to value.
29370//
29371// value: A second seed to avoid seed collision.
29372// If not specified, defaults to 0
29373func RandomGammaSeed2(value int64) RandomGammaAttr {
29374	return func(m optionalAttr) {
29375		m["seed2"] = value
29376	}
29377}
29378
29379// Outputs random values from the Gamma distribution(s) described by alpha.
29380//
29381// This op uses the algorithm by Marsaglia et al. to acquire samples via
29382// transformation-rejection from pairs of uniform and normal random variables.
29383// See http://dl.acm.org/citation.cfm?id=358414
29384//
29385// Arguments:
29386//	shape: 1-D integer tensor. Shape of independent samples to draw from each
29387// distribution described by the shape parameters given in alpha.
29388//	alpha: A tensor in which each scalar is a "shape" parameter describing the
29389// associated gamma distribution.
29390//
29391// Returns A tensor with shape `shape + shape(alpha)`. Each slice
29392// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
29393// `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.
29394func RandomGamma(scope *Scope, shape tf.Output, alpha tf.Output, optional ...RandomGammaAttr) (output tf.Output) {
29395	if scope.Err() != nil {
29396		return
29397	}
29398	attrs := map[string]interface{}{}
29399	for _, a := range optional {
29400		a(attrs)
29401	}
29402	opspec := tf.OpSpec{
29403		Type: "RandomGamma",
29404		Input: []tf.Input{
29405			shape, alpha,
29406		},
29407		Attrs: attrs,
29408	}
29409	op := scope.AddOperation(opspec)
29410	return op.Output(0)
29411}
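
// A usage sketch (illustrative values): draw 8 samples from each of two Gamma
// distributions, with both seeds fixed for reproducibility.
//
// ```go
// s := op.NewScope()
// shape := op.Const(s, []int32{8})
// alpha := op.Const(s, []float32{2.0, 0.5})
// samples := op.RandomGamma(s, shape, alpha,
// 	op.RandomGammaSeed(1), op.RandomGammaSeed2(2))
// _ = samples // shape [8, 2], i.e. `shape + shape(alpha)`
// ```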
29412
29413// Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise.
29414func Xlog1py(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
29415	if scope.Err() != nil {
29416		return
29417	}
29418	opspec := tf.OpSpec{
29419		Type: "Xlog1py",
29420		Input: []tf.Input{
29421			x, y,
29422		},
29423	}
29424	op := scope.AddOperation(opspec)
29425	return op.Output(0)
29426}
29427
29428// QuantizedResizeBilinearAttr is an optional argument to QuantizedResizeBilinear.
29429type QuantizedResizeBilinearAttr func(optionalAttr)
29430
29431// QuantizedResizeBilinearAlignCorners sets the optional align_corners attribute to value.
29432//
29433// value: If true, the centers of the 4 corner pixels of the input and output tensors are
29434// aligned, preserving the values at the corner pixels. Defaults to false.
29435// If not specified, defaults to false
29436func QuantizedResizeBilinearAlignCorners(value bool) QuantizedResizeBilinearAttr {
29437	return func(m optionalAttr) {
29438		m["align_corners"] = value
29439	}
29440}
29441
29442// QuantizedResizeBilinearHalfPixelCenters sets the optional half_pixel_centers attribute to value.
29443// If not specified, defaults to false
29444func QuantizedResizeBilinearHalfPixelCenters(value bool) QuantizedResizeBilinearAttr {
29445	return func(m optionalAttr) {
29446		m["half_pixel_centers"] = value
29447	}
29448}
29449
29450// Resize quantized `images` to `size` using quantized bilinear interpolation.
29451//
29452// Input images and output images must be quantized types.
29453//
29454// Arguments:
29455//	images: 4-D with shape `[batch, height, width, channels]`.
29456//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
29457// new size for the images.
29458//
29459//
29460//
29461// Returns:
29462//	resized_images: 4-D with shape
29463// `[batch, new_height, new_width, channels]`.
29464//	out_min
29465//	out_max
29466func QuantizedResizeBilinear(scope *Scope, images tf.Output, size tf.Output, min tf.Output, max tf.Output, optional ...QuantizedResizeBilinearAttr) (resized_images tf.Output, out_min tf.Output, out_max tf.Output) {
29467	if scope.Err() != nil {
29468		return
29469	}
29470	attrs := map[string]interface{}{}
29471	for _, a := range optional {
29472		a(attrs)
29473	}
29474	opspec := tf.OpSpec{
29475		Type: "QuantizedResizeBilinear",
29476		Input: []tf.Input{
29477			images, size, min, max,
29478		},
29479		Attrs: attrs,
29480	}
29481	op := scope.AddOperation(opspec)
29482	return op.Output(0), op.Output(1), op.Output(2)
29483}
29484
29485// OrderedMapIncompleteSizeAttr is an optional argument to OrderedMapIncompleteSize.
29486type OrderedMapIncompleteSizeAttr func(optionalAttr)
29487
29488// OrderedMapIncompleteSizeCapacity sets the optional capacity attribute to value.
29489// If not specified, defaults to 0
29490//
29491// REQUIRES: value >= 0
29492func OrderedMapIncompleteSizeCapacity(value int64) OrderedMapIncompleteSizeAttr {
29493	return func(m optionalAttr) {
29494		m["capacity"] = value
29495	}
29496}
29497
29498// OrderedMapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
29499// If not specified, defaults to 0
29500//
29501// REQUIRES: value >= 0
29502func OrderedMapIncompleteSizeMemoryLimit(value int64) OrderedMapIncompleteSizeAttr {
29503	return func(m optionalAttr) {
29504		m["memory_limit"] = value
29505	}
29506}
29507
29508// OrderedMapIncompleteSizeContainer sets the optional container attribute to value.
29509// If not specified, defaults to ""
29510func OrderedMapIncompleteSizeContainer(value string) OrderedMapIncompleteSizeAttr {
29511	return func(m optionalAttr) {
29512		m["container"] = value
29513	}
29514}
29515
29516// OrderedMapIncompleteSizeSharedName sets the optional shared_name attribute to value.
29517// If not specified, defaults to ""
29518func OrderedMapIncompleteSizeSharedName(value string) OrderedMapIncompleteSizeAttr {
29519	return func(m optionalAttr) {
29520		m["shared_name"] = value
29521	}
29522}
29523
29524// Op returns the number of incomplete elements in the underlying container.
29525func OrderedMapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapIncompleteSizeAttr) (size tf.Output) {
29526	if scope.Err() != nil {
29527		return
29528	}
29529	attrs := map[string]interface{}{"dtypes": dtypes}
29530	for _, a := range optional {
29531		a(attrs)
29532	}
29533	opspec := tf.OpSpec{
29534		Type: "OrderedMapIncompleteSize",
29535
29536		Attrs: attrs,
29537	}
29538	op := scope.AddOperation(opspec)
29539	return op.Output(0)
29540}
29541
29542// MapIncompleteSizeAttr is an optional argument to MapIncompleteSize.
29543type MapIncompleteSizeAttr func(optionalAttr)
29544
29545// MapIncompleteSizeCapacity sets the optional capacity attribute to value.
29546// If not specified, defaults to 0
29547//
29548// REQUIRES: value >= 0
29549func MapIncompleteSizeCapacity(value int64) MapIncompleteSizeAttr {
29550	return func(m optionalAttr) {
29551		m["capacity"] = value
29552	}
29553}
29554
29555// MapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
29556// If not specified, defaults to 0
29557//
29558// REQUIRES: value >= 0
29559func MapIncompleteSizeMemoryLimit(value int64) MapIncompleteSizeAttr {
29560	return func(m optionalAttr) {
29561		m["memory_limit"] = value
29562	}
29563}
29564
29565// MapIncompleteSizeContainer sets the optional container attribute to value.
29566// If not specified, defaults to ""
29567func MapIncompleteSizeContainer(value string) MapIncompleteSizeAttr {
29568	return func(m optionalAttr) {
29569		m["container"] = value
29570	}
29571}
29572
29573// MapIncompleteSizeSharedName sets the optional shared_name attribute to value.
29574// If not specified, defaults to ""
29575func MapIncompleteSizeSharedName(value string) MapIncompleteSizeAttr {
29576	return func(m optionalAttr) {
29577		m["shared_name"] = value
29578	}
29579}
29580
29581// Op returns the number of incomplete elements in the underlying container.
29582func MapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...MapIncompleteSizeAttr) (size tf.Output) {
29583	if scope.Err() != nil {
29584		return
29585	}
29586	attrs := map[string]interface{}{"dtypes": dtypes}
29587	for _, a := range optional {
29588		a(attrs)
29589	}
29590	opspec := tf.OpSpec{
29591		Type: "MapIncompleteSize",
29592
29593		Attrs: attrs,
29594	}
29595	op := scope.AddOperation(opspec)
29596	return op.Output(0)
29597}
29598
29599// UnbatchAttr is an optional argument to Unbatch.
29600type UnbatchAttr func(optionalAttr)
29601
29602// UnbatchContainer sets the optional container attribute to value.
29603// If not specified, defaults to ""
29604func UnbatchContainer(value string) UnbatchAttr {
29605	return func(m optionalAttr) {
29606		m["container"] = value
29607	}
29608}
29609
29610// UnbatchSharedName sets the optional shared_name attribute to value.
29611// If not specified, defaults to ""
29612func UnbatchSharedName(value string) UnbatchAttr {
29613	return func(m optionalAttr) {
29614		m["shared_name"] = value
29615	}
29616}
29617
29618// Reverses the operation of Batch for a single output Tensor.
29619//
29620// An instance of Unbatch either receives an empty batched_tensor, in which case it
29621// asynchronously waits until the values become available from a concurrently
29622// running instance of Unbatch with the same container and shared_name, or receives
29623// a non-empty batched_tensor in which case it finalizes all other concurrently
29624// running instances and outputs its own element from the batch.
29625//
29626// batched_tensor: The possibly transformed output of Batch. The size of the first
29627//  dimension should remain unchanged by the transformations for the operation to
29628//  work.
29629// batch_index: The matching batch_index obtained from Batch.
29630// id: The id scalar emitted by Batch.
29631// unbatched_tensor: The Tensor corresponding to this execution.
29632// timeout_micros: Maximum amount of time (in microseconds) to wait to receive the
29633//  batched input tensor associated with a given invocation of the op.
29634// container: Container to control resource sharing.
29635// shared_name: Instances of Unbatch with the same container and shared_name are
29636//  assumed to possibly belong to the same batch. If left empty, the op name will
29637//  be used as the shared name.
29638func Unbatch(scope *Scope, batched_tensor tf.Output, batch_index tf.Output, id tf.Output, timeout_micros int64, optional ...UnbatchAttr) (unbatched_tensor tf.Output) {
29639	if scope.Err() != nil {
29640		return
29641	}
29642	attrs := map[string]interface{}{"timeout_micros": timeout_micros}
29643	for _, a := range optional {
29644		a(attrs)
29645	}
29646	opspec := tf.OpSpec{
29647		Type: "Unbatch",
29648		Input: []tf.Input{
29649			batched_tensor, batch_index, id,
29650		},
29651		Attrs: attrs,
29652	}
29653	op := scope.AddOperation(opspec)
29654	return op.Output(0)
29655}
29656
29657// MapUnstageAttr is an optional argument to MapUnstage.
29658type MapUnstageAttr func(optionalAttr)
29659
29660// MapUnstageCapacity sets the optional capacity attribute to value.
29661// If not specified, defaults to 0
29662//
29663// REQUIRES: value >= 0
29664func MapUnstageCapacity(value int64) MapUnstageAttr {
29665	return func(m optionalAttr) {
29666		m["capacity"] = value
29667	}
29668}
29669
29670// MapUnstageMemoryLimit sets the optional memory_limit attribute to value.
29671// If not specified, defaults to 0
29672//
29673// REQUIRES: value >= 0
29674func MapUnstageMemoryLimit(value int64) MapUnstageAttr {
29675	return func(m optionalAttr) {
29676		m["memory_limit"] = value
29677	}
29678}
29679
29680// MapUnstageContainer sets the optional container attribute to value.
29681// If not specified, defaults to ""
29682func MapUnstageContainer(value string) MapUnstageAttr {
29683	return func(m optionalAttr) {
29684		m["container"] = value
29685	}
29686}
29687
29688// MapUnstageSharedName sets the optional shared_name attribute to value.
29689// If not specified, defaults to ""
29690func MapUnstageSharedName(value string) MapUnstageAttr {
29691	return func(m optionalAttr) {
29692		m["shared_name"] = value
29693	}
29694}
29695
29696// Op removes and returns the values associated with the key
29697//
29698// from the underlying container.   If the underlying container
29699// does not contain this key, the op will block until it does.
29700func MapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageAttr) (values []tf.Output) {
29701	if scope.Err() != nil {
29702		return
29703	}
29704	attrs := map[string]interface{}{"dtypes": dtypes}
29705	for _, a := range optional {
29706		a(attrs)
29707	}
29708	opspec := tf.OpSpec{
29709		Type: "MapUnstage",
29710		Input: []tf.Input{
29711			key, indices,
29712		},
29713		Attrs: attrs,
29714	}
29715	op := scope.AddOperation(opspec)
29716	if scope.Err() != nil {
29717		return
29718	}
29719	var idx int
29720	var err error
29721	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
29722		scope.UpdateErr("MapUnstage", err)
29723		return
29724	}
29725	return values
29726}
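
// A usage sketch (hypothetical shared_name): remove the float tensor staged
// under key 1 by a MapStage op that used the same shared_name; the op blocks
// until that key is present.
//
// ```go
// s := op.NewScope()
// key := op.Const(s, int64(1))
// indices := op.Const(s, []int32{0})
// vals := op.MapUnstage(s, key, indices, []tf.DataType{tf.Float},
// 	op.MapUnstageSharedName("shared_map"))
// _ = vals // vals[0] holds the unstaged tensor
// ```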
29727
29728// Shuffle dimensions of x according to a permutation and conjugate the result.
29729//
29730// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
29731//   `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
29732//   `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
29733func ConjugateTranspose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
29734	if scope.Err() != nil {
29735		return
29736	}
29737	opspec := tf.OpSpec{
29738		Type: "ConjugateTranspose",
29739		Input: []tf.Input{
29740			x, perm,
29741		},
29742	}
29743	op := scope.AddOperation(opspec)
29744	return op.Output(0)
29745}
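
// A usage sketch (assumes complex128 constants are accepted by the Go tensor
// conversion; values illustrative): swap the two axes of a 1x2 matrix and
// conjugate the elements.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, [][]complex128{{complex(1, 2), complex(3, 4)}})
// perm := op.Const(s, []int32{1, 0})
// y := op.ConjugateTranspose(s, x, perm) // shape [2, 1], values 1-2i and 3-4i
// _ = y
// ```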
29746
29747// RetrieveTPUEmbeddingCenteredRMSPropParametersAttr is an optional argument to RetrieveTPUEmbeddingCenteredRMSPropParameters.
29748type RetrieveTPUEmbeddingCenteredRMSPropParametersAttr func(optionalAttr)
29749
29750// RetrieveTPUEmbeddingCenteredRMSPropParametersTableId sets the optional table_id attribute to value.
29751// If not specified, defaults to -1
29752func RetrieveTPUEmbeddingCenteredRMSPropParametersTableId(value int64) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
29753	return func(m optionalAttr) {
29754		m["table_id"] = value
29755	}
29756}
29757
29758// RetrieveTPUEmbeddingCenteredRMSPropParametersTableName sets the optional table_name attribute to value.
29759// If not specified, defaults to ""
29760func RetrieveTPUEmbeddingCenteredRMSPropParametersTableName(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
29761	return func(m optionalAttr) {
29762		m["table_name"] = value
29763	}
29764}
29765
29766// RetrieveTPUEmbeddingCenteredRMSPropParametersConfig sets the optional config attribute to value.
29767// If not specified, defaults to ""
29768func RetrieveTPUEmbeddingCenteredRMSPropParametersConfig(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
29769	return func(m optionalAttr) {
29770		m["config"] = value
29771	}
29772}
29773
29774// Retrieve centered RMSProp embedding parameters.
29775//
29776// An op that retrieves optimization parameters from embedding to host
29777// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
29778// the correct embedding table configuration. For example, this op is
29779// used to retrieve updated parameters before saving a checkpoint.
29780//
29781// Returns:
29782//	parameters: Parameter parameters updated by the centered RMSProp optimization algorithm.
29783//	ms: Parameter ms updated by the centered RMSProp optimization algorithm.
29784//	mom: Parameter mom updated by the centered RMSProp optimization algorithm.
29785//	mg: Parameter mg updated by the centered RMSProp optimization algorithm.
29786func RetrieveTPUEmbeddingCenteredRMSPropParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingCenteredRMSPropParametersAttr) (parameters tf.Output, ms tf.Output, mom tf.Output, mg tf.Output) {
29787	if scope.Err() != nil {
29788		return
29789	}
29790	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
29791	for _, a := range optional {
29792		a(attrs)
29793	}
29794	opspec := tf.OpSpec{
29795		Type: "RetrieveTPUEmbeddingCenteredRMSPropParameters",
29796
29797		Attrs: attrs,
29798	}
29799	op := scope.AddOperation(opspec)
29800	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
29801}
29802
29803// Returns x + y element-wise.
29804//
29805// *NOTE*: `RiscAdd` does not support broadcasting.
29806//
29807// Given two input tensors, the `tf.risc_add` operation computes the sum for every element in the tensor.
29808//
29809// Both input and output have a range `(-inf, inf)`.
29810//
29811func RiscAdd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
29812	if scope.Err() != nil {
29813		return
29814	}
29815	opspec := tf.OpSpec{
29816		Type: "RiscAdd",
29817		Input: []tf.Input{
29818			x, y,
29819		},
29820	}
29821	op := scope.AddOperation(opspec)
29822	return op.Output(0)
29823}
29824
29825// MapPeekAttr is an optional argument to MapPeek.
29826type MapPeekAttr func(optionalAttr)
29827
29828// MapPeekCapacity sets the optional capacity attribute to value.
29829// If not specified, defaults to 0
29830//
29831// REQUIRES: value >= 0
29832func MapPeekCapacity(value int64) MapPeekAttr {
29833	return func(m optionalAttr) {
29834		m["capacity"] = value
29835	}
29836}
29837
29838// MapPeekMemoryLimit sets the optional memory_limit attribute to value.
29839// If not specified, defaults to 0
29840//
29841// REQUIRES: value >= 0
29842func MapPeekMemoryLimit(value int64) MapPeekAttr {
29843	return func(m optionalAttr) {
29844		m["memory_limit"] = value
29845	}
29846}
29847
29848// MapPeekContainer sets the optional container attribute to value.
29849// If not specified, defaults to ""
29850func MapPeekContainer(value string) MapPeekAttr {
29851	return func(m optionalAttr) {
29852		m["container"] = value
29853	}
29854}
29855
29856// MapPeekSharedName sets the optional shared_name attribute to value.
29857// If not specified, defaults to ""
29858func MapPeekSharedName(value string) MapPeekAttr {
29859	return func(m optionalAttr) {
29860		m["shared_name"] = value
29861	}
29862}
29863
29864// Op peeks at the values at the specified key.  If the
29865//
29866// underlying container does not contain this key,
29867// this op will block until it does.
29868func MapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapPeekAttr) (values []tf.Output) {
29869	if scope.Err() != nil {
29870		return
29871	}
29872	attrs := map[string]interface{}{"dtypes": dtypes}
29873	for _, a := range optional {
29874		a(attrs)
29875	}
29876	opspec := tf.OpSpec{
29877		Type: "MapPeek",
29878		Input: []tf.Input{
29879			key, indices,
29880		},
29881		Attrs: attrs,
29882	}
29883	op := scope.AddOperation(opspec)
29884	if scope.Err() != nil {
29885		return
29886	}
29887	var idx int
29888	var err error
29889	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
29890		scope.UpdateErr("MapPeek", err)
29891		return
29892	}
29893	return values
29894}
29895
29896// SparseTensorDenseMatMulAttr is an optional argument to SparseTensorDenseMatMul.
29897type SparseTensorDenseMatMulAttr func(optionalAttr)
29898
29899// SparseTensorDenseMatMulAdjointA sets the optional adjoint_a attribute to value.
29900//
29901// value: Use the adjoint of A in the matrix multiply.  If A is complex, this
29902// is transpose(conj(A)).  Otherwise it's transpose(A).
29903// If not specified, defaults to false
29904func SparseTensorDenseMatMulAdjointA(value bool) SparseTensorDenseMatMulAttr {
29905	return func(m optionalAttr) {
29906		m["adjoint_a"] = value
29907	}
29908}
29909
29910// SparseTensorDenseMatMulAdjointB sets the optional adjoint_b attribute to value.
29911//
29912// value: Use the adjoint of B in the matrix multiply.  If B is complex, this
29913// is transpose(conj(B)).  Otherwise it's transpose(B).
29914// If not specified, defaults to false
29915func SparseTensorDenseMatMulAdjointB(value bool) SparseTensorDenseMatMulAttr {
29916	return func(m optionalAttr) {
29917		m["adjoint_b"] = value
29918	}
29919}
29920
29921// Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
29922//
29923// No validity checking is performed on the indices of A.  However, the following
29924// input format is recommended for optimal behavior:
29925//
29926// if adjoint_a == false:
29927//   A should be sorted in lexicographically increasing order.  Use SparseReorder
29928//   if you're not sure.
29929// if adjoint_a == true:
29930//   A should be sorted in order of increasing dimension 1 (i.e., "column major"
29931//   order instead of "row major" order).
29932//
29933// Arguments:
29934//	a_indices: 2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
29935//	a_values: 1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector.
29936//	a_shape: 1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector.
29937//	b: 2-D.  A dense Matrix.
29938func SparseTensorDenseMatMul(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output, optional ...SparseTensorDenseMatMulAttr) (product tf.Output) {
29939	if scope.Err() != nil {
29940		return
29941	}
29942	attrs := map[string]interface{}{}
29943	for _, a := range optional {
29944		a(attrs)
29945	}
29946	opspec := tf.OpSpec{
29947		Type: "SparseTensorDenseMatMul",
29948		Input: []tf.Input{
29949			a_indices, a_values, a_shape, b,
29950		},
29951		Attrs: attrs,
29952	}
29953	op := scope.AddOperation(opspec)
29954	return op.Output(0)
29955}
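
// A usage sketch (illustrative values): multiply the sparse 2x2 matrix
// A = [[2 0] [0 3]] by a dense 2x2 matrix B.
//
// ```go
// s := op.NewScope()
// aIndices := op.Const(s, [][]int64{{0, 0}, {1, 1}})
// aValues := op.Const(s, []float32{2, 3})
// aShape := op.Const(s, []int64{2, 2})
// b := op.Const(s, [][]float32{{1, 2}, {3, 4}})
// product := op.SparseTensorDenseMatMul(s, aIndices, aValues, aShape, b)
// _ = product // [[2 4] [9 12]]
// ```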
29956
29957// FusedBatchNormGradV2Attr is an optional argument to FusedBatchNormGradV2.
29958type FusedBatchNormGradV2Attr func(optionalAttr)
29959
29960// FusedBatchNormGradV2Epsilon sets the optional epsilon attribute to value.
29961//
29962// value: A small float number added to the variance of x.
29963// If not specified, defaults to 0.0001
29964func FusedBatchNormGradV2Epsilon(value float32) FusedBatchNormGradV2Attr {
29965	return func(m optionalAttr) {
29966		m["epsilon"] = value
29967	}
29968}
29969
29970// FusedBatchNormGradV2DataFormat sets the optional data_format attribute to value.
29971//
29972// value: The data format for y_backprop, x, x_backprop.
29973// Either "NHWC" (default) or "NCHW".
29974// If not specified, defaults to "NHWC"
29975func FusedBatchNormGradV2DataFormat(value string) FusedBatchNormGradV2Attr {
29976	return func(m optionalAttr) {
29977		m["data_format"] = value
29978	}
29979}
29980
29981// FusedBatchNormGradV2IsTraining sets the optional is_training attribute to value.
29982//
29983// value: A bool value to indicate the operation is for training (default)
29984// or inference.
29985// If not specified, defaults to true
29986func FusedBatchNormGradV2IsTraining(value bool) FusedBatchNormGradV2Attr {
29987	return func(m optionalAttr) {
29988		m["is_training"] = value
29989	}
29990}
29991
29992// Gradient for batch normalization.
29993//
29994// Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
29995// The size of 1D Tensors matches the dimension C of the 4D Tensors.
29996//
29997// Arguments:
29998//	y_backprop: A 4D Tensor for the gradient with respect to y.
29999//	x: A 4D Tensor for input data.
30000//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
30001//	reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
30002// mean to be reused in gradient computation. When is_training is
30003// False, a 1D Tensor for the population mean to be reused in both
30004// 1st and 2nd order gradient computation.
30005//	reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
30006// variance (inverted variance in the cuDNN case) to be reused in
30007// gradient computation. When is_training is False, a 1D Tensor
30008// for the population variance to be reused in both 1st and 2nd
30009// order gradient computation.
30010//
30011// Returns:
30012//	x_backprop: A 4D Tensor for the gradient with respect to x.
30013//	scale_backprop: A 1D Tensor for the gradient with respect to scale.
30014//	offset_backprop: A 1D Tensor for the gradient with respect to offset.
30015//	reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
30016//	reserve_space_4: Unused placeholder to match the variance input
30017// in FusedBatchNorm.
30018func FusedBatchNormGradV2(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradV2Attr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
30019	if scope.Err() != nil {
30020		return
30021	}
30022	attrs := map[string]interface{}{}
30023	for _, a := range optional {
30024		a(attrs)
30025	}
30026	opspec := tf.OpSpec{
30027		Type: "FusedBatchNormGradV2",
30028		Input: []tf.Input{
30029			y_backprop, x, scale, reserve_space_1, reserve_space_2,
30030		},
30031		Attrs: attrs,
30032	}
30033	op := scope.AddOperation(opspec)
30034	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
30035}
30036
30037// MapStageAttr is an optional argument to MapStage.
30038type MapStageAttr func(optionalAttr)
30039
30040// MapStageCapacity sets the optional capacity attribute to value.
30041//
30042// value: Maximum number of elements in the Staging Area. If > 0, inserts
30043// on the container will block when the capacity is reached.
30044// If not specified, defaults to 0
30045//
30046// REQUIRES: value >= 0
30047func MapStageCapacity(value int64) MapStageAttr {
30048	return func(m optionalAttr) {
30049		m["capacity"] = value
30050	}
30051}
30052
30053// MapStageMemoryLimit sets the optional memory_limit attribute to value.
30054// If not specified, defaults to 0
30055//
30056// REQUIRES: value >= 0
30057func MapStageMemoryLimit(value int64) MapStageAttr {
30058	return func(m optionalAttr) {
30059		m["memory_limit"] = value
30060	}
30061}
30062
30063// MapStageContainer sets the optional container attribute to value.
30064//
30065// value: If non-empty, this queue is placed in the given container. Otherwise,
30066// a default container is used.
30067// If not specified, defaults to ""
30068func MapStageContainer(value string) MapStageAttr {
30069	return func(m optionalAttr) {
30070		m["container"] = value
30071	}
30072}
30073
30074// MapStageSharedName sets the optional shared_name attribute to value.
30075//
30076// value: It is necessary to match this name to the matching Unstage Op.
30077// If not specified, defaults to ""
30078func MapStageSharedName(value string) MapStageAttr {
30079	return func(m optionalAttr) {
30080		m["shared_name"] = value
30081	}
30082}
30083
30084// Stage (key, values) in the underlying container which behaves like a hashtable.
30085//
30086// Arguments:
30087//	key: int64
30088//
30089//	values: A list of tensors.
30090//	dtypes: A list of data types that inserted values should adhere to.
30091//
30092//
30093// Returns the created operation.
30094func MapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...MapStageAttr) (o *tf.Operation) {
30095	if scope.Err() != nil {
30096		return
30097	}
30098	attrs := map[string]interface{}{"dtypes": dtypes}
30099	for _, a := range optional {
30100		a(attrs)
30101	}
30102	opspec := tf.OpSpec{
30103		Type: "MapStage",
30104		Input: []tf.Input{
30105			key, indices, tf.OutputList(values),
30106		},
30107		Attrs: attrs,
30108	}
30109	return scope.AddOperation(opspec)
30110}
30111
30112// Returns the truth value of (x >= y) element-wise.
30113//
30114// *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
30115// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
30116//
30117// Example:
30118//
30119// ```python
30120// x = tf.constant([5, 4, 6, 7])
30121// y = tf.constant([5, 2, 5, 10])
30122// tf.math.greater_equal(x, y) ==> [True, True, True, False]
30123//
30124// x = tf.constant([5, 4, 6, 7])
30125// y = tf.constant([5])
30126// tf.math.greater_equal(x, y) ==> [True, False, True, True]
30127// ```
30128func GreaterEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
30129	if scope.Err() != nil {
30130		return
30131	}
30132	opspec := tf.OpSpec{
30133		Type: "GreaterEqual",
30134		Input: []tf.Input{
30135			x, y,
30136		},
30137	}
30138	op := scope.AddOperation(opspec)
30139	return op.Output(0)
30140}
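
// The same comparison as the Python example above, sketched with this
// package's Go API (values illustrative):
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []int32{5, 4, 6, 7})
// y := op.Const(s, []int32{5, 2, 5, 10})
// z := op.GreaterEqual(s, x, y) // bool tensor [true true true false]
// _ = z
// ```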
30141
30142// QuantizeV2Attr is an optional argument to QuantizeV2.
30143type QuantizeV2Attr func(optionalAttr)
30144
30145// QuantizeV2Mode sets the optional mode attribute to value.
30146// If not specified, defaults to "MIN_COMBINED"
30147func QuantizeV2Mode(value string) QuantizeV2Attr {
30148	return func(m optionalAttr) {
30149		m["mode"] = value
30150	}
30151}
30152
30153// QuantizeV2RoundMode sets the optional round_mode attribute to value.
30154// If not specified, defaults to "HALF_AWAY_FROM_ZERO"
30155func QuantizeV2RoundMode(value string) QuantizeV2Attr {
30156	return func(m optionalAttr) {
30157		m["round_mode"] = value
30158	}
30159}
30160
30161// QuantizeV2NarrowRange sets the optional narrow_range attribute to value.
30162// If not specified, defaults to false
30163func QuantizeV2NarrowRange(value bool) QuantizeV2Attr {
30164	return func(m optionalAttr) {
30165		m["narrow_range"] = value
30166	}
30167}
30168
30169// QuantizeV2Axis sets the optional axis attribute to value.
30170// If not specified, defaults to -1
30171func QuantizeV2Axis(value int64) QuantizeV2Attr {
30172	return func(m optionalAttr) {
30173		m["axis"] = value
30174	}
30175}
30176
30177// QuantizeV2EnsureMinimumRange sets the optional ensure_minimum_range attribute to value.
30178// If not specified, defaults to 0.01
30179func QuantizeV2EnsureMinimumRange(value float32) QuantizeV2Attr {
30180	return func(m optionalAttr) {
30181		m["ensure_minimum_range"] = value
30182	}
30183}
30184
30185// Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
30186//
30187// [min_range, max_range] are scalar floats that specify the range for
30188// the 'input' data. The 'mode' attribute controls exactly which calculations are
30189// used to convert the float values to their quantized equivalents.  The
30190// 'round_mode' attribute controls which rounding tie-breaking algorithm is used
30191// when rounding float values to their quantized equivalents.
30192//
30193// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
30194//
30195// ```
30196// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
30197// if T == qint8: out[i] -= (range(T) + 1) / 2.0
30198// ```
30199//
30200// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
30201//
30202// *MIN_COMBINED Mode Example*
30203//
30204// Assume the input is type float and has a possible range of [0.0, 6.0] and the
30205// output type is quint8 ([0, 255]). The min_range and max_range values should be
30206// specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
30207// value of the input by 255/6 and cast to quint8.
30208//
30209// If the output type was qint8 ([-128, 127]), the operation will additionally
30210// subtract each value by 128 prior to casting, so that the range of values aligns
30211// with the range of qint8.
30212//
30213// If the mode is 'MIN_FIRST', then this approach is used:
30214//
30215// ```
30216// num_discrete_values = 1 << (# of bits in T)
30217// range_adjust = num_discrete_values / (num_discrete_values - 1)
30218// range = (range_max - range_min) * range_adjust
30219// range_scale = num_discrete_values / range
30220// quantized = round(input * range_scale) - round(range_min * range_scale) +
30221//   numeric_limits<T>::min()
30222// quantized = max(quantized, numeric_limits<T>::min())
30223// quantized = min(quantized, numeric_limits<T>::max())
30224// ```
30225//
30226// The biggest difference between this and MIN_COMBINED is that the minimum range
30227// is rounded first, before it's subtracted from the rounded value. With
30228// MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
30229// and dequantizing will introduce a larger and larger error.
30230//
30231// *SCALED mode Example*
30232//
30233// `SCALED` mode matches the quantization approach used in
30234// `QuantizeAndDequantize{V2|V3}`.
30235//
30236// If the mode is `SCALED`, the quantization is performed by multiplying each
30237// input value by a scaling_factor.
30238// The scaling_factor is determined from `min_range` and `max_range` to be as large
30239// as possible such that the range from `min_range` to `max_range` is representable
30240// within values of type T.
30241//
30242// ```c++
30243//
30244//   const int min_T = std::numeric_limits<T>::min();
30245//   const int max_T = std::numeric_limits<T>::max();
30246//   const float max_float = std::numeric_limits<float>::max();
30247//
30248//   const float scale_factor_from_min_side =
30249//       (min_T * min_range > 0) ? min_T / min_range : max_float;
30250//   const float scale_factor_from_max_side =
30251//       (max_T * max_range > 0) ? max_T / max_range : max_float;
30252//
30253//   const float scale_factor = std::min(scale_factor_from_min_side,
30254//                                       scale_factor_from_max_side);
30255// ```
30256//
30257// We next use the scale_factor to adjust min_range and max_range as follows:
30258//
30259// ```c++
30260//       min_range = min_T / scale_factor;
30261//       max_range = max_T / scale_factor;
30262// ```
30263//
30264//
30265// e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would
30266// compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8.
30267// In this case, min_range would remain -10, but max_range would be adjusted to
30268// 127 / 12.8 = 9.921875
30269//
30270// So we will quantize input values in the range (-10, 9.921875) to (-128, 127).
30271//
30272// The input tensor can now be quantized by clipping values to the range
30273// `min_range` to `max_range`, then multiplying by scale_factor as follows:
30274//
30275// ```c++
30276// result = round(min(max_range, max(min_range, input)) * scale_factor)
30277// ```
30278//
30279// The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of
30280// this operation. These outputs should be used as the range for any further
30281// calculations.
30282//
30283//
30284// *narrow_range (bool) attribute*
30285//
30286// If true, we do not use the minimum quantized value.
30287// i.e. for int8 output, the quantized values are restricted to the range
30288// -127..127 instead of the full -128..127 range.
30289// This is provided for compatibility with certain inference backends.
30290// (Only applies to SCALED mode)
30291//
30292//
30293// *axis (int) attribute*
30294//
30295// An optional `axis` attribute can specify a dimension index of the input tensor,
30296// such that quantization ranges will be calculated and applied separately for each
30297// slice of the tensor along that dimension. This is useful for per-channel
30298// quantization.
30299//
30300// If `axis` is specified, `min_range` and `max_range` must be 1-D tensors whose
30301// size matches the `axis` dimension of the input and output tensors.
30302// If `axis` is not specified, per-tensor quantization is performed as normal.
30303//
30304//
30305// *ensure_minimum_range (float) attribute*
30306//
30307// Ensures the minimum quantization range is at least this value.
30308// The legacy default value for this is 0.01, but it is strongly suggested to
30309// set it to 0 for new uses.
30310//
30311//
30312// Arguments:
30313//
30314//	min_range: The minimum value of the quantization range. This value may be adjusted by the
30315// op depending on other parameters. The adjusted value is written to `output_min`.
30316// If the `axis` attribute is specified, this must be a 1-D tensor whose size
30317// matches the `axis` dimension of the input and output tensors.
30318//	max_range: The maximum value of the quantization range. This value may be adjusted by the
30319// op depending on other parameters. The adjusted value is written to `output_max`.
30320// If the `axis` attribute is specified, this must be a 1-D tensor whose size
30321// matches the `axis` dimension of the input and output tensors.
30322//
30323//
30324// Returns:
30325//	output: The quantized data produced from the float input.
30326//	output_min: The final quantization range minimum, used to clip input values before scaling
30327// and rounding them to quantized values.
30328// If the `axis` attribute is specified, this will be a 1-D tensor whose size
30329// matches the `axis` dimension of the input and output tensors.
30330//	output_max: The final quantization range maximum, used to clip input values before scaling
30331// and rounding them to quantized values.
30332// If the `axis` attribute is specified, this will be a 1-D tensor whose size
30333// matches the `axis` dimension of the input and output tensors.
30334func QuantizeV2(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, T tf.DataType, optional ...QuantizeV2Attr) (output tf.Output, output_min tf.Output, output_max tf.Output) {
30335	if scope.Err() != nil {
30336		return
30337	}
30338	attrs := map[string]interface{}{"T": T}
30339	for _, a := range optional {
30340		a(attrs)
30341	}
30342	opspec := tf.OpSpec{
30343		Type: "QuantizeV2",
30344		Input: []tf.Input{
30345			input, min_range, max_range,
30346		},
30347		Attrs: attrs,
30348	}
30349	op := scope.AddOperation(opspec)
30350	return op.Output(0), op.Output(1), op.Output(2)
30351}
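
// A usage sketch (illustrative values and attributes): quantize floats in
// [0, 6] to quint8 and keep the adjusted range outputs for later use.
//
// ```go
// s := op.NewScope()
// input := op.Const(s, []float32{0.0, 1.5, 6.0})
// minRange := op.Const(s, float32(0))
// maxRange := op.Const(s, float32(6))
// out, outMin, outMax := op.QuantizeV2(s, input, minRange, maxRange, tf.Quint8,
// 	op.QuantizeV2RoundMode("HALF_AWAY_FROM_ZERO"))
// _, _, _ = out, outMin, outMax // use outMin/outMax for further calculations
// ```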
30352
30353// UnstageAttr is an optional argument to Unstage.
30354type UnstageAttr func(optionalAttr)
30355
30356// UnstageCapacity sets the optional capacity attribute to value.
30357// If not specified, defaults to 0
30358//
30359// REQUIRES: value >= 0
30360func UnstageCapacity(value int64) UnstageAttr {
30361	return func(m optionalAttr) {
30362		m["capacity"] = value
30363	}
30364}
30365
30366// UnstageMemoryLimit sets the optional memory_limit attribute to value.
30367// If not specified, defaults to 0
30368//
30369// REQUIRES: value >= 0
30370func UnstageMemoryLimit(value int64) UnstageAttr {
30371	return func(m optionalAttr) {
30372		m["memory_limit"] = value
30373	}
30374}
30375
30376// UnstageContainer sets the optional container attribute to value.
30377// If not specified, defaults to ""
30378func UnstageContainer(value string) UnstageAttr {
30379	return func(m optionalAttr) {
30380		m["container"] = value
30381	}
30382}
30383
30384// UnstageSharedName sets the optional shared_name attribute to value.
30385// If not specified, defaults to ""
30386func UnstageSharedName(value string) UnstageAttr {
30387	return func(m optionalAttr) {
30388		m["shared_name"] = value
30389	}
30390}
30391
30392// Op is similar to a lightweight Dequeue.
30393//
30394// The basic functionality is similar to dequeue with many fewer
30395// capabilities and options.  This Op is optimized for performance.
30396func Unstage(scope *Scope, dtypes []tf.DataType, optional ...UnstageAttr) (values []tf.Output) {
30397	if scope.Err() != nil {
30398		return
30399	}
30400	attrs := map[string]interface{}{"dtypes": dtypes}
30401	for _, a := range optional {
30402		a(attrs)
30403	}
30404	opspec := tf.OpSpec{
30405		Type: "Unstage",
30406
30407		Attrs: attrs,
30408	}
30409	op := scope.AddOperation(opspec)
30410	if scope.Err() != nil {
30411		return
30412	}
30413	var idx int
30414	var err error
30415	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
30416		scope.UpdateErr("Unstage", err)
30417		return
30418	}
30419	return values
30420}
30421
30422// Computes the gradient of the sigmoid of `x` wrt its input.
30423//
30424// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
30425// `dy` is the corresponding input gradient.
30426func SigmoidGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
30427	if scope.Err() != nil {
30428		return
30429	}
30430	opspec := tf.OpSpec{
30431		Type: "SigmoidGrad",
30432		Input: []tf.Input{
30433			y, dy,
30434		},
30435	}
30436	op := scope.AddOperation(opspec)
30437	return op.Output(0)
30438}
30439
30440// Creates a Tensor by indexing into the TensorList.
30441//
30442// Each row in the produced Tensor corresponds to the element in the TensorList
30443// specified by the given index (see `tf.gather`).
30444//
30445// input_handle: The input tensor list.
30446// indices: The indices used to index into the list.
30447// values: The tensor.
30448func TensorListGather(scope *Scope, input_handle tf.Output, indices tf.Output, element_shape tf.Output, element_dtype tf.DataType) (values tf.Output) {
30449	if scope.Err() != nil {
30450		return
30451	}
30452	attrs := map[string]interface{}{"element_dtype": element_dtype}
30453	opspec := tf.OpSpec{
30454		Type: "TensorListGather",
30455		Input: []tf.Input{
30456			input_handle, indices, element_shape,
30457		},
30458		Attrs: attrs,
30459	}
30460	op := scope.AddOperation(opspec)
30461	return op.Output(0)
30462}
30463
30464// Delete the tensor specified by its handle in the session.
30465//
30466// Arguments:
30467//	handle: The handle for a tensor stored in the session state.
30468//
30469// Returns the created operation.
30470func DeleteSessionTensor(scope *Scope, handle tf.Output) (o *tf.Operation) {
30471	if scope.Err() != nil {
30472		return
30473	}
30474	opspec := tf.OpSpec{
30475		Type: "DeleteSessionTensor",
30476		Input: []tf.Input{
30477			handle,
30478		},
30479	}
30480	return scope.AddOperation(opspec)
30481}
30482
30483// ResourceApplyAdagradDAAttr is an optional argument to ResourceApplyAdagradDA.
30484type ResourceApplyAdagradDAAttr func(optionalAttr)
30485
30486// ResourceApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
30487//
30488// value: If True, updating of the var and accum tensors will be protected by
30489// a lock; otherwise the behavior is undefined, but may exhibit less contention.
30490// If not specified, defaults to false
30491func ResourceApplyAdagradDAUseLocking(value bool) ResourceApplyAdagradDAAttr {
30492	return func(m optionalAttr) {
30493		m["use_locking"] = value
30494	}
30495}
30496
30497// Update '*var' according to the proximal adagrad scheme.
30498//
30499// Arguments:
30500//	var_: Should be from a Variable().
30501//	gradient_accumulator: Should be from a Variable().
30502//	gradient_squared_accumulator: Should be from a Variable().
30503//	grad: The gradient.
30504//	lr: Scaling factor. Must be a scalar.
30505//	l1: L1 regularization. Must be a scalar.
30506//	l2: L2 regularization. Must be a scalar.
30507//	global_step: Training step number. Must be a scalar.
30508//
30509// Returns the created operation.
30510func ResourceApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceApplyAdagradDAAttr) (o *tf.Operation) {
30511	if scope.Err() != nil {
30512		return
30513	}
30514	attrs := map[string]interface{}{}
30515	for _, a := range optional {
30516		a(attrs)
30517	}
30518	opspec := tf.OpSpec{
30519		Type: "ResourceApplyAdagradDA",
30520		Input: []tf.Input{
30521			var_, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step,
30522		},
30523		Attrs: attrs,
30524	}
30525	return scope.AddOperation(opspec)
30526}
30527
30528// SparseToDenseAttr is an optional argument to SparseToDense.
30529type SparseToDenseAttr func(optionalAttr)
30530
30531// SparseToDenseValidateIndices sets the optional validate_indices attribute to value.
30532//
30533// value: If true, indices are checked to make sure they are sorted in
30534// lexicographic order and that there are no repeats.
30535// If not specified, defaults to true
30536func SparseToDenseValidateIndices(value bool) SparseToDenseAttr {
30537	return func(m optionalAttr) {
30538		m["validate_indices"] = value
30539	}
30540}
30541
30542// Converts a sparse representation into a dense tensor.
30543//
30544// Builds an array `dense` with shape `output_shape` such that
30545//
30546// ```
30547// # If sparse_indices is scalar
30548// dense[i] = (i == sparse_indices ? sparse_values : default_value)
30549//
30550// # If sparse_indices is a vector, then for each i
30551// dense[sparse_indices[i]] = sparse_values[i]
30552//
30553// # If sparse_indices is an n by d matrix, then for each i in [0, n)
30554// dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
30555// ```
30556//
30557// All other values in `dense` are set to `default_value`.  If `sparse_values` is a
30558// scalar, all sparse indices are set to this single value.
30559//
30560// Indices should be sorted in lexicographic order, and indices must not
30561// contain any repeats. If `validate_indices` is true, these properties
30562// are checked during execution.
30563//
30564// Arguments:
30565//	sparse_indices: 0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete
30566// index where `sparse_values[i]` will be placed.
30567//	output_shape: 1-D.  Shape of the dense output tensor.
30568//	sparse_values: 1-D.  Values corresponding to each row of `sparse_indices`,
30569// or a scalar value to be used for all sparse indices.
30570//	default_value: Scalar value to set for indices not specified in
30571// `sparse_indices`.
30572//
30573// Returns Dense output tensor of shape `output_shape`.
30574func SparseToDense(scope *Scope, sparse_indices tf.Output, output_shape tf.Output, sparse_values tf.Output, default_value tf.Output, optional ...SparseToDenseAttr) (dense tf.Output) {
30575	if scope.Err() != nil {
30576		return
30577	}
30578	attrs := map[string]interface{}{}
30579	for _, a := range optional {
30580		a(attrs)
30581	}
30582	opspec := tf.OpSpec{
30583		Type: "SparseToDense",
30584		Input: []tf.Input{
30585			sparse_indices, output_shape, sparse_values, default_value,
30586		},
30587		Attrs: attrs,
30588	}
30589	op := scope.AddOperation(opspec)
30590	return op.Output(0)
30591}
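
// A usage sketch (illustrative values), following the vector case above:
// dense[sparse_indices[i]] = sparse_values[i], with the default value
// elsewhere.
//
// ```go
// s := op.NewScope()
// indices := op.Const(s, []int64{1, 3})
// outShape := op.Const(s, []int64{5})
// values := op.Const(s, []float32{10, 20})
// defaultVal := op.Const(s, float32(0))
// dense := op.SparseToDense(s, indices, outShape, values, defaultVal)
// _ = dense // [0 10 0 20 0]
// ```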
30592
30593// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
30594//
30595// Arguments:
30596//
30597//	thread_pool: A resource produced by the ThreadPoolHandle op.
30598//
30599//
30600func ThreadPoolDataset(scope *Scope, input_dataset tf.Output, thread_pool tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
30601	if scope.Err() != nil {
30602		return
30603	}
30604	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
30605	opspec := tf.OpSpec{
30606		Type: "ThreadPoolDataset",
30607		Input: []tf.Input{
30608			input_dataset, thread_pool,
30609		},
30610		Attrs: attrs,
30611	}
30612	op := scope.AddOperation(opspec)
30613	return op.Output(0)
30614}
30615
30616// Bitcasts a tensor from one type to another without copying data.
30617//
30618// Given a tensor `input`, this operation returns a tensor that has the same buffer
30619// data as `input` with datatype `type`.
30620//
30621// If the input datatype `T` is larger than the output datatype `type` then the
30622// shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
30623//
30624// If `T` is smaller than `type`, the operator requires that the rightmost
30625// dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
30626// [..., sizeof(`type`)/sizeof(`T`)] to [...].
30627//
30628// tf.bitcast() and tf.cast() work differently when a real dtype is cast to a
30629// complex dtype (e.g. tf.complex64 or tf.complex128): tf.cast() sets the imaginary
30630// part to 0, while tf.bitcast() raises an error, as the following examples show.
30632//
30633// Example 1:
30634//
30635// >>> a = [1., 2., 3.]
30636// >>> equality_bitcast = tf.bitcast(a, tf.complex128)
30637// Traceback (most recent call last):
30638// ...
30639// InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]
30640// >>> equality_cast = tf.cast(a, tf.complex128)
30641// >>> print(equality_cast)
30642// tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)
30643//
30644// Example 2:
30645//
30646// >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)
30647// <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)>
30648//
30649// Example 3:
30650//
30651// >>> x = [1., 2., 3.]
30652// >>> y = [0., 2., 3.]
30653// >>> equality= tf.equal(x,y)
30654// >>> equality_cast = tf.cast(equality,tf.float32)
30655// >>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8)
30656// >>> print(equality)
30657// tf.Tensor([False True True], shape=(3,), dtype=bool)
30658// >>> print(equality_cast)
30659// tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)
30660// >>> print(equality_bitcast)
30661// tf.Tensor(
30662//     [[  0   0   0   0]
30663//      [  0   0 128  63]
30664//      [  0   0 128  63]], shape=(3, 4), dtype=uint8)
30665//
30666// *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
30667// endian orderings will give different results.
30668func Bitcast(scope *Scope, input tf.Output, type_ tf.DataType) (output tf.Output) {
30669	if scope.Err() != nil {
30670		return
30671	}
30672	attrs := map[string]interface{}{"type": type_}
30673	opspec := tf.OpSpec{
30674		Type: "Bitcast",
30675		Input: []tf.Input{
30676			input,
30677		},
30678		Attrs: attrs,
30679	}
30680	op := scope.AddOperation(opspec)
30681	return op.Output(0)
30682}
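
// A Go sketch of Example 2 above (using int32(-1), which has the same bit
// pattern as uint32 0xffffffff; values illustrative):
//
// ```go
// s := op.NewScope()
// x := op.Const(s, int32(-1))
// y := op.Bitcast(s, x, tf.Uint8) // shape (4,): [255 255 255 255]
// _ = y
// ```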
30683
30684// Store the input tensor in the state of the current session.
30685//
30686// Arguments:
30687//	value: The tensor to be stored.
30688//
30689// Returns The handle for the tensor stored in the session state, represented
30690// as a ResourceHandle object.
30691func GetSessionHandleV2(scope *Scope, value tf.Output) (handle tf.Output) {
30692	if scope.Err() != nil {
30693		return
30694	}
30695	opspec := tf.OpSpec{
30696		Type: "GetSessionHandleV2",
30697		Input: []tf.Input{
30698			value,
30699		},
30700	}
30701	op := scope.AddOperation(opspec)
30702	return op.Output(0)
30703}
30704
30705// Enqueue multiple Tensor values on the computation outfeed.
30706//
30707// Arguments:
30708//	inputs: A list of tensors that will be inserted into the outfeed queue as an
30709// XLA tuple.
30710//
30711// Returns the created operation.
30712func OutfeedEnqueueTuple(scope *Scope, inputs []tf.Output) (o *tf.Operation) {
30713	if scope.Err() != nil {
30714		return
30715	}
30716	opspec := tf.OpSpec{
30717		Type: "OutfeedEnqueueTuple",
30718		Input: []tf.Input{
30719			tf.OutputList(inputs),
30720		},
30721	}
30722	return scope.AddOperation(opspec)
30723}
30724
30725// FakeQuantWithMinMaxVarsAttr is an optional argument to FakeQuantWithMinMaxVars.
30726type FakeQuantWithMinMaxVarsAttr func(optionalAttr)
30727
30728// FakeQuantWithMinMaxVarsNumBits sets the optional num_bits attribute to value.
30729// If not specified, defaults to 8
30730func FakeQuantWithMinMaxVarsNumBits(value int64) FakeQuantWithMinMaxVarsAttr {
30731	return func(m optionalAttr) {
30732		m["num_bits"] = value
30733	}
30734}
30735
30736// FakeQuantWithMinMaxVarsNarrowRange sets the optional narrow_range attribute to value.
30737// If not specified, defaults to false
30738func FakeQuantWithMinMaxVarsNarrowRange(value bool) FakeQuantWithMinMaxVarsAttr {
30739	return func(m optionalAttr) {
30740		m["narrow_range"] = value
30741	}
30742}
30743
30744// Fake-quantize the 'inputs' tensor of type float via global float scalars
30745//
30746// Fake-quantize the `inputs` tensor of type float via global float scalars
30747// `min` and `max` to `outputs` tensor of same shape as `inputs`.
30748//
30749// Attributes
30750//
30751// *   `[min; max]` define the clamping range for the `inputs` data.
30752// *   `inputs` values are quantized into the quantization range (
30753// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
30754// when it is true) and then de-quantized and output as floats in `[min; max]`
30755// interval.
30756// *   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
30757//
30758// Before quantization, `min` and `max` values are adjusted with the following
30759// logic.
30760// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
30761// the behavior can be unexpected:
30762//
30763// *   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
30764// *   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
30765// *   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
30766// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
30767//
30768// This operation has a gradient and thus allows for training `min` and `max`
30769// values.
30770func FakeQuantWithMinMaxVars(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsAttr) (outputs tf.Output) {
30771	if scope.Err() != nil {
30772		return
30773	}
30774	attrs := map[string]interface{}{}
30775	for _, a := range optional {
30776		a(attrs)
30777	}
30778	opspec := tf.OpSpec{
30779		Type: "FakeQuantWithMinMaxVars",
30780		Input: []tf.Input{
30781			inputs, min, max,
30782		},
30783		Attrs: attrs,
30784	}
30785	op := scope.AddOperation(opspec)
30786	return op.Output(0)
30787}
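
// exampleNudgeMinMax is a hand-written sketch of the min/max adjustment rules
// documented above, not the kernel's implementation. It assumes
// narrow_range == false (quantized range [0; 2^num_bits - 1]) and rounds
// halves away from zero. For min = -0.1, max = 0.9, numBits = 8 it yields
// minAdj ≈ -0.10196 and maxAdj ≈ 0.89804.
func exampleNudgeMinMax(min, max float64, numBits uint) (minAdj, maxAdj float64) {
	quantMax := float64(uint64(1)<<numBits) - 1 // 2^num_bits - 1
	switch {
	case min > 0: // 0 < min < max: shift the range down to start at 0.
		return 0, max - min
	case max < 0: // min < max < 0: shift the range up to end at 0.
		return min - max, 0
	default: // min <= 0 <= max: snap min to an integer multiple of the scale.
		scale := (max - min) / quantMax
		r := min / scale // nonpositive ratio to be rounded
		n := float64(int64(r))
		if r-n <= -0.5 {
			n-- // round half away from zero
		}
		minAdj = scale * n
		return minAdj, max + minAdj - min
	}
}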
30788
30789// BatchDatasetV2Attr is an optional argument to BatchDatasetV2.
30790type BatchDatasetV2Attr func(optionalAttr)
30791
30792// BatchDatasetV2ParallelCopy sets the optional parallel_copy attribute to value.
30793// If not specified, defaults to false
30794func BatchDatasetV2ParallelCopy(value bool) BatchDatasetV2Attr {
30795	return func(m optionalAttr) {
30796		m["parallel_copy"] = value
30797	}
30798}
30799
30800// Creates a dataset that batches `batch_size` elements from `input_dataset`.
30801//
30802// Arguments:
30803//
30804//	batch_size: A scalar representing the number of elements to accumulate in a batch.
30805//	drop_remainder: A scalar representing whether the last batch should be dropped in case its size
30806// is smaller than desired.
30807//
30808//
30809func BatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...BatchDatasetV2Attr) (handle tf.Output) {
30810	if scope.Err() != nil {
30811		return
30812	}
30813	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
30814	for _, a := range optional {
30815		a(attrs)
30816	}
30817	opspec := tf.OpSpec{
30818		Type: "BatchDatasetV2",
30819		Input: []tf.Input{
30820			input_dataset, batch_size, drop_remainder,
30821		},
30822		Attrs: attrs,
30823	}
30824	op := scope.AddOperation(opspec)
30825	return op.Output(0)
30826}
30827
30828// SelfAdjointEigV2Attr is an optional argument to SelfAdjointEigV2.
30829type SelfAdjointEigV2Attr func(optionalAttr)
30830
30831// SelfAdjointEigV2ComputeV sets the optional compute_v attribute to value.
30832//
30833// value: If `True` then eigenvectors will be computed and returned in `v`.
30834// Otherwise, only the eigenvalues will be computed.
30835// If not specified, defaults to true
30836func SelfAdjointEigV2ComputeV(value bool) SelfAdjointEigV2Attr {
30837	return func(m optionalAttr) {
30838		m["compute_v"] = value
30839	}
30840}
30841
30842// Computes the eigen decomposition of one or more square self-adjoint matrices.
30843//
30844// Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
30845// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues
30846// are sorted in non-decreasing order.
30847//
30848// ```python
30849// # a is a tensor.
30850// # e is a tensor of eigenvalues.
30851// # v is a tensor of eigenvectors.
30852// e, v = self_adjoint_eig(a)
30853// e = self_adjoint_eig(a, compute_v=False)
30854// ```
30855//
30856// Arguments:
30857//	input: `Tensor` input of shape `[N, N]`.
30858//
30859// Returns:
30860//	e: Eigenvalues. Shape is `[N]`.
30861//	v: Eigenvectors. Shape is `[N, N]`.
30862func SelfAdjointEigV2(scope *Scope, input tf.Output, optional ...SelfAdjointEigV2Attr) (e tf.Output, v tf.Output) {
30863	if scope.Err() != nil {
30864		return
30865	}
30866	attrs := map[string]interface{}{}
30867	for _, a := range optional {
30868		a(attrs)
30869	}
30870	opspec := tf.OpSpec{
30871		Type: "SelfAdjointEigV2",
30872		Input: []tf.Input{
30873			input,
30874		},
30875		Attrs: attrs,
30876	}
30877	op := scope.AddOperation(opspec)
30878	return op.Output(0), op.Output(1)
30879}
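
// exampleSelfAdjointEig is a hand-written usage sketch, not generated code:
// it requests the eigen decomposition of a single 2x2 symmetric matrix.
// Passing SelfAdjointEigV2ComputeV(false) would compute only the eigenvalues.
func exampleSelfAdjointEig() (e, v tf.Output) {
	s := NewScope()
	// A real symmetric (hence self-adjoint) matrix; its eigenvalues are 1 and 3.
	a := Const(s, [][]float32{{2, 1}, {1, 2}})
	return SelfAdjointEigV2(s, a)
}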
30880
30881// PackAttr is an optional argument to Pack.
30882type PackAttr func(optionalAttr)
30883
30884// PackAxis sets the optional axis attribute to value.
30885//
30886// value: Dimension along which to pack.  Negative values wrap around, so the
30887// valid range is `[-(R+1), R+1)`.
30888// If not specified, defaults to 0
30889func PackAxis(value int64) PackAttr {
30890	return func(m optionalAttr) {
30891		m["axis"] = value
30892	}
30893}
30894
30895// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
30896//
30897// Packs the `N` tensors in `values` into a tensor with rank one higher than each
30898// tensor in `values`, by packing them along the `axis` dimension.
30899// Given a list of tensors of shape `(A, B, C)`;
30900//
30901// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
30902// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
30903// Etc.
30904//
30905// For example:
30906//
30907// ```
30908// # 'x' is [1, 4]
30909// # 'y' is [2, 5]
30910// # 'z' is [3, 6]
30911// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
30912// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
30913// ```
30914//
30915// This is the opposite of `unpack`.
30916//
30917// Arguments:
//	values: Must be of the same shape and type.
30919//
30920// Returns The packed tensor.
30921func Pack(scope *Scope, values []tf.Output, optional ...PackAttr) (output tf.Output) {
30922	if scope.Err() != nil {
30923		return
30924	}
30925	attrs := map[string]interface{}{}
30926	for _, a := range optional {
30927		a(attrs)
30928	}
30929	opspec := tf.OpSpec{
30930		Type: "Pack",
30931		Input: []tf.Input{
30932			tf.OutputList(values),
30933		},
30934		Attrs: attrs,
30935	}
30936	op := scope.AddOperation(opspec)
30937	return op.Output(0)
30938}
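
// examplePack is a hand-written usage sketch, not generated code: it packs
// three rank-1 int32 constants into one rank-2 tensor along a new axis 0,
// mirroring the `pack([x, y, z])` example above. The values are illustrative.
func examplePack() tf.Output {
	s := NewScope()
	x := Const(s, []int32{1, 4})
	y := Const(s, []int32{2, 5})
	z := Const(s, []int32{3, 6})
	// The default axis 0 yields shape (3, 2): [[1, 4], [2, 5], [3, 6]].
	return Pack(s, []tf.Output{x, y, z})
}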
30939
// Converts a (possibly batched) CSRSparseMatrix to a SparseTensor.
30941//
30942// Arguments:
30943//	sparse_matrix: A (possibly batched) CSRSparseMatrix.
30944//
30945//
30946// Returns:
30947//	indices: SparseTensor indices.
30948//	values: SparseTensor values.
30949//	dense_shape: SparseTensor dense shape.
30950func CSRSparseMatrixToSparseTensor(scope *Scope, sparse_matrix tf.Output, type_ tf.DataType) (indices tf.Output, values tf.Output, dense_shape tf.Output) {
30951	if scope.Err() != nil {
30952		return
30953	}
30954	attrs := map[string]interface{}{"type": type_}
30955	opspec := tf.OpSpec{
30956		Type: "CSRSparseMatrixToSparseTensor",
30957		Input: []tf.Input{
30958			sparse_matrix,
30959		},
30960		Attrs: attrs,
30961	}
30962	op := scope.AddOperation(opspec)
30963	return op.Output(0), op.Output(1), op.Output(2)
30964}
30965
// Adds all input tensors element-wise.
//
//   Inputs must be of the same size and shape.
30969//
30970//   ```python
30971//   x = [9, 7, 10]
30972//   tf.math.add_n(x) ==> 26
30973//   ```
30974func AddN(scope *Scope, inputs []tf.Output) (sum tf.Output) {
30975	if scope.Err() != nil {
30976		return
30977	}
30978	opspec := tf.OpSpec{
30979		Type: "AddN",
30980		Input: []tf.Input{
30981			tf.OutputList(inputs),
30982		},
30983	}
30984	op := scope.AddOperation(opspec)
30985	return op.Output(0)
30986}
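
// exampleAddN is a hand-written usage sketch, not generated code: it sums two
// equally shaped tensors element-wise, the Go analogue of the Python snippet
// above. The constants are illustrative.
func exampleAddN() tf.Output {
	s := NewScope()
	a := Const(s, []int32{9, 7, 10})
	b := Const(s, []int32{1, 1, 1})
	// Element-wise sum: [10, 8, 11].
	return AddN(s, []tf.Output{a, b})
}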
30987
30988// TensorArrayV2Attr is an optional argument to TensorArrayV2.
30989type TensorArrayV2Attr func(optionalAttr)
30990
30991// TensorArrayV2ElementShape sets the optional element_shape attribute to value.
30992// If not specified, defaults to {unknown_rank:true}
30993func TensorArrayV2ElementShape(value tf.Shape) TensorArrayV2Attr {
30994	return func(m optionalAttr) {
30995		m["element_shape"] = value
30996	}
30997}
30998
30999// TensorArrayV2DynamicSize sets the optional dynamic_size attribute to value.
31000// If not specified, defaults to false
31001func TensorArrayV2DynamicSize(value bool) TensorArrayV2Attr {
31002	return func(m optionalAttr) {
31003		m["dynamic_size"] = value
31004	}
31005}
31006
31007// TensorArrayV2ClearAfterRead sets the optional clear_after_read attribute to value.
31008// If not specified, defaults to true
31009func TensorArrayV2ClearAfterRead(value bool) TensorArrayV2Attr {
31010	return func(m optionalAttr) {
31011		m["clear_after_read"] = value
31012	}
31013}
31014
31015// TensorArrayV2TensorArrayName sets the optional tensor_array_name attribute to value.
31016// If not specified, defaults to ""
31017func TensorArrayV2TensorArrayName(value string) TensorArrayV2Attr {
31018	return func(m optionalAttr) {
31019		m["tensor_array_name"] = value
31020	}
31021}
31022
31023// Deprecated. Use TensorArrayV3
31024//
31025// DEPRECATED at GraphDef version 26: Use TensorArrayV3
31026func TensorArrayV2(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV2Attr) (handle tf.Output) {
31027	if scope.Err() != nil {
31028		return
31029	}
31030	attrs := map[string]interface{}{"dtype": dtype}
31031	for _, a := range optional {
31032		a(attrs)
31033	}
31034	opspec := tf.OpSpec{
31035		Type: "TensorArrayV2",
31036		Input: []tf.Input{
31037			size,
31038		},
31039		Attrs: attrs,
31040	}
31041	op := scope.AddOperation(opspec)
31042	return op.Output(0)
31043}
31044
31045// ResourceApplyGradientDescentAttr is an optional argument to ResourceApplyGradientDescent.
31046type ResourceApplyGradientDescentAttr func(optionalAttr)
31047
31048// ResourceApplyGradientDescentUseLocking sets the optional use_locking attribute to value.
31049//
31050// value: If `True`, the subtraction will be protected by a lock;
31051// otherwise the behavior is undefined, but may exhibit less contention.
31052// If not specified, defaults to false
31053func ResourceApplyGradientDescentUseLocking(value bool) ResourceApplyGradientDescentAttr {
31054	return func(m optionalAttr) {
31055		m["use_locking"] = value
31056	}
31057}
31058
31059// Update '*var' by subtracting 'alpha' * 'delta' from it.
31060//
31061// Arguments:
31062//	var_: Should be from a Variable().
31063//	alpha: Scaling factor. Must be a scalar.
31064//	delta: The change.
31065//
31066// Returns the created operation.
31067func ResourceApplyGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, delta tf.Output, optional ...ResourceApplyGradientDescentAttr) (o *tf.Operation) {
31068	if scope.Err() != nil {
31069		return
31070	}
31071	attrs := map[string]interface{}{}
31072	for _, a := range optional {
31073		a(attrs)
31074	}
31075	opspec := tf.OpSpec{
31076		Type: "ResourceApplyGradientDescent",
31077		Input: []tf.Input{
31078			var_, alpha, delta,
31079		},
31080		Attrs: attrs,
31081	}
31082	return scope.AddOperation(opspec)
31083}
31084
31085// Computes the matrix logarithm of one or more square matrices:
31086//
31087//
31088// \\(log(exp(A)) = A\\)
31089//
31090// This op is only defined for complex matrices. If A is positive-definite and
31091// real, then casting to a complex matrix, taking the logarithm and casting back
31092// to a real matrix will give the correct result.
31093//
31094// This function computes the matrix logarithm using the Schur-Parlett algorithm.
31095// Details of the algorithm can be found in Section 11.6.2 of:
31096// Nicholas J. Higham, Functions of Matrices: Theory and Computation, SIAM 2008.
31097// ISBN 978-0-898716-46-7.
31098//
31099// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
31100// form square matrices. The output is a tensor of the same shape as the input
// containing the matrix logarithm for all input submatrices `[..., :, :]`.
31102//
31103// Arguments:
31104//	input: Shape is `[..., M, M]`.
31105//
31106// Returns Shape is `[..., M, M]`.
31107//
31108// @compatibility(scipy)
31109// Equivalent to scipy.linalg.logm
31110// @end_compatibility
31111func MatrixLogarithm(scope *Scope, input tf.Output) (output tf.Output) {
31112	if scope.Err() != nil {
31113		return
31114	}
31115	opspec := tf.OpSpec{
31116		Type: "MatrixLogarithm",
31117		Input: []tf.Input{
31118			input,
31119		},
31120	}
31121	op := scope.AddOperation(opspec)
31122	return op.Output(0)
31123}
31124
31125// ResourceSparseApplyProximalAdagradAttr is an optional argument to ResourceSparseApplyProximalAdagrad.
31126type ResourceSparseApplyProximalAdagradAttr func(optionalAttr)
31127
31128// ResourceSparseApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
31129//
31130// value: If True, updating of the var and accum tensors will be protected by
31131// a lock; otherwise the behavior is undefined, but may exhibit less contention.
31132// If not specified, defaults to false
31133func ResourceSparseApplyProximalAdagradUseLocking(value bool) ResourceSparseApplyProximalAdagradAttr {
31134	return func(m optionalAttr) {
31135		m["use_locking"] = value
31136	}
31137}
31138
// Sparsely updates entries in '*var' and '*accum' according to the FOBOS algorithm.
//
// That is, for the rows for which we have grad, var and accum are updated as follows:
31142// accum += grad * grad
31143// prox_v = var
31144// prox_v -= lr * grad * (1 / sqrt(accum))
31145// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
31146//
31147// Arguments:
31148//	var_: Should be from a Variable().
31149//	accum: Should be from a Variable().
31150//	lr: Learning rate. Must be a scalar.
31151//	l1: L1 regularization. Must be a scalar.
31152//	l2: L2 regularization. Must be a scalar.
31153//	grad: The gradient.
31154//	indices: A vector of indices into the first dimension of var and accum.
31155//
31156// Returns the created operation.
31157func ResourceSparseApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalAdagradAttr) (o *tf.Operation) {
31158	if scope.Err() != nil {
31159		return
31160	}
31161	attrs := map[string]interface{}{}
31162	for _, a := range optional {
31163		a(attrs)
31164	}
31165	opspec := tf.OpSpec{
31166		Type: "ResourceSparseApplyProximalAdagrad",
31167		Input: []tf.Input{
31168			var_, accum, lr, l1, l2, grad, indices,
31169		},
31170		Attrs: attrs,
31171	}
31172	return scope.AddOperation(opspec)
31173}
31174
31175// Computes numerical negative value element-wise.
31176//
31177// I.e., \\(y = -x\\).
31178func Neg(scope *Scope, x tf.Output) (y tf.Output) {
31179	if scope.Err() != nil {
31180		return
31181	}
31182	opspec := tf.OpSpec{
31183		Type: "Neg",
31184		Input: []tf.Input{
31185			x,
31186		},
31187	}
31188	op := scope.AddOperation(opspec)
31189	return op.Output(0)
31190}
31191
31192// Deprecated. Disallowed in GraphDef version >= 2.
31193//
31194// DEPRECATED at GraphDef version 2: Use AdjustContrastv2 instead
31195func AdjustContrast(scope *Scope, images tf.Output, contrast_factor tf.Output, min_value tf.Output, max_value tf.Output) (output tf.Output) {
31196	if scope.Err() != nil {
31197		return
31198	}
31199	opspec := tf.OpSpec{
31200		Type: "AdjustContrast",
31201		Input: []tf.Input{
31202			images, contrast_factor, min_value, max_value,
31203		},
31204	}
31205	op := scope.AddOperation(opspec)
31206	return op.Output(0)
31207}
31208
31209// Initializes the multi device iterator with the given dataset.
31210//
31211// Arguments:
31212//	dataset: Dataset to be iterated upon.
31213//	multi_device_iterator: A MultiDeviceIteratorResource.
31214//	max_buffer_size: The maximum size of the host side per device buffer to keep.
31215//
31216// Returns An int64 indicating which incarnation of the MultiDeviceIterator
31217// is running.
31218func MultiDeviceIteratorInit(scope *Scope, dataset tf.Output, multi_device_iterator tf.Output, max_buffer_size tf.Output) (incarnation_id tf.Output) {
31219	if scope.Err() != nil {
31220		return
31221	}
31222	opspec := tf.OpSpec{
31223		Type: "MultiDeviceIteratorInit",
31224		Input: []tf.Input{
31225			dataset, multi_device_iterator, max_buffer_size,
31226		},
31227	}
31228	op := scope.AddOperation(opspec)
31229	return op.Output(0)
31230}
31231
31232// Converts the quantized `input` tensor into a lower-precision `output`.
31233//
31234// Converts the quantized `input` tensor into a lower-precision `output`, using the
31235// output range specified with `requested_output_min` and `requested_output_max`.
31236//
31237// `[input_min, input_max]` are scalar floats that specify the range for the float
31238// interpretation of the `input` data. For example, if `input_min` is -1.0f and
31239// `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0
31240// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
31241//
31242// Arguments:
31243//
31244//	input_min: The float value that the minimum quantized input value represents.
31245//	input_max: The float value that the maximum quantized input value represents.
31246//	requested_output_min: The float value that the minimum quantized output value represents.
31247//	requested_output_max: The float value that the maximum quantized output value represents.
31248//	out_type: The type of the output. Should be a lower bit depth than Tinput.
31249//
31250// Returns:
31251//	output
31252//	output_min: The requested_output_min value is copied into this output.
31253//	output_max: The requested_output_max value is copied into this output.
31254func Requantize(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, requested_output_min tf.Output, requested_output_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output) {
31255	if scope.Err() != nil {
31256		return
31257	}
31258	attrs := map[string]interface{}{"out_type": out_type}
31259	opspec := tf.OpSpec{
31260		Type: "Requantize",
31261		Input: []tf.Input{
31262			input, input_min, input_max, requested_output_min, requested_output_max,
31263		},
31264		Attrs: attrs,
31265	}
31266	op := scope.AddOperation(opspec)
31267	return op.Output(0), op.Output(1), op.Output(2)
31268}
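
// exampleDequantizedValue is a hand-written sketch of the float
// interpretation described above, not the kernel's exact arithmetic: it maps
// a quint16 value linearly into [inputMin, inputMax]. With inputMin = -1 and
// inputMax = 1, a stored value of 0 reads as -1.0 and 65535 reads as 1.0, as
// in the example above.
func exampleDequantizedValue(v uint16, inputMin, inputMax float32) float32 {
	// v == 0 maps to inputMin; v == 65535 maps to inputMax.
	return inputMin + (float32(v)/65535)*(inputMax-inputMin)
}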
31269
31270// ComplexAbsAttr is an optional argument to ComplexAbs.
31271type ComplexAbsAttr func(optionalAttr)
31272
31273// ComplexAbsTout sets the optional Tout attribute to value.
31274// If not specified, defaults to DT_FLOAT
31275func ComplexAbsTout(value tf.DataType) ComplexAbsAttr {
31276	return func(m optionalAttr) {
31277		m["Tout"] = value
31278	}
31279}
31280
31281// Computes the complex absolute value of a tensor.
31282//
31283// Given a tensor `x` of complex numbers, this operation returns a tensor of type
31284// `float` or `double` that is the absolute value of each element in `x`. All
31285// elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
31286// value is computed as \\( \sqrt{a^2 + b^2}\\).
31287//
31288// For example:
31289//
31290// >>> x = tf.complex(3.0, 4.0)
31291// >>> print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy())
31292// 5.0
31293//
31294func ComplexAbs(scope *Scope, x tf.Output, optional ...ComplexAbsAttr) (y tf.Output) {
31295	if scope.Err() != nil {
31296		return
31297	}
31298	attrs := map[string]interface{}{}
31299	for _, a := range optional {
31300		a(attrs)
31301	}
31302	opspec := tf.OpSpec{
31303		Type: "ComplexAbs",
31304		Input: []tf.Input{
31305			x,
31306		},
31307		Attrs: attrs,
31308	}
31309	op := scope.AddOperation(opspec)
31310	return op.Output(0)
31311}
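
// exampleComplexAbs is a hand-written usage sketch, not generated code: it
// builds a graph node computing |3+4i| = 5, mirroring the doctest above.
func exampleComplexAbs() tf.Output {
	s := NewScope()
	x := Const(s, complex(float32(3), float32(4))) // a complex64 scalar
	return ComplexAbs(s, x)
}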
31312
31313// Converts one or more images from RGB to HSV.
31314//
31315// Outputs a tensor of the same shape as the `images` tensor, containing the HSV
// value of the pixels. The output is only well defined if the values in `images`
31317// are in `[0,1]`.
31318//
31319// `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
31320// `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
31321// corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
31322//
31323// Usage Example:
31324//
31325// >>> blue_image = tf.stack([
31326// ...    tf.zeros([5,5]),
31327// ...    tf.zeros([5,5]),
31328// ...    tf.ones([5,5])],
31329// ...    axis=-1)
31330// >>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image)
31331// >>> blue_hsv_image[0,0].numpy()
31332// array([0.6666667, 1. , 1. ], dtype=float32)
31333//
31334//
31335// Arguments:
31336//	images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
31337//
31338// Returns `images` converted to HSV.
31339func RGBToHSV(scope *Scope, images tf.Output) (output tf.Output) {
31340	if scope.Err() != nil {
31341		return
31342	}
31343	opspec := tf.OpSpec{
31344		Type: "RGBToHSV",
31345		Input: []tf.Input{
31346			images,
31347		},
31348	}
31349	op := scope.AddOperation(opspec)
31350	return op.Output(0)
31351}
31352
31353// ListDiffAttr is an optional argument to ListDiff.
31354type ListDiffAttr func(optionalAttr)
31355
31356// ListDiffOutIdx sets the optional out_idx attribute to value.
31357// If not specified, defaults to DT_INT32
31358func ListDiffOutIdx(value tf.DataType) ListDiffAttr {
31359	return func(m optionalAttr) {
31360		m["out_idx"] = value
31361	}
31362}
31363
31364// Computes the difference between two lists of numbers or strings.
31365//
31366// Given a list `x` and a list `y`, this operation returns a list `out` that
31367// represents all values that are in `x` but not in `y`. The returned list `out`
31368// is sorted in the same order that the numbers appear in `x` (duplicates are
31369// preserved). This operation also returns a list `idx` that represents the
31370// position of each `out` element in `x`. In other words:
31371//
31372// `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
31373//
31374// For example, given this input:
31375//
31376// ```
31377// x = [1, 2, 3, 4, 5, 6]
31378// y = [1, 3, 5]
31379// ```
31380//
31381// This operation would return:
31382//
31383// ```
31384// out ==> [2, 4, 6]
31385// idx ==> [1, 3, 5]
31386// ```
31387//
31388// Arguments:
31389//	x: 1-D. Values to keep.
31390//	y: 1-D. Values to remove.
31391//
31392// Returns:
31393//	out: 1-D. Values present in `x` but not in `y`.
31394//	idx: 1-D. Positions of `x` values preserved in `out`.
31395func ListDiff(scope *Scope, x tf.Output, y tf.Output, optional ...ListDiffAttr) (out tf.Output, idx tf.Output) {
31396	if scope.Err() != nil {
31397		return
31398	}
31399	attrs := map[string]interface{}{}
31400	for _, a := range optional {
31401		a(attrs)
31402	}
31403	opspec := tf.OpSpec{
31404		Type: "ListDiff",
31405		Input: []tf.Input{
31406			x, y,
31407		},
31408		Attrs: attrs,
31409	}
31410	op := scope.AddOperation(opspec)
31411	return op.Output(0), op.Output(1)
31412}
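
// exampleListDiff is a hand-written usage sketch, not generated code: it
// reproduces the worked example above, computing the values (and their
// positions) that are in x but not in y.
func exampleListDiff() (out, idx tf.Output) {
	s := NewScope()
	x := Const(s, []int32{1, 2, 3, 4, 5, 6})
	y := Const(s, []int32{1, 3, 5})
	// out ==> [2, 4, 6], idx ==> [1, 3, 5].
	return ListDiff(s, x, y)
}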
31413
31414// Deprecated. Use TensorArrayScatterV3
31415//
31416// DEPRECATED at GraphDef version 26: Use TensorArrayScatterV3
31417func TensorArrayScatterV2(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
31418	if scope.Err() != nil {
31419		return
31420	}
31421	opspec := tf.OpSpec{
31422		Type: "TensorArrayScatterV2",
31423		Input: []tf.Input{
31424			handle, indices, value, flow_in,
31425		},
31426	}
31427	op := scope.AddOperation(opspec)
31428	return op.Output(0)
31429}
31430
31431// RFFT3DAttr is an optional argument to RFFT3D.
31432type RFFT3DAttr func(optionalAttr)
31433
31434// RFFT3DTcomplex sets the optional Tcomplex attribute to value.
31435// If not specified, defaults to DT_COMPLEX64
31436func RFFT3DTcomplex(value tf.DataType) RFFT3DAttr {
31437	return func(m optionalAttr) {
31438		m["Tcomplex"] = value
31439	}
31440}
31441
31442// 3D real-valued fast Fourier transform.
31443//
31444// Computes the 3-dimensional discrete Fourier transform of a real-valued signal
31445// over the inner-most 3 dimensions of `input`.
31446//
31447// Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
31448// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
31449// of `output`: the zero-frequency term, followed by the `fft_length / 2`
31450// positive-frequency terms.
31451//
31452// Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
31453// corresponding dimension of `input`, the dimension is cropped. If it is larger,
31454// the dimension is padded with zeros.
31455//
31456// Arguments:
31457//	input: A float32 tensor.
31458//	fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
31459//
31460// Returns A complex64 tensor of the same rank as `input`. The inner-most 3
//   dimensions of `input` are replaced with their 3D Fourier transform. The
31462//   inner-most dimension contains `fft_length / 2 + 1` unique frequency
31463//   components.
31464//
31465// @compatibility(numpy)
31466// Equivalent to np.fft.rfftn with 3 dimensions.
31467// @end_compatibility
31468func RFFT3D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFT3DAttr) (output tf.Output) {
31469	if scope.Err() != nil {
31470		return
31471	}
31472	attrs := map[string]interface{}{}
31473	for _, a := range optional {
31474		a(attrs)
31475	}
31476	opspec := tf.OpSpec{
31477		Type: "RFFT3D",
31478		Input: []tf.Input{
31479			input, fft_length,
31480		},
31481		Attrs: attrs,
31482	}
31483	op := scope.AddOperation(opspec)
31484	return op.Output(0)
31485}
31486
31487// TensorArrayGatherV2Attr is an optional argument to TensorArrayGatherV2.
31488type TensorArrayGatherV2Attr func(optionalAttr)
31489
31490// TensorArrayGatherV2ElementShape sets the optional element_shape attribute to value.
31491// If not specified, defaults to {unknown_rank:true}
31492func TensorArrayGatherV2ElementShape(value tf.Shape) TensorArrayGatherV2Attr {
31493	return func(m optionalAttr) {
31494		m["element_shape"] = value
31495	}
31496}
31497
31498// Deprecated. Use TensorArrayGatherV3
31499//
31500// DEPRECATED at GraphDef version 26: Use TensorArrayGatherV3
31501func TensorArrayGatherV2(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV2Attr) (value tf.Output) {
31502	if scope.Err() != nil {
31503		return
31504	}
31505	attrs := map[string]interface{}{"dtype": dtype}
31506	for _, a := range optional {
31507		a(attrs)
31508	}
31509	opspec := tf.OpSpec{
31510		Type: "TensorArrayGatherV2",
31511		Input: []tf.Input{
31512			handle, indices, flow_in,
31513		},
31514		Attrs: attrs,
31515	}
31516	op := scope.AddOperation(opspec)
31517	return op.Output(0)
31518}
31519
31520// Restore a reader to a previously saved state.
31521//
31522// Not all Readers support being restored, so this can produce an
31523// Unimplemented error.
31524//
31525// Arguments:
31526//	reader_handle: Handle to a Reader.
31527//	state: Result of a ReaderSerializeState of a Reader with type
31528// matching reader_handle.
31529//
31530// Returns the created operation.
31531func ReaderRestoreStateV2(scope *Scope, reader_handle tf.Output, state tf.Output) (o *tf.Operation) {
31532	if scope.Err() != nil {
31533		return
31534	}
31535	opspec := tf.OpSpec{
31536		Type: "ReaderRestoreStateV2",
31537		Input: []tf.Input{
31538			reader_handle, state,
31539		},
31540	}
31541	return scope.AddOperation(opspec)
31542}
31543
31544// StagePeekAttr is an optional argument to StagePeek.
31545type StagePeekAttr func(optionalAttr)
31546
31547// StagePeekCapacity sets the optional capacity attribute to value.
31548// If not specified, defaults to 0
31549//
31550// REQUIRES: value >= 0
31551func StagePeekCapacity(value int64) StagePeekAttr {
31552	return func(m optionalAttr) {
31553		m["capacity"] = value
31554	}
31555}
31556
31557// StagePeekMemoryLimit sets the optional memory_limit attribute to value.
31558// If not specified, defaults to 0
31559//
31560// REQUIRES: value >= 0
31561func StagePeekMemoryLimit(value int64) StagePeekAttr {
31562	return func(m optionalAttr) {
31563		m["memory_limit"] = value
31564	}
31565}
31566
31567// StagePeekContainer sets the optional container attribute to value.
31568// If not specified, defaults to ""
31569func StagePeekContainer(value string) StagePeekAttr {
31570	return func(m optionalAttr) {
31571		m["container"] = value
31572	}
31573}
31574
31575// StagePeekSharedName sets the optional shared_name attribute to value.
31576// If not specified, defaults to ""
31577func StagePeekSharedName(value string) StagePeekAttr {
31578	return func(m optionalAttr) {
31579		m["shared_name"] = value
31580	}
31581}
31582
// Op peeks at the values at the specified index.
//
// If the underlying container does not contain sufficient elements,
// this op will block until it does. This op is optimized for
// performance.
31588func StagePeek(scope *Scope, index tf.Output, dtypes []tf.DataType, optional ...StagePeekAttr) (values []tf.Output) {
31589	if scope.Err() != nil {
31590		return
31591	}
31592	attrs := map[string]interface{}{"dtypes": dtypes}
31593	for _, a := range optional {
31594		a(attrs)
31595	}
31596	opspec := tf.OpSpec{
31597		Type: "StagePeek",
31598		Input: []tf.Input{
31599			index,
31600		},
31601		Attrs: attrs,
31602	}
31603	op := scope.AddOperation(opspec)
31604	if scope.Err() != nil {
31605		return
31606	}
31607	var idx int
31608	var err error
31609	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
31610		scope.UpdateErr("StagePeek", err)
31611		return
31612	}
31613	return values
31614}
31615
31616// Deprecated. Use TensorArrayReadV3
31617//
31618// DEPRECATED at GraphDef version 26: Use TensorArrayReadV3
31619func TensorArrayReadV2(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output) {
31620	if scope.Err() != nil {
31621		return
31622	}
31623	attrs := map[string]interface{}{"dtype": dtype}
31624	opspec := tf.OpSpec{
31625		Type: "TensorArrayReadV2",
31626		Input: []tf.Input{
31627			handle, index, flow_in,
31628		},
31629		Attrs: attrs,
31630	}
31631	op := scope.AddOperation(opspec)
31632	return op.Output(0)
31633}
31634
31635// AudioSpectrogramAttr is an optional argument to AudioSpectrogram.
31636type AudioSpectrogramAttr func(optionalAttr)
31637
31638// AudioSpectrogramMagnitudeSquared sets the optional magnitude_squared attribute to value.
31639//
31640// value: Whether to return the squared magnitude or just the
31641// magnitude. Using squared magnitude can avoid extra calculations.
31642// If not specified, defaults to false
31643func AudioSpectrogramMagnitudeSquared(value bool) AudioSpectrogramAttr {
31644	return func(m optionalAttr) {
31645		m["magnitude_squared"] = value
31646	}
31647}
31648
31649// Produces a visualization of audio data over time.
31650//
31651// Spectrograms are a standard way of representing audio information as a series of
31652// slices of frequency information, one slice for each window of time. By joining
31653// these together into a sequence, they form a distinctive fingerprint of the sound
31654// over time.
31655//
31656// This op expects to receive audio data as an input, stored as floats in the range
31657// -1 to 1, together with a window width in samples, and a stride specifying how
31658// far to move the window between slices. From this it generates a three
31659// dimensional output. The first dimension is for the channels in the input, so a
// stereo audio input would have two here, for example. The second dimension is time,
31661// with successive frequency slices. The third dimension has an amplitude value for
31662// each frequency during that time slice.
31663//
31664// This means the layout when converted and saved as an image is rotated 90 degrees
31665// clockwise from a typical spectrogram. Time is descending down the Y axis, and
31666// the frequency decreases from left to right.
31667//
// Each value in the result represents the square root of the sum of the squares
// of the real and imaginary parts (i.e., the magnitude) of an FFT on the
// current window of samples. In this way, the
31670// lowest dimension represents the power of each frequency in the current window,
31671// and adjacent windows are concatenated in the next dimension.
31672//
31673// To get a more intuitive and visual look at what this operation does, you can run
31674// tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
31675// resulting spectrogram as a PNG image.
31676//
31677// Arguments:
31678//	input: Float representation of audio data.
31679//	window_size: How wide the input window is in samples. For the highest efficiency
31680// this should be a power of two, but other values are accepted.
31681//	stride: How widely apart the center of adjacent sample windows should be.
31682//
31683// Returns 3D representation of the audio frequencies as an image.
31684func AudioSpectrogram(scope *Scope, input tf.Output, window_size int64, stride int64, optional ...AudioSpectrogramAttr) (spectrogram tf.Output) {
31685	if scope.Err() != nil {
31686		return
31687	}
31688	attrs := map[string]interface{}{"window_size": window_size, "stride": stride}
31689	for _, a := range optional {
31690		a(attrs)
31691	}
31692	opspec := tf.OpSpec{
31693		Type: "AudioSpectrogram",
31694		Input: []tf.Input{
31695			input,
31696		},
31697		Attrs: attrs,
31698	}
31699	op := scope.AddOperation(opspec)
31700	return op.Output(0)
31701}
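
// exampleAudioSpectrogram is a hand-written usage sketch, not generated code.
// It assumes the conventional [samples, channels] layout for the audio input
// (the layout produced by DecodeWav); the sample values, window size, and
// stride are illustrative.
func exampleAudioSpectrogram() tf.Output {
	s := NewScope()
	// Eight mono samples in [-1, 1].
	audio := Const(s, [][]float32{{0}, {0.5}, {-0.5}, {1}, {-1}, {0.25}, {-0.25}, {0}})
	// A 4-sample window moved 2 samples per slice; power-of-two windows are fastest.
	return AudioSpectrogram(s, audio, 4, 2)
}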
31702
31703// Adds two `SparseTensor` objects to produce another `SparseTensor`.
31704//
31705// The input `SparseTensor` objects' indices are assumed ordered in standard
31706// lexicographic order.  If this is not the case, before this step run
31707// `SparseReorder` to restore index ordering.
31708//
31709// By default, if two values sum to zero at some index, the output `SparseTensor`
31710// would still include that particular location in its index, storing a zero in the
31711// corresponding value slot.  To override this, callers can specify `thresh`,
31712// indicating that if the sum has a magnitude strictly smaller than `thresh`, its
31713// corresponding value and index would then not be included.  In particular,
31714// `thresh == 0` (default) means everything is kept and actual thresholding happens
31715// only for a positive value.
31716//
31717// In the following shapes, `nnz` is the count after taking `thresh` into account.
31718//
31719// Arguments:
31720//	a_indices: 2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
31721//	a_values: 1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector.
31722//	a_shape: 1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector.
31723//	b_indices: 2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.
31724//	b_values: 1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector.
31725//	b_shape: 1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
31726//	thresh: 0-D.  The magnitude threshold that determines if an output value/index
31727// pair takes space.
31728func SparseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output, thresh tf.Output) (sum_indices tf.Output, sum_values tf.Output, sum_shape tf.Output) {
31729	if scope.Err() != nil {
31730		return
31731	}
31732	opspec := tf.OpSpec{
31733		Type: "SparseAdd",
31734		Input: []tf.Input{
31735			a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh,
31736		},
31737	}
31738	op := scope.AddOperation(opspec)
31739	return op.Output(0), op.Output(1), op.Output(2)
31740}
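
// exampleSparseAdd is a hand-written usage sketch, not generated code: it adds
// two 1-D SparseTensors with dense shape [3]. thresh = 0 keeps every summed
// entry, per the description above; the values are illustrative.
func exampleSparseAdd() (sumIndices, sumValues, sumShape tf.Output) {
	s := NewScope()
	aIndices := Const(s, [][]int64{{0}, {2}}) // a is dense [1, 0, 2]
	aValues := Const(s, []float32{1, 2})
	aShape := Const(s, []int64{3})
	bIndices := Const(s, [][]int64{{0}, {1}}) // b is dense [3, 4, 0]
	bValues := Const(s, []float32{3, 4})
	bShape := Const(s, []int64{3})
	thresh := Const(s, float32(0))
	// The sum is dense [4, 4, 2]: indices {0}, {1}, {2} with values 4, 4, 2.
	return SparseAdd(s, aIndices, aValues, aShape, bIndices, bValues, bShape, thresh)
}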
31741
31742// Elementwise computes the bitwise XOR of `x` and `y`.
31743//
// The result has a bit set wherever the corresponding bits of `x` and `y`
// differ. The computation is performed on the underlying representations of
// `x` and `y`.
31746//
31747// For example:
31748//
31749// ```python
31750// import tensorflow as tf
31751// from tensorflow.python.ops import bitwise_ops
31752// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
31753//               tf.uint8, tf.uint16, tf.uint32, tf.uint64]
31754//
31755// for dtype in dtype_list:
31756//   lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
31757//   rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
31758//   exp = tf.constant([5, 5, 4, 5],  dtype=tf.float32)
31759//
31760//   res = bitwise_ops.bitwise_xor(lhs, rhs)
31761//   tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
31762// ```
31763//
31764func BitwiseXor(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
31765	if scope.Err() != nil {
31766		return
31767	}
31768	opspec := tf.OpSpec{
31769		Type: "BitwiseXor",
31770		Input: []tf.Input{
31771			x, y,
31772		},
31773	}
31774	op := scope.AddOperation(opspec)
31775	return op.Output(0)
31776}
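
// exampleBitwiseXor is a hand-written Go analogue of the Python snippet
// above, not generated code; it XORs two int32 tensors bit by bit.
func exampleBitwiseXor() tf.Output {
	s := NewScope()
	lhs := Const(s, []int32{0, 5, 3, 14})
	rhs := Const(s, []int32{5, 0, 7, 11})
	// Result: [5, 5, 4, 5], matching the expected values above.
	return BitwiseXor(s, lhs, rhs)
}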
31777
31778// DenseToSparseSetOperationAttr is an optional argument to DenseToSparseSetOperation.
31779type DenseToSparseSetOperationAttr func(optionalAttr)
31780
31781// DenseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value.
31782// If not specified, defaults to true
31783func DenseToSparseSetOperationValidateIndices(value bool) DenseToSparseSetOperationAttr {
31784	return func(m optionalAttr) {
31785		m["validate_indices"] = value
31786	}
31787}
31788
31789// Applies set operation along last dimension of `Tensor` and `SparseTensor`.
31790//
31791// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
31792//
31793// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
31794// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
// as `set1`. Dimension `n` contains values in a set; duplicates are allowed but
31796// ignored.
31797//
31798// If `validate_indices` is `True`, this op validates the order and range of `set2`
31799// indices.
31800//
31801// Output `result` is a `SparseTensor` represented by `result_indices`,
31802// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
31803// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
31804// dimension contains the result of `set_operation` applied to the corresponding
31805// `[0...n-1]` dimension of `set`.
31806//
31807// Arguments:
31808//	set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
// Dimension `n` contains values in a set; duplicates are allowed but ignored.
31810//	set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
31811// order.
31812//	set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
31813// order.
31814//	set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
31815// be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the
31816// max set size across `n-1` dimensions.
31817//
31818//
31819// Returns:
31820//	result_indices: 2D indices of a `SparseTensor`.
31821//	result_values: 1D values of a `SparseTensor`.
31822//	result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
31823// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
31824// is the max result set size across all `0...n-1` dimensions.
31825func DenseToSparseSetOperation(scope *Scope, set1 tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...DenseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
31826	if scope.Err() != nil {
31827		return
31828	}
31829	attrs := map[string]interface{}{"set_operation": set_operation}
31830	for _, a := range optional {
31831		a(attrs)
31832	}
31833	opspec := tf.OpSpec{
31834		Type: "DenseToSparseSetOperation",
31835		Input: []tf.Input{
31836			set1, set2_indices, set2_values, set2_shape,
31837		},
31838		Attrs: attrs,
31839	}
31840	op := scope.AddOperation(opspec)
31841	return op.Output(0), op.Output(1), op.Output(2)
31842}
31843
// Deprecated. Use TensorArrayWriteV3
31845//
31846// DEPRECATED at GraphDef version 26: Use TensorArrayWriteV3
31847func TensorArrayWriteV2(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
31848	if scope.Err() != nil {
31849		return
31850	}
31851	opspec := tf.OpSpec{
31852		Type: "TensorArrayWriteV2",
31853		Input: []tf.Input{
31854			handle, index, value, flow_in,
31855		},
31856	}
31857	op := scope.AddOperation(opspec)
31858	return op.Output(0)
31859}
31860
31861// Computes the gradient of `igamma(a, x)` wrt `a`.
31862func IgammaGradA(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
31863	if scope.Err() != nil {
31864		return
31865	}
31866	opspec := tf.OpSpec{
31867		Type: "IgammaGradA",
31868		Input: []tf.Input{
31869			a, x,
31870		},
31871	}
31872	op := scope.AddOperation(opspec)
31873	return op.Output(0)
31874}
31875
31876// Shuffle dimensions of x according to a permutation.
31877//
31878// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
31879//   `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
31880func Transpose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
31881	if scope.Err() != nil {
31882		return
31883	}
31884	opspec := tf.OpSpec{
31885		Type: "Transpose",
31886		Input: []tf.Input{
31887			x, perm,
31888		},
31889	}
31890	op := scope.AddOperation(opspec)
31891	return op.Output(0)
31892}
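
// exampleTranspose is a hand-written usage sketch, not generated code: it
// swaps the two axes of a 2x2 matrix, so y.shape[i] == x.shape[perm[i]].
func exampleTranspose() tf.Output {
	s := NewScope()
	x := Const(s, [][]int32{{1, 2}, {3, 4}})
	perm := Const(s, []int32{1, 0}) // exchange dimensions 0 and 1
	// Result: [[1, 3], [2, 4]].
	return Transpose(s, x, perm)
}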
31893
31894// AssertAttr is an optional argument to Assert.
31895type AssertAttr func(optionalAttr)
31896
31897// AssertSummarize sets the optional summarize attribute to value.
31898//
31899// value: Print this many entries of each tensor.
31900// If not specified, defaults to 3
31901func AssertSummarize(value int64) AssertAttr {
31902	return func(m optionalAttr) {
31903		m["summarize"] = value
31904	}
31905}
31906
31907// Asserts that the given condition is true.
31908//
31909// If `condition` evaluates to false, print the list of tensors in `data`.
31910// `summarize` determines how many entries of the tensors to print.
31911//
31912// Arguments:
31913//	condition: The condition to evaluate.
31914//	data: The tensors to print out when condition is false.
31915//
31916// Returns the created operation.
31917func Assert(scope *Scope, condition tf.Output, data []tf.Output, optional ...AssertAttr) (o *tf.Operation) {
31918	if scope.Err() != nil {
31919		return
31920	}
31921	attrs := map[string]interface{}{}
31922	for _, a := range optional {
31923		a(attrs)
31924	}
31925	opspec := tf.OpSpec{
31926		Type: "Assert",
31927		Input: []tf.Input{
31928			condition, tf.OutputList(data),
31929		},
31930		Attrs: attrs,
31931	}
31932	return scope.AddOperation(opspec)
31933}
31934
31935// Computes the matrix square root of one or more square matrices:
31936//
31937// matmul(sqrtm(A), sqrtm(A)) = A
31938//
31939// The input matrix should be invertible. If the input matrix is real, it should
31940// have no eigenvalues which are real and negative (pairs of complex conjugate
31941// eigenvalues are allowed).
31942//
31943// The matrix square root is computed by first reducing the matrix to
31944// quasi-triangular form with the real Schur decomposition. The square root
31945// of the quasi-triangular matrix is then computed directly. Details of
31946// the algorithm can be found in: Nicholas J. Higham, "Computing real
31947// square roots of a real matrix", Linear Algebra Appl., 1987.
31948//
31949// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
31950// form square matrices. The output is a tensor of the same shape as the input
31951// containing the matrix square root for all input submatrices `[..., :, :]`.
31952//
31953// Arguments:
31954//	input: Shape is `[..., M, M]`.
31955//
31956// Returns Shape is `[..., M, M]`.
31957//
31958// @compatibility(scipy)
31959// Equivalent to scipy.linalg.sqrtm
31960// @end_compatibility
31961func MatrixSquareRoot(scope *Scope, input tf.Output) (output tf.Output) {
31962	if scope.Err() != nil {
31963		return
31964	}
31965	opspec := tf.OpSpec{
31966		Type: "MatrixSquareRoot",
31967		Input: []tf.Input{
31968			input,
31969		},
31970	}
31971	op := scope.AddOperation(opspec)
31972	return op.Output(0)
31973}
31974
31975// CollectiveBcastRecvAttr is an optional argument to CollectiveBcastRecv.
31976type CollectiveBcastRecvAttr func(optionalAttr)
31977
31978// CollectiveBcastRecvCommunicationHint sets the optional communication_hint attribute to value.
31979// If not specified, defaults to "auto"
31980func CollectiveBcastRecvCommunicationHint(value string) CollectiveBcastRecvAttr {
31981	return func(m optionalAttr) {
31982		m["communication_hint"] = value
31983	}
31984}
31985
31986// CollectiveBcastRecvTimeoutSeconds sets the optional timeout_seconds attribute to value.
31987// If not specified, defaults to 0
31988func CollectiveBcastRecvTimeoutSeconds(value float32) CollectiveBcastRecvAttr {
31989	return func(m optionalAttr) {
31990		m["timeout_seconds"] = value
31991	}
31992}
31993
31994// Receives a tensor value broadcast from another device.
31995func CollectiveBcastRecv(scope *Scope, T tf.DataType, group_size int64, group_key int64, instance_key int64, shape tf.Shape, optional ...CollectiveBcastRecvAttr) (data tf.Output) {
31996	if scope.Err() != nil {
31997		return
31998	}
31999	attrs := map[string]interface{}{"T": T, "group_size": group_size, "group_key": group_key, "instance_key": instance_key, "shape": shape}
32000	for _, a := range optional {
32001		a(attrs)
32002	}
32003	opspec := tf.OpSpec{
32004		Type: "CollectiveBcastRecv",
32005
32006		Attrs: attrs,
32007	}
32008	op := scope.AddOperation(opspec)
32009	return op.Output(0)
32010}
32011
32012// Scatter the data from the input value into specific TensorArray elements.
32013//
// `indices` must be a vector, and its length must match the first dim of `value`.
32015//
32016// Arguments:
32017//	handle: The handle to a TensorArray.
32018//	indices: The locations at which to write the tensor elements.
32019//	value: The concatenated tensor to write to the TensorArray.
32020//	flow_in: A float scalar that enforces proper chaining of operations.
32021//
32022// Returns A float scalar that enforces proper chaining of operations.
32023func TensorArrayScatterV3(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
32024	if scope.Err() != nil {
32025		return
32026	}
32027	opspec := tf.OpSpec{
32028		Type: "TensorArrayScatterV3",
32029		Input: []tf.Input{
32030			handle, indices, value, flow_in,
32031		},
32032	}
32033	op := scope.AddOperation(opspec)
32034	return op.Output(0)
32035}
32036
32037// Fetches multiple values from infeed as an XLA tuple.
32038//
32039// Arguments:
32040//	dtypes: The element types of each element in `outputs`.
32041//	shapes: The shapes of each tensor in `outputs`.
32042//
32043// Returns A list of tensors that will be provided using the infeed mechanism.
32044func InfeedDequeueTuple(scope *Scope, dtypes []tf.DataType, shapes []tf.Shape) (outputs []tf.Output) {
32045	if scope.Err() != nil {
32046		return
32047	}
32048	attrs := map[string]interface{}{"dtypes": dtypes, "shapes": shapes}
32049	opspec := tf.OpSpec{
32050		Type: "InfeedDequeueTuple",
32051
32052		Attrs: attrs,
32053	}
32054	op := scope.AddOperation(opspec)
32055	if scope.Err() != nil {
32056		return
32057	}
32058	var idx int
32059	var err error
32060	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
32061		scope.UpdateErr("InfeedDequeueTuple", err)
32062		return
32063	}
32064	return outputs
32065}
32066
32067// DataServiceDatasetV2Attr is an optional argument to DataServiceDatasetV2.
32068type DataServiceDatasetV2Attr func(optionalAttr)
32069
32070// DataServiceDatasetV2TaskRefreshIntervalHintMs sets the optional task_refresh_interval_hint_ms attribute to value.
32071// If not specified, defaults to -1
32072func DataServiceDatasetV2TaskRefreshIntervalHintMs(value int64) DataServiceDatasetV2Attr {
32073	return func(m optionalAttr) {
32074		m["task_refresh_interval_hint_ms"] = value
32075	}
32076}
32077
32078// DataServiceDatasetV2DataTransferProtocol sets the optional data_transfer_protocol attribute to value.
32079// If not specified, defaults to ""
32080func DataServiceDatasetV2DataTransferProtocol(value string) DataServiceDatasetV2Attr {
32081	return func(m optionalAttr) {
32082		m["data_transfer_protocol"] = value
32083	}
32084}
32085
32086// DataServiceDatasetV2TargetWorkers sets the optional target_workers attribute to value.
32087// If not specified, defaults to "AUTO"
32088func DataServiceDatasetV2TargetWorkers(value string) DataServiceDatasetV2Attr {
32089	return func(m optionalAttr) {
32090		m["target_workers"] = value
32091	}
32092}
32093
32094// Creates a dataset that reads data from the tf.data service.
32095func DataServiceDatasetV2(scope *Scope, dataset_id tf.Output, processing_mode tf.Output, address tf.Output, protocol tf.Output, job_name tf.Output, consumer_index tf.Output, num_consumers tf.Output, max_outstanding_requests tf.Output, iteration_counter tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...DataServiceDatasetV2Attr) (handle tf.Output) {
32096	if scope.Err() != nil {
32097		return
32098	}
32099	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
32100	for _, a := range optional {
32101		a(attrs)
32102	}
32103	opspec := tf.OpSpec{
32104		Type: "DataServiceDatasetV2",
32105		Input: []tf.Input{
32106			dataset_id, processing_mode, address, protocol, job_name, consumer_index, num_consumers, max_outstanding_requests, iteration_counter,
32107		},
32108		Attrs: attrs,
32109	}
32110	op := scope.AddOperation(opspec)
32111	return op.Output(0)
32112}
32113
32114// ReverseSequenceAttr is an optional argument to ReverseSequence.
32115type ReverseSequenceAttr func(optionalAttr)
32116
32117// ReverseSequenceBatchDim sets the optional batch_dim attribute to value.
32118//
32119// value: The dimension along which reversal is performed.
32120// If not specified, defaults to 0
32121func ReverseSequenceBatchDim(value int64) ReverseSequenceAttr {
32122	return func(m optionalAttr) {
32123		m["batch_dim"] = value
32124	}
32125}
32126
32127// Reverses variable length slices.
32128//
32129// This op first slices `input` along the dimension `batch_dim`, and for each
32130// slice `i`, reverses the first `seq_lengths[i]` elements along
32131// the dimension `seq_dim`.
32132//
32133// The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
32134// and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
32135//
32136// The output slice `i` along dimension `batch_dim` is then given by input
32137// slice `i`, with the first `seq_lengths[i]` slices along dimension
32138// `seq_dim` reversed.
32139//
32140// For example:
32141//
32142// ```
32143// # Given this:
32144// batch_dim = 0
32145// seq_dim = 1
32146// input.dims = (4, 8, ...)
32147// seq_lengths = [7, 2, 3, 5]
32148//
32149// # then slices of input are reversed on seq_dim, but only up to seq_lengths:
32150// output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
32151// output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
32152// output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
32153// output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
32154//
32155// # while entries past seq_lens are copied through:
32156// output[0, 7:, :, ...] = input[0, 7:, :, ...]
32157// output[1, 2:, :, ...] = input[1, 2:, :, ...]
32158// output[2, 3:, :, ...] = input[2, 3:, :, ...]
// output[3, 5:, :, ...] = input[3, 5:, :, ...]
32160// ```
32161//
32162// In contrast, if:
32163//
32164// ```
32165// # Given this:
32166// batch_dim = 2
32167// seq_dim = 0
32168// input.dims = (8, ?, 4, ...)
32169// seq_lengths = [7, 2, 3, 5]
32170//
32171// # then slices of input are reversed on seq_dim, but only up to seq_lengths:
32172// output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
32173// output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
32174// output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
32175// output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
32176//
32177// # while entries past seq_lens are copied through:
32178// output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
32179// output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
32180// output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
// output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
32182// ```
32183//
32184// Arguments:
32185//	input: The input to reverse.
32186//	seq_lengths: 1-D with length `input.dims(batch_dim)` and
32187// `max(seq_lengths) <= input.dims(seq_dim)`
32188//	seq_dim: The dimension which is partially reversed.
32189//
32190// Returns The partially reversed input. It has the same shape as `input`.
32191func ReverseSequence(scope *Scope, input tf.Output, seq_lengths tf.Output, seq_dim int64, optional ...ReverseSequenceAttr) (output tf.Output) {
32192	if scope.Err() != nil {
32193		return
32194	}
32195	attrs := map[string]interface{}{"seq_dim": seq_dim}
32196	for _, a := range optional {
32197		a(attrs)
32198	}
32199	opspec := tf.OpSpec{
32200		Type: "ReverseSequence",
32201		Input: []tf.Input{
32202			input, seq_lengths,
32203		},
32204		Attrs: attrs,
32205	}
32206	op := scope.AddOperation(opspec)
32207	return op.Output(0)
32208}
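
// exampleReverseSequence is a hand-written usage sketch, not generated code:
// with batch_dim = 0 (the default) and seq_dim = 1, each row i has its first
// seq_lengths[i] elements reversed while the rest are copied through.
func exampleReverseSequence() tf.Output {
	s := NewScope()
	input := Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}})
	seqLengths := Const(s, []int64{3, 2}) // one length per batch entry
	// Result: [[3, 2, 1, 4], [6, 5, 7, 8]].
	return ReverseSequence(s, input, seqLengths, 1)
}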
32209
32210// TensorArrayGatherV3Attr is an optional argument to TensorArrayGatherV3.
32211type TensorArrayGatherV3Attr func(optionalAttr)
32212
32213// TensorArrayGatherV3ElementShape sets the optional element_shape attribute to value.
32214//
32215// value: The expected shape of an element, if known. Used to
32216// validate the shapes of TensorArray elements. If this shape is not
32217// fully specified, gathering zero-size TensorArrays is an error.
32218// If not specified, defaults to {unknown_rank:true}
32219func TensorArrayGatherV3ElementShape(value tf.Shape) TensorArrayGatherV3Attr {
32220	return func(m optionalAttr) {
32221		m["element_shape"] = value
32222	}
32223}
32224
32225// Gather specific elements from the TensorArray into output `value`.
32226//
32227// All elements selected by `indices` must have the same shape.
32228//
32229// Arguments:
32230//	handle: The handle to a TensorArray.
32231//	indices: The locations in the TensorArray from which to read tensor elements.
32232//	flow_in: A float scalar that enforces proper chaining of operations.
32233//	dtype: The type of the elem that is returned.
32234//
32235// Returns All of the elements in the TensorArray, concatenated along a new
32236// axis (the new dimension 0).
32237func TensorArrayGatherV3(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV3Attr) (value tf.Output) {
32238	if scope.Err() != nil {
32239		return
32240	}
32241	attrs := map[string]interface{}{"dtype": dtype}
32242	for _, a := range optional {
32243		a(attrs)
32244	}
32245	opspec := tf.OpSpec{
32246		Type: "TensorArrayGatherV3",
32247		Input: []tf.Input{
32248			handle, indices, flow_in,
32249		},
32250		Attrs: attrs,
32251	}
32252	op := scope.AddOperation(opspec)
32253	return op.Output(0)
32254}
32255
32256// Subtracts a value from the current value of a variable.
32257//
32258// Any ReadVariableOp with a control dependency on this op is guaranteed to
32259// see the decremented value or a subsequent newer one.
32260//
32261// Arguments:
32262//	resource: handle to the resource in which to store the variable.
//	value: the value by which the variable will be decremented.
32264//
32265// Returns the created operation.
32266func AssignSubVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
32267	if scope.Err() != nil {
32268		return
32269	}
32270	opspec := tf.OpSpec{
32271		Type: "AssignSubVariableOp",
32272		Input: []tf.Input{
32273			resource, value,
32274		},
32275	}
32276	return scope.AddOperation(opspec)
32277}
32278
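// The sketch below is an editorial usage example, not generated code. It
// demonstrates the control-dependency guarantee stated above: the read is
// forced to run after the subtraction. VarHandleOp and ReadVariableOp are
// assumed to be the wrappers generated elsewhere in this package.
func exampleAssignSubVariableOp() {
	s := NewScope()
	v := VarHandleOp(s, tf.Float, tf.ScalarShape())
	delta := Const(s, float32(1.5))
	sub := AssignSubVariableOp(s, v, delta)
	// Any read with a control dependency on `sub` sees the decremented
	// value or a subsequent newer one.
	read := ReadVariableOp(s.WithControlDependencies(sub), v, tf.Float)
	_ = read
}
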
32279// FusedBatchNormGradAttr is an optional argument to FusedBatchNormGrad.
32280type FusedBatchNormGradAttr func(optionalAttr)
32281
32282// FusedBatchNormGradEpsilon sets the optional epsilon attribute to value.
32283//
32284// value: A small float number added to the variance of x.
32285// If not specified, defaults to 0.0001
32286func FusedBatchNormGradEpsilon(value float32) FusedBatchNormGradAttr {
32287	return func(m optionalAttr) {
32288		m["epsilon"] = value
32289	}
32290}
32291
32292// FusedBatchNormGradDataFormat sets the optional data_format attribute to value.
32293//
32294// value: The data format for y_backprop, x, x_backprop.
32295// Either "NHWC" (default) or "NCHW".
32296// If not specified, defaults to "NHWC"
32297func FusedBatchNormGradDataFormat(value string) FusedBatchNormGradAttr {
32298	return func(m optionalAttr) {
32299		m["data_format"] = value
32300	}
32301}
32302
32303// FusedBatchNormGradIsTraining sets the optional is_training attribute to value.
32304//
32305// value: A bool value to indicate the operation is for training (default)
32306// or inference.
32307// If not specified, defaults to true
32308func FusedBatchNormGradIsTraining(value bool) FusedBatchNormGradAttr {
32309	return func(m optionalAttr) {
32310		m["is_training"] = value
32311	}
32312}
32313
32314// Gradient for batch normalization.
32315//
32316// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
32317// The size of 1D Tensors matches the dimension C of the 4D Tensors.
32318//
32319// Arguments:
32320//	y_backprop: A 4D Tensor for the gradient with respect to y.
32321//	x: A 4D Tensor for input data.
32322//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
32323//	reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
32324// mean to be reused in gradient computation. When is_training is
32325// False, a 1D Tensor for the population mean to be reused in both
32326// 1st and 2nd order gradient computation.
32327//	reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
32328// variance (inverted variance in the cuDNN case) to be reused in
32329// gradient computation. When is_training is False, a 1D Tensor
32330// for the population variance to be reused in both 1st and 2nd
32331// order gradient computation.
32332//
32333// Returns:
32334//	x_backprop: A 4D Tensor for the gradient with respect to x.
32335//	scale_backprop: A 1D Tensor for the gradient with respect to scale.
32336//	offset_backprop: A 1D Tensor for the gradient with respect to offset.
32337//	reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
32338//	reserve_space_4: Unused placeholder to match the variance input
32339// in FusedBatchNorm.
32340func FusedBatchNormGrad(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradAttr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
32341	if scope.Err() != nil {
32342		return
32343	}
32344	attrs := map[string]interface{}{}
32345	for _, a := range optional {
32346		a(attrs)
32347	}
32348	opspec := tf.OpSpec{
32349		Type: "FusedBatchNormGrad",
32350		Input: []tf.Input{
32351			y_backprop, x, scale, reserve_space_1, reserve_space_2,
32352		},
32353		Attrs: attrs,
32354	}
32355	op := scope.AddOperation(opspec)
32356	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
32357}
32358
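// The sketch below is an editorial usage example, not generated code. The
// Const inputs stand in for the reserve-space outputs of a matching
// FusedBatchNorm; in a real graph those tensors would come from that op.
func exampleFusedBatchNormGrad() {
	s := NewScope()
	// NHWC tensors with C = 3; the 1D inputs match the channel dimension.
	yBackprop := Placeholder(s, tf.Float, PlaceholderShape(tf.MakeShape(2, 4, 4, 3)))
	x := Placeholder(s, tf.Float, PlaceholderShape(tf.MakeShape(2, 4, 4, 3)))
	scale := Const(s, []float32{1, 1, 1})
	batchMean := Const(s, []float32{0, 0, 0})
	batchVar := Const(s, []float32{1, 1, 1})
	xGrad, scaleGrad, offsetGrad, _, _ := FusedBatchNormGrad(s, yBackprop, x, scale, batchMean, batchVar,
		FusedBatchNormGradDataFormat("NHWC"), FusedBatchNormGradIsTraining(true))
	_, _, _ = xGrad, scaleGrad, offsetGrad
}
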
32359// Set a summary_writer_interface to record statistics using given stats_aggregator.
32360//
32361// Returns the created operation.
32362func StatsAggregatorSetSummaryWriter(scope *Scope, stats_aggregator tf.Output, summary tf.Output) (o *tf.Operation) {
32363	if scope.Err() != nil {
32364		return
32365	}
32366	opspec := tf.OpSpec{
32367		Type: "StatsAggregatorSetSummaryWriter",
32368		Input: []tf.Input{
32369			stats_aggregator, summary,
32370		},
32371	}
32372	return scope.AddOperation(opspec)
32373}
32374
32375// Generate a glob pattern matching all sharded file names.
32376func ShardedFilespec(scope *Scope, basename tf.Output, num_shards tf.Output) (filename tf.Output) {
32377	if scope.Err() != nil {
32378		return
32379	}
32380	opspec := tf.OpSpec{
32381		Type: "ShardedFilespec",
32382		Input: []tf.Input{
32383			basename, num_shards,
32384		},
32385	}
32386	op := scope.AddOperation(opspec)
32387	return op.Output(0)
32388}
32389
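// The sketch below is an editorial usage example, not generated code: with
// basename "train" and 8 shards it yields a glob pattern of the form
// "train-?????-of-00008".
func exampleShardedFilespec() {
	s := NewScope()
	pattern := ShardedFilespec(s, Const(s, "train"), Const(s, int32(8)))
	_ = pattern
}
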
32390// XlaRngBitGeneratorAttr is an optional argument to XlaRngBitGenerator.
32391type XlaRngBitGeneratorAttr func(optionalAttr)
32392
32393// XlaRngBitGeneratorDtype sets the optional dtype attribute to value.
32394//
32395// value: The type of the tensor.
32396// If not specified, defaults to DT_UINT64
32397func XlaRngBitGeneratorDtype(value tf.DataType) XlaRngBitGeneratorAttr {
32398	return func(m optionalAttr) {
32399		m["dtype"] = value
32400	}
32401}
32402
32403// Stateless PRNG bit generator.
32404//
32405// Wraps the XLA RngBitGenerator operator, documented at
32406//  https://www.tensorflow.org/performance/xla/operation_semantics#rngbitgenerator.
32407//
32408// Arguments:
32409//	algorithm: The PRNG algorithm to use, one of
32410// tf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}.
32411//	initial_state: Initial state for the PRNG algorithm. For THREEFRY, it should be
32412// a u64[2] and for PHILOX a u64[3].
32413//	shape: The output shape of the generated data.
32414func XlaRngBitGenerator(scope *Scope, algorithm tf.Output, initial_state tf.Output, shape tf.Output, optional ...XlaRngBitGeneratorAttr) (output_key tf.Output, output tf.Output) {
32415	if scope.Err() != nil {
32416		return
32417	}
32418	attrs := map[string]interface{}{}
32419	for _, a := range optional {
32420		a(attrs)
32421	}
32422	opspec := tf.OpSpec{
32423		Type: "XlaRngBitGenerator",
32424		Input: []tf.Input{
32425			algorithm, initial_state, shape,
32426		},
32427		Attrs: attrs,
32428	}
32429	op := scope.AddOperation(opspec)
32430	return op.Output(0), op.Output(1)
32431}
32432
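// The sketch below is an editorial usage example, not generated code. It
// lets StatelessRandomGetAlg (defined later in this file) pick the algorithm
// at runtime, and sizes the state at u64[3], which per the documentation
// above is large enough for PHILOX; the exact values are illustrative.
func exampleXlaRngBitGenerator() {
	s := NewScope()
	alg := StatelessRandomGetAlg(s)
	state := Const(s, []uint64{1, 2, 3})
	shape := Const(s, []int32{4})
	newState, bits := XlaRngBitGenerator(s, alg, state, shape,
		XlaRngBitGeneratorDtype(tf.Uint32))
	_, _ = newState, bits
}
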
32433// Pop the element at the top of the stack.
32434//
32435// Arguments:
32436//	handle: The handle to a stack.
32437//	elem_type: The type of the elem that is popped.
32438//
32439// Returns The tensor that is popped from the top of the stack.
32440func StackPopV2(scope *Scope, handle tf.Output, elem_type tf.DataType) (elem tf.Output) {
32441	if scope.Err() != nil {
32442		return
32443	}
32444	attrs := map[string]interface{}{"elem_type": elem_type}
32445	opspec := tf.OpSpec{
32446		Type: "StackPopV2",
32447		Input: []tf.Input{
32448			handle,
32449		},
32450		Attrs: attrs,
32451	}
32452	op := scope.AddOperation(opspec)
32453	return op.Output(0)
32454}
32455
32456// Creates and returns an empty tensor map.
32457//
32458// handle: an empty tensor map
32459func EmptyTensorMap(scope *Scope) (handle tf.Output) {
32460	if scope.Err() != nil {
32461		return
32462	}
32463	opspec := tf.OpSpec{
32464		Type: "EmptyTensorMap",
32465	}
32466	op := scope.AddOperation(opspec)
32467	return op.Output(0)
32468}
32469
32470// StackPushV2Attr is an optional argument to StackPushV2.
32471type StackPushV2Attr func(optionalAttr)
32472
32473// StackPushV2SwapMemory sets the optional swap_memory attribute to value.
32474//
32475// value: Swap `elem` to CPU. Defaults to false.
32476// If not specified, defaults to false
32477func StackPushV2SwapMemory(value bool) StackPushV2Attr {
32478	return func(m optionalAttr) {
32479		m["swap_memory"] = value
32480	}
32481}
32482
32483// Push an element onto the stack.
32484//
32485// Arguments:
32486//	handle: The handle to a stack.
32487//	elem: The tensor to be pushed onto the stack.
32488//
32489// Returns The same tensor as the input 'elem'.
32490func StackPushV2(scope *Scope, handle tf.Output, elem tf.Output, optional ...StackPushV2Attr) (output tf.Output) {
32491	if scope.Err() != nil {
32492		return
32493	}
32494	attrs := map[string]interface{}{}
32495	for _, a := range optional {
32496		a(attrs)
32497	}
32498	opspec := tf.OpSpec{
32499		Type: "StackPushV2",
32500		Input: []tf.Input{
32501			handle, elem,
32502		},
32503		Attrs: attrs,
32504	}
32505	op := scope.AddOperation(opspec)
32506	return op.Output(0)
32507}
32508
32509// Returns the gradient of `Tile`.
32510//
32511// DEPRECATED at GraphDef version 3: TileGrad has been replaced with reduce_sum
32512//
32513// Since `Tile` takes an input and repeats the input `multiples` times
32514// along each dimension, `TileGrad` takes in `multiples` and aggregates
32515// each repeated tile of `input` into `output`.
32516func TileGrad(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output) {
32517	if scope.Err() != nil {
32518		return
32519	}
32520	opspec := tf.OpSpec{
32521		Type: "TileGrad",
32522		Input: []tf.Input{
32523			input, multiples,
32524		},
32525	}
32526	op := scope.AddOperation(opspec)
32527	return op.Output(0)
32528}
32529
32530// AudioSummaryAttr is an optional argument to AudioSummary.
32531type AudioSummaryAttr func(optionalAttr)
32532
32533// AudioSummaryMaxOutputs sets the optional max_outputs attribute to value.
32534//
32535// value: Max number of batch elements to generate audio for.
32536// If not specified, defaults to 3
32537//
32538// REQUIRES: value >= 1
32539func AudioSummaryMaxOutputs(value int64) AudioSummaryAttr {
32540	return func(m optionalAttr) {
32541		m["max_outputs"] = value
32542	}
32543}
32544
32545// Outputs a `Summary` protocol buffer with audio.
32546//
32547// DEPRECATED at GraphDef version 15: Use AudioSummaryV2.
32548//
32549// The summary has up to `max_outputs` summary values containing audio. The
32550// audio is built from `tensor` which must be 3-D with shape `[batch_size,
32551// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
32552// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
32553//
32554// The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
32555// build the `tag` of the summary values:
32556//
32557// *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
32558// *  If `max_outputs` is greater than 1, the summary value tags are
32559//    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
32560//
32561// Arguments:
32562//	tag: Scalar. Used to build the `tag` attribute of the summary values.
32563//	tensor: 2-D of shape `[batch_size, frames]`.
32564//	sample_rate: The sample rate of the signal in hertz.
32565//
32566// Returns Scalar. Serialized `Summary` protocol buffer.
32567func AudioSummary(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate float32, optional ...AudioSummaryAttr) (summary tf.Output) {
32568	if scope.Err() != nil {
32569		return
32570	}
32571	attrs := map[string]interface{}{"sample_rate": sample_rate}
32572	for _, a := range optional {
32573		a(attrs)
32574	}
32575	opspec := tf.OpSpec{
32576		Type: "AudioSummary",
32577		Input: []tf.Input{
32578			tag, tensor,
32579		},
32580		Attrs: attrs,
32581	}
32582	op := scope.AddOperation(opspec)
32583	return op.Output(0)
32584}
32585
32586// Creates a dataset that batches and pads `batch_size` elements from the input.
32587//
32588// Arguments:
32589//
32590//	batch_size: A scalar representing the number of elements to accumulate in a
32591// batch.
32592//	padded_shapes: A list of int64 tensors representing the desired padded shapes
32593// of the corresponding output components. These shapes may be partially
32594// specified, using `-1` to indicate that a particular dimension should be
32595// padded to the maximum size of all batch elements.
32596//	padding_values: A list of scalars containing the padding value to use for
32597// each of the outputs.
32598//
32599func PaddedBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
32600	if scope.Err() != nil {
32601		return
32602	}
32603	attrs := map[string]interface{}{"output_shapes": output_shapes}
32604	opspec := tf.OpSpec{
32605		Type: "PaddedBatchDataset",
32606		Input: []tf.Input{
32607			input_dataset, batch_size, tf.OutputList(padded_shapes), tf.OutputList(padding_values),
32608		},
32609		Attrs: attrs,
32610	}
32611	op := scope.AddOperation(opspec)
32612	return op.Output(0)
32613}
32614
32615// StackV2Attr is an optional argument to StackV2.
32616type StackV2Attr func(optionalAttr)
32617
32618// StackV2StackName sets the optional stack_name attribute to value.
32619//
32620// value: Overrides the name used for the temporary stack resource. Default
32621// value is the name of the 'Stack' op (which is guaranteed unique).
32622// If not specified, defaults to ""
32623func StackV2StackName(value string) StackV2Attr {
32624	return func(m optionalAttr) {
32625		m["stack_name"] = value
32626	}
32627}
32628
32629// A stack that produces elements in first-in last-out order.
32630//
32631// Arguments:
32632//	max_size: The maximum size of the stack if non-negative. If negative, the stack
32633// size is unlimited.
32634//	elem_type: The type of the elements on the stack.
32635//
32636// Returns The handle to the stack.
32637func StackV2(scope *Scope, max_size tf.Output, elem_type tf.DataType, optional ...StackV2Attr) (handle tf.Output) {
32638	if scope.Err() != nil {
32639		return
32640	}
32641	attrs := map[string]interface{}{"elem_type": elem_type}
32642	for _, a := range optional {
32643		a(attrs)
32644	}
32645	opspec := tf.OpSpec{
32646		Type: "StackV2",
32647		Input: []tf.Input{
32648			max_size,
32649		},
32650		Attrs: attrs,
32651	}
32652	op := scope.AddOperation(opspec)
32653	return op.Output(0)
32654}
32655
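// The sketch below is an editorial usage example, not generated code: it
// wires StackV2 together with the StackPushV2 and StackPopV2 wrappers
// defined earlier in this file.
func exampleStack() {
	s := NewScope()
	handle := StackV2(s, Const(s, int32(10)), tf.Float,
		StackV2StackName("example_stack"))
	pushed := StackPushV2(s, handle, Const(s, []float32{1, 2, 3}))
	// Popping yields the most recently pushed element (first-in last-out).
	popped := StackPopV2(s, handle, tf.Float)
	_, _ = pushed, popped
}
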
32656// Picks the best counter-based RNG algorithm based on device.
32657//
32658// This op picks the best counter-based RNG algorithm based on device.
32659//
32660// Returns The RNG algorithm (shape int32[]).
32661func StatelessRandomGetAlg(scope *Scope) (alg tf.Output) {
32662	if scope.Err() != nil {
32663		return
32664	}
32665	opspec := tf.OpSpec{
32666		Type: "StatelessRandomGetAlg",
32667	}
32668	op := scope.AddOperation(opspec)
32669	return op.Output(0)
32670}
32671
32672// BoostedTreesUpdateEnsembleV2Attr is an optional argument to BoostedTreesUpdateEnsembleV2.
32673type BoostedTreesUpdateEnsembleV2Attr func(optionalAttr)
32674
32675// BoostedTreesUpdateEnsembleV2LogitsDimension sets the optional logits_dimension attribute to value.
32676//
32677// value: scalar, dimension of the logits
32678// If not specified, defaults to 1
32679func BoostedTreesUpdateEnsembleV2LogitsDimension(value int64) BoostedTreesUpdateEnsembleV2Attr {
32680	return func(m optionalAttr) {
32681		m["logits_dimension"] = value
32682	}
32683}
32684
32685// Updates the tree ensemble by adding a layer to the last tree being grown
32686//
32687// or by starting a new tree.
32688//
32689// Arguments:
32690//	tree_ensemble_handle: Handle to the ensemble variable.
32691//	feature_ids: Rank 1 tensor with ids for each feature. This is the real id of
32692// the feature that will be used in the split.
32693//	dimension_ids: List of rank 1 tensors representing the dimension in each feature.
32694//	node_ids: List of rank 1 tensors representing the nodes for which this feature
32695// has a split.
32696//	gains: List of rank 1 tensors representing the gains for each of the feature's
32697// split.
32698//	thresholds: List of rank 1 tensors representing the thresholds for each of the
32699// feature's split.
32700//	left_node_contribs: List of rank 2 tensors with left leaf contribs for each of
32701// the feature's splits. Will be added to the previous node values to constitute
32702// the values of the left nodes.
32703//	right_node_contribs: List of rank 2 tensors with right leaf contribs for each
32704// of the feature's splits. Will be added to the previous node values to constitute
32705// the values of the right nodes.
32706//	split_types: List of rank 1 tensors representing the split type for each feature.
32707//	max_depth: Max depth of the tree to build.
32708//	learning_rate: shrinkage constant for each new tree.
32709//	pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
32710//
32711// Returns the created operation.
32712func BoostedTreesUpdateEnsembleV2(scope *Scope, tree_ensemble_handle tf.Output, feature_ids []tf.Output, dimension_ids []tf.Output, node_ids []tf.Output, gains []tf.Output, thresholds []tf.Output, left_node_contribs []tf.Output, right_node_contribs []tf.Output, split_types []tf.Output, max_depth tf.Output, learning_rate tf.Output, pruning_mode tf.Output, optional ...BoostedTreesUpdateEnsembleV2Attr) (o *tf.Operation) {
32713	if scope.Err() != nil {
32714		return
32715	}
32716	attrs := map[string]interface{}{}
32717	for _, a := range optional {
32718		a(attrs)
32719	}
32720	opspec := tf.OpSpec{
32721		Type: "BoostedTreesUpdateEnsembleV2",
32722		Input: []tf.Input{
32723			tree_ensemble_handle, tf.OutputList(feature_ids), tf.OutputList(dimension_ids), tf.OutputList(node_ids), tf.OutputList(gains), tf.OutputList(thresholds), tf.OutputList(left_node_contribs), tf.OutputList(right_node_contribs), tf.OutputList(split_types), max_depth, learning_rate, pruning_mode,
32724		},
32725		Attrs: attrs,
32726	}
32727	return scope.AddOperation(opspec)
32728}
32729
32730// SpaceToBatch for N-D tensors of type T.
32731//
32732// This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
32733// grid of blocks of shape `block_shape`, and interleaves these blocks with the
32734// "batch" dimension (0) such that in the output, the spatial dimensions
32735// `[1, ..., M]` correspond to the position within the grid, and the batch
32736// dimension combines both the position within a spatial block and the original
32737// batch position.  Prior to division into blocks, the spatial dimensions of the
32738// input are optionally zero padded according to `paddings`. See below for a
32739// precise description.
32740//
32741// This operation is equivalent to the following steps:
32742//
32743// 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
32744//    input according to `paddings` to produce `padded` of shape `padded_shape`.
32745//
32746// 2. Reshape `padded` to `reshaped_padded` of shape:
32747//
32748//      [batch] +
32749//      [padded_shape[1] / block_shape[0],
32750//        block_shape[0],
32751//       ...,
32752//       padded_shape[M] / block_shape[M-1],
32753//       block_shape[M-1]] +
32754//      remaining_shape
32755//
32756// 3. Permute dimensions of `reshaped_padded` to produce
32757//    `permuted_reshaped_padded` of shape:
32758//
32759//      block_shape +
32760//      [batch] +
32761//      [padded_shape[1] / block_shape[0],
32762//       ...,
32763//       padded_shape[M] / block_shape[M-1]] +
32764//      remaining_shape
32765//
32766// 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
32767//    dimension, producing an output tensor of shape:
32768//
32769//      [batch * prod(block_shape)] +
32770//      [padded_shape[1] / block_shape[0],
32771//       ...,
32772//       padded_shape[M] / block_shape[M-1]] +
32773//      remaining_shape
32774//
32775// Some examples:
32776//
32777// (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
32778//     `paddings = [[0, 0], [0, 0]]`:
32779//
32780// ```
32781// x = [[[[1], [2]], [[3], [4]]]]
32782// ```
32783//
32784// The output tensor has shape `[4, 1, 1, 1]` and value:
32785//
32786// ```
32787// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
32788// ```
32789//
32790// (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
32791//     `paddings = [[0, 0], [0, 0]]`:
32792//
32793// ```
32794// x = [[[[1, 2, 3], [4, 5, 6]],
32795//       [[7, 8, 9], [10, 11, 12]]]]
32796// ```
32797//
32798// The output tensor has shape `[4, 1, 1, 3]` and value:
32799//
32800// ```
32801// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
32802// ```
32803//
32804// (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
32805//     `paddings = [[0, 0], [0, 0]]`:
32806//
32807// ```
32808// x = [[[[1],   [2],  [3],  [4]],
32809//       [[5],   [6],  [7],  [8]],
32810//       [[9],  [10], [11],  [12]],
32811//       [[13], [14], [15],  [16]]]]
32812// ```
32813//
32814// The output tensor has shape `[4, 2, 2, 1]` and value:
32815//
32816// ```
32817// x = [[[[1], [3]], [[9], [11]]],
32818//      [[[2], [4]], [[10], [12]]],
32819//      [[[5], [7]], [[13], [15]]],
32820//      [[[6], [8]], [[14], [16]]]]
32821// ```
32822//
32823// (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
32824//     paddings = `[[0, 0], [2, 0]]`:
32825//
32826// ```
32827// x = [[[[1],   [2],  [3],  [4]],
32828//       [[5],   [6],  [7],  [8]]],
32829//      [[[9],  [10], [11],  [12]],
32830//       [[13], [14], [15],  [16]]]]
32831// ```
32832//
32833// The output tensor has shape `[8, 1, 3, 1]` and value:
32834//
32835// ```
32836// x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
32837//      [[[0], [2], [4]]], [[[0], [10], [12]]],
32838//      [[[0], [5], [7]]], [[[0], [13], [15]]],
32839//      [[[0], [6], [8]]], [[[0], [14], [16]]]]
32840// ```
32841//
32842// Among others, this operation is useful for reducing atrous convolution into
32843// regular convolution.
32844//
32845// Arguments:
32846//	input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
32847// where spatial_shape has `M` dimensions.
32848//	block_shape: 1-D with shape `[M]`, all values must be >= 1.
32849//	paddings: 2-D with shape `[M, 2]`, all values must be >= 0.
32850//   `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
32851//   `i + 1`, which corresponds to spatial dimension `i`.  It is required that
32852//   `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
32853func SpaceToBatchND(scope *Scope, input tf.Output, block_shape tf.Output, paddings tf.Output) (output tf.Output) {
32854	if scope.Err() != nil {
32855		return
32856	}
32857	opspec := tf.OpSpec{
32858		Type: "SpaceToBatchND",
32859		Input: []tf.Input{
32860			input, block_shape, paddings,
32861		},
32862	}
32863	op := scope.AddOperation(opspec)
32864	return op.Output(0)
32865}
32866
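// The sketch below is an editorial usage example, not generated code. It
// reproduces example (1) above: input shape [1, 2, 2, 1] with a 2x2 block
// and zero padding, producing an output of shape [4, 1, 1, 1].
func exampleSpaceToBatchND() {
	s := NewScope()
	x := Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}})
	blockShape := Const(s, []int32{2, 2})
	paddings := Const(s, [][]int32{{0, 0}, {0, 0}})
	out := SpaceToBatchND(s, x, blockShape, paddings)
	_ = out
}
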
32867// Returns a batched diagonal tensor with given batched diagonal values.
32868//
32869// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
32870// diagonals of a matrix, with everything else padded with `padding`. `num_rows`
32871// and `num_cols` specify the dimension of the innermost matrix of the output. If
32872// both are not specified, the op assumes the innermost matrix is square and infers
32873// its size from `k` and the innermost dimension of `diagonal`. If only one of them
32874// is specified, the op assumes the unspecified value is the smallest possible
32875// based on other criteria.
32876//
32877// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has
32878// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one
32879// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank
32880// `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
32881//
32882// The second innermost dimension of `diagonal` has double meaning.
32883// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size
32884// [I, J, ..., M], and the output tensor is:
32885//
32886// ```
32887// output[i, j, ..., l, m, n]
32888//   = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
32889//     padding_value                             ; otherwise
32890// ```
32891//
32892// Otherwise, `M` is treated as the number of diagonals for the matrix in the
32893// same batch (`M = k[1]-k[0]+1`), and the output tensor is:
32894//
32895// ```
32896// output[i, j, ..., l, m, n]
32897//   = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
32898//     padding_value                                     ; otherwise
32899// ```
32900// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
32901//
32902// For example:
32903//
32904// ```
32905// # The main diagonal.
32906// diagonal = np.array([[1, 2, 3, 4],            # Input shape: (2, 4)
32907//                      [5, 6, 7, 8]])
32908// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0],  # Output shape: (2, 4, 4)
32909//                                [0, 2, 0, 0],
32910//                                [0, 0, 3, 0],
32911//                                [0, 0, 0, 4]],
32912//                               [[5, 0, 0, 0],
32913//                                [0, 6, 0, 0],
32914//                                [0, 0, 7, 0],
32915//                                [0, 0, 0, 8]]]
32916//
32917// # A superdiagonal (per batch).
32918// diagonal = np.array([[1, 2, 3],  # Input shape: (2, 3)
32919//                      [4, 5, 6]])
32920// tf.matrix_diag(diagonal, k = 1)
32921//   ==> [[[0, 1, 0, 0],  # Output shape: (2, 4, 4)
32922//         [0, 0, 2, 0],
32923//         [0, 0, 0, 3],
32924//         [0, 0, 0, 0]],
32925//        [[0, 4, 0, 0],
32926//         [0, 0, 5, 0],
32927//         [0, 0, 0, 6],
32928//         [0, 0, 0, 0]]]
32929//
32930// # A band of diagonals.
32931// diagonals = np.array([[[1, 2, 3],  # Input shape: (2, 2, 3)
32932//                        [4, 5, 0]],
32933//                       [[6, 7, 9],
32934//                        [9, 1, 0]]])
32935// tf.matrix_diag(diagonals, k = (-1, 0))
32936//   ==> [[[1, 0, 0],  # Output shape: (2, 3, 3)
32937//         [4, 2, 0],
32938//         [0, 5, 3]],
32939//        [[6, 0, 0],
32940//         [9, 7, 0],
32941//         [0, 1, 9]]]
32942//
32943// # Rectangular matrix.
32944// diagonal = np.array([1, 2])  # Input shape: (2)
32945// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
32946//   ==> [[0, 0, 0, 0],  # Output shape: (3, 4)
32947//        [1, 0, 0, 0],
32948//        [0, 2, 0, 0]]
32949//
32950// # Rectangular matrix with inferred num_cols and padding_value = 9.
32951// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
32952//   ==> [[9, 9],  # Output shape: (3, 2)
32953//        [1, 9],
32954//        [9, 2]]
32955// ```
32956//
32957// Arguments:
32958//	diagonal: Rank `r`, where `r >= 1`
32959//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
32960// diagonal, and negative value means subdiagonals. `k` can be a single integer
32961// (for a single diagonal) or a pair of integers specifying the low and high ends
32962// of a matrix band. `k[0]` must not be larger than `k[1]`.
32963//	num_rows: The number of rows of the output matrix. If it is not provided, the op assumes
32964// the output matrix is a square matrix and infers the matrix size from k and the
32965// innermost dimension of `diagonal`.
32966//	num_cols: The number of columns of the output matrix. If it is not provided, the op
32967// assumes the output matrix is a square matrix and infers the matrix size from
32968// k and the innermost dimension of `diagonal`.
32969//	padding_value: The number to fill the area outside the specified diagonal band with.
32970// Default is 0.
32971//
32972// Returns Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.
32973func MatrixDiagV2(scope *Scope, diagonal tf.Output, k tf.Output, num_rows tf.Output, num_cols tf.Output, padding_value tf.Output) (output tf.Output) {
32974	if scope.Err() != nil {
32975		return
32976	}
32977	opspec := tf.OpSpec{
32978		Type: "MatrixDiagV2",
32979		Input: []tf.Input{
32980			diagonal, k, num_rows, num_cols, padding_value,
32981		},
32982	}
32983	op := scope.AddOperation(opspec)
32984	return op.Output(0)
32985}
32986
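// The sketch below is an editorial usage example, not generated code. It
// mirrors the first documentation example above (the main diagonal); using
// -1 to leave num_rows/num_cols unspecified is an assumption about the
// kernel's convention for "not provided".
func exampleMatrixDiagV2() {
	s := NewScope()
	diagonal := Const(s, [][]float32{{1, 2, 3, 4}, {5, 6, 7, 8}})
	k := Const(s, int32(0)) // the main diagonal
	numRows := Const(s, int32(-1))
	numCols := Const(s, int32(-1))
	paddingValue := Const(s, float32(0))
	// Expected output shape: (2, 4, 4), as in the example above.
	out := MatrixDiagV2(s, diagonal, k, numRows, numCols, paddingValue)
	_ = out
}
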
32987// Gets the element at the specified index in a dataset.
32988func GetElementAtIndex(scope *Scope, dataset tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
32989	if scope.Err() != nil {
32990		return
32991	}
32992	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
32993	opspec := tf.OpSpec{
32994		Type: "GetElementAtIndex",
32995		Input: []tf.Input{
32996			dataset, index,
32997		},
32998		Attrs: attrs,
32999	}
33000	op := scope.AddOperation(opspec)
33001	if scope.Err() != nil {
33002		return
33003	}
33004	var idx int
33005	var err error
33006	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
33007		scope.UpdateErr("GetElementAtIndex", err)
33008		return
33009	}
33010	return components
33011}
33012
33013// Creates a dataset that overrides the maximum intra-op parallelism.
33014//
33015// Arguments:
33016//
33017//	max_intra_op_parallelism: Identifies the maximum intra-op parallelism to use.
33018//
33019//
33020func MaxIntraOpParallelismDataset(scope *Scope, input_dataset tf.Output, max_intra_op_parallelism tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
33021	if scope.Err() != nil {
33022		return
33023	}
33024	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
33025	opspec := tf.OpSpec{
33026		Type: "MaxIntraOpParallelismDataset",
33027		Input: []tf.Input{
33028			input_dataset, max_intra_op_parallelism,
33029		},
33030		Attrs: attrs,
33031	}
33032	op := scope.AddOperation(opspec)
33033	return op.Output(0)
33034}
33035
33036// L2 Loss.
33037//
33038// Computes half the L2 norm of a tensor without the `sqrt`:
33039//
33040//     output = sum(t ** 2) / 2
33041//
33042// Arguments:
33043//	t: Typically 2-D, but may have any dimensions.
33044//
33045// Returns 0-D.
33046func L2Loss(scope *Scope, t tf.Output) (output tf.Output) {
33047	if scope.Err() != nil {
33048		return
33049	}
33050	opspec := tf.OpSpec{
33051		Type: "L2Loss",
33052		Input: []tf.Input{
33053			t,
33054		},
33055	}
33056	op := scope.AddOperation(opspec)
33057	return op.Output(0)
33058}
33059
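// The sketch below is an editorial usage example, not generated code: for
// t = [3, 4] the formula above gives (9 + 16) / 2 = 12.5.
func exampleL2Loss() {
	s := NewScope()
	loss := L2Loss(s, Const(s, []float32{3, 4}))
	_ = loss
}
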
33060// CollectiveGatherAttr is an optional argument to CollectiveGather.
33061type CollectiveGatherAttr func(optionalAttr)
33062
33063// CollectiveGatherCommunicationHint sets the optional communication_hint attribute to value.
33064// If not specified, defaults to "auto"
33065func CollectiveGatherCommunicationHint(value string) CollectiveGatherAttr {
33066	return func(m optionalAttr) {
33067		m["communication_hint"] = value
33068	}
33069}
33070
33071// CollectiveGatherTimeoutSeconds sets the optional timeout_seconds attribute to value.
33072// If not specified, defaults to 0
33073func CollectiveGatherTimeoutSeconds(value float32) CollectiveGatherAttr {
33074	return func(m optionalAttr) {
33075		m["timeout_seconds"] = value
33076	}
33077}
33078
33079// Mutually accumulates multiple tensors of identical type and shape.
33080func CollectiveGather(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, shape tf.Shape, optional ...CollectiveGatherAttr) (data tf.Output) {
33081	if scope.Err() != nil {
33082		return
33083	}
33084	attrs := map[string]interface{}{"group_size": group_size, "group_key": group_key, "instance_key": instance_key, "shape": shape}
33085	for _, a := range optional {
33086		a(attrs)
33087	}
33088	opspec := tf.OpSpec{
33089		Type: "CollectiveGather",
33090		Input: []tf.Input{
33091			input,
33092		},
33093		Attrs: attrs,
33094	}
33095	op := scope.AddOperation(opspec)
33096	return op.Output(0)
33097}
33098
33099// Checks a tensor for NaN, -Inf and +Inf values.
33100//
33101// When run, reports an `InvalidArgument` error if `tensor` has any values
33102// that are not a number (NaN) or infinity (Inf). Otherwise, returns the input
33103// tensor. Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf
33104// in the errors it throws.
33105//
33106// Arguments:
33107//
33108//	message: Prefix of the error message.
33109func CheckNumericsV2(scope *Scope, tensor tf.Output, message string) (output tf.Output) {
33110	if scope.Err() != nil {
33111		return
33112	}
33113	attrs := map[string]interface{}{"message": message}
33114	opspec := tf.OpSpec{
33115		Type: "CheckNumericsV2",
33116		Input: []tf.Input{
33117			tensor,
33118		},
33119		Attrs: attrs,
33120	}
33121	op := scope.AddOperation(opspec)
33122	return op.Output(0)
33123}
33124
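// The sketch below is an editorial usage example, not generated code: the
// op passes `t` through unchanged unless it contains NaN, -Inf or +Inf, in
// which case running the graph fails with the given message prefix.
func exampleCheckNumericsV2() {
	s := NewScope()
	t := Placeholder(s, tf.Float)
	checked := CheckNumericsV2(s, t, "activations must be finite")
	_ = checked
}
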
33125// Applies a gradient to a given accumulator.
33126//
33127// Does not add if local_step is less than the accumulator's global_step.
33128//
33129// Arguments:
33130//	handle: The handle to an accumulator.
33131//	local_step: The local_step value at which the gradient was computed.
33132//	gradient: A tensor of the gradient to be accumulated.
33133//
33134// Returns the created operation.
33135func ResourceAccumulatorApplyGradient(scope *Scope, handle tf.Output, local_step tf.Output, gradient tf.Output) (o *tf.Operation) {
33136	if scope.Err() != nil {
33137		return
33138	}
33139	opspec := tf.OpSpec{
33140		Type: "ResourceAccumulatorApplyGradient",
33141		Input: []tf.Input{
33142			handle, local_step, gradient,
33143		},
33144	}
33145	return scope.AddOperation(opspec)
33146}
33147
33148// Connects N outputs from an N-way replicated TPU computation.
33149//
33150// This operation holds a replicated output from a `tpu.replicate()` computation subgraph.
33151// Each replicated output has the same shape and type as the input.
33152//
33153// For example:
33154// ```
33155// %computation = "tf.Computation"()
33156// %replicated_output:2 = "tf.TPUReplicatedOutput"(%computation)
33157// ```
33158// The above computation has a replicated output of two replicas.
33159func TPUReplicatedOutput(scope *Scope, input tf.Output, num_replicas int64) (outputs []tf.Output) {
33160	if scope.Err() != nil {
33161		return
33162	}
33163	attrs := map[string]interface{}{"num_replicas": num_replicas}
33164	opspec := tf.OpSpec{
33165		Type: "TPUReplicatedOutput",
33166		Input: []tf.Input{
33167			input,
33168		},
33169		Attrs: attrs,
33170	}
33171	op := scope.AddOperation(opspec)
33172	if scope.Err() != nil {
33173		return
33174	}
33175	var idx int
33176	var err error
33177	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
33178		scope.UpdateErr("TPUReplicatedOutput", err)
33179		return
33180	}
33181	return outputs
33182}
33183
33184// Returns the number of gradients aggregated in the given accumulator.
33185//
33186// Arguments:
33187//	handle: The handle to an accumulator.
33188//
33189// Returns The number of gradients aggregated in the given accumulator.
33190func ResourceAccumulatorNumAccumulated(scope *Scope, handle tf.Output) (num_accumulated tf.Output) {
33191	if scope.Err() != nil {
33192		return
33193	}
33194	opspec := tf.OpSpec{
33195		Type: "ResourceAccumulatorNumAccumulated",
33196		Input: []tf.Input{
33197			handle,
33198		},
33199	}
33200	op := scope.AddOperation(opspec)
33201	return op.Output(0)
33202}
33203
33204// SobolSampleAttr is an optional argument to SobolSample.
33205type SobolSampleAttr func(optionalAttr)
33206
33207// SobolSampleDtype sets the optional dtype attribute to value.
33208//
33209// value: The type of the sample. One of: `float32` or `float64`.
33210// If not specified, defaults to DT_FLOAT
33211func SobolSampleDtype(value tf.DataType) SobolSampleAttr {
33212	return func(m optionalAttr) {
33213		m["dtype"] = value
33214	}
33215}
33216
33217// Generates points from the Sobol sequence.
33218//
33219// Creates a Sobol sequence with `num_results` samples. Each sample has dimension
33220// `dim`. Skips the first `skip` samples.
33221//
33222// Arguments:
33223//	dim: Positive scalar `Tensor` representing each sample's dimension.
33224//	num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol points to return
33225// in the output.
33226//	skip: Positive scalar `Tensor` of dtype int32. The number of initial points of the
33227// Sobol sequence to skip.
33228//
33229// Returns `Tensor` of samples from Sobol sequence with `shape` [num_results, dim].
33230func SobolSample(scope *Scope, dim tf.Output, num_results tf.Output, skip tf.Output, optional ...SobolSampleAttr) (samples tf.Output) {
33231	if scope.Err() != nil {
33232		return
33233	}
33234	attrs := map[string]interface{}{}
33235	for _, a := range optional {
33236		a(attrs)
33237	}
33238	opspec := tf.OpSpec{
33239		Type: "SobolSample",
33240		Input: []tf.Input{
33241			dim, num_results, skip,
33242		},
33243		Attrs: attrs,
33244	}
33245	op := scope.AddOperation(opspec)
33246	return op.Output(0)
33247}
33248
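// The sketch below is an editorial usage example, not generated code: it
// requests 16 two-dimensional Sobol points, skipping the first point, with
// the default DT_FLOAT sample type.
func exampleSobolSample() {
	s := NewScope()
	samples := SobolSample(s, Const(s, int32(2)), Const(s, int32(16)),
		Const(s, int32(1)))
	_ = samples // shape [16, 2]
}
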
33249// OrderedMapUnstageAttr is an optional argument to OrderedMapUnstage.
33250type OrderedMapUnstageAttr func(optionalAttr)
33251
33252// OrderedMapUnstageCapacity sets the optional capacity attribute to value.
33253// If not specified, defaults to 0
33254//
33255// REQUIRES: value >= 0
33256func OrderedMapUnstageCapacity(value int64) OrderedMapUnstageAttr {
33257	return func(m optionalAttr) {
33258		m["capacity"] = value
33259	}
33260}
33261
33262// OrderedMapUnstageMemoryLimit sets the optional memory_limit attribute to value.
33263// If not specified, defaults to 0
33264//
33265// REQUIRES: value >= 0
33266func OrderedMapUnstageMemoryLimit(value int64) OrderedMapUnstageAttr {
33267	return func(m optionalAttr) {
33268		m["memory_limit"] = value
33269	}
33270}
33271
33272// OrderedMapUnstageContainer sets the optional container attribute to value.
33273// If not specified, defaults to ""
33274func OrderedMapUnstageContainer(value string) OrderedMapUnstageAttr {
33275	return func(m optionalAttr) {
33276		m["container"] = value
33277	}
33278}
33279
33280// OrderedMapUnstageSharedName sets the optional shared_name attribute to value.
33281// If not specified, defaults to ""
33282func OrderedMapUnstageSharedName(value string) OrderedMapUnstageAttr {
33283	return func(m optionalAttr) {
33284		m["shared_name"] = value
33285	}
33286}
33287
33288// Op removes and returns the values associated with the key
33289//
33290// from the underlying container. If the underlying container
33291// does not contain this key, the op will block until it does.
33292func OrderedMapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageAttr) (values []tf.Output) {
33293	if scope.Err() != nil {
33294		return
33295	}
33296	attrs := map[string]interface{}{"dtypes": dtypes}
33297	for _, a := range optional {
33298		a(attrs)
33299	}
33300	opspec := tf.OpSpec{
33301		Type: "OrderedMapUnstage",
33302		Input: []tf.Input{
33303			key, indices,
33304		},
33305		Attrs: attrs,
33306	}
33307	op := scope.AddOperation(opspec)
33308	if scope.Err() != nil {
33309		return
33310	}
33311	var idx int
33312	var err error
33313	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
33314		scope.UpdateErr("OrderedMapUnstage", err)
33315		return
33316	}
33317	return values
33318}
33319
33320// Deprecated. Use TensorArraySplitV3
33321//
33322// DEPRECATED at GraphDef version 26: Use TensorArraySplitV3
33323func TensorArraySplitV2(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output) {
33324	if scope.Err() != nil {
33325		return
33326	}
33327	opspec := tf.OpSpec{
33328		Type: "TensorArraySplitV2",
33329		Input: []tf.Input{
33330			handle, value, lengths, flow_in,
33331		},
33332	}
33333	op := scope.AddOperation(opspec)
33334	return op.Output(0)
33335}
33336
33337// Computes the number of elements in the given queue.
33338//
33339// Arguments:
33340//	handle: The handle to a queue.
33341//
33342// Returns The number of elements in the given queue.
33343func QueueSizeV2(scope *Scope, handle tf.Output) (size tf.Output) {
33344	if scope.Err() != nil {
33345		return
33346	}
33347	opspec := tf.OpSpec{
33348		Type: "QueueSizeV2",
33349		Input: []tf.Input{
33350			handle,
33351		},
33352	}
33353	op := scope.AddOperation(opspec)
33354	return op.Output(0)
33355}
33356
33357// LuAttr is an optional argument to Lu.
33358type LuAttr func(optionalAttr)
33359
33360// LuOutputIdxType sets the optional output_idx_type attribute to value.
33361// If not specified, defaults to DT_INT32
33362func LuOutputIdxType(value tf.DataType) LuAttr {
33363	return func(m optionalAttr) {
33364		m["output_idx_type"] = value
33365	}
33366}
33367
33368// Computes the LU decomposition of one or more square matrices.
33369//
33370// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
33371// form square matrices.
33372//
33373// The input has to be invertible.
33374//
33375// The output consists of two tensors LU and P containing the LU decomposition
33376// of all input submatrices `[..., :, :]`. LU encodes the lower triangular and
33377// upper triangular factors.
33378//
33379// For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of
33380// shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower
33381// triangular part of LU. U is an upper triangular matrix of shape `[M, M]` whose
33382// entries correspond to the upper triangular part, including the diagonal, of LU.
33383//
33384// P represents a permutation matrix encoded as a list of indices each between `0`
33385// and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to
33386// P, then L, U and P satisfy P_mat * input = L * U.
33387//
33388// Arguments:
33389//	input: A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices of
33390// size `[M, M]`.
33391//
33392// Returns:
33393//	lu: A tensor of shape `[..., M, M]` whose strictly lower triangular part denotes the
33394// lower triangular factor `L` with unit diagonal, and whose upper triangular part
33395// denotes the upper triangular factor `U`.
33396//	p: Permutation of the rows encoded as a list of indices in `0..M-1`. Shape is
33397// `[..., M]`.
33398// @compatibility(scipy)
33399// Similar to `scipy.linalg.lu`, except the triangular factors `L` and `U` are
33400// packed into a single tensor, the permutation is applied to `input` instead of
33401// the right hand side and the permutation `P` is returned as a list of indices
33402// instead of a permutation matrix.
33403// @end_compatibility
33404func Lu(scope *Scope, input tf.Output, optional ...LuAttr) (lu tf.Output, p tf.Output) {
33405	if scope.Err() != nil {
33406		return
33407	}
33408	attrs := map[string]interface{}{}
33409	for _, a := range optional {
33410		a(attrs)
33411	}
33412	opspec := tf.OpSpec{
33413		Type: "Lu",
33414		Input: []tf.Input{
33415			input,
33416		},
33417		Attrs: attrs,
33418	}
33419	op := scope.AddOperation(opspec)
33420	return op.Output(0), op.Output(1)
33421}
33422
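// The sketch below is an editorial usage example, not generated code. The
// `lu` output packs L (strictly lower part, unit diagonal implied) and U
// (upper part including the diagonal); `p` holds the row permutation, so
// that P_mat * input = L * U as described above.
func exampleLu() {
	s := NewScope()
	input := Const(s, [][]float32{{4, 3}, {6, 3}})
	lu, p := Lu(s, input)
	_, _ = lu, p
}
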
33423// Exits the current frame to its parent frame.
33424//
33425// Exit makes its input `data` available to the parent frame.
33426//
33427// Arguments:
33428//	data: The tensor to be made available to the parent frame.
33429//
33430// Returns The same tensor as `data`.
33431func Exit(scope *Scope, data tf.Output) (output tf.Output) {
33432	if scope.Err() != nil {
33433		return
33434	}
33435	opspec := tf.OpSpec{
33436		Type: "Exit",
33437		Input: []tf.Input{
33438			data,
33439		},
33440	}
33441	op := scope.AddOperation(opspec)
33442	return op.Output(0)
33443}
33444
33445// InfeedEnqueueAttr is an optional argument to InfeedEnqueue.
33446type InfeedEnqueueAttr func(optionalAttr)
33447
33448// InfeedEnqueueShape sets the optional shape attribute to value.
33449//
33450// value: The shape of the tensor.
33451// If not specified, defaults to {}
33452func InfeedEnqueueShape(value tf.Shape) InfeedEnqueueAttr {
33453	return func(m optionalAttr) {
33454		m["shape"] = value
33455	}
33456}
33457
33458// InfeedEnqueueLayout sets the optional layout attribute to value.
33459//
33460// value: A vector holding the requested layout in minor-to-major sequence.
33461// If a layout attribute is passed, but its values are all -1, the layout will
33462// be computed by the infeed operation.
33463// If not specified, defaults to {}
33464func InfeedEnqueueLayout(value []int64) InfeedEnqueueAttr {
33465	return func(m optionalAttr) {
33466		m["layout"] = value
33467	}
33468}
33469
33470// InfeedEnqueueDeviceOrdinal sets the optional device_ordinal attribute to value.
33471//
33472// value: The TPU device to use. This should be -1 when the Op
33473// is running on a TPU device, and >= 0 when the Op is running on the CPU
33474// device.
33475// If not specified, defaults to -1
33476func InfeedEnqueueDeviceOrdinal(value int64) InfeedEnqueueAttr {
33477	return func(m optionalAttr) {
33478		m["device_ordinal"] = value
33479	}
33480}
33481
33482// An op which feeds a single Tensor value into the computation.
33483//
33484// Arguments:
33485//	input: A tensor that will be provided using the infeed mechanism.
33486//
33487// Returns the created operation.
33488func InfeedEnqueue(scope *Scope, input tf.Output, optional ...InfeedEnqueueAttr) (o *tf.Operation) {
33489	if scope.Err() != nil {
33490		return
33491	}
33492	attrs := map[string]interface{}{}
33493	for _, a := range optional {
33494		a(attrs)
33495	}
33496	opspec := tf.OpSpec{
33497		Type: "InfeedEnqueue",
33498		Input: []tf.Input{
33499			input,
33500		},
33501		Attrs: attrs,
33502	}
33503	return scope.AddOperation(opspec)
33504}
33505
33506// An op which supports a basic einsum operation with 2 inputs and 1 output.
33507//
33508// This op has better TPU performance since it doesn't have the explicit reshape
33509// and transpose operations that tf.einsum does.
33510func XlaEinsum(scope *Scope, a tf.Output, b tf.Output, equation string) (product tf.Output) {
33511	if scope.Err() != nil {
33512		return
33513	}
33514	attrs := map[string]interface{}{"equation": equation}
33515	opspec := tf.OpSpec{
33516		Type: "XlaEinsum",
33517		Input: []tf.Input{
33518			a, b,
33519		},
33520		Attrs: attrs,
33521	}
33522	op := scope.AddOperation(opspec)
33523	return op.Output(0)
33524}
33525
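// The sketch below is an editorial usage example, not generated code: plain
// matrix multiplication expressed as an einsum equation.
func exampleXlaEinsum() {
	s := NewScope()
	a := Placeholder(s, tf.Float, PlaceholderShape(tf.MakeShape(2, 3)))
	b := Placeholder(s, tf.Float, PlaceholderShape(tf.MakeShape(3, 4)))
	product := XlaEinsum(s, a, b, "ij,jk->ik")
	_ = product
}
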
33526// Extracts the average gradient in the given ConditionalAccumulator.
33527//
33528// The op blocks until sufficient (i.e., more than num_required)
33529// gradients have been accumulated.  If the accumulator has already
33530// aggregated more than num_required gradients, it returns the average of
33531// the accumulated gradients.  Also automatically increments the recorded
33532// global_step in the accumulator by 1, and resets the aggregate to 0.
33533//
33534// Arguments:
33535//	handle: The handle to an accumulator.
33536//	num_required: Number of gradients required before we return an aggregate.
33537//	dtype: The data type of accumulated gradients. Needs to correspond to the type
33538// of the accumulator.
33539//
33540// Returns The average of the accumulated gradients.
33541func ResourceAccumulatorTakeGradient(scope *Scope, handle tf.Output, num_required tf.Output, dtype tf.DataType) (average tf.Output) {
33542	if scope.Err() != nil {
33543		return
33544	}
33545	attrs := map[string]interface{}{"dtype": dtype}
33546	opspec := tf.OpSpec{
33547		Type: "ResourceAccumulatorTakeGradient",
33548		Input: []tf.Input{
33549			handle, num_required,
33550		},
33551		Attrs: attrs,
33552	}
33553	op := scope.AddOperation(opspec)
33554	return op.Output(0)
33555}
33556
33557// ResourceConditionalAccumulatorAttr is an optional argument to ResourceConditionalAccumulator.
33558type ResourceConditionalAccumulatorAttr func(optionalAttr)
33559
33560// ResourceConditionalAccumulatorContainer sets the optional container attribute to value.
33561//
33562// value: If non-empty, this accumulator is placed in the given container.
33563// Otherwise, a default container is used.
33564// If not specified, defaults to ""
33565func ResourceConditionalAccumulatorContainer(value string) ResourceConditionalAccumulatorAttr {
33566	return func(m optionalAttr) {
33567		m["container"] = value
33568	}
33569}
33570
33571// ResourceConditionalAccumulatorSharedName sets the optional shared_name attribute to value.
33572//
33573// value: If non-empty, this accumulator will be shared under the
33574// given name across multiple sessions.
33575// If not specified, defaults to ""
33576func ResourceConditionalAccumulatorSharedName(value string) ResourceConditionalAccumulatorAttr {
33577	return func(m optionalAttr) {
33578		m["shared_name"] = value
33579	}
33580}
33581
33582// ResourceConditionalAccumulatorReductionType sets the optional reduction_type attribute to value.
33583// If not specified, defaults to "MEAN"
33584func ResourceConditionalAccumulatorReductionType(value string) ResourceConditionalAccumulatorAttr {
33585	return func(m optionalAttr) {
33586		m["reduction_type"] = value
33587	}
33588}
33589
33590// A conditional accumulator for aggregating gradients.
33591//
33592// The accumulator accepts gradients marked with local_step greater than or
33593// equal to the most recent global_step known to the accumulator. The
33594// average can be extracted from the accumulator, provided sufficient
33595// gradients have been accumulated. Extracting the average automatically
33596// resets the aggregate to 0, and increments the global_step recorded by
33597// the accumulator.
33598// This is a resource version of ConditionalAccumulator that will work in TF2.0
33599// with tf.cond version 2.
33600//
33601// Arguments:
33602//	dtype: The type of the value being accumulated.
33603//	shape: The shape of the values, can be [], in which case shape is unknown.
33604//
33605// Returns The handle to the accumulator.
33606func ResourceConditionalAccumulator(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...ResourceConditionalAccumulatorAttr) (handle tf.Output) {
33607	if scope.Err() != nil {
33608		return
33609	}
33610	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
33611	for _, a := range optional {
33612		a(attrs)
33613	}
33614	opspec := tf.OpSpec{
33615		Type: "ResourceConditionalAccumulator",
33616
33617		Attrs: attrs,
33618	}
33619	op := scope.AddOperation(opspec)
33620	return op.Output(0)
33621}
33622
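// The sketch below is an editorial usage example, not generated code. It
// wires the accumulator together with the ResourceAccumulatorApplyGradient
// and ResourceAccumulatorTakeGradient wrappers defined earlier in this file;
// the control dependency orders the take after the apply.
func exampleResourceConditionalAccumulator() {
	s := NewScope()
	acc := ResourceConditionalAccumulator(s, tf.Float, tf.MakeShape(2),
		ResourceConditionalAccumulatorSharedName("example_acc"))
	apply := ResourceAccumulatorApplyGradient(s, acc,
		Const(s, int64(0)), Const(s, []float32{0.5, -0.5}))
	takeScope := s.WithControlDependencies(apply)
	avg := ResourceAccumulatorTakeGradient(takeScope, acc,
		Const(takeScope, int32(1)), tf.Float)
	_ = avg
}
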
33623// MultiDeviceIteratorFromStringHandleAttr is an optional argument to MultiDeviceIteratorFromStringHandle.
33624type MultiDeviceIteratorFromStringHandleAttr func(optionalAttr)
33625
33626// MultiDeviceIteratorFromStringHandleOutputTypes sets the optional output_types attribute to value.
33627//
33628// value: The type list for the return values.
33629// If not specified, defaults to {}
33630//
33631// REQUIRES: len(value) >= 0
33632func MultiDeviceIteratorFromStringHandleOutputTypes(value []tf.DataType) MultiDeviceIteratorFromStringHandleAttr {
33633	return func(m optionalAttr) {
33634		m["output_types"] = value
33635	}
33636}
33637
33638// MultiDeviceIteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value.
33639//
33640// value: The list of shapes being produced.
33641// If not specified, defaults to {}
33642//
33643// REQUIRES: len(value) >= 0
33644func MultiDeviceIteratorFromStringHandleOutputShapes(value []tf.Shape) MultiDeviceIteratorFromStringHandleAttr {
33645	return func(m optionalAttr) {
33646		m["output_shapes"] = value
33647	}
33648}
33649
33650// Generates a MultiDeviceIterator resource from its provided string handle.
33651//
33652// Arguments:
33653//	string_handle: String representing the resource.
33654//
33655// Returns A MultiDeviceIterator resource.
33656func MultiDeviceIteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...MultiDeviceIteratorFromStringHandleAttr) (multi_device_iterator tf.Output) {
33657	if scope.Err() != nil {
33658		return
33659	}
33660	attrs := map[string]interface{}{}
33661	for _, a := range optional {
33662		a(attrs)
33663	}
33664	opspec := tf.OpSpec{
33665		Type: "MultiDeviceIteratorFromStringHandle",
33666		Input: []tf.Input{
33667			string_handle,
33668		},
33669		Attrs: attrs,
33670	}
33671	op := scope.AddOperation(opspec)
33672	return op.Output(0)
33673}
33674
33675// Make a static dimension into an XLA bounded dynamic dimension.
33676//
33677// The current static dimension size becomes the bound, and the second
33678// operand becomes the dynamic size of the dimension.
33679func XlaSetDynamicDimensionSize(scope *Scope, input tf.Output, dim_index tf.Output, size tf.Output) (output tf.Output) {
33680	if scope.Err() != nil {
33681		return
33682	}
33683	opspec := tf.OpSpec{
33684		Type: "XlaSetDynamicDimensionSize",
33685		Input: []tf.Input{
33686			input, dim_index, size,
33687		},
33688	}
33689	op := scope.AddOperation(opspec)
33690	return op.Output(0)
33691}
33692
33693// UnicodeDecodeAttr is an optional argument to UnicodeDecode.
33694type UnicodeDecodeAttr func(optionalAttr)
33695
33696// UnicodeDecodeErrors sets the optional errors attribute to value.
33697//
33698// value: Error handling policy when there is invalid formatting found in the input.
33699// The value of 'strict' will cause the operation to produce an InvalidArgument
33700// error on any invalid input formatting. A value of 'replace' (the default) will
33701// cause the operation to replace any invalid formatting in the input with the
33702// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
33703// skip any invalid formatting in the input and produce no corresponding output
33704// character.
33705// If not specified, defaults to "replace"
33706func UnicodeDecodeErrors(value string) UnicodeDecodeAttr {
33707	return func(m optionalAttr) {
33708		m["errors"] = value
33709	}
33710}
33711
33712// UnicodeDecodeReplacementChar sets the optional replacement_char attribute to value.
33713//
33714// value: The replacement character codepoint to be used in place of any invalid
33715// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
33716// be used. The default value is the default Unicode replacement character,
33717// U+FFFD (0xFFFD, decimal 65533).
33718// If not specified, defaults to 65533
33719func UnicodeDecodeReplacementChar(value int64) UnicodeDecodeAttr {
33720	return func(m optionalAttr) {
33721		m["replacement_char"] = value
33722	}
33723}
33724
33725// UnicodeDecodeReplaceControlCharacters sets the optional replace_control_characters attribute to value.
33726//
33727// value: Whether to replace the C0 control characters (00-1F) with the
33728// `replacement_char`. Default is false.
33729// If not specified, defaults to false
33730func UnicodeDecodeReplaceControlCharacters(value bool) UnicodeDecodeAttr {
33731	return func(m optionalAttr) {
33732		m["replace_control_characters"] = value
33733	}
33734}
33735
33736// UnicodeDecodeTsplits sets the optional Tsplits attribute to value.
33737// If not specified, defaults to DT_INT64
33738func UnicodeDecodeTsplits(value tf.DataType) UnicodeDecodeAttr {
33739	return func(m optionalAttr) {
33740		m["Tsplits"] = value
33741	}
33742}
33743
33744// Decodes each string in `input` into a sequence of Unicode code points.
33745//
33746// The character codepoints for all strings are returned using a single vector
33747// `char_values`, with strings expanded to characters in row-major order.
33748//
33749// The `row_splits` tensor indicates where the codepoints for
33750// each input string begin and end within the `char_values` tensor.
33751// In particular, the values for the `i`th
33752// string (in row-major order) are stored in the slice
33753// `[row_splits[i]:row_splits[i+1]]`. Thus:
33754//
33755// * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th
33756//   character in the `i`th string (in row-major order).
33757// * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th
33758//   string (in row-major order).
33759//
33760// Arguments:
33761//	input: The text to be decoded. Can have any shape. Note that the output is flattened
33762// to a vector of char values.
33763//	input_encoding: Text encoding of the input strings. This is any of the encodings supported
33764// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
33765//
33766// Returns:
33767//	row_splits: A 1D tensor (int64 unless Tsplits is overridden) containing the row splits.
33768//	char_values: A 1D int32 Tensor containing the decoded codepoints.
33769func UnicodeDecode(scope *Scope, input tf.Output, input_encoding string, optional ...UnicodeDecodeAttr) (row_splits tf.Output, char_values tf.Output) {
33770	if scope.Err() != nil {
33771		return
33772	}
33773	attrs := map[string]interface{}{"input_encoding": input_encoding}
33774	for _, a := range optional {
33775		a(attrs)
33776	}
33777	opspec := tf.OpSpec{
33778		Type: "UnicodeDecode",
33779		Input: []tf.Input{
33780			input,
33781		},
33782		Attrs: attrs,
33783	}
33784	op := scope.AddOperation(opspec)
33785	return op.Output(0), op.Output(1)
33786}
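
// A minimal graph-construction sketch for UnicodeDecode (not part of the
// generated API; assumes the caller imports this package as `op` and the
// core bindings github.com/tensorflow/tensorflow/tensorflow/go as `tf`):
//
//	s := op.NewScope()
//	input := op.Const(s, []string{"héllo", "wörld"})
//	rowSplits, charValues := op.UnicodeDecode(s, input, "UTF-8",
//		op.UnicodeDecodeErrors("replace"))
//	graph, err := s.Finalize()
//	if err != nil {
//		panic(err)
//	}
//	sess, err := tf.NewSession(graph, nil)
//	if err != nil {
//		panic(err)
//	}
//	results, err := sess.Run(nil, []tf.Output{rowSplits, charValues}, nil)
//	_ = results // results[0]: row splits; results[1]: decoded codepoints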
33787
33788// Writes a tensor summary.
33789//
33790// Writes `tensor` at `step` with `tag` using summary `writer`.
33791//
33792// Returns the created operation.
33793func WriteSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output, tag tf.Output, summary_metadata tf.Output) (o *tf.Operation) {
33794	if scope.Err() != nil {
33795		return
33796	}
33797	opspec := tf.OpSpec{
33798		Type: "WriteSummary",
33799		Input: []tf.Input{
33800			writer, step, tensor, tag, summary_metadata,
33801		},
33802	}
33803	return scope.AddOperation(opspec)
33804}
33805
33806// BatchAttr is an optional argument to Batch.
33807type BatchAttr func(optionalAttr)
33808
33809// BatchMaxEnqueuedBatches sets the optional max_enqueued_batches attribute to value.
33810// If not specified, defaults to 10
33811func BatchMaxEnqueuedBatches(value int64) BatchAttr {
33812	return func(m optionalAttr) {
33813		m["max_enqueued_batches"] = value
33814	}
33815}
33816
33817// BatchAllowedBatchSizes sets the optional allowed_batch_sizes attribute to value.
33818// If not specified, defaults to {}
33819func BatchAllowedBatchSizes(value []int64) BatchAttr {
33820	return func(m optionalAttr) {
33821		m["allowed_batch_sizes"] = value
33822	}
33823}
33824
33825// BatchContainer sets the optional container attribute to value.
33826// If not specified, defaults to ""
33827func BatchContainer(value string) BatchAttr {
33828	return func(m optionalAttr) {
33829		m["container"] = value
33830	}
33831}
33832
33833// BatchSharedName sets the optional shared_name attribute to value.
33834// If not specified, defaults to ""
33835func BatchSharedName(value string) BatchAttr {
33836	return func(m optionalAttr) {
33837		m["shared_name"] = value
33838	}
33839}
33840
33841// BatchBatchingQueue sets the optional batching_queue attribute to value.
33842// If not specified, defaults to ""
33843func BatchBatchingQueue(value string) BatchAttr {
33844	return func(m optionalAttr) {
33845		m["batching_queue"] = value
33846	}
33847}
33848
33849// Batches all input tensors nondeterministically.
33850//
33851// When many instances of this Op are being run concurrently with the same
33852// container/shared_name in the same device, some will output zero-shaped Tensors
33853// and others will output Tensors of size up to max_batch_size.
33854//
33855// All Tensors in in_tensors are batched together (so, for example, labels and
// features should be batched with a single instance of this operation).
33857//
33858// Each invocation of batch emits an `id` scalar which will be used to identify
33859// this particular invocation when doing unbatch or its gradient.
33860//
33861// Each op which emits a non-empty batch will also emit a non-empty batch_index
// Tensor, which is a [K, 3] matrix where each row contains the invocation's id,
33863// start, and length of elements of each set of Tensors present in batched_tensors.
33864//
33865// Batched tensors are concatenated along the first dimension, and all tensors in
33866// in_tensors must have the first dimension of the same size.
33867//
33868// in_tensors: The tensors to be batched.
33869// num_batch_threads: Number of scheduling threads for processing batches of work.
33870//  Determines the number of batches processed in parallel.
33871// max_batch_size: Batch sizes will never be bigger than this.
33872// batch_timeout_micros: Maximum number of microseconds to wait before outputting
33873//  an incomplete batch.
33874// allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does
33875//  nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
33876//  batches up to one of those sizes. The entries must increase monotonically, and
33877//  the final entry must equal max_batch_size.
33878// grad_timeout_micros: The timeout to use for the gradient. See Unbatch.
33879// batched_tensors: Either empty tensors or a batch of concatenated Tensors.
33880// batch_index: If out_tensors is non-empty, has information to invert it.
33881// container: Controls the scope of sharing of this batch.
33882// id: always contains a scalar with a unique ID for this invocation of Batch.
33883// shared_name: Concurrently running instances of batch in the same device with the
33884//  same container and shared_name will batch their elements together. If left
33885//  empty, the op name will be used as the shared name.
33886// T: the types of tensors to be batched.
33887func Batch(scope *Scope, in_tensors []tf.Output, num_batch_threads int64, max_batch_size int64, batch_timeout_micros int64, grad_timeout_micros int64, optional ...BatchAttr) (batched_tensors []tf.Output, batch_index tf.Output, id tf.Output) {
33888	if scope.Err() != nil {
33889		return
33890	}
33891	attrs := map[string]interface{}{"num_batch_threads": num_batch_threads, "max_batch_size": max_batch_size, "batch_timeout_micros": batch_timeout_micros, "grad_timeout_micros": grad_timeout_micros}
33892	for _, a := range optional {
33893		a(attrs)
33894	}
33895	opspec := tf.OpSpec{
33896		Type: "Batch",
33897		Input: []tf.Input{
33898			tf.OutputList(in_tensors),
33899		},
33900		Attrs: attrs,
33901	}
33902	op := scope.AddOperation(opspec)
33903	if scope.Err() != nil {
33904		return
33905	}
33906	var idx int
33907	var err error
33908	if batched_tensors, idx, err = makeOutputList(op, idx, "batched_tensors"); err != nil {
33909		scope.UpdateErr("Batch", err)
33910		return
33911	}
	batch_index = op.Output(idx)
	idx++ // advance past batch_index so id reads the following op output
	id = op.Output(idx)
33914	return batched_tensors, batch_index, id
33915}
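
// A sketch of wiring Batch into a graph (assumes the standard `op`/`tf`
// imports; the placeholder input is fed at Run time):
//
//	s := op.NewScope()
//	x := op.Placeholder(s, tf.Float)
//	batched, batchIndex, id := op.Batch(s, []tf.Output{x},
//		4,       // num_batch_threads
//		32,      // max_batch_size
//		100000,  // batch_timeout_micros
//		5000000, // grad_timeout_micros
//		op.BatchAllowedBatchSizes([]int64{8, 16, 32}))
//	// batchIndex and id are needed later to invert the batching via Unbatch.
//	_, _, _ = batched, batchIndex, id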
33916
// Returns x * y element-wise. Returns zero if y is zero, even if x is infinite or NaN.
33918//
33919// *NOTE*: `MulNoNan` supports broadcasting. More about broadcasting
33920// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
33921func MulNoNan(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
33922	if scope.Err() != nil {
33923		return
33924	}
33925	opspec := tf.OpSpec{
33926		Type: "MulNoNan",
33927		Input: []tf.Input{
33928			x, y,
33929		},
33930	}
33931	op := scope.AddOperation(opspec)
33932	return op.Output(0)
33933}
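
// For example (sketch; assumes the standard `op`/`tf` imports plus `math`
// from the standard library), the zero entries of y win even against Inf:
//
//	s := op.NewScope()
//	x := op.Const(s, []float32{1, 2, float32(math.Inf(1))})
//	y := op.Const(s, []float32{3, 0, 0})
//	z := op.MulNoNan(s, x, y) // fetching z yields [3, 0, 0]
//	_ = z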
33934
33935// QueueDequeueManyV2Attr is an optional argument to QueueDequeueManyV2.
33936type QueueDequeueManyV2Attr func(optionalAttr)
33937
33938// QueueDequeueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
33939//
33940// value: If the queue has fewer than n elements, this operation
33941// will block for up to timeout_ms milliseconds.
33942// Note: This option is not supported yet.
33943// If not specified, defaults to -1
33944func QueueDequeueManyV2TimeoutMs(value int64) QueueDequeueManyV2Attr {
33945	return func(m optionalAttr) {
33946		m["timeout_ms"] = value
33947	}
33948}
33949
33950// Dequeues `n` tuples of one or more tensors from the given queue.
33951//
33952// If the queue is closed and there are fewer than `n` elements, then an
33953// OutOfRange error is returned.
33954//
33955// This operation concatenates queue-element component tensors along the
33956// 0th dimension to make a single component tensor.  All of the components
33957// in the dequeued tuple will have size `n` in the 0th dimension.
33958//
33959// This operation has `k` outputs, where `k` is the number of components in
33960// the tuples stored in the given queue, and output `i` is the ith
33961// component of the dequeued tuple.
33962//
33963// N.B. If the queue is empty, this operation will block until `n` elements
33964// have been dequeued (or 'timeout_ms' elapses, if specified).
33965//
33966// Arguments:
33967//	handle: The handle to a queue.
33968//	n: The number of tuples to dequeue.
33969//	component_types: The type of each component in a tuple.
33970//
33971// Returns One or more tensors that were dequeued as a tuple.
33972func QueueDequeueManyV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueManyV2Attr) (components []tf.Output) {
33973	if scope.Err() != nil {
33974		return
33975	}
33976	attrs := map[string]interface{}{"component_types": component_types}
33977	for _, a := range optional {
33978		a(attrs)
33979	}
33980	opspec := tf.OpSpec{
33981		Type: "QueueDequeueManyV2",
33982		Input: []tf.Input{
33983			handle, n,
33984		},
33985		Attrs: attrs,
33986	}
33987	op := scope.AddOperation(opspec)
33988	if scope.Err() != nil {
33989		return
33990	}
33991	var idx int
33992	var err error
33993	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
33994		scope.UpdateErr("QueueDequeueManyV2", err)
33995		return
33996	}
33997	return components
33998}
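
// A sketch pairing this op with a FIFO queue (assumes the standard
// `op`/`tf` imports; enqueueing and session plumbing are elided):
//
//	s := op.NewScope()
//	q := op.FIFOQueueV2(s, []tf.DataType{tf.Float})
//	n := op.Const(s, int32(8))
//	components := op.QueueDequeueManyV2(s, q, n, []tf.DataType{tf.Float})
//	_ = components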
33999
34000// VarHandleOpAttr is an optional argument to VarHandleOp.
34001type VarHandleOpAttr func(optionalAttr)
34002
34003// VarHandleOpContainer sets the optional container attribute to value.
34004//
34005// value: the container this variable is placed in.
34006// If not specified, defaults to ""
34007func VarHandleOpContainer(value string) VarHandleOpAttr {
34008	return func(m optionalAttr) {
34009		m["container"] = value
34010	}
34011}
34012
34013// VarHandleOpSharedName sets the optional shared_name attribute to value.
34014//
34015// value: the name by which this variable is referred to.
34016// If not specified, defaults to ""
34017func VarHandleOpSharedName(value string) VarHandleOpAttr {
34018	return func(m optionalAttr) {
34019		m["shared_name"] = value
34020	}
34021}
34022
34023// VarHandleOpAllowedDevices sets the optional allowed_devices attribute to value.
34024//
34025// value: DEPRECATED. The allowed devices containing the resource variable. Set when the
34026// output ResourceHandle represents a per-replica/partitioned resource variable.
34027// If not specified, defaults to {}
34028func VarHandleOpAllowedDevices(value []string) VarHandleOpAttr {
34029	return func(m optionalAttr) {
34030		m["allowed_devices"] = value
34031	}
34032}
34033
34034// Creates a handle to a Variable resource.
34035//
34036// Arguments:
34037//	dtype: the type of this variable. Must agree with the dtypes
34038// of all ops using this variable.
34039//	shape: The (possibly partially specified) shape of this variable.
34040func VarHandleOp(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...VarHandleOpAttr) (resource tf.Output) {
34041	if scope.Err() != nil {
34042		return
34043	}
34044	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
34045	for _, a := range optional {
34046		a(attrs)
34047	}
34048	opspec := tf.OpSpec{
34049		Type: "VarHandleOp",
34050
34051		Attrs: attrs,
34052	}
34053	op := scope.AddOperation(opspec)
34054	return op.Output(0)
34055}
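
// A sketch of the usual create/assign/read cycle around a variable handle
// (assumes the standard `op`/`tf` imports; the AssignVariableOp operation
// must be run as a target before the read is fetched):
//
//	s := op.NewScope()
//	handle := op.VarHandleOp(s, tf.Float, tf.MakeShape(2, 3),
//		op.VarHandleOpSharedName("w"))
//	initW := op.AssignVariableOp(s, handle,
//		op.Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}}))
//	w := op.ReadVariableOp(s, handle, tf.Float)
//	_, _ = initW, w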
34056
34057// ComputeAccidentalHitsAttr is an optional argument to ComputeAccidentalHits.
34058type ComputeAccidentalHitsAttr func(optionalAttr)
34059
34060// ComputeAccidentalHitsSeed sets the optional seed attribute to value.
34061//
34062// value: If either seed or seed2 are set to be non-zero, the random number
34063// generator is seeded by the given seed.  Otherwise, it is seeded by a
34064// random seed.
34065// If not specified, defaults to 0
34066func ComputeAccidentalHitsSeed(value int64) ComputeAccidentalHitsAttr {
34067	return func(m optionalAttr) {
34068		m["seed"] = value
34069	}
34070}
34071
34072// ComputeAccidentalHitsSeed2 sets the optional seed2 attribute to value.
34073//
// value: A second seed to avoid seed collision.
34075// If not specified, defaults to 0
34076func ComputeAccidentalHitsSeed2(value int64) ComputeAccidentalHitsAttr {
34077	return func(m optionalAttr) {
34078		m["seed2"] = value
34079	}
34080}
34081
34082// Computes the ids of the positions in sampled_candidates that match true_labels.
34083//
34084// When doing log-odds NCE, the result of this op should be passed through a
34085// SparseToDense op, then added to the logits of the sampled candidates. This has
34086// the effect of 'removing' the sampled labels that match the true labels by
34087// making the classifier sure that they are sampled labels.
34088//
34089// Arguments:
34090//	true_classes: The true_classes output of UnpackSparseLabels.
34091//	sampled_candidates: The sampled_candidates output of CandidateSampler.
34092//	num_true: Number of true labels per context.
34093//
34094// Returns:
34095//	indices: A vector of indices corresponding to rows of true_candidates.
34096//	ids: A vector of IDs of positions in sampled_candidates that match a true_label
34097// for the row with the corresponding index in indices.
34098//	weights: A vector of the same length as indices and ids, in which each element
34099// is -FLOAT_MAX.
34100func ComputeAccidentalHits(scope *Scope, true_classes tf.Output, sampled_candidates tf.Output, num_true int64, optional ...ComputeAccidentalHitsAttr) (indices tf.Output, ids tf.Output, weights tf.Output) {
34101	if scope.Err() != nil {
34102		return
34103	}
34104	attrs := map[string]interface{}{"num_true": num_true}
34105	for _, a := range optional {
34106		a(attrs)
34107	}
34108	opspec := tf.OpSpec{
34109		Type: "ComputeAccidentalHits",
34110		Input: []tf.Input{
34111			true_classes, sampled_candidates,
34112		},
34113		Attrs: attrs,
34114	}
34115	op := scope.AddOperation(opspec)
34116	return op.Output(0), op.Output(1), op.Output(2)
34117}
34118
34119// Creates a dataset that overrides the maximum intra-op parallelism.
34120//
34121// Arguments:
34122//
34123//	max_intra_op_parallelism: Identifies the maximum intra-op parallelism to use.
34124//
34125//
34126func ExperimentalMaxIntraOpParallelismDataset(scope *Scope, input_dataset tf.Output, max_intra_op_parallelism tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
34127	if scope.Err() != nil {
34128		return
34129	}
34130	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
34131	opspec := tf.OpSpec{
34132		Type: "ExperimentalMaxIntraOpParallelismDataset",
34133		Input: []tf.Input{
34134			input_dataset, max_intra_op_parallelism,
34135		},
34136		Attrs: attrs,
34137	}
34138	op := scope.AddOperation(opspec)
34139	return op.Output(0)
34140}
34141
// Converts each string in the input Tensor to its hash, modulo a number of buckets.
34143//
34144// The hash function is deterministic on the content of the string within the
34145// process. The hash function is a keyed hash function, where attribute `key`
34146// defines the key of the hash function. `key` is an array of 2 elements.
34147//
34148// A strong hash is important when inputs may be malicious, e.g. URLs with
34149// additional components. Adversaries could try to make their inputs hash to the
34150// same bucket for a denial-of-service attack or to skew the results. A strong
34151// hash can be used to make it difficult to find inputs with a skewed hash value
34152// distribution over buckets. This requires that the hash function is
34153// seeded by a high-entropy (random) "key" unknown to the adversary.
34154//
34155// The additional robustness comes at a cost of roughly 4x higher compute
34156// time than `tf.string_to_hash_bucket_fast`.
34157//
34158// Examples:
34159//
34160// >>> tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, 2]).numpy()
34161// array([2, 0])
34162//
34163// Arguments:
34164//	input: The strings to assign a hash bucket.
34165//	num_buckets: The number of buckets.
34166//	key: The key used to seed the hash function, passed as a list of two uint64
34167// elements.
34168//
34169// Returns A Tensor of the same shape as the input `string_tensor`.
34170func StringToHashBucketStrong(scope *Scope, input tf.Output, num_buckets int64, key []int64) (output tf.Output) {
34171	if scope.Err() != nil {
34172		return
34173	}
34174	attrs := map[string]interface{}{"num_buckets": num_buckets, "key": key}
34175	opspec := tf.OpSpec{
34176		Type: "StringToHashBucketStrong",
34177		Input: []tf.Input{
34178			input,
34179		},
34180		Attrs: attrs,
34181	}
34182	op := scope.AddOperation(opspec)
34183	return op.Output(0)
34184}
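
// The Go equivalent of the Python example above (sketch; assumes the
// standard `op`/`tf` imports):
//
//	s := op.NewScope()
//	input := op.Const(s, []string{"Hello", "TF"})
//	buckets := op.StringToHashBucketStrong(s, input, 3, []int64{1, 2})
//	// Fetching buckets yields [2, 0], matching the example above.
//	_ = buckets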
34185
34186// An Op to sum inputs across replicated TPU instances.
34187//
34188// Each instance supplies its own input.
34189//
34190// For example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`.
34191// Passing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0,
34192// and `B, D, F, H` as group 1. Thus we get the outputs:
34193// `[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`.
34194//
34195// Arguments:
34196//	input: The local input to the sum.
34197//	group_assignment: An int32 tensor with shape
34198// [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
34199// replica ids in the ith subgroup.
34200//
34201// Returns The sum of all the distributed inputs.
34202func CrossReplicaSum(scope *Scope, input tf.Output, group_assignment tf.Output) (output tf.Output) {
34203	if scope.Err() != nil {
34204		return
34205	}
34206	opspec := tf.OpSpec{
34207		Type: "CrossReplicaSum",
34208		Input: []tf.Input{
34209			input, group_assignment,
34210		},
34211	}
34212	op := scope.AddOperation(opspec)
34213	return op.Output(0)
34214}
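
// A sketch of the group assignment from the example above (assumes the
// standard `op`/`tf` imports; `replicaInput` stands for the hypothetical
// per-replica tensor supplied by each TPU instance):
//
//	s := op.NewScope()
//	groups := op.Const(s, [][]int32{{0, 2, 4, 6}, {1, 3, 5, 7}})
//	sum := op.CrossReplicaSum(s, replicaInput, groups)
//	_ = sum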
34215
34216// Records the bytes size of each element of `input_dataset` in a StatsAggregator.
34217func BytesProducedStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
34218	if scope.Err() != nil {
34219		return
34220	}
34221	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
34222	opspec := tf.OpSpec{
34223		Type: "BytesProducedStatsDataset",
34224		Input: []tf.Input{
34225			input_dataset, tag,
34226		},
34227		Attrs: attrs,
34228	}
34229	op := scope.AddOperation(opspec)
34230	return op.Output(0)
34231}
34232
34233// Push an element onto the tensor_array.
34234//
34235// Arguments:
34236//	handle: The handle to a TensorArray.
34237//	index: The position to write to inside the TensorArray.
34238//	value: The tensor to write to the TensorArray.
34239//	flow_in: A float scalar that enforces proper chaining of operations.
34240//
34241// Returns A float scalar that enforces proper chaining of operations.
34242func TensorArrayWriteV3(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
34243	if scope.Err() != nil {
34244		return
34245	}
34246	opspec := tf.OpSpec{
34247		Type: "TensorArrayWriteV3",
34248		Input: []tf.Input{
34249			handle, index, value, flow_in,
34250		},
34251	}
34252	op := scope.AddOperation(opspec)
34253	return op.Output(0)
34254}
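
// A sketch of creating a TensorArray and writing one element (assumes the
// standard `op`/`tf` imports; the flow value threads the write ordering):
//
//	s := op.NewScope()
//	handle, flow := op.TensorArrayV3(s, op.Const(s, int32(4)), tf.Float)
//	flowOut := op.TensorArrayWriteV3(s, handle,
//		op.Const(s, int32(0)), op.Const(s, []float32{1, 2}), flow)
//	_ = flowOut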
34255
34256// Creates a dataset that batches input elements into a SparseTensor.
34257//
34258// Arguments:
34259//	input_dataset: A handle to an input dataset. Must have a single component.
34260//	batch_size: A scalar representing the number of elements to accumulate in a
34261// batch.
34262//	row_shape: A vector representing the dense shape of each row in the produced
34263// SparseTensor. The shape may be partially specified, using `-1` to indicate
34264// that a particular dimension should use the maximum size of all batch elements.
34265//
34266//
34267func DenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, row_shape tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
34268	if scope.Err() != nil {
34269		return
34270	}
34271	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
34272	opspec := tf.OpSpec{
34273		Type: "DenseToSparseBatchDataset",
34274		Input: []tf.Input{
34275			input_dataset, batch_size, row_shape,
34276		},
34277		Attrs: attrs,
34278	}
34279	op := scope.AddOperation(opspec)
34280	return op.Output(0)
34281}
34282
34283// Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010.
34284//
34285// Flip each bit of supported types.  For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101.
34286// This operation is performed on each element of the tensor argument `x`.
34287//
34288// Example:
34289// ```python
34290// import tensorflow as tf
// from tensorflow.python.framework import dtypes
// from tensorflow.python.ops import bitwise_ops
34292//
34293// # flip 2 (00000010) to -3 (11111101)
34294// tf.assert_equal(-3, bitwise_ops.invert(2))
34295//
34296// dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
34297//               dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64]
34298//
34299// inputs = [0, 5, 3, 14]
34300// for dtype in dtype_list:
34301//   # Because of issues with negative numbers, let's test this indirectly.
34302//   # 1. invert(a) and a = 0
34303//   # 2. invert(a) or a = invert(0)
34304//   input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype)
34305//   not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and(
34306//                                       input_tensor, bitwise_ops.invert(input_tensor)),
34307//                                     bitwise_ops.bitwise_or(
34308//                                       input_tensor, bitwise_ops.invert(input_tensor)),
34309//                                     bitwise_ops.invert(
34310//                                       tf.constant(0, dtype=dtype))]
34311//
34312//   expected = tf.constant([0, 0, 0, 0], dtype=tf.float32)
34313//   tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected)
34314//
34315//   expected = tf.cast([not_0] * 4, tf.float32)
34316//   tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected)
34317//
34318//   # For unsigned dtypes let's also check the result directly.
34319//   if dtype.is_unsigned:
34320//     inverted = bitwise_ops.invert(input_tensor)
34321//     expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32)
34322//     tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32))
34323// ```
34324func Invert(scope *Scope, x tf.Output) (y tf.Output) {
34325	if scope.Err() != nil {
34326		return
34327	}
34328	opspec := tf.OpSpec{
34329		Type: "Invert",
34330		Input: []tf.Input{
34331			x,
34332		},
34333	}
34334	op := scope.AddOperation(opspec)
34335	return op.Output(0)
34336}
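
// A Go counterpart to the unsigned branch of the example above (sketch;
// assumes the standard `op`/`tf` imports):
//
//	s := op.NewScope()
//	x := op.Const(s, []uint8{0, 5, 3, 14})
//	y := op.Invert(s, x)
//	// Fetching y yields [255, 250, 252, 241], i.e. dtype.max - x.
//	_ = y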
34337
34338// DecodePngAttr is an optional argument to DecodePng.
34339type DecodePngAttr func(optionalAttr)
34340
34341// DecodePngChannels sets the optional channels attribute to value.
34342//
34343// value: Number of color channels for the decoded image.
34344// If not specified, defaults to 0
34345func DecodePngChannels(value int64) DecodePngAttr {
34346	return func(m optionalAttr) {
34347		m["channels"] = value
34348	}
34349}
34350
34351// DecodePngDtype sets the optional dtype attribute to value.
34352// If not specified, defaults to DT_UINT8
34353func DecodePngDtype(value tf.DataType) DecodePngAttr {
34354	return func(m optionalAttr) {
34355		m["dtype"] = value
34356	}
34357}
34358
34359// Decode a PNG-encoded image to a uint8 or uint16 tensor.
34360//
34361// The attr `channels` indicates the desired number of color channels for the
34362// decoded image.
34363//
34364// Accepted values are:
34365//
34366// *   0: Use the number of channels in the PNG-encoded image.
34367// *   1: output a grayscale image.
34368// *   3: output an RGB image.
34369// *   4: output an RGBA image.
34370//
34371// If needed, the PNG-encoded image is transformed to match the requested number
34372// of color channels.
34373//
34374// This op also supports decoding JPEGs and non-animated GIFs since the interface
34375// is the same, though it is cleaner to use `tf.io.decode_image`.
34376//
34377// Arguments:
34378//	contents: 0-D.  The PNG-encoded image.
34379//
34380// Returns 3-D with shape `[height, width, channels]`.
34381func DecodePng(scope *Scope, contents tf.Output, optional ...DecodePngAttr) (image tf.Output) {
34382	if scope.Err() != nil {
34383		return
34384	}
34385	attrs := map[string]interface{}{}
34386	for _, a := range optional {
34387		a(attrs)
34388	}
34389	opspec := tf.OpSpec{
34390		Type: "DecodePng",
34391		Input: []tf.Input{
34392			contents,
34393		},
34394		Attrs: attrs,
34395	}
34396	op := scope.AddOperation(opspec)
34397	return op.Output(0)
34398}
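
// A sketch of the common read-and-decode pattern (assumes the standard
// `op`/`tf` imports; the file path is illustrative only):
//
//	s := op.NewScope()
//	contents := op.ReadFile(s, op.Const(s, "testdata/img.png"))
//	image := op.DecodePng(s, contents, op.DecodePngChannels(3))
//	// image has shape [height, width, 3] once the graph is run.
//	_ = image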
34399
34400// A dataset that splits the elements of its input into multiple elements.
34401func UnbatchDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
34402	if scope.Err() != nil {
34403		return
34404	}
34405	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
34406	opspec := tf.OpSpec{
34407		Type: "UnbatchDataset",
34408		Input: []tf.Input{
34409			input_dataset,
34410		},
34411		Attrs: attrs,
34412	}
34413	op := scope.AddOperation(opspec)
34414	return op.Output(0)
34415}
34416
34417// Returns a Tensor stack of all keys in a tensor map.
34418//
34419// input_handle: the input map
34420// keys: the returned Tensor of all keys in the map
34421func TensorMapStackKeys(scope *Scope, input_handle tf.Output, key_dtype tf.DataType) (keys tf.Output) {
34422	if scope.Err() != nil {
34423		return
34424	}
34425	attrs := map[string]interface{}{"key_dtype": key_dtype}
34426	opspec := tf.OpSpec{
34427		Type: "TensorMapStackKeys",
34428		Input: []tf.Input{
34429			input_handle,
34430		},
34431		Attrs: attrs,
34432	}
34433	op := scope.AddOperation(opspec)
34434	return op.Output(0)
34435}
34436
34437// Does nothing. Serves as a control trigger for scheduling.
34438//
34439// Only useful as a placeholder for control edges.
34440//
34441// Returns the created operation.
34442func ControlTrigger(scope *Scope) (o *tf.Operation) {
34443	if scope.Err() != nil {
34444		return
34445	}
34446	opspec := tf.OpSpec{
34447		Type: "ControlTrigger",
34448	}
34449	return scope.AddOperation(opspec)
34450}
34451
34452// Interleave the values from the `data` tensors into a single tensor.
34453//
34454// Builds a merged tensor such that
34455//
34456// ```python
34457//     merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
34458// ```
34459//
34460// For example, if each `indices[m]` is scalar or vector, we have
34461//
34462// ```python
34463//     # Scalar indices:
34464//     merged[indices[m], ...] = data[m][...]
34465//
34466//     # Vector indices:
34467//     merged[indices[m][i], ...] = data[m][i, ...]
34468// ```
34469//
34470// Each `data[i].shape` must start with the corresponding `indices[i].shape`,
34471// and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
34472// must have `data[i].shape = indices[i].shape + constant`.  In terms of this
34473// `constant`, the output shape is
34474//
34475//     merged.shape = [max(indices)] + constant
34476//
34477// Values may be merged in parallel, so if an index appears in both `indices[m][i]`
34478// and `indices[n][j]`, the result may be invalid. This differs from the normal
34479// DynamicStitch operator that defines the behavior in that case.
34480//
34481// For example:
34482//
34483// ```python
34484//     indices[0] = 6
34485//     indices[1] = [4, 1]
34486//     indices[2] = [[5, 2], [0, 3]]
34487//     data[0] = [61, 62]
34488//     data[1] = [[41, 42], [11, 12]]
34489//     data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
34490//     merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
34491//               [51, 52], [61, 62]]
34492// ```
34493//
34494// This method can be used to merge partitions created by `dynamic_partition`
// as illustrated in the following example:
34496//
34497// ```python
34498//     # Apply function (increments x_i) on elements for which a certain condition
34499//     # apply (x_i != -1 in this example).
34500//     x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
34501//     condition_mask=tf.not_equal(x,tf.constant(-1.))
34502//     partitioned_data = tf.dynamic_partition(
34503//         x, tf.cast(condition_mask, tf.int32) , 2)
34504//     partitioned_data[1] = partitioned_data[1] + 1.0
34505//     condition_indices = tf.dynamic_partition(
34506//         tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
34507//     x = tf.dynamic_stitch(condition_indices, partitioned_data)
34508//     # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
34509//     # unchanged.
34510// ```
34511//
34512// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
34513// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
34514// </div>
34515func ParallelDynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
34516	if scope.Err() != nil {
34517		return
34518	}
34519	opspec := tf.OpSpec{
34520		Type: "ParallelDynamicStitch",
34521		Input: []tf.Input{
34522			tf.OutputList(indices), tf.OutputList(data),
34523		},
34524	}
34525	op := scope.AddOperation(opspec)
34526	return op.Output(0)
34527}
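
// A small Go sketch with disjoint index sets, where parallel merging is
// safe (assumes the standard `op`/`tf` imports):
//
//	s := op.NewScope()
//	indices := []tf.Output{
//		op.Const(s, []int32{0, 2}),
//		op.Const(s, []int32{1, 3}),
//	}
//	data := []tf.Output{
//		op.Const(s, []float32{10, 30}),
//		op.Const(s, []float32{20, 40}),
//	}
//	merged := op.ParallelDynamicStitch(s, indices, data)
//	// Fetching merged yields [10, 20, 30, 40].
//	_ = merged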
34528
34529// Generate the bucket boundaries for each feature based on accumulated summaries.
34530//
34531// An op that returns a list of float tensors for a quantile stream resource. Each
34532// tensor is Rank 1 containing bucket boundaries for a single feature.
34533//
34534// Arguments:
34535//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
34536//	num_features: inferred int; number of features to get bucket boundaries for.
34537//
34538// Returns float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.
34539func BoostedTreesQuantileStreamResourceGetBucketBoundaries(scope *Scope, quantile_stream_resource_handle tf.Output, num_features int64) (bucket_boundaries []tf.Output) {
34540	if scope.Err() != nil {
34541		return
34542	}
34543	attrs := map[string]interface{}{"num_features": num_features}
34544	opspec := tf.OpSpec{
34545		Type: "BoostedTreesQuantileStreamResourceGetBucketBoundaries",
34546		Input: []tf.Input{
34547			quantile_stream_resource_handle,
34548		},
34549		Attrs: attrs,
34550	}
34551	op := scope.AddOperation(opspec)
34552	if scope.Err() != nil {
34553		return
34554	}
34555	var idx int
34556	var err error
34557	if bucket_boundaries, idx, err = makeOutputList(op, idx, "bucket_boundaries"); err != nil {
34558		scope.UpdateErr("BoostedTreesQuantileStreamResourceGetBucketBoundaries", err)
34559		return
34560	}
34561	return bucket_boundaries
34562}
34563
34564// MapSizeAttr is an optional argument to MapSize.
34565type MapSizeAttr func(optionalAttr)
34566
34567// MapSizeCapacity sets the optional capacity attribute to value.
34568// If not specified, defaults to 0
34569//
34570// REQUIRES: value >= 0
34571func MapSizeCapacity(value int64) MapSizeAttr {
34572	return func(m optionalAttr) {
34573		m["capacity"] = value
34574	}
34575}
34576
34577// MapSizeMemoryLimit sets the optional memory_limit attribute to value.
34578// If not specified, defaults to 0
34579//
34580// REQUIRES: value >= 0
34581func MapSizeMemoryLimit(value int64) MapSizeAttr {
34582	return func(m optionalAttr) {
34583		m["memory_limit"] = value
34584	}
34585}
34586
34587// MapSizeContainer sets the optional container attribute to value.
34588// If not specified, defaults to ""
34589func MapSizeContainer(value string) MapSizeAttr {
34590	return func(m optionalAttr) {
34591		m["container"] = value
34592	}
34593}
34594
34595// MapSizeSharedName sets the optional shared_name attribute to value.
34596// If not specified, defaults to ""
34597func MapSizeSharedName(value string) MapSizeAttr {
34598	return func(m optionalAttr) {
34599		m["shared_name"] = value
34600	}
34601}
34602
34603// Op returns the number of elements in the underlying container.
34604func MapSize(scope *Scope, dtypes []tf.DataType, optional ...MapSizeAttr) (size tf.Output) {
34605	if scope.Err() != nil {
34606		return
34607	}
34608	attrs := map[string]interface{}{"dtypes": dtypes}
34609	for _, a := range optional {
34610		a(attrs)
34611	}
34612	opspec := tf.OpSpec{
34613		Type: "MapSize",
34614
34615		Attrs: attrs,
34616	}
34617	op := scope.AddOperation(opspec)
34618	return op.Output(0)
34619}
34620
// Set a bound for the given input value as a hint to the XLA compiler;
// returns the same value.
34624func XlaSetBound(scope *Scope, input tf.Output, bound tf.Output) (output tf.Output) {
34625	if scope.Err() != nil {
34626		return
34627	}
34628	opspec := tf.OpSpec{
34629		Type: "XlaSetBound",
34630		Input: []tf.Input{
34631			input, bound,
34632		},
34633	}
34634	op := scope.AddOperation(opspec)
34635	return op.Output(0)
34636}
34637
34638// Deprecated. Use TensorArrayGradV3
34639//
34640// DEPRECATED at GraphDef version 26: Use TensorArrayGradV3
34641func TensorArrayGradV2(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output) {
34642	if scope.Err() != nil {
34643		return
34644	}
34645	attrs := map[string]interface{}{"source": source}
34646	opspec := tf.OpSpec{
34647		Type: "TensorArrayGradV2",
34648		Input: []tf.Input{
34649			handle, flow_in,
34650		},
34651		Attrs: attrs,
34652	}
34653	op := scope.AddOperation(opspec)
34654	return op.Output(0)
34655}
34656
34657// Computes softmax cross entropy cost and gradients to backpropagate.
34658//
34659// Inputs are the logits, not probabilities.
34660//
34661// Arguments:
34662//	features: batch_size x num_classes matrix
34663//	labels: batch_size x num_classes matrix
34664// The caller must ensure that each batch of labels represents a valid
34665// probability distribution.
34666//
34667// Returns:
34668//	loss: Per example loss (batch_size vector).
34669//	backprop: backpropagated gradients (batch_size x num_classes matrix).
34670func SoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
34671	if scope.Err() != nil {
34672		return
34673	}
34674	opspec := tf.OpSpec{
34675		Type: "SoftmaxCrossEntropyWithLogits",
34676		Input: []tf.Input{
34677			features, labels,
34678		},
34679	}
34680	op := scope.AddOperation(opspec)
34681	return op.Output(0), op.Output(1)
34682}
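
// A sketch for a single-example batch (assumes the standard `op`/`tf`
// imports; the labels row sums to 1, as required):
//
//	s := op.NewScope()
//	logits := op.Const(s, [][]float32{{2.0, 1.0, 0.1}})
//	labels := op.Const(s, [][]float32{{1.0, 0.0, 0.0}})
//	loss, backprop := op.SoftmaxCrossEntropyWithLogits(s, logits, labels)
//	_, _ = loss, backprop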
34683
34684// Outputs the single element from the given dataset.
34685//
34686// Arguments:
34687//	dataset: A handle to a dataset that contains a single element.
34688//
34689//
34690//
34691// Returns The components of the single element of `input`.
34692func DatasetToSingleElement(scope *Scope, dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
34693	if scope.Err() != nil {
34694		return
34695	}
34696	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
34697	opspec := tf.OpSpec{
34698		Type: "DatasetToSingleElement",
34699		Input: []tf.Input{
34700			dataset,
34701		},
34702		Attrs: attrs,
34703	}
34704	op := scope.AddOperation(opspec)
34705	if scope.Err() != nil {
34706		return
34707	}
34708	var idx int
34709	var err error
34710	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
34711		scope.UpdateErr("DatasetToSingleElement", err)
34712		return
34713	}
34714	return components
34715}
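
// A sketch using a one-element tensor dataset (assumes the standard
// `op`/`tf` imports and the TensorDataset wrapper from this package):
//
//	s := op.NewScope()
//	ds := op.TensorDataset(s,
//		[]tf.Output{op.Const(s, float32(42))},
//		[]tf.Shape{tf.ScalarShape()})
//	comps := op.DatasetToSingleElement(s, ds,
//		[]tf.DataType{tf.Float}, []tf.Shape{tf.ScalarShape()})
//	_ = comps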
34716
34717// CollectiveReduceAttr is an optional argument to CollectiveReduce.
34718type CollectiveReduceAttr func(optionalAttr)
34719
34720// CollectiveReduceWaitFor sets the optional wait_for attribute to value.
34721// If not specified, defaults to {}
34722func CollectiveReduceWaitFor(value []int64) CollectiveReduceAttr {
34723	return func(m optionalAttr) {
34724		m["wait_for"] = value
34725	}
34726}
34727
34728// CollectiveReduceCommunicationHint sets the optional communication_hint attribute to value.
34729// If not specified, defaults to "auto"
34730func CollectiveReduceCommunicationHint(value string) CollectiveReduceAttr {
34731	return func(m optionalAttr) {
34732		m["communication_hint"] = value
34733	}
34734}
34735
34736// CollectiveReduceTimeoutSeconds sets the optional timeout_seconds attribute to value.
34737// If not specified, defaults to 0
34738func CollectiveReduceTimeoutSeconds(value float32) CollectiveReduceAttr {
34739	return func(m optionalAttr) {
34740		m["timeout_seconds"] = value
34741	}
34742}
34743
34744// Mutually reduces multiple tensors of identical type and shape.
34745func CollectiveReduce(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, merge_op string, final_op string, subdiv_offsets []int64, optional ...CollectiveReduceAttr) (data tf.Output) {
34746	if scope.Err() != nil {
34747		return
34748	}
34749	attrs := map[string]interface{}{"group_size": group_size, "group_key": group_key, "instance_key": instance_key, "merge_op": merge_op, "final_op": final_op, "subdiv_offsets": subdiv_offsets}
34750	for _, a := range optional {
34751		a(attrs)
34752	}
34753	opspec := tf.OpSpec{
34754		Type: "CollectiveReduce",
34755		Input: []tf.Input{
34756			input,
34757		},
34758		Attrs: attrs,
34759	}
34760	op := scope.AddOperation(opspec)
34761	return op.Output(0)
34762}
34763
34764// Returns true if queue is closed.
34765//
34766// This operation returns true if the queue is closed and false if the queue
34767// is open.
34768//
34769// Arguments:
34770//	handle: The handle to a queue.
34771func QueueIsClosedV2(scope *Scope, handle tf.Output) (is_closed tf.Output) {
34772	if scope.Err() != nil {
34773		return
34774	}
34775	opspec := tf.OpSpec{
34776		Type: "QueueIsClosedV2",
34777		Input: []tf.Input{
34778			handle,
34779		},
34780	}
34781	op := scope.AddOperation(opspec)
34782	return op.Output(0)
34783}
34784
34785// CudnnRNNCanonicalToParamsAttr is an optional argument to CudnnRNNCanonicalToParams.
34786type CudnnRNNCanonicalToParamsAttr func(optionalAttr)
34787
34788// CudnnRNNCanonicalToParamsRnnMode sets the optional rnn_mode attribute to value.
34789// If not specified, defaults to "lstm"
34790func CudnnRNNCanonicalToParamsRnnMode(value string) CudnnRNNCanonicalToParamsAttr {
34791	return func(m optionalAttr) {
34792		m["rnn_mode"] = value
34793	}
34794}
34795
34796// CudnnRNNCanonicalToParamsInputMode sets the optional input_mode attribute to value.
34797// If not specified, defaults to "linear_input"
34798func CudnnRNNCanonicalToParamsInputMode(value string) CudnnRNNCanonicalToParamsAttr {
34799	return func(m optionalAttr) {
34800		m["input_mode"] = value
34801	}
34802}
34803
34804// CudnnRNNCanonicalToParamsDirection sets the optional direction attribute to value.
34805// If not specified, defaults to "unidirectional"
34806func CudnnRNNCanonicalToParamsDirection(value string) CudnnRNNCanonicalToParamsAttr {
34807	return func(m optionalAttr) {
34808		m["direction"] = value
34809	}
34810}
34811
34812// CudnnRNNCanonicalToParamsDropout sets the optional dropout attribute to value.
34813// If not specified, defaults to 0
34814func CudnnRNNCanonicalToParamsDropout(value float32) CudnnRNNCanonicalToParamsAttr {
34815	return func(m optionalAttr) {
34816		m["dropout"] = value
34817	}
34818}
34819
34820// CudnnRNNCanonicalToParamsSeed sets the optional seed attribute to value.
34821// If not specified, defaults to 0
34822func CudnnRNNCanonicalToParamsSeed(value int64) CudnnRNNCanonicalToParamsAttr {
34823	return func(m optionalAttr) {
34824		m["seed"] = value
34825	}
34826}
34827
34828// CudnnRNNCanonicalToParamsSeed2 sets the optional seed2 attribute to value.
34829// If not specified, defaults to 0
34830func CudnnRNNCanonicalToParamsSeed2(value int64) CudnnRNNCanonicalToParamsAttr {
34831	return func(m optionalAttr) {
34832		m["seed2"] = value
34833	}
34834}
34835
34836// Converts CudnnRNN params from canonical form to usable form.
34837//
34838// Writes a set of weights into the opaque params buffer so they can be used in
34839// upcoming training or inferences.
34840//
34841// Note that the params buffer may not be compatible across different GPUs. So any
34842// save and restoration should be converted to and from the canonical weights and
34843// biases.
34844//
34845// num_layers: Specifies the number of layers in the RNN model.
34846// num_units: Specifies the size of the hidden state.
34847// input_size: Specifies the size of the input state.
34848// weights: the canonical form of weights that can be used for saving
34849//     and restoration. They are more likely to be compatible across different
34850//     generations.
34851// biases: the canonical form of biases that can be used for saving
34852//     and restoration. They are more likely to be compatible across different
34853//     generations.
34854// num_params: number of parameter sets for all layers.
34855//     Each layer may contain multiple parameter sets, with each set consisting of
34856//     a weight matrix and a bias vector.
34857// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicates whether there is a linear projection between the input and
//     the actual computation before the first layer. 'skip_input' is only allowed
34860//     when input_size == num_units; 'auto_select' implies 'skip_input' when
34861//     input_size == num_units; otherwise, it implies 'linear_input'.
34862// direction: Indicates whether a bidirectional model will be used.
34863//     dir = (direction == bidirectional) ? 2 : 1
34864// dropout: dropout probability. When set to 0., dropout is disabled.
34865// seed: the 1st part of a seed to initialize dropout.
34866// seed2: the 2nd part of a seed to initialize dropout.
34867func CudnnRNNCanonicalToParams(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, weights []tf.Output, biases []tf.Output, optional ...CudnnRNNCanonicalToParamsAttr) (params tf.Output) {
34868	if scope.Err() != nil {
34869		return
34870	}
34871	attrs := map[string]interface{}{}
34872	for _, a := range optional {
34873		a(attrs)
34874	}
34875	opspec := tf.OpSpec{
34876		Type: "CudnnRNNCanonicalToParams",
34877		Input: []tf.Input{
34878			num_layers, num_units, input_size, tf.OutputList(weights), tf.OutputList(biases),
34879		},
34880		Attrs: attrs,
34881	}
34882	op := scope.AddOperation(opspec)
34883	return op.Output(0)
34884}
34885
34886// RandomCropAttr is an optional argument to RandomCrop.
34887type RandomCropAttr func(optionalAttr)
34888
34889// RandomCropSeed sets the optional seed attribute to value.
34890//
34891// value: If either seed or seed2 are set to be non-zero, the random number
34892// generator is seeded by the given seed.  Otherwise, it is seeded by a
34893// random seed.
34894// If not specified, defaults to 0
34895func RandomCropSeed(value int64) RandomCropAttr {
34896	return func(m optionalAttr) {
34897		m["seed"] = value
34898	}
34899}
34900
34901// RandomCropSeed2 sets the optional seed2 attribute to value.
34902//
// value: A second seed to avoid seed collision.
34904// If not specified, defaults to 0
34905func RandomCropSeed2(value int64) RandomCropAttr {
34906	return func(m optionalAttr) {
34907		m["seed2"] = value
34908	}
34909}
34910
34911// Randomly crop `image`.
34912//
34913// DEPRECATED at GraphDef version 8: Random crop is now pure Python
34914//
34915// `size` is a 1-D int64 tensor with 2 elements representing the crop height and
// width.  The values must be non-negative.
34917//
34918// This Op picks a random location in `image` and crops a `height` by `width`
34919// rectangle from that location.  The random location is picked so the cropped
34920// area will fit inside the original image.
34921//
34922// Arguments:
34923//	image: 3-D of shape `[height, width, channels]`.
//	size: 1-D of length 2 containing: `crop_height`, `crop_width`.
34925//
// Returns 3-D of shape `[crop_height, crop_width, channels]`.
34927func RandomCrop(scope *Scope, image tf.Output, size tf.Output, optional ...RandomCropAttr) (output tf.Output) {
34928	if scope.Err() != nil {
34929		return
34930	}
34931	attrs := map[string]interface{}{}
34932	for _, a := range optional {
34933		a(attrs)
34934	}
34935	opspec := tf.OpSpec{
34936		Type: "RandomCrop",
34937		Input: []tf.Input{
34938			image, size,
34939		},
34940		Attrs: attrs,
34941	}
34942	op := scope.AddOperation(opspec)
34943	return op.Output(0)
34944}
34945
34946// CudnnRNNParamsToCanonicalV2Attr is an optional argument to CudnnRNNParamsToCanonicalV2.
34947type CudnnRNNParamsToCanonicalV2Attr func(optionalAttr)
34948
34949// CudnnRNNParamsToCanonicalV2RnnMode sets the optional rnn_mode attribute to value.
34950// If not specified, defaults to "lstm"
34951func CudnnRNNParamsToCanonicalV2RnnMode(value string) CudnnRNNParamsToCanonicalV2Attr {
34952	return func(m optionalAttr) {
34953		m["rnn_mode"] = value
34954	}
34955}
34956
34957// CudnnRNNParamsToCanonicalV2InputMode sets the optional input_mode attribute to value.
34958// If not specified, defaults to "linear_input"
34959func CudnnRNNParamsToCanonicalV2InputMode(value string) CudnnRNNParamsToCanonicalV2Attr {
34960	return func(m optionalAttr) {
34961		m["input_mode"] = value
34962	}
34963}
34964
34965// CudnnRNNParamsToCanonicalV2Direction sets the optional direction attribute to value.
34966// If not specified, defaults to "unidirectional"
34967func CudnnRNNParamsToCanonicalV2Direction(value string) CudnnRNNParamsToCanonicalV2Attr {
34968	return func(m optionalAttr) {
34969		m["direction"] = value
34970	}
34971}
34972
34973// CudnnRNNParamsToCanonicalV2Dropout sets the optional dropout attribute to value.
34974// If not specified, defaults to 0
34975func CudnnRNNParamsToCanonicalV2Dropout(value float32) CudnnRNNParamsToCanonicalV2Attr {
34976	return func(m optionalAttr) {
34977		m["dropout"] = value
34978	}
34979}
34980
34981// CudnnRNNParamsToCanonicalV2Seed sets the optional seed attribute to value.
34982// If not specified, defaults to 0
34983func CudnnRNNParamsToCanonicalV2Seed(value int64) CudnnRNNParamsToCanonicalV2Attr {
34984	return func(m optionalAttr) {
34985		m["seed"] = value
34986	}
34987}
34988
34989// CudnnRNNParamsToCanonicalV2Seed2 sets the optional seed2 attribute to value.
34990// If not specified, defaults to 0
34991func CudnnRNNParamsToCanonicalV2Seed2(value int64) CudnnRNNParamsToCanonicalV2Attr {
34992	return func(m optionalAttr) {
34993		m["seed2"] = value
34994	}
34995}
34996
34997// CudnnRNNParamsToCanonicalV2NumProj sets the optional num_proj attribute to value.
34998// If not specified, defaults to 0
34999func CudnnRNNParamsToCanonicalV2NumProj(value int64) CudnnRNNParamsToCanonicalV2Attr {
35000	return func(m optionalAttr) {
35001		m["num_proj"] = value
35002	}
35003}
35004
35005// Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM.
35006//
35007// Retrieves a set of weights from the opaque params buffer that can be saved and
35008// restored in a way compatible with future runs.
35009//
35010// Note that the params buffer may not be compatible across different GPUs. So any
35011// save and restoration should be converted to and from the canonical weights and
35012// biases.
35013//
35014// num_layers: Specifies the number of layers in the RNN model.
35015// num_units: Specifies the size of the hidden state.
35016// input_size: Specifies the size of the input state.
// num_params_weights: number of weight parameter matrices for all layers.
// num_params_biases: number of bias parameter vectors for all layers.
35019// weights: the canonical form of weights that can be used for saving
35020//     and restoration. They are more likely to be compatible across different
35021//     generations.
35022// biases: the canonical form of biases that can be used for saving
35023//     and restoration. They are more likely to be compatible across different
35024//     generations.
35025// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicates whether there is a linear projection between the input and
//     the actual computation before the first layer. 'skip_input' is only allowed
35028//     when input_size == num_units; 'auto_select' implies 'skip_input' when
35029//     input_size == num_units; otherwise, it implies 'linear_input'.
35030// direction: Indicates whether a bidirectional model will be used.
35031//     dir = (direction == bidirectional) ? 2 : 1
35032// dropout: dropout probability. When set to 0., dropout is disabled.
35033// seed: the 1st part of a seed to initialize dropout.
35034// seed2: the 2nd part of a seed to initialize dropout.
35035// num_proj: The output dimensionality for the projection matrices. If None or 0,
35036//     no projection is performed.
35037func CudnnRNNParamsToCanonicalV2(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, params tf.Output, num_params_weights int64, num_params_biases int64, optional ...CudnnRNNParamsToCanonicalV2Attr) (weights []tf.Output, biases []tf.Output) {
35038	if scope.Err() != nil {
35039		return
35040	}
35041	attrs := map[string]interface{}{"num_params_weights": num_params_weights, "num_params_biases": num_params_biases}
35042	for _, a := range optional {
35043		a(attrs)
35044	}
35045	opspec := tf.OpSpec{
35046		Type: "CudnnRNNParamsToCanonicalV2",
35047		Input: []tf.Input{
35048			num_layers, num_units, input_size, params,
35049		},
35050		Attrs: attrs,
35051	}
35052	op := scope.AddOperation(opspec)
35053	if scope.Err() != nil {
35054		return
35055	}
35056	var idx int
35057	var err error
35058	if weights, idx, err = makeOutputList(op, idx, "weights"); err != nil {
35059		scope.UpdateErr("CudnnRNNParamsToCanonicalV2", err)
35060		return
35061	}
35062	if biases, idx, err = makeOutputList(op, idx, "biases"); err != nil {
35063		scope.UpdateErr("CudnnRNNParamsToCanonicalV2", err)
35064		return
35065	}
35066	return weights, biases
35067}
35068
35069// Get the current size of the TensorArray.
35070//
35071// Arguments:
35072//	handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
35073//	flow_in: A float scalar that enforces proper chaining of operations.
35074//
35075// Returns The current size of the TensorArray.
35076func TensorArraySizeV3(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output) {
35077	if scope.Err() != nil {
35078		return
35079	}
35080	opspec := tf.OpSpec{
35081		Type: "TensorArraySizeV3",
35082		Input: []tf.Input{
35083			handle, flow_in,
35084		},
35085	}
35086	op := scope.AddOperation(opspec)
35087	return op.Output(0)
35088}
35089
35090// Returns the diagonal part of the tensor.
35091//
35092// This operation returns a tensor with the `diagonal` part
35093// of the `input`. The `diagonal` part is computed as follows:
35094//
35095// Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
35096// tensor of rank `k` with dimensions `[D1,..., Dk]` where:
35097//
35098// `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
35099//
35100// For example:
35101//
35102// ```
35103// # 'input' is [[1, 0, 0, 0]
35104//               [0, 2, 0, 0]
35105//               [0, 0, 3, 0]
35106//               [0, 0, 0, 4]]
35107//
35108// tf.diag_part(input) ==> [1, 2, 3, 4]
35109// ```
35110//
35111// Arguments:
35112//	input: Rank k tensor where k is even and not zero.
35113//
35114// Returns The extracted diagonal.
35115func DiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
35116	if scope.Err() != nil {
35117		return
35118	}
35119	opspec := tf.OpSpec{
35120		Type: "DiagPart",
35121		Input: []tf.Input{
35122			input,
35123		},
35124	}
35125	op := scope.AddOperation(opspec)
35126	return op.Output(0)
35127}
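
// The example above in Go (sketch; assumes the standard `op`/`tf` imports):
//
//	s := op.NewScope()
//	m := op.Const(s, [][]float32{{1, 0}, {0, 2}})
//	d := op.DiagPart(s, m) // fetching d yields [1, 2]
//	_ = d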
35128
35129// CudnnRNNParamsToCanonicalAttr is an optional argument to CudnnRNNParamsToCanonical.
35130type CudnnRNNParamsToCanonicalAttr func(optionalAttr)
35131
35132// CudnnRNNParamsToCanonicalRnnMode sets the optional rnn_mode attribute to value.
35133// If not specified, defaults to "lstm"
35134func CudnnRNNParamsToCanonicalRnnMode(value string) CudnnRNNParamsToCanonicalAttr {
35135	return func(m optionalAttr) {
35136		m["rnn_mode"] = value
35137	}
35138}
35139
35140// CudnnRNNParamsToCanonicalInputMode sets the optional input_mode attribute to value.
35141// If not specified, defaults to "linear_input"
35142func CudnnRNNParamsToCanonicalInputMode(value string) CudnnRNNParamsToCanonicalAttr {
35143	return func(m optionalAttr) {
35144		m["input_mode"] = value
35145	}
35146}
35147
35148// CudnnRNNParamsToCanonicalDirection sets the optional direction attribute to value.
35149// If not specified, defaults to "unidirectional"
35150func CudnnRNNParamsToCanonicalDirection(value string) CudnnRNNParamsToCanonicalAttr {
35151	return func(m optionalAttr) {
35152		m["direction"] = value
35153	}
35154}
35155
35156// CudnnRNNParamsToCanonicalDropout sets the optional dropout attribute to value.
35157// If not specified, defaults to 0
35158func CudnnRNNParamsToCanonicalDropout(value float32) CudnnRNNParamsToCanonicalAttr {
35159	return func(m optionalAttr) {
35160		m["dropout"] = value
35161	}
35162}
35163
35164// CudnnRNNParamsToCanonicalSeed sets the optional seed attribute to value.
35165// If not specified, defaults to 0
35166func CudnnRNNParamsToCanonicalSeed(value int64) CudnnRNNParamsToCanonicalAttr {
35167	return func(m optionalAttr) {
35168		m["seed"] = value
35169	}
35170}
35171
35172// CudnnRNNParamsToCanonicalSeed2 sets the optional seed2 attribute to value.
35173// If not specified, defaults to 0
35174func CudnnRNNParamsToCanonicalSeed2(value int64) CudnnRNNParamsToCanonicalAttr {
35175	return func(m optionalAttr) {
35176		m["seed2"] = value
35177	}
35178}
35179
35180// Retrieves CudnnRNN params in canonical form.
35181//
35182// Retrieves a set of weights from the opaque params buffer that can be saved and
35183// restored in a way compatible with future runs.
35184//
35185// Note that the params buffer may not be compatible across different GPUs. So any
35186// save and restoration should be converted to and from the canonical weights and
35187// biases.
35188//
35189// num_layers: Specifies the number of layers in the RNN model.
35190// num_units: Specifies the size of the hidden state.
35191// input_size: Specifies the size of the input state.
35192// num_params: number of parameter sets for all layers.
35193//     Each layer may contain multiple parameter sets, with each set consisting of
35194//     a weight matrix and a bias vector.
35195// weights: the canonical form of weights that can be used for saving
35196//     and restoration. They are more likely to be compatible across different
35197//     generations.
35198// biases: the canonical form of biases that can be used for saving
35199//     and restoration. They are more likely to be compatible across different
35200//     generations.
35201// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicates whether there is a linear projection between the input and
//     the actual computation before the first layer. 'skip_input' is only allowed
35204//     when input_size == num_units; 'auto_select' implies 'skip_input' when
35205//     input_size == num_units; otherwise, it implies 'linear_input'.
35206// direction: Indicates whether a bidirectional model will be used.
35207//     dir = (direction == bidirectional) ? 2 : 1
35208// dropout: dropout probability. When set to 0., dropout is disabled.
35209// seed: the 1st part of a seed to initialize dropout.
35210// seed2: the 2nd part of a seed to initialize dropout.
35211func CudnnRNNParamsToCanonical(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, params tf.Output, num_params int64, optional ...CudnnRNNParamsToCanonicalAttr) (weights []tf.Output, biases []tf.Output) {
35212	if scope.Err() != nil {
35213		return
35214	}
35215	attrs := map[string]interface{}{"num_params": num_params}
35216	for _, a := range optional {
35217		a(attrs)
35218	}
35219	opspec := tf.OpSpec{
35220		Type: "CudnnRNNParamsToCanonical",
35221		Input: []tf.Input{
35222			num_layers, num_units, input_size, params,
35223		},
35224		Attrs: attrs,
35225	}
35226	op := scope.AddOperation(opspec)
35227	if scope.Err() != nil {
35228		return
35229	}
35230	var idx int
35231	var err error
35232	if weights, idx, err = makeOutputList(op, idx, "weights"); err != nil {
35233		scope.UpdateErr("CudnnRNNParamsToCanonical", err)
35234		return
35235	}
35236	if biases, idx, err = makeOutputList(op, idx, "biases"); err != nil {
35237		scope.UpdateErr("CudnnRNNParamsToCanonical", err)
35238		return
35239	}
35240	return weights, biases
35241}
35242
35243// Creates a sequence of numbers.
35244//
35245// This operation creates a sequence of numbers that begins at `start` and
35246// extends by increments of `delta` up to but not including `limit`.
35247//
35248// For example:
35249//
35250// ```
35251// # 'start' is 3
35252// # 'limit' is 18
35253// # 'delta' is 3
35254// tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
35255// ```
35256//
35257// Arguments:
35258//	start: 0-D (scalar). First entry in the sequence.
35259//	limit: 0-D (scalar). Upper limit of sequence, exclusive.
35260//	delta: 0-D (scalar). Optional. Default is 1. Number that increments `start`.
35261//
35262// Returns 1-D.
35263func Range(scope *Scope, start tf.Output, limit tf.Output, delta tf.Output) (output tf.Output) {
35264	if scope.Err() != nil {
35265		return
35266	}
35267	opspec := tf.OpSpec{
35268		Type: "Range",
35269		Input: []tf.Input{
35270			start, limit, delta,
35271		},
35272	}
35273	op := scope.AddOperation(opspec)
35274	return op.Output(0)
35275}
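
// A minimal end-to-end sketch of using this wrapper (not generated; it assumes
// the NewScope/Const helpers from this package and the session API from the
// parent tensorflow/go package):
//
// ```
// func rangeExample() ([]int32, error) {
// 	s := NewScope()
// 	// Builds the sequence 3, 6, 9, 12, 15 (the limit 18 is exclusive).
// 	seq := Range(s, Const(s, int32(3)), Const(s, int32(18)), Const(s, int32(3)))
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		return nil, err
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		return nil, err
// 	}
// 	defer sess.Close()
// 	out, err := sess.Run(nil, []tf.Output{seq}, nil)
// 	if err != nil {
// 		return nil, err
// 	}
// 	return out[0].Value().([]int32), nil // [3 6 9 12 15]
// }
// ```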
35276
35277// CudnnRNNCanonicalToParamsV2Attr is an optional argument to CudnnRNNCanonicalToParamsV2.
35278type CudnnRNNCanonicalToParamsV2Attr func(optionalAttr)
35279
35280// CudnnRNNCanonicalToParamsV2RnnMode sets the optional rnn_mode attribute to value.
35281// If not specified, defaults to "lstm"
35282func CudnnRNNCanonicalToParamsV2RnnMode(value string) CudnnRNNCanonicalToParamsV2Attr {
35283	return func(m optionalAttr) {
35284		m["rnn_mode"] = value
35285	}
35286}
35287
35288// CudnnRNNCanonicalToParamsV2InputMode sets the optional input_mode attribute to value.
35289// If not specified, defaults to "linear_input"
35290func CudnnRNNCanonicalToParamsV2InputMode(value string) CudnnRNNCanonicalToParamsV2Attr {
35291	return func(m optionalAttr) {
35292		m["input_mode"] = value
35293	}
35294}
35295
35296// CudnnRNNCanonicalToParamsV2Direction sets the optional direction attribute to value.
35297// If not specified, defaults to "unidirectional"
35298func CudnnRNNCanonicalToParamsV2Direction(value string) CudnnRNNCanonicalToParamsV2Attr {
35299	return func(m optionalAttr) {
35300		m["direction"] = value
35301	}
35302}
35303
35304// CudnnRNNCanonicalToParamsV2Dropout sets the optional dropout attribute to value.
35305// If not specified, defaults to 0
35306func CudnnRNNCanonicalToParamsV2Dropout(value float32) CudnnRNNCanonicalToParamsV2Attr {
35307	return func(m optionalAttr) {
35308		m["dropout"] = value
35309	}
35310}
35311
35312// CudnnRNNCanonicalToParamsV2Seed sets the optional seed attribute to value.
35313// If not specified, defaults to 0
35314func CudnnRNNCanonicalToParamsV2Seed(value int64) CudnnRNNCanonicalToParamsV2Attr {
35315	return func(m optionalAttr) {
35316		m["seed"] = value
35317	}
35318}
35319
35320// CudnnRNNCanonicalToParamsV2Seed2 sets the optional seed2 attribute to value.
35321// If not specified, defaults to 0
35322func CudnnRNNCanonicalToParamsV2Seed2(value int64) CudnnRNNCanonicalToParamsV2Attr {
35323	return func(m optionalAttr) {
35324		m["seed2"] = value
35325	}
35326}
35327
35328// CudnnRNNCanonicalToParamsV2NumProj sets the optional num_proj attribute to value.
35329// If not specified, defaults to 0
35330func CudnnRNNCanonicalToParamsV2NumProj(value int64) CudnnRNNCanonicalToParamsV2Attr {
35331	return func(m optionalAttr) {
35332		m["num_proj"] = value
35333	}
35334}
35335
35336// Converts CudnnRNN params from canonical form to usable form. It supports the projection in LSTM.
35337//
35338// Writes a set of weights into the opaque params buffer so they can be used in
35339// upcoming training or inferences.
35340//
35341// Note that the params buffer may not be compatible across different GPUs. So any
35342// save and restoration should be converted to and from the canonical weights and
35343// biases.
35344//
35345// num_layers: Specifies the number of layers in the RNN model.
35346// num_units: Specifies the size of the hidden state.
35347// input_size: Specifies the size of the input state.
35348// weights: the canonical form of weights that can be used for saving
35349//     and restoration. They are more likely to be compatible across different
35350//     generations.
35351// biases: the canonical form of biases that can be used for saving
35352//     and restoration. They are more likely to be compatible across different
35353//     generations.
// num_params_weights: number of weight parameter matrices for all layers.
// num_params_biases: number of bias parameter vectors for all layers.
35356// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicates whether there is a linear projection between the input and
//     the actual computation before the first layer. 'skip_input' is only allowed
35359//     when input_size == num_units; 'auto_select' implies 'skip_input' when
35360//     input_size == num_units; otherwise, it implies 'linear_input'.
35361// direction: Indicates whether a bidirectional model will be used.
35362//     dir = (direction == bidirectional) ? 2 : 1
35363// dropout: dropout probability. When set to 0., dropout is disabled.
35364// seed: the 1st part of a seed to initialize dropout.
35365// seed2: the 2nd part of a seed to initialize dropout.
35366// num_proj: The output dimensionality for the projection matrices. If None or 0,
35367//     no projection is performed.
35368func CudnnRNNCanonicalToParamsV2(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, weights []tf.Output, biases []tf.Output, optional ...CudnnRNNCanonicalToParamsV2Attr) (params tf.Output) {
35369	if scope.Err() != nil {
35370		return
35371	}
35372	attrs := map[string]interface{}{}
35373	for _, a := range optional {
35374		a(attrs)
35375	}
35376	opspec := tf.OpSpec{
35377		Type: "CudnnRNNCanonicalToParamsV2",
35378		Input: []tf.Input{
35379			num_layers, num_units, input_size, tf.OutputList(weights), tf.OutputList(biases),
35380		},
35381		Attrs: attrs,
35382	}
35383	op := scope.AddOperation(opspec)
35384	return op.Output(0)
35385}
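
// A graph-wiring sketch of the canonical round trip (wiring only: executing
// these ops requires a CUDA-enabled build; Const is assumed from this package,
// and the layer/unit sizes below are made-up values):
//
// ```
// func canonicalRoundTrip(s *Scope, params tf.Output, numParams int64) tf.Output {
// 	numLayers := Const(s, int32(1))
// 	numUnits := Const(s, int32(128))
// 	inputSize := Const(s, int32(64))
// 	// Unpack the opaque buffer into canonical weights and biases...
// 	weights, biases := CudnnRNNParamsToCanonical(s, numLayers, numUnits, inputSize, params, numParams)
// 	// ...and pack them back into an opaque buffer, e.g. after a restore.
// 	return CudnnRNNCanonicalToParamsV2(s, numLayers, numUnits, inputSize, weights, biases)
// }
// ```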
35386
35387// CudnnRNNV3Attr is an optional argument to CudnnRNNV3.
35388type CudnnRNNV3Attr func(optionalAttr)
35389
35390// CudnnRNNV3RnnMode sets the optional rnn_mode attribute to value.
35391// If not specified, defaults to "lstm"
35392func CudnnRNNV3RnnMode(value string) CudnnRNNV3Attr {
35393	return func(m optionalAttr) {
35394		m["rnn_mode"] = value
35395	}
35396}
35397
35398// CudnnRNNV3InputMode sets the optional input_mode attribute to value.
35399// If not specified, defaults to "linear_input"
35400func CudnnRNNV3InputMode(value string) CudnnRNNV3Attr {
35401	return func(m optionalAttr) {
35402		m["input_mode"] = value
35403	}
35404}
35405
35406// CudnnRNNV3Direction sets the optional direction attribute to value.
35407// If not specified, defaults to "unidirectional"
35408func CudnnRNNV3Direction(value string) CudnnRNNV3Attr {
35409	return func(m optionalAttr) {
35410		m["direction"] = value
35411	}
35412}
35413
35414// CudnnRNNV3Dropout sets the optional dropout attribute to value.
35415// If not specified, defaults to 0
35416func CudnnRNNV3Dropout(value float32) CudnnRNNV3Attr {
35417	return func(m optionalAttr) {
35418		m["dropout"] = value
35419	}
35420}
35421
35422// CudnnRNNV3Seed sets the optional seed attribute to value.
35423// If not specified, defaults to 0
35424func CudnnRNNV3Seed(value int64) CudnnRNNV3Attr {
35425	return func(m optionalAttr) {
35426		m["seed"] = value
35427	}
35428}
35429
35430// CudnnRNNV3Seed2 sets the optional seed2 attribute to value.
35431// If not specified, defaults to 0
35432func CudnnRNNV3Seed2(value int64) CudnnRNNV3Attr {
35433	return func(m optionalAttr) {
35434		m["seed2"] = value
35435	}
35436}
35437
35438// CudnnRNNV3NumProj sets the optional num_proj attribute to value.
35439// If not specified, defaults to 0
35440func CudnnRNNV3NumProj(value int64) CudnnRNNV3Attr {
35441	return func(m optionalAttr) {
35442		m["num_proj"] = value
35443	}
35444}
35445
35446// CudnnRNNV3IsTraining sets the optional is_training attribute to value.
35447// If not specified, defaults to true
35448func CudnnRNNV3IsTraining(value bool) CudnnRNNV3Attr {
35449	return func(m optionalAttr) {
35450		m["is_training"] = value
35451	}
35452}
35453
35454// CudnnRNNV3TimeMajor sets the optional time_major attribute to value.
35455// If not specified, defaults to true
35456func CudnnRNNV3TimeMajor(value bool) CudnnRNNV3Attr {
35457	return func(m optionalAttr) {
35458		m["time_major"] = value
35459	}
35460}
35461
// An RNN backed by cuDNN.
//
// Computes the RNN from the input and initial states, with respect to the params
// buffer. Accepts one extra input, "sequence_lengths", compared to CudnnRNN.
35466//
35467// rnn_mode: Indicates the type of the RNN model.
35468// input_mode: Indicates whether there is a linear projection between the input and
35469//   the actual computation before the first layer. 'skip_input' is only allowed
35470//   when input_size == num_units; 'auto_select' implies 'skip_input' when
35471//   input_size == num_units; otherwise, it implies 'linear_input'.
35472// direction: Indicates whether a bidirectional model will be used. Should be
35473//   "unidirectional" or "bidirectional".
35474// dropout: Dropout probability. When set to 0., dropout is disabled.
35475// seed: The 1st part of a seed to initialize dropout.
35476// seed2: The 2nd part of a seed to initialize dropout.
35477// input: If time_major is true, this is a 3-D tensor with the shape of
35478//     [seq_length, batch_size, input_size]. If time_major is false, the shape is
35479//     [batch_size, seq_length, input_size].
35480// input_h: If time_major is true, this is a 3-D tensor with the shape of
35481//     [num_layer * dir, batch_size, num_units]. If time_major is false, the shape
35482//     is [batch_size, num_layer * dir, num_units].
35483// input_c: For LSTM, a 3-D tensor with the shape of
35484//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
35485// params: A 1-D tensor that contains the weights and biases in an opaque layout.
35486//     The size must be created through CudnnRNNParamsSize, and initialized
//     separately. Note that they might not be compatible across different
//     generations, so it is a good idea to save and restore them via the
//     canonical weights and biases.
35489// sequence_lengths: a vector of lengths of each input sequence.
35490// output: If time_major is true, this is a 3-D tensor with the shape of
35491//     [seq_length, batch_size, dir * num_units]. If time_major is false, the
35492//     shape is [batch_size, seq_length, dir * num_units].
// output_h: The same shape as input_h.
35494// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
35495// is_training: Indicates whether this operation is used for inference or
35496//   training.
35497// time_major: Indicates whether the input/output format is time major or batch
35498//     major.
35499// reserve_space: An opaque tensor that can be used in backprop calculation. It
35500//   is only produced if is_training is true.
35501func CudnnRNNV3(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, sequence_lengths tf.Output, optional ...CudnnRNNV3Attr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output, host_reserved tf.Output) {
35502	if scope.Err() != nil {
35503		return
35504	}
35505	attrs := map[string]interface{}{}
35506	for _, a := range optional {
35507		a(attrs)
35508	}
35509	opspec := tf.OpSpec{
35510		Type: "CudnnRNNV3",
35511		Input: []tf.Input{
35512			input, input_h, input_c, params, sequence_lengths,
35513		},
35514		Attrs: attrs,
35515	}
35516	op := scope.AddOperation(opspec)
35517	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
35518}
35519
35520// Pads a tensor with zeros.
35521//
// This operation pads `input` with zeros according to the `paddings` you
35523// specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
35524// rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
35525// how many zeros to add before the contents of `input` in that dimension, and
35526// `paddings[D, 1]` indicates how many zeros to add after the contents of `input`
35527// in that dimension.
35528//
35529// The padded size of each dimension D of the output is:
35530//
35531// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
35532//
35533// For example:
35534//
35535// ```
35536// # 't' is [[1, 1], [2, 2]]
35537// # 'paddings' is [[1, 1], [2, 2]]
35538// # rank of 't' is 2
// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0],
//                       [0, 0, 1, 1, 0, 0],
//                       [0, 0, 2, 2, 0, 0],
//                       [0, 0, 0, 0, 0, 0]]
35543// ```
35544//
35545func Pad(scope *Scope, input tf.Output, paddings tf.Output) (output tf.Output) {
35546	if scope.Err() != nil {
35547		return
35548	}
35549	opspec := tf.OpSpec{
35550		Type: "Pad",
35551		Input: []tf.Input{
35552			input, paddings,
35553		},
35554	}
35555	op := scope.AddOperation(opspec)
35556	return op.Output(0)
35557}
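
// The documented example as a Go sketch (assuming Const from this package):
//
// ```
// func padExample(s *Scope) tf.Output {
// 	t := Const(s, [][]int32{{1, 1}, {2, 2}})
// 	// One zero before and after dim 0; two zeros before and after dim 1.
// 	paddings := Const(s, [][]int32{{1, 1}, {2, 2}})
// 	return Pad(s, t, paddings) // shape [4, 6]
// }
// ```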
35558
35559// CudnnRNNV2Attr is an optional argument to CudnnRNNV2.
35560type CudnnRNNV2Attr func(optionalAttr)
35561
35562// CudnnRNNV2RnnMode sets the optional rnn_mode attribute to value.
35563// If not specified, defaults to "lstm"
35564func CudnnRNNV2RnnMode(value string) CudnnRNNV2Attr {
35565	return func(m optionalAttr) {
35566		m["rnn_mode"] = value
35567	}
35568}
35569
35570// CudnnRNNV2InputMode sets the optional input_mode attribute to value.
35571// If not specified, defaults to "linear_input"
35572func CudnnRNNV2InputMode(value string) CudnnRNNV2Attr {
35573	return func(m optionalAttr) {
35574		m["input_mode"] = value
35575	}
35576}
35577
35578// CudnnRNNV2Direction sets the optional direction attribute to value.
35579// If not specified, defaults to "unidirectional"
35580func CudnnRNNV2Direction(value string) CudnnRNNV2Attr {
35581	return func(m optionalAttr) {
35582		m["direction"] = value
35583	}
35584}
35585
35586// CudnnRNNV2Dropout sets the optional dropout attribute to value.
35587// If not specified, defaults to 0
35588func CudnnRNNV2Dropout(value float32) CudnnRNNV2Attr {
35589	return func(m optionalAttr) {
35590		m["dropout"] = value
35591	}
35592}
35593
35594// CudnnRNNV2Seed sets the optional seed attribute to value.
35595// If not specified, defaults to 0
35596func CudnnRNNV2Seed(value int64) CudnnRNNV2Attr {
35597	return func(m optionalAttr) {
35598		m["seed"] = value
35599	}
35600}
35601
35602// CudnnRNNV2Seed2 sets the optional seed2 attribute to value.
35603// If not specified, defaults to 0
35604func CudnnRNNV2Seed2(value int64) CudnnRNNV2Attr {
35605	return func(m optionalAttr) {
35606		m["seed2"] = value
35607	}
35608}
35609
35610// CudnnRNNV2IsTraining sets the optional is_training attribute to value.
35611// If not specified, defaults to true
35612func CudnnRNNV2IsTraining(value bool) CudnnRNNV2Attr {
35613	return func(m optionalAttr) {
35614		m["is_training"] = value
35615	}
35616}
35617
// An RNN backed by cuDNN.
//
// Computes the RNN from the input and initial states, with respect to the params
// buffer. Produces one extra output, "host_reserved", compared to CudnnRNN.
35622//
35623// rnn_mode: Indicates the type of the RNN model.
35624// input_mode: Indicates whether there is a linear projection between the input and
35625//   the actual computation before the first layer. 'skip_input' is only allowed
35626//   when input_size == num_units; 'auto_select' implies 'skip_input' when
35627//   input_size == num_units; otherwise, it implies 'linear_input'.
35628// direction: Indicates whether a bidirectional model will be used. Should be
35629//   "unidirectional" or "bidirectional".
35630// dropout: Dropout probability. When set to 0., dropout is disabled.
35631// seed: The 1st part of a seed to initialize dropout.
35632// seed2: The 2nd part of a seed to initialize dropout.
35633// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
35634// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
35635//     num_units].
35636// input_c: For LSTM, a 3-D tensor with the shape of
35637//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
35638// params: A 1-D tensor that contains the weights and biases in an opaque layout.
35639//     The size must be created through CudnnRNNParamsSize, and initialized
//     separately. Note that they might not be compatible across different
//     generations, so it is a good idea to save and restore them via the
//     canonical weights and biases.
35642// output: A 3-D tensor with the shape of [seq_length, batch_size,
35643//     dir * num_units].
// output_h: The same shape as input_h.
35645// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
35646// is_training: Indicates whether this operation is used for inference or
35647//   training.
35648// reserve_space: An opaque tensor that can be used in backprop calculation. It
35649//   is only produced if is_training is true.
35650// host_reserved: An opaque tensor that can be used in backprop calculation. It is
35651//   only produced if is_training is true. It is output on host memory rather than
35652//   device memory.
35653func CudnnRNNV2(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, optional ...CudnnRNNV2Attr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output, host_reserved tf.Output) {
35654	if scope.Err() != nil {
35655		return
35656	}
35657	attrs := map[string]interface{}{}
35658	for _, a := range optional {
35659		a(attrs)
35660	}
35661	opspec := tf.OpSpec{
35662		Type: "CudnnRNNV2",
35663		Input: []tf.Input{
35664			input, input_h, input_c, params,
35665		},
35666		Attrs: attrs,
35667	}
35668	op := scope.AddOperation(opspec)
35669	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
35670}
35671
35672// CTCBeamSearchDecoderAttr is an optional argument to CTCBeamSearchDecoder.
35673type CTCBeamSearchDecoderAttr func(optionalAttr)
35674
35675// CTCBeamSearchDecoderMergeRepeated sets the optional merge_repeated attribute to value.
35676//
35677// value: If true, merge repeated classes in output.
35678// If not specified, defaults to true
35679func CTCBeamSearchDecoderMergeRepeated(value bool) CTCBeamSearchDecoderAttr {
35680	return func(m optionalAttr) {
35681		m["merge_repeated"] = value
35682	}
35683}
35684
35685// Performs beam search decoding on the logits given in input.
35686//
35687// A note about the attribute merge_repeated: For the beam search decoder,
35688// this means that if consecutive entries in a beam are the same, only
35689// the first of these is emitted.  That is, when the top path is "A B B B B",
35690// "A B" is returned if merge_repeated = True but "A B B B B" is
35691// returned if merge_repeated = False.
35692//
35693// Arguments:
35694//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
35695//	sequence_length: A vector containing sequence lengths, size `(batch)`.
35696//	beam_width: A scalar >= 0 (beam search beam width).
35697//	top_paths: A scalar >= 0, <= beam_width (controls output size).
35698//
35699// Returns:
35700//	decoded_indices: A list (length: top_paths) of indices matrices.  Matrix j,
35701// size `(total_decoded_outputs[j] x 2)`, has indices of a
35702// `SparseTensor<int64, 2>`.  The rows store: [batch, time].
35703//	decoded_values: A list (length: top_paths) of values vectors.  Vector j,
35704// size `(length total_decoded_outputs[j])`, has the values of a
35705// `SparseTensor<int64, 2>`.  The vector stores the decoded classes for beam j.
35706//	decoded_shape: A list (length: top_paths) of shape vector.  Vector j,
35707// size `(2)`, stores the shape of the decoded `SparseTensor[j]`.
35708// Its values are: `[batch_size, max_decoded_length[j]]`.
35709//	log_probability: A matrix, shaped: `(batch_size x top_paths)`.  The
35710// sequence log-probabilities.
35711func CTCBeamSearchDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, beam_width int64, top_paths int64, optional ...CTCBeamSearchDecoderAttr) (decoded_indices []tf.Output, decoded_values []tf.Output, decoded_shape []tf.Output, log_probability tf.Output) {
35712	if scope.Err() != nil {
35713		return
35714	}
35715	attrs := map[string]interface{}{"beam_width": beam_width, "top_paths": top_paths}
35716	for _, a := range optional {
35717		a(attrs)
35718	}
35719	opspec := tf.OpSpec{
35720		Type: "CTCBeamSearchDecoder",
35721		Input: []tf.Input{
35722			inputs, sequence_length,
35723		},
35724		Attrs: attrs,
35725	}
35726	op := scope.AddOperation(opspec)
35727	if scope.Err() != nil {
35728		return
35729	}
35730	var idx int
35731	var err error
35732	if decoded_indices, idx, err = makeOutputList(op, idx, "decoded_indices"); err != nil {
35733		scope.UpdateErr("CTCBeamSearchDecoder", err)
35734		return
35735	}
35736	if decoded_values, idx, err = makeOutputList(op, idx, "decoded_values"); err != nil {
35737		scope.UpdateErr("CTCBeamSearchDecoder", err)
35738		return
35739	}
35740	if decoded_shape, idx, err = makeOutputList(op, idx, "decoded_shape"); err != nil {
35741		scope.UpdateErr("CTCBeamSearchDecoder", err)
35742		return
35743	}
35744	log_probability = op.Output(idx)
35745	return decoded_indices, decoded_values, decoded_shape, log_probability
35746}
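
// A wiring sketch: with top_paths > 1 the decoded outputs come back as
// parallel lists of SparseTensor components, one entry per path:
//
// ```
// func beamSearchGraph(s *Scope, logits, seqLen tf.Output) (indices, values, shapes []tf.Output, logProb tf.Output) {
// 	// beam_width 10, top_paths 3; keep "A B B B B" -> "A B" merging on.
// 	return CTCBeamSearchDecoder(s, logits, seqLen, 10, 3,
// 		CTCBeamSearchDecoderMergeRepeated(true))
// }
// ```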
35747
35748// ResourceSparseApplyKerasMomentumAttr is an optional argument to ResourceSparseApplyKerasMomentum.
35749type ResourceSparseApplyKerasMomentumAttr func(optionalAttr)
35750
35751// ResourceSparseApplyKerasMomentumUseLocking sets the optional use_locking attribute to value.
35752//
35753// value: If `True`, updating of the var and accum tensors will be protected
35754// by a lock; otherwise the behavior is undefined, but may exhibit less
35755// contention.
35756// If not specified, defaults to false
35757func ResourceSparseApplyKerasMomentumUseLocking(value bool) ResourceSparseApplyKerasMomentumAttr {
35758	return func(m optionalAttr) {
35759		m["use_locking"] = value
35760	}
35761}
35762
35763// ResourceSparseApplyKerasMomentumUseNesterov sets the optional use_nesterov attribute to value.
35764//
35765// value: If `True`, the tensor passed to compute grad will be
35766// var + momentum * accum, so in the end, the var you get is actually
35767// var + momentum * accum.
35768// If not specified, defaults to false
35769func ResourceSparseApplyKerasMomentumUseNesterov(value bool) ResourceSparseApplyKerasMomentumAttr {
35770	return func(m optionalAttr) {
35771		m["use_nesterov"] = value
35772	}
35773}
35774
35775// Update relevant entries in '*var' and '*accum' according to the momentum scheme.
35776//
35777// Set use_nesterov = True if you want to use Nesterov momentum.
35778//
// That is, for rows for which we have grad, we update var and accum as follows:
35780//
35781// accum = accum * momentum - lr * grad
35782// var += accum
35783//
35784// Arguments:
35785//	var_: Should be from a Variable().
35786//	accum: Should be from a Variable().
35787//	lr: Learning rate. Must be a scalar.
35788//	grad: The gradient.
35789//	indices: A vector of indices into the first dimension of var and accum.
35790//	momentum: Momentum. Must be a scalar.
35791//
35792// Returns the created operation.
35793func ResourceSparseApplyKerasMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyKerasMomentumAttr) (o *tf.Operation) {
35794	if scope.Err() != nil {
35795		return
35796	}
35797	attrs := map[string]interface{}{}
35798	for _, a := range optional {
35799		a(attrs)
35800	}
35801	opspec := tf.OpSpec{
35802		Type: "ResourceSparseApplyKerasMomentum",
35803		Input: []tf.Input{
35804			var_, accum, lr, grad, indices, momentum,
35805		},
35806		Attrs: attrs,
35807	}
35808	return scope.AddOperation(opspec)
35809}
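
// To make the update rule concrete, the same arithmetic in plain Go for one
// updated row (illustrative only; the op applies it to the rows of var and
// accum selected by indices):
//
// ```
// func kerasMomentumStep(varRow, accumRow, gradRow []float32, lr, momentum float32) {
// 	for i := range varRow {
// 		accumRow[i] = accumRow[i]*momentum - lr*gradRow[i]
// 		varRow[i] += accumRow[i]
// 	}
// }
// ```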
35810
35811// CTCLossV2Attr is an optional argument to CTCLossV2.
35812type CTCLossV2Attr func(optionalAttr)
35813
35814// CTCLossV2PreprocessCollapseRepeated sets the optional preprocess_collapse_repeated attribute to value.
35815//
35816// value: Scalar, if true then repeated labels are
35817// collapsed prior to the CTC calculation.
35818// If not specified, defaults to false
35819func CTCLossV2PreprocessCollapseRepeated(value bool) CTCLossV2Attr {
35820	return func(m optionalAttr) {
35821		m["preprocess_collapse_repeated"] = value
35822	}
35823}
35824
35825// CTCLossV2CtcMergeRepeated sets the optional ctc_merge_repeated attribute to value.
35826//
35827// value: Scalar.  If set to false, *during* CTC calculation
35828// repeated non-blank labels will not be merged and are interpreted as
35829// individual labels.  This is a simplified version of CTC.
35830// If not specified, defaults to true
35831func CTCLossV2CtcMergeRepeated(value bool) CTCLossV2Attr {
35832	return func(m optionalAttr) {
35833		m["ctc_merge_repeated"] = value
35834	}
35835}
35836
35837// CTCLossV2IgnoreLongerOutputsThanInputs sets the optional ignore_longer_outputs_than_inputs attribute to value.
35838//
35839// value: Scalar. If set to true, during CTC
35840// calculation, items that have longer output sequences than input sequences
35841// are skipped: they don't contribute to the loss term and have zero-gradient.
35842// If not specified, defaults to false
35843func CTCLossV2IgnoreLongerOutputsThanInputs(value bool) CTCLossV2Attr {
35844	return func(m optionalAttr) {
35845		m["ignore_longer_outputs_than_inputs"] = value
35846	}
35847}
35848
35849// Calculates the CTC Loss (log probability) for each batch entry.  Also calculates
35850//
// the gradient.  This op performs the softmax operation for you, so inputs
// should be, e.g., linear projections of outputs of an LSTM.
35853//
35854// Arguments:
35855//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. Default blank
// label is 0 rather than num_classes - 1.
35857//	labels_indices: The indices of a `SparseTensor<int32, 2>`.
35858// `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
35859// `(batch b, time t)`.
35860//	labels_values: The values (labels) associated with the given batch and time.
35861//	sequence_length: A vector containing sequence lengths (batch).
35862//
35863// Returns:
35864//	loss: A vector (batch) containing log-probabilities.
35865//	gradient: The gradient of `loss`.  3-D, shape:
35866// `(max_time x batch_size x num_classes)`.
35867func CTCLossV2(scope *Scope, inputs tf.Output, labels_indices tf.Output, labels_values tf.Output, sequence_length tf.Output, optional ...CTCLossV2Attr) (loss tf.Output, gradient tf.Output) {
35868	if scope.Err() != nil {
35869		return
35870	}
35871	attrs := map[string]interface{}{}
35872	for _, a := range optional {
35873		a(attrs)
35874	}
35875	opspec := tf.OpSpec{
35876		Type: "CTCLossV2",
35877		Input: []tf.Input{
35878			inputs, labels_indices, labels_values, sequence_length,
35879		},
35880		Attrs: attrs,
35881	}
35882	op := scope.AddOperation(opspec)
35883	return op.Output(0), op.Output(1)
35884}
35885
35886// Creates a dataset that emits the records from one or more binary files.
35887//
35888// Arguments:
35889//	filenames: A scalar or a vector containing the name(s) of the file(s) to be
35890// read.
35891//	header_bytes: A scalar representing the number of bytes to skip at the
35892// beginning of a file.
35893//	record_bytes: A scalar representing the number of bytes in each record.
35894//	footer_bytes: A scalar representing the number of bytes to skip at the end
35895// of a file.
35896//	buffer_size: A scalar representing the number of bytes to buffer. Must be > 0.
35897func FixedLengthRecordDataset(scope *Scope, filenames tf.Output, header_bytes tf.Output, record_bytes tf.Output, footer_bytes tf.Output, buffer_size tf.Output) (handle tf.Output) {
35898	if scope.Err() != nil {
35899		return
35900	}
35901	opspec := tf.OpSpec{
35902		Type: "FixedLengthRecordDataset",
35903		Input: []tf.Input{
35904			filenames, header_bytes, record_bytes, footer_bytes, buffer_size,
35905		},
35906	}
35907	op := scope.AddOperation(opspec)
35908	return op.Output(0)
35909}
35910
35911// CTCLossAttr is an optional argument to CTCLoss.
35912type CTCLossAttr func(optionalAttr)
35913
35914// CTCLossPreprocessCollapseRepeated sets the optional preprocess_collapse_repeated attribute to value.
35915//
35916// value: Scalar, if true then repeated labels are
35917// collapsed prior to the CTC calculation.
35918// If not specified, defaults to false
35919func CTCLossPreprocessCollapseRepeated(value bool) CTCLossAttr {
35920	return func(m optionalAttr) {
35921		m["preprocess_collapse_repeated"] = value
35922	}
35923}
35924
35925// CTCLossCtcMergeRepeated sets the optional ctc_merge_repeated attribute to value.
35926//
35927// value: Scalar.  If set to false, *during* CTC calculation
35928// repeated non-blank labels will not be merged and are interpreted as
35929// individual labels.  This is a simplified version of CTC.
35930// If not specified, defaults to true
35931func CTCLossCtcMergeRepeated(value bool) CTCLossAttr {
35932	return func(m optionalAttr) {
35933		m["ctc_merge_repeated"] = value
35934	}
35935}
35936
35937// CTCLossIgnoreLongerOutputsThanInputs sets the optional ignore_longer_outputs_than_inputs attribute to value.
35938//
35939// value: Scalar. If set to true, during CTC
35940// calculation, items that have longer output sequences than input sequences
35941// are skipped: they don't contribute to the loss term and have zero-gradient.
35942// If not specified, defaults to false
35943func CTCLossIgnoreLongerOutputsThanInputs(value bool) CTCLossAttr {
35944	return func(m optionalAttr) {
35945		m["ignore_longer_outputs_than_inputs"] = value
35946	}
35947}
35948
35949// Calculates the CTC Loss (log probability) for each batch entry.  Also calculates
35950//
// the gradient.  This op performs the softmax operation for you, so inputs
// should be, e.g., linear projections of outputs of an LSTM.
35953//
35954// Arguments:
35955//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
35956//	labels_indices: The indices of a `SparseTensor<int32, 2>`.
35957// `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
35958// `(batch b, time t)`.
35959//	labels_values: The values (labels) associated with the given batch and time.
35960//	sequence_length: A vector containing sequence lengths (batch).
35961//
35962// Returns:
35963//	loss: A vector (batch) containing log-probabilities.
35964//	gradient: The gradient of `loss`.  3-D, shape:
35965// `(max_time x batch_size x num_classes)`.
35966func CTCLoss(scope *Scope, inputs tf.Output, labels_indices tf.Output, labels_values tf.Output, sequence_length tf.Output, optional ...CTCLossAttr) (loss tf.Output, gradient tf.Output) {
35967	if scope.Err() != nil {
35968		return
35969	}
35970	attrs := map[string]interface{}{}
35971	for _, a := range optional {
35972		a(attrs)
35973	}
35974	opspec := tf.OpSpec{
35975		Type: "CTCLoss",
35976		Input: []tf.Input{
35977			inputs, labels_indices, labels_values, sequence_length,
35978		},
35979		Attrs: attrs,
35980	}
35981	op := scope.AddOperation(opspec)
35982	return op.Output(0), op.Output(1)
35983}
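
// A graph-wiring sketch (assuming the Placeholder helper from this package);
// the logits and label SparseTensor components are fed at run time:
//
// ```
// func ctcLossGraph(s *Scope) (loss, gradient tf.Output) {
// 	inputs := Placeholder(s, tf.Float)        // (max_time x batch_size x num_classes)
// 	labelsIndices := Placeholder(s, tf.Int64) // SparseTensor indices
// 	labelsValues := Placeholder(s, tf.Int32)  // SparseTensor values
// 	seqLen := Placeholder(s, tf.Int32)        // (batch)
// 	return CTCLoss(s, inputs, labelsIndices, labelsValues, seqLen,
// 		CTCLossCtcMergeRepeated(true))
// }
// ```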
35984
35985// QueueDequeueV2Attr is an optional argument to QueueDequeueV2.
35986type QueueDequeueV2Attr func(optionalAttr)
35987
35988// QueueDequeueV2TimeoutMs sets the optional timeout_ms attribute to value.
35989//
35990// value: If the queue is empty, this operation will block for up to
35991// timeout_ms milliseconds.
35992// Note: This option is not supported yet.
35993// If not specified, defaults to -1
35994func QueueDequeueV2TimeoutMs(value int64) QueueDequeueV2Attr {
35995	return func(m optionalAttr) {
35996		m["timeout_ms"] = value
35997	}
35998}
35999
36000// Dequeues a tuple of one or more tensors from the given queue.
36001//
36002// This operation has k outputs, where k is the number of components
36003// in the tuples stored in the given queue, and output i is the ith
36004// component of the dequeued tuple.
36005//
36006// N.B. If the queue is empty, this operation will block until an element
36007// has been dequeued (or 'timeout_ms' elapses, if specified).
36008//
36009// Arguments:
36010//	handle: The handle to a queue.
36011//	component_types: The type of each component in a tuple.
36012//
36013// Returns One or more tensors that were dequeued as a tuple.
36014func QueueDequeueV2(scope *Scope, handle tf.Output, component_types []tf.DataType, optional ...QueueDequeueV2Attr) (components []tf.Output) {
36015	if scope.Err() != nil {
36016		return
36017	}
36018	attrs := map[string]interface{}{"component_types": component_types}
36019	for _, a := range optional {
36020		a(attrs)
36021	}
36022	opspec := tf.OpSpec{
36023		Type: "QueueDequeueV2",
36024		Input: []tf.Input{
36025			handle,
36026		},
36027		Attrs: attrs,
36028	}
36029	op := scope.AddOperation(opspec)
36030	if scope.Err() != nil {
36031		return
36032	}
36033	var idx int
36034	var err error
36035	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
36036		scope.UpdateErr("QueueDequeueV2", err)
36037		return
36038	}
36039	return components
36040}
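
// A minimal sketch pairing this op with FIFOQueueV2 and QueueEnqueueV2 from
// this package; the enqueue target is run once before dequeuing:
//
// ```
// func queueExample() error {
// 	s := NewScope()
// 	q := FIFOQueueV2(s, []tf.DataType{tf.Int32})
// 	enq := QueueEnqueueV2(s, q, []tf.Output{Const(s, int32(42))})
// 	deq := QueueDequeueV2(s, q, []tf.DataType{tf.Int32})
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		return err
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		return err
// 	}
// 	defer sess.Close()
// 	if _, err := sess.Run(nil, nil, []*tf.Operation{enq}); err != nil {
// 		return err
// 	}
// 	_, err = sess.Run(nil, deq, nil) // the dequeued tuple holds int32(42)
// 	return err
// }
// ```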
36041
36042// Creates a TensorArray for storing multiple gradients of values in the given handle.
36043//
// Similar to TensorArrayGradV3. However, it creates an accumulator with an
36045// expanded shape compared to the input TensorArray whose gradient is being
36046// computed. This enables multiple gradients for the same TensorArray to be
36047// calculated using the same accumulator.
36048//
36049// Arguments:
36050//	handle: The handle to the forward TensorArray.
36051//	flow_in: A float scalar that enforces proper chaining of operations.
36052//	shape_to_prepend: An int32 vector representing a shape. Elements in the gradient accumulator will
36053// have shape which is this shape_to_prepend value concatenated with shape of the
36054// elements in the TensorArray corresponding to the input handle.
36055//	source: The gradient source string, used to decide which gradient TensorArray
36056// to return.
36057func TensorArrayGradWithShape(scope *Scope, handle tf.Output, flow_in tf.Output, shape_to_prepend tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output) {
36058	if scope.Err() != nil {
36059		return
36060	}
36061	attrs := map[string]interface{}{"source": source}
36062	opspec := tf.OpSpec{
36063		Type: "TensorArrayGradWithShape",
36064		Input: []tf.Input{
36065			handle, flow_in, shape_to_prepend,
36066		},
36067		Attrs: attrs,
36068	}
36069	op := scope.AddOperation(opspec)
36070	return op.Output(0), op.Output(1)
36071}
36072
36073// ResourceApplyCenteredRMSPropAttr is an optional argument to ResourceApplyCenteredRMSProp.
36074type ResourceApplyCenteredRMSPropAttr func(optionalAttr)
36075
36076// ResourceApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
36077//
36078// value: If `True`, updating of the var, mg, ms, and mom tensors is
36079// protected by a lock; otherwise the behavior is undefined, but may exhibit less
36080// contention.
36081// If not specified, defaults to false
36082func ResourceApplyCenteredRMSPropUseLocking(value bool) ResourceApplyCenteredRMSPropAttr {
36083	return func(m optionalAttr) {
36084		m["use_locking"] = value
36085	}
36086}
36087
36088// Update '*var' according to the centered RMSProp algorithm.
36089//
36090// The centered RMSProp algorithm uses an estimate of the centered second moment
36091// (i.e., the variance) for normalization, as opposed to regular RMSProp, which
36092// uses the (uncentered) second moment. This often helps with training, but is
36093// slightly more expensive in terms of computation and memory.
36094//
// Note that in the dense implementation of this algorithm, mg, ms, and mom will
36096// update even if the grad is zero, but in this sparse implementation, mg, ms,
36097// and mom will not update in iterations during which the grad is zero.
36098//
36099// mean_square = decay * mean_square + (1-decay) * gradient ** 2
36100// mean_grad = decay * mean_grad + (1-decay) * gradient
36101//
36102// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
36103//
36104// mg <- rho * mg_{t-1} + (1-rho) * grad
36105// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
36106// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
36107// var <- var - mom
36108//
36109// Arguments:
36110//	var_: Should be from a Variable().
36111//	mg: Should be from a Variable().
36112//	ms: Should be from a Variable().
36113//	mom: Should be from a Variable().
36114//	lr: Scaling factor. Must be a scalar.
36115//	rho: Decay rate. Must be a scalar.
36116//	momentum: Momentum Scale. Must be a scalar.
36117//	epsilon: Ridge term. Must be a scalar.
36118//	grad: The gradient.
36119//
36120// Returns the created operation.
36121func ResourceApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyCenteredRMSPropAttr) (o *tf.Operation) {
36122	if scope.Err() != nil {
36123		return
36124	}
36125	attrs := map[string]interface{}{}
36126	for _, a := range optional {
36127		a(attrs)
36128	}
36129	opspec := tf.OpSpec{
36130		Type: "ResourceApplyCenteredRMSProp",
36131		Input: []tf.Input{
36132			var_, mg, ms, mom, lr, rho, momentum, epsilon, grad,
36133		},
36134		Attrs: attrs,
36135	}
36136	return scope.AddOperation(opspec)
36137}
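
// The same update for a single element in plain Go, to make the equations
// concrete (illustrative only; the "math" import is an assumption of the
// sketch):
//
// ```
// func centeredRMSPropStep(v, mg, ms, mom *float64, lr, rho, momentum, epsilon, grad float64) {
// 	*mg = rho*(*mg) + (1-rho)*grad
// 	*ms = rho*(*ms) + (1-rho)*grad*grad
// 	*mom = momentum*(*mom) + lr*grad/math.Sqrt(*ms-(*mg)*(*mg)+epsilon)
// 	*v -= *mom
// }
// ```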
36138
36139// SerializeIteratorAttr is an optional argument to SerializeIterator.
36140type SerializeIteratorAttr func(optionalAttr)
36141
36142// SerializeIteratorExternalStatePolicy sets the optional external_state_policy attribute to value.
36143// If not specified, defaults to 0
36144func SerializeIteratorExternalStatePolicy(value int64) SerializeIteratorAttr {
36145	return func(m optionalAttr) {
36146		m["external_state_policy"] = value
36147	}
36148}
36149
36150// Converts the given `resource_handle` representing an iterator to a variant tensor.
36151//
36152// Arguments:
36153//	resource_handle: A handle to an iterator resource.
36154//
36155// Returns A variant tensor storing the state of the iterator contained in the
36156// resource.
36157func SerializeIterator(scope *Scope, resource_handle tf.Output, optional ...SerializeIteratorAttr) (serialized tf.Output) {
36158	if scope.Err() != nil {
36159		return
36160	}
36161	attrs := map[string]interface{}{}
36162	for _, a := range optional {
36163		a(attrs)
36164	}
36165	opspec := tf.OpSpec{
36166		Type: "SerializeIterator",
36167		Input: []tf.Input{
36168			resource_handle,
36169		},
36170		Attrs: attrs,
36171	}
36172	op := scope.AddOperation(opspec)
36173	return op.Output(0)
36174}
36175
36176// PriorityQueueV2Attr is an optional argument to PriorityQueueV2.
36177type PriorityQueueV2Attr func(optionalAttr)
36178
36179// PriorityQueueV2ComponentTypes sets the optional component_types attribute to value.
36180//
36181// value: The type of each component in a value.
36182// If not specified, defaults to {}
36183//
36184// REQUIRES: len(value) >= 0
36185func PriorityQueueV2ComponentTypes(value []tf.DataType) PriorityQueueV2Attr {
36186	return func(m optionalAttr) {
36187		m["component_types"] = value
36188	}
36189}
36190
36191// PriorityQueueV2Capacity sets the optional capacity attribute to value.
36192//
36193// value: The upper bound on the number of elements in this queue.
36194// Negative numbers mean no limit.
36195// If not specified, defaults to -1
36196func PriorityQueueV2Capacity(value int64) PriorityQueueV2Attr {
36197	return func(m optionalAttr) {
36198		m["capacity"] = value
36199	}
36200}
36201
36202// PriorityQueueV2Container sets the optional container attribute to value.
36203//
36204// value: If non-empty, this queue is placed in the given container.
36205// Otherwise, a default container is used.
36206// If not specified, defaults to ""
36207func PriorityQueueV2Container(value string) PriorityQueueV2Attr {
36208	return func(m optionalAttr) {
36209		m["container"] = value
36210	}
36211}
36212
36213// PriorityQueueV2SharedName sets the optional shared_name attribute to value.
36214//
36215// value: If non-empty, this queue will be shared under the given name
36216// across multiple sessions.
36217// If not specified, defaults to ""
36218func PriorityQueueV2SharedName(value string) PriorityQueueV2Attr {
36219	return func(m optionalAttr) {
36220		m["shared_name"] = value
36221	}
36222}
36223
36224// A queue that produces elements sorted by the first component value.
36225//
36226// Note that the PriorityQueue requires the first component of any element
36227// to be a scalar int64, in addition to the other elements declared by
36228// component_types.  Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
36229// and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
36230// entry in their input (resp. output) lists.
36231//
36232// Arguments:
36233//	shapes: The shape of each component in a value. The length of this attr must
36234// be either 0 or the same as the length of component_types. If the length of
36235// this attr is 0, the shapes of queue elements are not constrained, and
36236// only one element may be dequeued at a time.
36237//
36238// Returns The handle to the queue.
36239func PriorityQueueV2(scope *Scope, shapes []tf.Shape, optional ...PriorityQueueV2Attr) (handle tf.Output) {
36240	if scope.Err() != nil {
36241		return
36242	}
36243	attrs := map[string]interface{}{"shapes": shapes}
36244	for _, a := range optional {
36245		a(attrs)
36246	}
36247	opspec := tf.OpSpec{
36248		Type: "PriorityQueueV2",
36249
36250		Attrs: attrs,
36251	}
36252	op := scope.AddOperation(opspec)
36253	return op.Output(0)
36254}
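
// A wiring sketch of the extra priority component (QueueEnqueueV2 and Const
// are assumed from this package): each enqueued tuple leads with a scalar
// int64 priority ahead of the declared component_types.
//
// ```
// func priorityEnqueue(s *Scope, value tf.Output) *tf.Operation {
// 	q := PriorityQueueV2(s, []tf.Shape{},
// 		PriorityQueueV2ComponentTypes([]tf.DataType{tf.String}))
// 	// The scalar int64 priority comes first, then the declared components.
// 	return QueueEnqueueV2(s, q, []tf.Output{Const(s, int64(5)), value})
// }
// ```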
36255
36256// Strip leading and trailing whitespaces from the Tensor.
36257//
36258// Examples:
36259//
36260// >>> tf.strings.strip(["\nTensorFlow", "     The python library    "]).numpy()
36261// array([b'TensorFlow', b'The python library'], dtype=object)
36262//
36263// Arguments:
36264//	input: A string `Tensor` of any shape.
36265//
36266// Returns A string `Tensor` of the same shape as the input.
36267func StringStrip(scope *Scope, input tf.Output) (output tf.Output) {
36268	if scope.Err() != nil {
36269		return
36270	}
36271	opspec := tf.OpSpec{
36272		Type: "StringStrip",
36273		Input: []tf.Input{
36274			input,
36275		},
36276	}
36277	op := scope.AddOperation(opspec)
36278	return op.Output(0)
36279}
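
// The documented example end-to-end in Go (a sketch, assuming NewScope/Const
// from this package and the session API from the parent tensorflow/go
// package):
//
// ```
// func stripExample() (interface{}, error) {
// 	s := NewScope()
// 	out := StringStrip(s, Const(s, []string{"\nTensorFlow", "     The python library    "}))
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		return nil, err
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		return nil, err
// 	}
// 	defer sess.Close()
// 	res, err := sess.Run(nil, []tf.Output{out}, nil)
// 	if err != nil {
// 		return nil, err
// 	}
// 	return res[0].Value(), nil // ["TensorFlow" "The python library"]
// }
// ```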
36280
36281// UnicodeEncodeAttr is an optional argument to UnicodeEncode.
36282type UnicodeEncodeAttr func(optionalAttr)
36283
36284// UnicodeEncodeErrors sets the optional errors attribute to value.
36285//
36286// value: Error handling policy when there is invalid formatting found in the input.
// The value of 'strict' will cause the operation to produce an InvalidArgument
36288// error on any invalid input formatting. A value of 'replace' (the default) will
36289// cause the operation to replace any invalid formatting in the input with the
36290// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
36291// skip any invalid formatting in the input and produce no corresponding output
36292// character.
36293// If not specified, defaults to "replace"
36294func UnicodeEncodeErrors(value string) UnicodeEncodeAttr {
36295	return func(m optionalAttr) {
36296		m["errors"] = value
36297	}
36298}
36299
36300// UnicodeEncodeReplacementChar sets the optional replacement_char attribute to value.
36301//
36302// value: The replacement character codepoint to be used in place of any invalid
36303// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
// be used. The default value is 0xFFFD, the unicode replacement character
// (decimal 65533).
36306// If not specified, defaults to 65533
36307func UnicodeEncodeReplacementChar(value int64) UnicodeEncodeAttr {
36308	return func(m optionalAttr) {
36309		m["replacement_char"] = value
36310	}
36311}
36312
36313// Encode a tensor of ints into unicode strings.
36314//
36315// Returns a vector of strings, where `output[i]` is constructed by encoding the
36316// Unicode codepoints in `input_values[input_splits[i]:input_splits[i+1]]`
36317// using `output_encoding`.
36318//
36319// ---
36320//
36321// Example:
36322//
36323// ```
36324// input_values = [72, 101, 108, 108, 111, 87, 111, 114, 108, 100]
36325// input_splits = [0, 5, 10]
36326// output_encoding = 'UTF-8'
36327//
36328// output = ['Hello', 'World']
36329// ```
36330//
36331// Arguments:
36332//	input_values: A 1D tensor containing the unicode codepoints that should be encoded.
36333//	input_splits: A 1D tensor specifying how the unicode codepoints should be split into strings.
36334// In particular, `output[i]` is constructed by encoding the codepoints in the
36335// slice `input_values[input_splits[i]:input_splits[i+1]]`.
36336//	output_encoding: Unicode encoding of the output strings. Valid encodings are: `"UTF-8",
36337// "UTF-16-BE", and "UTF-32-BE"`.
36338//
36339// Returns The 1-D Tensor of strings encoded from the provided unicode codepoints.
36340func UnicodeEncode(scope *Scope, input_values tf.Output, input_splits tf.Output, output_encoding string, optional ...UnicodeEncodeAttr) (output tf.Output) {
36341	if scope.Err() != nil {
36342		return
36343	}
36344	attrs := map[string]interface{}{"output_encoding": output_encoding}
36345	for _, a := range optional {
36346		a(attrs)
36347	}
36348	opspec := tf.OpSpec{
36349		Type: "UnicodeEncode",
36350		Input: []tf.Input{
36351			input_values, input_splits,
36352		},
36353		Attrs: attrs,
36354	}
36355	op := scope.AddOperation(opspec)
36356	return op.Output(0)
36357}
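
// The documented example as a graph-wiring sketch (assuming Const from this
// package):
//
// ```
// func unicodeEncodeExample(s *Scope) tf.Output {
// 	values := Const(s, []int32{72, 101, 108, 108, 111, 87, 111, 114, 108, 100})
// 	splits := Const(s, []int64{0, 5, 10})
// 	// Encodes values[0:5] and values[5:10] into ["Hello", "World"].
// 	return UnicodeEncode(s, values, splits, "UTF-8")
// }
// ```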
36358
36359// Forwards `data` to the output port determined by `pred`.
36360//
36361// If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
36362// the data goes to `output_false`.
36363//
36364// See also `RefSwitch` and `Merge`.
36365//
36366// Arguments:
36367//	data: The tensor to be forwarded to the appropriate output.
36368//	pred: A scalar that specifies which output port will receive data.
36369//
36370// Returns:
36371//	output_false: If `pred` is false, data will be forwarded to this output.
36372//	output_true: If `pred` is true, data will be forwarded to this output.
36373func Switch(scope *Scope, data tf.Output, pred tf.Output) (output_false tf.Output, output_true tf.Output) {
36374	if scope.Err() != nil {
36375		return
36376	}
36377	opspec := tf.OpSpec{
36378		Type: "Switch",
36379		Input: []tf.Input{
36380			data, pred,
36381		},
36382	}
36383	op := scope.AddOperation(opspec)
36384	return op.Output(0), op.Output(1)
36385}
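
// A wiring sketch: both output ports exist in the graph, but at run time only
// the port selected by pred produces a live tensor, so fetch only that one
// (Placeholder and Const are assumed from this package):
//
// ```
// func switchExample(s *Scope) (pred, outputFalse, outputTrue tf.Output) {
// 	pred = Placeholder(s, tf.Bool)
// 	outputFalse, outputTrue = Switch(s, Const(s, int32(7)), pred)
// 	return
// }
// ```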
36386
36387// Returns a batched matrix tensor with new batched diagonal values.
36388//
36389// Given `input` and `diagonal`, this operation returns a tensor with the
36390// same shape and values as `input`, except for the specified diagonals of the
36391// innermost matrices. These will be overwritten by the values in `diagonal`.
36392//
36393// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
36394// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
36395// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
36396// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
36397// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
36398// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
36399//
// The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
36401// If `k` is scalar or `k[0] == k[1]`:
36402//
36403// ```
36404// output[i, j, ..., l, m, n]
36405//   = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
36406//     input[i, j, ..., l, m, n]              ; otherwise
36407// ```
36408//
36409// Otherwise,
36410//
36411// ```
36412// output[i, j, ..., l, m, n]
36413//   = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
36414//     input[i, j, ..., l, m, n]                         ; otherwise
36415// ```
36416// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
36417//
36418// For example:
36419//
36420// ```
36421// # The main diagonal.
36422// input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
36423//                    [7, 7, 7, 7],
36424//                    [7, 7, 7, 7]],
36425//                   [[7, 7, 7, 7],
36426//                    [7, 7, 7, 7],
36427//                    [7, 7, 7, 7]]])
36428// diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
36429//                      [4, 5, 6]])
// tf.matrix_set_diag(input, diagonal) ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
//                                           [7, 2, 7, 7],
//                                           [7, 7, 3, 7]],
//                                          [[4, 7, 7, 7],
//                                           [7, 5, 7, 7],
//                                           [7, 7, 6, 7]]]
36436//
36437// # A superdiagonal (per batch).
// tf.matrix_set_diag(input, diagonal, k = 1)
36439//   ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
36440//         [7, 7, 2, 7],
36441//         [7, 7, 7, 3]],
36442//        [[7, 4, 7, 7],
36443//         [7, 7, 5, 7],
36444//         [7, 7, 7, 6]]]
36445//
36446// # A band of diagonals.
36447// diagonals = np.array([[[1, 2, 3],  # Diagonal shape: (2, 2, 3)
36448//                        [4, 5, 0]],
36449//                       [[6, 1, 2],
36450//                        [3, 4, 0]]])
// tf.matrix_set_diag(input, diagonals, k = (-1, 0))
36452//   ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
36453//         [4, 2, 7, 7],
36454//         [0, 5, 3, 7]],
36455//        [[6, 7, 7, 7],
36456//         [3, 1, 7, 7],
36457//         [7, 4, 2, 7]]]
36458//
36459// ```
36460//
36461// Arguments:
36462//	input: Rank `r+1`, where `r >= 1`.
//	diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
36465//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
36466// diagonal, and negative value means subdiagonals. `k` can be a single integer
36467// (for a single diagonal) or a pair of integers specifying the low and high ends
36468// of a matrix band. `k[0]` must not be larger than `k[1]`.
36469//
36470// Returns Rank `r+1`, with `output.shape = input.shape`.
36471func MatrixSetDiagV2(scope *Scope, input tf.Output, diagonal tf.Output, k tf.Output) (output tf.Output) {
36472	if scope.Err() != nil {
36473		return
36474	}
36475	opspec := tf.OpSpec{
36476		Type: "MatrixSetDiagV2",
36477		Input: []tf.Input{
36478			input, diagonal, k,
36479		},
36480	}
36481	op := scope.AddOperation(opspec)
36482	return op.Output(0)
36483}
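
// The main-diagonal case from the example above as a Go sketch (assuming
// Const from this package, and using a single 3x4 matrix rather than a
// batch):
//
// ```
// func setDiagExample(s *Scope) tf.Output {
// 	input := Const(s, [][]int32{{7, 7, 7, 7}, {7, 7, 7, 7}, {7, 7, 7, 7}})
// 	diagonal := Const(s, []int32{1, 2, 3})
// 	k := Const(s, int32(0)) // the main diagonal
// 	// Result: [[1 7 7 7] [7 2 7 7] [7 7 3 7]]
// 	return MatrixSetDiagV2(s, input, diagonal, k)
// }
// ```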
36484
36485// CollectiveBcastSendV2Attr is an optional argument to CollectiveBcastSendV2.
36486type CollectiveBcastSendV2Attr func(optionalAttr)
36487
36488// CollectiveBcastSendV2CommunicationHint sets the optional communication_hint attribute to value.
36489// If not specified, defaults to "auto"
36490func CollectiveBcastSendV2CommunicationHint(value string) CollectiveBcastSendV2Attr {
36491	return func(m optionalAttr) {
36492		m["communication_hint"] = value
36493	}
36494}
36495
36496// CollectiveBcastSendV2TimeoutSeconds sets the optional timeout_seconds attribute to value.
36497// If not specified, defaults to 0
36498func CollectiveBcastSendV2TimeoutSeconds(value float32) CollectiveBcastSendV2Attr {
36499	return func(m optionalAttr) {
36500		m["timeout_seconds"] = value
36501	}
36502}
36503
36504// Broadcasts a tensor value to one or more other devices.
36505func CollectiveBcastSendV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, instance_key tf.Output, optional ...CollectiveBcastSendV2Attr) (data tf.Output) {
36506	if scope.Err() != nil {
36507		return
36508	}
36509	attrs := map[string]interface{}{}
36510	for _, a := range optional {
36511		a(attrs)
36512	}
36513	opspec := tf.OpSpec{
36514		Type: "CollectiveBcastSendV2",
36515		Input: []tf.Input{
36516			input, group_size, group_key, instance_key,
36517		},
36518		Attrs: attrs,
36519	}
36520	op := scope.AddOperation(opspec)
36521	return op.Output(0)
36522}
36523
36524// Computes the sum along segments of a tensor.
36525//
36526// Read
36527// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
36528// for an explanation of segments.
36529//
36530// Computes a tensor such that
36531// \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such
36532// that `segment_ids[j...] == i`.  Unlike `SegmentSum`, `segment_ids`
36533// need not be sorted and need not cover all values in the full
36534// range of valid values.
36535//
36536// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
36537// If the given segment ID `i` is negative, the value is dropped and will not be
36538// added to the sum of the segment.
36539//
36540// `num_segments` should equal the number of distinct segment IDs.
36541//
// (Illustration: https://www.tensorflow.org/images/UnsortedSegmentSum.png)
36545//
36546// ``` python
36547// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
36548// tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
36549// # ==> [[ 5, 5, 5, 5],
36550// #       [5, 6, 7, 8]]
36551// ```
//
// Arguments:
//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
//
36559// Returns Has same shape as data, except for the first `segment_ids.rank`
36560// dimensions, which are replaced with a single dimension which has size
36561// `num_segments`.
36562func UnsortedSegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
36563	if scope.Err() != nil {
36564		return
36565	}
36566	opspec := tf.OpSpec{
36567		Type: "UnsortedSegmentSum",
36568		Input: []tf.Input{
36569			data, segment_ids, num_segments,
36570		},
36571	}
36572	op := scope.AddOperation(opspec)
36573	return op.Output(0)
36574}
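
// The documented example as a Go sketch (assuming Const from this package):
//
// ```
// func segmentSumExample(s *Scope) tf.Output {
// 	c := Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}, {4, 3, 2, 1}})
// 	segmentIDs := Const(s, []int32{0, 1, 0})
// 	numSegments := Const(s, int32(2))
// 	// Rows 0 and 2 sum into segment 0: [[5 5 5 5] [5 6 7 8]].
// 	return UnsortedSegmentSum(s, c, segmentIDs, numSegments)
// }
// ```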
36575
36576// CollectiveReduceV2Attr is an optional argument to CollectiveReduceV2.
36577type CollectiveReduceV2Attr func(optionalAttr)
36578
36579// CollectiveReduceV2CommunicationHint sets the optional communication_hint attribute to value.
36580// If not specified, defaults to "auto"
36581func CollectiveReduceV2CommunicationHint(value string) CollectiveReduceV2Attr {
36582	return func(m optionalAttr) {
36583		m["communication_hint"] = value
36584	}
36585}
36586
36587// CollectiveReduceV2TimeoutSeconds sets the optional timeout_seconds attribute to value.
36588// If not specified, defaults to 0
36589func CollectiveReduceV2TimeoutSeconds(value float32) CollectiveReduceV2Attr {
36590	return func(m optionalAttr) {
36591		m["timeout_seconds"] = value
36592	}
36593}
36594
36595// CollectiveReduceV2MaxSubdivsPerDevice sets the optional max_subdivs_per_device attribute to value.
36596// If not specified, defaults to -1
36597func CollectiveReduceV2MaxSubdivsPerDevice(value int64) CollectiveReduceV2Attr {
36598	return func(m optionalAttr) {
36599		m["max_subdivs_per_device"] = value
36600	}
36601}
36602
36603// Mutually reduces multiple tensors of identical type and shape.
36604func CollectiveReduceV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, instance_key tf.Output, ordering_token []tf.Output, merge_op string, final_op string, optional ...CollectiveReduceV2Attr) (data tf.Output) {
36605	if scope.Err() != nil {
36606		return
36607	}
36608	attrs := map[string]interface{}{"merge_op": merge_op, "final_op": final_op}
36609	for _, a := range optional {
36610		a(attrs)
36611	}
36612	opspec := tf.OpSpec{
36613		Type: "CollectiveReduceV2",
36614		Input: []tf.Input{
36615			input, group_size, group_key, instance_key, tf.OutputList(ordering_token),
36616		},
36617		Attrs: attrs,
36618	}
36619	op := scope.AddOperation(opspec)
36620	return op.Output(0)
36621}
36622
36623// DecodeWavAttr is an optional argument to DecodeWav.
36624type DecodeWavAttr func(optionalAttr)
36625
36626// DecodeWavDesiredChannels sets the optional desired_channels attribute to value.
36627//
36628// value: Number of sample channels wanted.
36629// If not specified, defaults to -1
36630func DecodeWavDesiredChannels(value int64) DecodeWavAttr {
36631	return func(m optionalAttr) {
36632		m["desired_channels"] = value
36633	}
36634}
36635
36636// DecodeWavDesiredSamples sets the optional desired_samples attribute to value.
36637//
36638// value: Length of audio requested.
36639// If not specified, defaults to -1
36640func DecodeWavDesiredSamples(value int64) DecodeWavAttr {
36641	return func(m optionalAttr) {
36642		m["desired_samples"] = value
36643	}
36644}
36645
36646// Decode a 16-bit PCM WAV file to a float tensor.
36647//
36648// The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
36649//
// When desired_channels is set and the input contains fewer channels than this,
// the last channel will be duplicated to give the requested number; if the input
// has more channels than requested, the additional channels will be ignored.
36654//
36655// If desired_samples is set, then the audio will be cropped or padded with zeroes
36656// to the requested length.
36657//
36658// The first output contains a Tensor with the content of the audio samples. The
36659// lowest dimension will be the number of channels, and the second will be the
36660// number of samples. For example, a ten-sample-long stereo WAV file should give an
36661// output shape of [10, 2].
36662//
36663// Arguments:
36664//	contents: The WAV-encoded audio, usually from a file.
36665//
36666// Returns:
36667//	audio: 2-D with shape `[length, channels]`.
36668//	sample_rate: Scalar holding the sample rate found in the WAV header.
36669func DecodeWav(scope *Scope, contents tf.Output, optional ...DecodeWavAttr) (audio tf.Output, sample_rate tf.Output) {
36670	if scope.Err() != nil {
36671		return
36672	}
36673	attrs := map[string]interface{}{}
36674	for _, a := range optional {
36675		a(attrs)
36676	}
36677	opspec := tf.OpSpec{
36678		Type: "DecodeWav",
36679		Input: []tf.Input{
36680			contents,
36681		},
36682		Attrs: attrs,
36683	}
36684	op := scope.AddOperation(opspec)
36685	return op.Output(0), op.Output(1)
36686}
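
// A usage sketch: decoding WAV bytes already present in the graph as a
// string tensor. The Scope `s` and `wavData` (the file contents as a Go
// string) are assumed to exist.
//
// ```
// wavBytes := Const(s, wavData)
// audio, sampleRate := DecodeWav(s, wavBytes,
// 	DecodeWavDesiredChannels(1),    // extra channels are ignored
// 	DecodeWavDesiredSamples(16000)) // crop or zero-pad to 16000 samples
// _, _ = audio, sampleRate
// ```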

// QueueDequeueUpToV2Attr is an optional argument to QueueDequeueUpToV2.
type QueueDequeueUpToV2Attr func(optionalAttr)

// QueueDequeueUpToV2TimeoutMs sets the optional timeout_ms attribute to value.
//
// value: If the queue has fewer than n elements, this operation
// will block for up to timeout_ms milliseconds.
// Note: This option is not supported yet.
// If not specified, defaults to -1
func QueueDequeueUpToV2TimeoutMs(value int64) QueueDequeueUpToV2Attr {
	return func(m optionalAttr) {
		m["timeout_ms"] = value
	}
}

// Dequeues `n` tuples of one or more tensors from the given queue.
//
// This operation is not supported by all queues.  If a queue does not support
// DequeueUpTo, then an Unimplemented error is returned.
//
// If the queue is closed and there are more than 0 but fewer than `n`
// elements remaining, then instead of returning an OutOfRange error like
// QueueDequeueMany, fewer than `n` elements are returned immediately.  If
// the queue is closed and there are 0 elements left in the queue, then
// an OutOfRange error is returned just like in QueueDequeueMany.
// Otherwise the behavior is identical to QueueDequeueMany:
//
// This operation concatenates queue-element component tensors along the
// 0th dimension to make a single component tensor.  All of the components
// in the dequeued tuple will have size n in the 0th dimension.
//
// This operation has `k` outputs, where `k` is the number of components in
// the tuples stored in the given queue, and output `i` is the ith
// component of the dequeued tuple.
//
// Arguments:
//	handle: The handle to a queue.
//	n: The number of tuples to dequeue.
//	component_types: The type of each component in a tuple.
//
// Returns One or more tensors that were dequeued as a tuple.
func QueueDequeueUpToV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueUpToV2Attr) (components []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"component_types": component_types}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QueueDequeueUpToV2",
		Input: []tf.Input{
			handle, n,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
		scope.UpdateErr("QueueDequeueUpToV2", err)
		return
	}
	return components
}
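
// A usage sketch: draining up to a fixed batch from a queue handle. The
// `queueHandle` output is assumed to come from a queue-creating op (for
// example FIFOQueueV2), and the component dtypes must match how the queue
// was created.
//
// ```
// n := Const(s, int32(32))
// components := QueueDequeueUpToV2(s, queueHandle, n,
// 	[]tf.DataType{tf.Float, tf.Int32})
// _ = components // components[0]: float batch, components[1]: int32 batch
// ```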

// Returns the TopK values in the array in sorted order.
//
// This is a combination of MakeUnique and TopKUnique. The returned top-K will
// have its lower bits replaced by iota, thus it will be close to the original
// value but not exactly the same. The running time is proportional to the product
// of K and the input size. NaNs are never returned. Subnormal numbers are flushed
// to zero.
func TopKWithUnique(scope *Scope, input tf.Output, k int64) (topk tf.Output, topk_indices tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"k": k}
	opspec := tf.OpSpec{
		Type: "TopKWithUnique",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
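
// A usage sketch: taking the top 5 values from a score tensor. `scores` is
// an assumed existing tf.Output; per the note above, the returned values may
// differ from the originals in their low bits.
//
// ```
// topk, topkIndices := TopKWithUnique(s, scores, 5)
// _, _ = topk, topkIndices
// ```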

// ImageSummaryAttr is an optional argument to ImageSummary.
type ImageSummaryAttr func(optionalAttr)

// ImageSummaryMaxImages sets the optional max_images attribute to value.
//
// value: Max number of batch elements to generate images for.
// If not specified, defaults to 3
//
// REQUIRES: value >= 1
func ImageSummaryMaxImages(value int64) ImageSummaryAttr {
	return func(m optionalAttr) {
		m["max_images"] = value
	}
}

// ImageSummaryBadColor sets the optional bad_color attribute to value.
//
// value: Color to use for pixels with non-finite values.
// If not specified, defaults to {dtype:DT_UINT8 tensor_shape:{dim:{size:4}} int_val:255 int_val:0 int_val:0 int_val:255}
func ImageSummaryBadColor(value tf.Tensor) ImageSummaryAttr {
	return func(m optionalAttr) {
		m["bad_color"] = value
	}
}

// Outputs a `Summary` protocol buffer with images.
//
// The summary has up to `max_images` summary values containing images. The
// images are built from `tensor` which must be 4-D with shape `[batch_size,
// height, width, channels]` and where `channels` can be:
//
// *  1: `tensor` is interpreted as Grayscale.
// *  3: `tensor` is interpreted as RGB.
// *  4: `tensor` is interpreted as RGBA.
//
// The images have the same number of channels as the input tensor. For float
// input, the values are normalized one image at a time to fit in the range
// `[0, 255]`.  `uint8` values are unchanged.  The op uses two different
// normalization algorithms:
//
// *  If the input values are all positive, they are rescaled so the largest one
//    is 255.
//
// *  If any input value is negative, the values are shifted so input value 0.0
//    is at 127.  They are then rescaled so that either the smallest value is 0,
//    or the largest one is 255.
//
// The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
// build the `tag` of the summary values:
//
// *  If `max_images` is 1, the summary value tag is '*tag*/image'.
// *  If `max_images` is greater than 1, the summary value tags are
//    generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
//
// The `bad_color` argument is the color to use in the generated images for
// non-finite input values.  It is a `uint8` 1-D tensor of length `channels`.
// Each element must be in the range `[0, 255]` (it represents the value of a
// pixel in the output image).  Non-finite values in the input tensor are
// replaced by this tensor in the output image.  The default value is the color
// red.
//
// Arguments:
//	tag: Scalar. Used to build the `tag` attribute of the summary values.
//	tensor: 4-D of shape `[batch_size, height, width, channels]` where
// `channels` is 1, 3, or 4.
//
// Returns Scalar. Serialized `Summary` protocol buffer.
func ImageSummary(scope *Scope, tag tf.Output, tensor tf.Output, optional ...ImageSummaryAttr) (summary tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ImageSummary",
		Input: []tf.Input{
			tag, tensor,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
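
// A usage sketch: emitting an image summary for a batch of images. `images`
// is an assumed `[batch, height, width, channels]` tensor already in the
// graph; the tag string is illustrative.
//
// ```
// tag := Const(s, "train/inputs")
// summary := ImageSummary(s, tag, images,
// 	ImageSummaryMaxImages(5)) // emit at most 5 images per batch
// _ = summary // scalar string: serialized Summary proto
// ```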

// ShapeNAttr is an optional argument to ShapeN.
type ShapeNAttr func(optionalAttr)

// ShapeNOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_INT32
func ShapeNOutType(value tf.DataType) ShapeNAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Returns shape of tensors.
//
// This operation returns N 1-D integer tensors representing the shape of each `input[i]`.
func ShapeN(scope *Scope, input []tf.Output, optional ...ShapeNAttr) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ShapeN",
		Input: []tf.Input{
			tf.OutputList(input),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("ShapeN", err)
		return
	}
	return output
}
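
// A usage sketch: reading the shapes of two tensors in one op. `a` and `b`
// are assumed existing outputs; out_type widens the result to int64.
//
// ```
// shapes := ShapeN(s, []tf.Output{a, b}, ShapeNOutType(tf.Int64))
// _ = shapes // shapes[0] is the shape of a, shapes[1] the shape of b
// ```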

// Selects the k nearest centers for each point.
//
// Rows of points are assumed to be input points. Rows of centers are assumed to be
// the list of candidate centers. For each point, the k centers that have the least
// L2 distance to it are computed.
//
// Arguments:
//	points: Matrix of shape (n, d). Rows are assumed to be input points.
//	centers: Matrix of shape (m, d). Rows are assumed to be centers.
//	k: Number of nearest centers to return for each point. If k is larger than m, then
// only m centers are returned.
//
// Returns:
//	nearest_center_indices: Matrix of shape (n, min(m, k)). Each row contains the indices of the centers
// closest to the corresponding point, ordered by increasing distance.
//	nearest_center_distances: Matrix of shape (n, min(m, k)). Each row contains the squared L2 distance to the
// corresponding center in nearest_center_indices.
func NearestNeighbors(scope *Scope, points tf.Output, centers tf.Output, k tf.Output) (nearest_center_indices tf.Output, nearest_center_distances tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "NearestNeighbors",
		Input: []tf.Input{
			points, centers, k,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
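
// A usage sketch: finding the nearest centers for each point. The point and
// center matrices are illustrative constants.
//
// ```
// points := Const(s, [][]float32{{0, 0}, {5, 5}})
// centers := Const(s, [][]float32{{1, 0}, {4, 4}, {9, 9}})
// k := Const(s, int64(3))
// idx, dist := NearestNeighbors(s, points, centers, k)
// _, _ = idx, dist // each of shape (2, 3), ordered by increasing distance
// ```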

// Does nothing. Only useful as a placeholder for control edges.
//
// Returns the created operation.
func NoOp(scope *Scope) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "NoOp",
	}
	return scope.AddOperation(opspec)
}

// LoadAndRemapMatrixAttr is an optional argument to LoadAndRemapMatrix.
type LoadAndRemapMatrixAttr func(optionalAttr)

// LoadAndRemapMatrixMaxRowsInMemory sets the optional max_rows_in_memory attribute to value.
//
// value: The maximum number of rows to load from the checkpoint at
// once. If less than or equal to 0, the entire matrix will be loaded into
// memory. Setting this arg trades increased disk reads for lower memory usage.
// If not specified, defaults to -1
func LoadAndRemapMatrixMaxRowsInMemory(value int64) LoadAndRemapMatrixAttr {
	return func(m optionalAttr) {
		m["max_rows_in_memory"] = value
	}
}

// Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint
//
// at `ckpt_path` and potentially reorders its rows and columns using the
// specified remappings.
//
// Most users should use one of the wrapper initializers (such as
// `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this
// function directly.
//
// The remappings are 1-D tensors with the following properties:
//
// * `row_remapping` must have exactly `num_rows` entries. Row `i` of the output
//   matrix will be initialized from the row corresponding to index
//   `row_remapping[i]` in the old `Tensor` from the checkpoint.
// * `col_remapping` must have either 0 entries (indicating that no column
//   reordering is needed) or `num_cols` entries. If specified, column `j` of the
//   output matrix will be initialized from the column corresponding to index
//   `col_remapping[j]` in the old `Tensor` from the checkpoint.
// * A value of -1 in either of the remappings signifies a "missing" entry. In that
//   case, values from the `initializing_values` tensor will be used to fill that
//   missing row or column. If `row_remapping` has `r` missing entries and
//   `col_remapping` has `c` missing entries, then the following condition must be
//   true:
//
// `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`
//
// The remapping tensors can be generated using the GenerateVocabRemapping op.
//
// As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],
// initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing
// the value from row i, column j of the old tensor in the checkpoint, the output
// matrix will look like the following:
//
// [[w(1, 0),  w(1, 2),  0.5],
//  [w(0, 0),  w(0, 2), -0.5],
//  [0.25,    -0.25,      42]]
//
// Arguments:
//	ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from
// which the old matrix `Tensor` will be loaded.
//	old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
//	row_remapping: An int `Tensor` of row remappings (generally created by
// `generate_vocab_remapping`).  Even if no row remapping is needed, this must
// still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted
// index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`).
//	col_remapping: An int `Tensor` of column remappings (generally created by
// `generate_vocab_remapping`).  May be a size-0 `Tensor` if only row remapping
// is to be done (e.g. column ordering is the same).
//	initializing_values: A float `Tensor` containing values to fill in for cells
// in the output matrix that are not loaded from the checkpoint. Length must be
// exactly the same as the number of missing / new cells.
//	num_rows: Number of rows (length of the 1st dimension) in the output matrix.
//	num_cols: Number of columns (length of the 2nd dimension) in the output matrix.
//
// Returns Output matrix containing existing values loaded from the
// checkpoint, and with any missing values filled in from initializing_values.
func LoadAndRemapMatrix(scope *Scope, ckpt_path tf.Output, old_tensor_name tf.Output, row_remapping tf.Output, col_remapping tf.Output, initializing_values tf.Output, num_rows int64, num_cols int64, optional ...LoadAndRemapMatrixAttr) (output_matrix tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_rows": num_rows, "num_cols": num_cols}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LoadAndRemapMatrix",
		Input: []tf.Input{
			ckpt_path, old_tensor_name, row_remapping, col_remapping, initializing_values,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
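
// A usage sketch: loading a checkpointed matrix with remapped rows and no
// column remapping. Every value below (the path, tensor name, and sizes) is
// an illustrative assumption. With one missing row (r=1, c=0), the condition
// above requires 1*num_cols = 4 initializing values.
//
// ```
// ckpt := Const(s, "/tmp/model.ckpt")         // hypothetical checkpoint path
// name := Const(s, "embeddings/weights")      // hypothetical tensor name
// rowRemap := Const(s, []int64{1, 0, -1})     // -1: fill from initializing_values
// colRemap := Const(s, []int64{})             // size-0: keep column order
// initVals := Const(s, []float32{0, 0, 0, 0}) // one per missing cell
// out := LoadAndRemapMatrix(s, ckpt, name, rowRemap, colRemap, initVals, 3, 4)
// _ = out // 3x4 matrix
// ```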

// FixedUnigramCandidateSamplerAttr is an optional argument to FixedUnigramCandidateSampler.
type FixedUnigramCandidateSamplerAttr func(optionalAttr)

// FixedUnigramCandidateSamplerVocabFile sets the optional vocab_file attribute to value.
//
// value: Each valid line in this file (which should have a CSV-like format)
// corresponds to a valid word ID. IDs are in sequential order, starting from
// num_reserved_ids. The last entry in each line is expected to be a value
// corresponding to the count or relative probability. Exactly one of vocab_file
// and unigrams needs to be passed to this op.
// If not specified, defaults to ""
func FixedUnigramCandidateSamplerVocabFile(value string) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["vocab_file"] = value
	}
}

// FixedUnigramCandidateSamplerDistortion sets the optional distortion attribute to value.
//
// value: The distortion is used to skew the unigram probability distribution.
// Each weight is first raised to the distortion's power before adding to the
// internal unigram distribution. As a result, distortion = 1.0 gives regular
// unigram sampling (as defined by the vocab file), and distortion = 0.0 gives
// a uniform distribution.
// If not specified, defaults to 1
func FixedUnigramCandidateSamplerDistortion(value float32) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["distortion"] = value
	}
}

// FixedUnigramCandidateSamplerNumReservedIds sets the optional num_reserved_ids attribute to value.
//
// value: Optionally some reserved IDs can be added in the range [0,
// ..., num_reserved_ids) by users. One use case is that a special unknown
// word token is used as ID 0. These IDs will have a sampling probability of 0.
// If not specified, defaults to 0
func FixedUnigramCandidateSamplerNumReservedIds(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["num_reserved_ids"] = value
	}
}

// FixedUnigramCandidateSamplerNumShards sets the optional num_shards attribute to value.
//
// value: A sampler can be used to sample from a subset of the original range
// in order to speed up the whole computation through parallelism. This parameter
// (together with 'shard') indicates the number of partitions that are being
// used in the overall computation.
// If not specified, defaults to 1
//
// REQUIRES: value >= 1
func FixedUnigramCandidateSamplerNumShards(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["num_shards"] = value
	}
}

// FixedUnigramCandidateSamplerShard sets the optional shard attribute to value.
//
// value: A sampler can be used to sample from a subset of the original range
// in order to speed up the whole computation through parallelism. This parameter
// (together with 'num_shards') indicates the particular partition number of a
// sampler op, when partitioning is being used.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func FixedUnigramCandidateSamplerShard(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["shard"] = value
	}
}

// FixedUnigramCandidateSamplerUnigrams sets the optional unigrams attribute to value.
//
// value: A list of unigram counts or probabilities, one per ID in sequential
// order. Exactly one of vocab_file and unigrams should be passed to this op.
// If not specified, defaults to {}
func FixedUnigramCandidateSamplerUnigrams(value []float32) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["unigrams"] = value
	}
}

// FixedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func FixedUnigramCandidateSamplerSeed(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// FixedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func FixedUnigramCandidateSamplerSeed2(value int64) FixedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Generates labels for candidate sampling with a learned unigram distribution.
//
// A unigram sampler could use a fixed unigram distribution read from a
// file or passed in as an in-memory array instead of building up the distribution
// from data on the fly. There is also an option to skew the distribution by
// applying a distortion power to the weights.
//
// The vocabulary file should be in CSV-like format, with the last field
// being the weight associated with the word.
//
// For each batch, this op picks a single set of sampled candidate labels.
//
// The advantages of sampling candidates per-batch are simplicity and the
// possibility of efficient dense matrix multiplication. The disadvantage is that
// the sampled candidates must be chosen independently of the context and of the
// true labels.
//
// Arguments:
//	true_classes: A batch_size * num_true matrix, in which each row contains the
// IDs of the num_true target_classes in the corresponding original label.
//	num_true: Number of true labels per context.
//	num_sampled: Number of candidates to randomly sample.
//	unique: If unique is true, we sample with rejection, so that all sampled
// candidates in a batch are unique. This requires some approximation to
// estimate the post-rejection sampling probabilities.
//	range_max: The sampler will sample integers from the interval [0, range_max).
//
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
func FixedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...FixedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FixedUnigramCandidateSampler",
		Input: []tf.Input{
			true_classes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
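
// A usage sketch: sampling 2 negative candidates from an in-memory unigram
// distribution over 4 IDs. `trueClasses` is an assumed batch_size x num_true
// int64 matrix; the weights and seed are illustrative. Note range_max must
// equal the number of unigram entries.
//
// ```
// sampled, trueExp, sampledExp := FixedUnigramCandidateSampler(s,
// 	trueClasses, 1, 2, true, 4,
// 	FixedUnigramCandidateSamplerUnigrams([]float32{10, 5, 3, 1}),
// 	FixedUnigramCandidateSamplerDistortion(0.75),
// 	FixedUnigramCandidateSamplerSeed(42))
// _, _, _ = sampled, trueExp, sampledExp
// ```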

// MatrixDiagPartV3Attr is an optional argument to MatrixDiagPartV3.
type MatrixDiagPartV3Attr func(optionalAttr)

// MatrixDiagPartV3Align sets the optional align attribute to value.
//
// value: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is
// a string specifying how superdiagonals and subdiagonals should be aligned,
// respectively. There are four possible alignments: "RIGHT_LEFT" (default),
// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals
// to the right (left-pads the row) and subdiagonals to the left (right-pads the
// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is
// the opposite alignment.
// If not specified, defaults to "RIGHT_LEFT"
func MatrixDiagPartV3Align(value string) MatrixDiagPartV3Attr {
	return func(m optionalAttr) {
		m["align"] = value
	}
}

// Returns the batched diagonal part of a batched tensor.
//
// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
// `input`.
//
// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
// Let `max_diag_len` be the maximum length among all diagonals to be extracted,
// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`.
// Let `num_diags` be the number of diagonals to extract,
// `num_diags = k[1] - k[0] + 1`.
//
// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
// `[I, J, ..., L, max_diag_len]` and values:
//
// ```
// diagonal[i, j, ..., l, n]
//   = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
//     padding_value                 ; otherwise.
// ```
// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
//
// Otherwise, the output tensor has rank `r` with dimensions
// `[I, J, ..., L, num_diags, max_diag_len]` with values:
//
// ```
// diagonal[i, j, ..., l, m, n]
//   = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
//     padding_value                 ; otherwise.
// ```
// where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.
//
// `offset` is zero except when the alignment of the diagonal is to the right.
// ```
// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
//                                            and `d >= 0`) or
//                                          (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
//                                            and `d <= 0`)
//          0                          ; otherwise
// ```
// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
//
// The input must be at least a matrix.
//
// For example:
//
// ```
// input = np.array([[[1, 2, 3, 4],  # Input shape: (2, 3, 4)
//                    [5, 6, 7, 8],
//                    [9, 8, 7, 6]],
//                   [[5, 4, 3, 2],
//                    [1, 2, 3, 4],
//                    [5, 6, 7, 8]]])
//
// # A main diagonal from each batch.
// tf.matrix_diag_part(input) ==> [[1, 6, 7],  # Output shape: (2, 3)
//                                 [5, 2, 7]]
//
// # A superdiagonal from each batch.
// tf.matrix_diag_part(input, k = 1)
//   ==> [[2, 7, 6],  # Output shape: (2, 3)
//        [4, 3, 8]]
//
// # A band from each batch.
// tf.matrix_diag_part(input, k = (-1, 2))
//   ==> [[[0, 3, 8],  # Output shape: (2, 4, 3)
//         [2, 7, 6],
//         [1, 6, 7],
//         [5, 8, 0]],
//        [[0, 3, 4],
//         [4, 3, 8],
//         [5, 2, 7],
//         [1, 6, 0]]]
//
// # LEFT_RIGHT alignment.
// tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT")
//   ==> [[[3, 8, 0],  # Output shape: (2, 4, 3)
//         [2, 7, 6],
//         [1, 6, 7],
//         [0, 5, 8]],
//        [[3, 4, 0],
//         [4, 3, 8],
//         [5, 2, 7],
//         [0, 1, 6]]]
//
// # max_diag_len can be shorter than the main diagonal.
// tf.matrix_diag_part(input, k = (-2, -1))
//   ==> [[[5, 8],
//         [9, 0]],
//        [[1, 6],
//         [5, 0]]]
//
// # padding_value = 9
// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
//   ==> [[[9, 9, 4],  # Output shape: (2, 3, 3)
//         [9, 3, 8],
//         [2, 7, 6]],
//        [[9, 9, 2],
//         [9, 3, 4],
//         [4, 3, 8]]]
//
// ```
//
// Arguments:
//	input: Rank `r` tensor where `r >= 2`.
//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
// diagonal, and negative value means subdiagonals. `k` can be a single integer
// (for a single diagonal) or a pair of integers specifying the low and high ends
// of a matrix band. `k[0]` must not be larger than `k[1]`.
//	padding_value: The value to fill the area outside the specified diagonal band with.
// Default is 0.
//
// Returns The extracted diagonal(s).
func MatrixDiagPartV3(scope *Scope, input tf.Output, k tf.Output, padding_value tf.Output, optional ...MatrixDiagPartV3Attr) (diagonal tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MatrixDiagPartV3",
		Input: []tf.Input{
			input, k, padding_value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
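
// A Go sketch mirroring the Python examples above: extracting a band of
// diagonals with LEFT_RIGHT alignment. `input` is an assumed batched matrix
// tensor; `pad` must have the same dtype as `input` (int32 here, matching
// the example above).
//
// ```
// k := Const(s, []int32{-1, 2}) // subdiagonal 1 through superdiagonal 2
// pad := Const(s, int32(0))     // padding_value
// diags := MatrixDiagPartV3(s, input, k, pad,
// 	MatrixDiagPartV3Align("LEFT_RIGHT"))
// _ = diags
// ```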

// LearnedUnigramCandidateSamplerAttr is an optional argument to LearnedUnigramCandidateSampler.
type LearnedUnigramCandidateSamplerAttr func(optionalAttr)

// LearnedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func LearnedUnigramCandidateSamplerSeed(value int64) LearnedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// LearnedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func LearnedUnigramCandidateSamplerSeed2(value int64) LearnedUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Generates labels for candidate sampling with a learned unigram distribution.
//
// See explanations of candidate sampling and the data formats at
// go/candidate-sampling.
//
// For each batch, this op picks a single set of sampled candidate labels.
//
// The advantages of sampling candidates per-batch are simplicity and the
// possibility of efficient dense matrix multiplication. The disadvantage is that
// the sampled candidates must be chosen independently of the context and of the
// true labels.
//
// Arguments:
//	true_classes: A batch_size * num_true matrix, in which each row contains the
// IDs of the num_true target_classes in the corresponding original label.
//	num_true: Number of true labels per context.
//	num_sampled: Number of candidates to randomly sample.
//	unique: If unique is true, we sample with rejection, so that all sampled
// candidates in a batch are unique. This requires some approximation to
// estimate the post-rejection sampling probabilities.
//	range_max: The sampler will sample integers from the interval [0, range_max).
//
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
func LearnedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LearnedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "LearnedUnigramCandidateSampler",
		Input: []tf.Input{
			true_classes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Copy a tensor setting everything outside a central band in each innermost matrix to zero.
//
// The `band` part is computed as follows:
// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
// tensor with the same shape where
//
// `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
//
// The indicator function
//
// `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
//                  (num_upper < 0 || (n-m) <= num_upper)`.
//
// For example:
//
// ```
// # if 'input' is [[ 0,  1,  2, 3]
// #                [-1,  0,  1, 2]
// #                [-2, -1,  0, 1]
// #                [-3, -2, -1, 0]],
//
// tf.linalg.band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
//                                        [-1,  0,  1, 2]
//                                        [ 0, -1,  0, 1]
//                                        [ 0,  0, -1, 0]],
//
// tf.linalg.band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
//                                       [-1,  0,  1, 0]
//                                       [-2, -1,  0, 1]
//                                       [ 0, -2, -1, 0]]
// ```
//
// Useful special cases:
//
// ```
//  tf.linalg.band_part(input, 0, -1) ==> Upper triangular part.
//  tf.linalg.band_part(input, -1, 0) ==> Lower triangular part.
//  tf.linalg.band_part(input, 0, 0) ==> Diagonal.
// ```
//
// Arguments:
//	input: Rank `k` tensor.
//	num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire
// lower triangle.
//	num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep
// entire upper triangle.
//
// Returns Rank `k` tensor of the same shape as input. The extracted banded tensor.
func MatrixBandPart(scope *Scope, input tf.Output, num_lower tf.Output, num_upper tf.Output) (band tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MatrixBandPart",
		Input: []tf.Input{
			input, num_lower, num_upper,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
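
// A usage sketch: the "useful special cases" above, written with this
// package. `m` is an assumed matrix output; the band arguments are int64
// scalars.
//
// ```
// zero := Const(s, int64(0))
// negOne := Const(s, int64(-1))
// upper := MatrixBandPart(s, m, zero, negOne) // upper triangular part
// lower := MatrixBandPart(s, m, negOne, zero) // lower triangular part
// diag := MatrixBandPart(s, m, zero, zero)    // diagonal only
// _, _, _ = upper, lower, diag
// ```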

// Bucketize each feature based on bucket boundaries.
//
// An op that returns a list of int tensors, where each tensor represents the
// bucketized values for a single feature.
//
// Arguments:
//	float_values: float; List of Rank 1 Tensors, each containing float values for a single feature.
//	bucket_boundaries: float; List of Rank 1 Tensors, each containing the bucket boundaries for a single
// feature.
//
// Returns int; List of Rank 1 Tensors, each containing the bucketized values for a single feature.
func BoostedTreesBucketize(scope *Scope, float_values []tf.Output, bucket_boundaries []tf.Output) (buckets []tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesBucketize",
		Input: []tf.Input{
			tf.OutputList(float_values), tf.OutputList(bucket_boundaries),
		},
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if buckets, idx, err = makeOutputList(op, idx, "buckets"); err != nil {
		scope.UpdateErr("BoostedTreesBucketize", err)
		return
	}
	return buckets
}

// ResourceApplyAdadeltaAttr is an optional argument to ResourceApplyAdadelta.
type ResourceApplyAdadeltaAttr func(optionalAttr)

// ResourceApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
//
// value: If True, updating of the var, accum and update_accum tensors will be protected by
// a lock; otherwise the behavior is undefined, but may exhibit less contention.
// If not specified, defaults to false
func ResourceApplyAdadeltaUseLocking(value bool) ResourceApplyAdadeltaAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the adadelta scheme.
//
// accum = rho * accum + (1 - rho) * grad^2
// update = sqrt(update_accum + epsilon) / sqrt(accum + epsilon) * grad
// update_accum = rho * update_accum + (1 - rho) * update^2
// var -= update
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	accum_update: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	rho: Decay factor. Must be a scalar.
//	epsilon: Constant factor. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdadeltaAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAdadelta",
		Input: []tf.Input{
			var_, accum, accum_update, lr, rho, epsilon, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Deserialize bucket boundaries and ready flag into current QuantileAccumulator.
//
// An op that deserializes bucket boundaries and the boundaries-ready flag into
// the current QuantileAccumulator.
//
// Arguments:
//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
//	bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.
//
// Returns the created operation.
func BoostedTreesQuantileStreamResourceDeserialize(scope *Scope, quantile_stream_resource_handle tf.Output, bucket_boundaries []tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesQuantileStreamResourceDeserialize",
		Input: []tf.Input{
			quantile_stream_resource_handle, tf.OutputList(bucket_boundaries),
		},
	}
	return scope.AddOperation(opspec)
}

// StageClearAttr is an optional argument to StageClear.
type StageClearAttr func(optionalAttr)

// StageClearCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageClearCapacity(value int64) StageClearAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// StageClearMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageClearMemoryLimit(value int64) StageClearAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// StageClearContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func StageClearContainer(value string) StageClearAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// StageClearSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func StageClearSharedName(value string) StageClearAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op removes all elements in the underlying container.
//
// Returns the created operation.
func StageClear(scope *Scope, dtypes []tf.DataType, optional ...StageClearAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StageClear",

		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Selects num_to_sample rows of input using the KMeans++ criterion.
//
// Rows of points are assumed to be input points. One row is selected at random.
// Subsequent rows are sampled with probability proportional to the squared L2
// distance from the nearest row selected thus far, until num_to_sample rows have
// been sampled.
//
// Arguments:
//	points: Matrix of shape (n, d). Rows are assumed to be input points.
//	num_to_sample: Scalar. The number of rows to sample. This value must not be larger than n.
//	seed: Scalar. Seed for initializing the random number generator.
//	num_retries_per_sample: Scalar. For each row that is sampled, this parameter
// specifies the number of additional points to draw from the current
// distribution before selecting the best. If a negative value is specified, a
// heuristic is used to sample O(log(num_to_sample)) additional points.
//
// Returns Matrix of shape (num_to_sample, d). The sampled rows.
func KmeansPlusPlusInitialization(scope *Scope, points tf.Output, num_to_sample tf.Output, seed tf.Output, num_retries_per_sample tf.Output) (samples tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "KmeansPlusPlusInitialization",
		Input: []tf.Input{
			points, num_to_sample, seed, num_retries_per_sample,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
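
// A usage sketch: choosing 4 seed centers from a point matrix. `points` is
// assumed to be an (n, d) float matrix already in the graph; the seed value
// is illustrative.
//
// ```
// numToSample := Const(s, int64(4))
// seed := Const(s, int64(7))
// retries := Const(s, int64(-1)) // negative: use the O(log(num_to_sample)) heuristic
// centers := KmeansPlusPlusInitialization(s, points, numToSample, seed, retries)
// _ = centers // shape (4, d)
// ```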

// BoostedTreesSparseCalculateBestFeatureSplitAttr is an optional argument to BoostedTreesSparseCalculateBestFeatureSplit.
type BoostedTreesSparseCalculateBestFeatureSplitAttr func(optionalAttr)

// BoostedTreesSparseCalculateBestFeatureSplitSplitType sets the optional split_type attribute to value.
//
// value: A string indicating if this Op should perform inequality split or equality split.
// If not specified, defaults to "inequality"
func BoostedTreesSparseCalculateBestFeatureSplitSplitType(value string) BoostedTreesSparseCalculateBestFeatureSplitAttr {
	return func(m optionalAttr) {
		m["split_type"] = value
	}
}

// Calculates gains for each feature and returns the best possible split information for the feature.
//
// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
//
// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
//
// In this manner, the output is the best split per feature and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
//
// The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature.
//
// Arguments:
//	node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as in `for node_id in range(node_id_range[0], node_id_range[1])` (note that the last index node_id_range[1] is exclusive).
//	stats_summary_indices: A Rank 2 int64 tensor of dense shape [N, 4] (N specifies the number of non-zero values) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. The second dimension contains node id, feature dimension, bucket id, and stats dim.
// stats dim is the sum of the logits dimension and hessian dimension; the hessian dimension can either be the logits dimension if a diagonal hessian is used, or the logits dimension^2 if a full hessian is used.
//	stats_summary_values: A Rank 1 float tensor of dense shape [N] (N specifies the number of non-zero values), which supplies the values for each element in summary_indices.
//	stats_summary_shape: A Rank 1 float tensor of dense shape [4], which specifies the dense shape of the sparse tensor, which is [num tree nodes, feature dimensions, num buckets, stats dim].
//	l1: l1 regularization factor on leaf weights, per instance based.
//	l2: l2 regularization factor on leaf weights, per instance based.
//	tree_complexity: adjustment to the gain, per leaf based.
//	min_node_weight: minimum avg of hessians in a node required before the node is considered for splitting.
//	logits_dimension: The dimension of logit, i.e., number of classes.
//
// Returns:
//	node_ids: A Rank 1 tensor indicating possible node ids that can be split.
//	gains: A Rank 1 tensor indicating the best gains to split each node.
//	feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for each node.
//	thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node.
//	left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes to the left direction by the given threshold for each feature.
// This value will be used to make the left node value by adding to the parent node value. Second dimension size is logits dimension.
//	right_node_contribs: A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
//	split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing.
// Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
func BoostedTreesSparseCalculateBestFeatureSplit(scope *Scope, node_id_range tf.Output, stats_summary_indices tf.Output, stats_summary_values tf.Output, stats_summary_shape tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, logits_dimension int64, optional ...BoostedTreesSparseCalculateBestFeatureSplitAttr) (node_ids tf.Output, gains tf.Output, feature_dimensions tf.Output, thresholds tf.Output, left_node_contribs tf.Output, right_node_contribs tf.Output, split_with_default_directions tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesSparseCalculateBestFeatureSplit",
		Input: []tf.Input{
			node_id_range, stats_summary_indices, stats_summary_values, stats_summary_shape, l1, l2, tree_complexity, min_node_weight,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
}

// Flush the quantile summaries from each quantile stream resource.
//
// An op that outputs a list of quantile summaries of a quantile stream resource.
// Each summary Tensor is rank 2, containing summaries (value, weight, min_rank,
// max_rank) for a single feature.
//
// Arguments:
//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
func BoostedTreesFlushQuantileSummaries(scope *Scope, quantile_stream_resource_handle tf.Output, num_features int64) (summaries []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_features": num_features}
	opspec := tf.OpSpec{
		Type: "BoostedTreesFlushQuantileSummaries",
		Input: []tf.Input{
			quantile_stream_resource_handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if summaries, idx, err = makeOutputList(op, idx, "summaries"); err != nil {
		scope.UpdateErr("BoostedTreesFlushQuantileSummaries", err)
		return
	}
	return summaries
}

// BoostedTreesCreateQuantileStreamResourceAttr is an optional argument to BoostedTreesCreateQuantileStreamResource.
type BoostedTreesCreateQuantileStreamResourceAttr func(optionalAttr)

// BoostedTreesCreateQuantileStreamResourceMaxElements sets the optional max_elements attribute to value.
//
// value: int; The maximum number of data points that can be fed to the stream.
// If not specified, defaults to 1099511627776
func BoostedTreesCreateQuantileStreamResourceMaxElements(value int64) BoostedTreesCreateQuantileStreamResourceAttr {
	return func(m optionalAttr) {
		m["max_elements"] = value
	}
}

// Create the Resource for Quantile Streams.
//
// Arguments:
//	quantile_stream_resource_handle: resource; Handle to quantile stream resource.
//	epsilon: float; The required approximation error of the stream resource.
//	num_streams: int; The number of streams managed by the resource that shares the same epsilon.
//
// Returns the created operation.
func BoostedTreesCreateQuantileStreamResource(scope *Scope, quantile_stream_resource_handle tf.Output, epsilon tf.Output, num_streams tf.Output, optional ...BoostedTreesCreateQuantileStreamResourceAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesCreateQuantileStreamResource",
		Input: []tf.Input{
			quantile_stream_resource_handle, epsilon, num_streams,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// FakeQuantWithMinMaxArgsGradientAttr is an optional argument to FakeQuantWithMinMaxArgsGradient.
type FakeQuantWithMinMaxArgsGradientAttr func(optionalAttr)

// FakeQuantWithMinMaxArgsGradientMin sets the optional min attribute to value.
// If not specified, defaults to -6
func FakeQuantWithMinMaxArgsGradientMin(value float32) FakeQuantWithMinMaxArgsGradientAttr {
	return func(m optionalAttr) {
		m["min"] = value
	}
}

// FakeQuantWithMinMaxArgsGradientMax sets the optional max attribute to value.
// If not specified, defaults to 6
func FakeQuantWithMinMaxArgsGradientMax(value float32) FakeQuantWithMinMaxArgsGradientAttr {
	return func(m optionalAttr) {
		m["max"] = value
	}
}

// FakeQuantWithMinMaxArgsGradientNumBits sets the optional num_bits attribute to value.
// If not specified, defaults to 8
func FakeQuantWithMinMaxArgsGradientNumBits(value int64) FakeQuantWithMinMaxArgsGradientAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// FakeQuantWithMinMaxArgsGradientNarrowRange sets the optional narrow_range attribute to value.
// If not specified, defaults to false
func FakeQuantWithMinMaxArgsGradientNarrowRange(value bool) FakeQuantWithMinMaxArgsGradientAttr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// Compute gradients for a FakeQuantWithMinMaxArgs operation.
//
// Arguments:
//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
//	inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
//
// Returns Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
// `gradients * (inputs >= min && inputs <= max)`.
func FakeQuantWithMinMaxArgsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsGradientAttr) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxArgsGradient",
		Input: []tf.Input{
			gradients, inputs,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
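
// A usage sketch: backpropagating through a fake-quant with a custom range.
// `grads` and `inputs` are assumed existing outputs; the attrs must match
// the corresponding forward FakeQuantWithMinMaxArgs call.
//
// ```
// backprops := FakeQuantWithMinMaxArgsGradient(s, grads, inputs,
// 	FakeQuantWithMinMaxArgsGradientMin(-1.0),
// 	FakeQuantWithMinMaxArgsGradientMax(1.0),
// 	FakeQuantWithMinMaxArgsGradientNumBits(8))
// _ = backprops // grads pass through only where min <= inputs <= max
// ```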

// Computes the singular value decomposition of a batch of matrices
//
// (Note: Only real inputs are supported).
//
// Computes the singular values and singular vectors of the innermost M-by-N
// matrices in tensor such that
// tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * Transpose(v[...,:,:]).
//
// Arguments:
//	a: the input tensor.
//	max_iter: maximum number of sweep updates, i.e., over the whole lower triangular
// part or upper triangular part based on the parameter lower. Heuristically, it has
// been argued that approximately log(min(M, N)) sweeps are needed in practice
// (Ref: Golub & van Loan, "Matrix Computations").
//	epsilon: the tolerance ratio.
//	precision_config: a serialized xla::PrecisionConfig proto.
//
// Returns:
//	s: Singular values. The values are sorted in reverse order of magnitude, so
// s[..., 0] is the largest value, s[..., 1] is the second largest, etc.
//	u: Left singular vectors.
//	v: Right singular vectors.
func XlaSvd(scope *Scope, a tf.Output, max_iter int64, epsilon float32, precision_config string) (s tf.Output, u tf.Output, v tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"max_iter": max_iter, "epsilon": epsilon, "precision_config": precision_config}
	opspec := tf.OpSpec{
		Type: "XlaSvd",
		Input: []tf.Input{
			a,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// JPEG encode input image with provided compression quality.
//
// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
// `quality` is an int32 jpeg compression quality value between 0 and 100.
//
// Arguments:
//	images: Images to adjust.  At least 3-D.
//	quality: An int quality to encode to.
//
// Returns 0-D. JPEG-encoded image.
func EncodeJpegVariableQuality(scope *Scope, images tf.Output, quality tf.Output) (contents tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "EncodeJpegVariableQuality",
		Input: []tf.Input{
			images, quality,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
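
// A usage sketch: encoding an image at a runtime-chosen quality. `image` is
// an assumed 3-D uint8 tensor; quality 90 is an illustrative value.
//
// ```
// quality := Const(s, int32(90))
// jpegBytes := EncodeJpegVariableQuality(s, image, quality)
// _ = jpegBytes // 0-D string tensor holding the encoded JPEG
// ```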
37946
37947// DecodeProtoV2Attr is an optional argument to DecodeProtoV2.
37948type DecodeProtoV2Attr func(optionalAttr)
37949
37950// DecodeProtoV2DescriptorSource sets the optional descriptor_source attribute to value.
37951//
37952// value: Either the special value `local://` or a path to a file containing
37953// a serialized `FileDescriptorSet`.
37954// If not specified, defaults to "local://"
37955func DecodeProtoV2DescriptorSource(value string) DecodeProtoV2Attr {
37956	return func(m optionalAttr) {
37957		m["descriptor_source"] = value
37958	}
37959}
37960
37961// DecodeProtoV2MessageFormat sets the optional message_format attribute to value.
37962//
37963// value: Either `binary` or `text`.
37964// If not specified, defaults to "binary"
37965func DecodeProtoV2MessageFormat(value string) DecodeProtoV2Attr {
37966	return func(m optionalAttr) {
37967		m["message_format"] = value
37968	}
37969}
37970
37971// DecodeProtoV2Sanitize sets the optional sanitize attribute to value.
37972//
37973// value: Whether to sanitize the result or not.
37974// If not specified, defaults to false
37975func DecodeProtoV2Sanitize(value bool) DecodeProtoV2Attr {
37976	return func(m optionalAttr) {
37977		m["sanitize"] = value
37978	}
37979}
37980
37981// The op extracts fields from a serialized protocol buffers message into tensors.
37982//
37983// The `decode_proto` op extracts fields from a serialized protocol buffers
37984// message into tensors.  The fields in `field_names` are decoded and converted
37985// to the corresponding `output_types` if possible.
37986//
37987// A `message_type` name must be provided to give context for the field names.
37988// The actual message descriptor can be looked up either in the linked-in
37989// descriptor pool or a filename provided by the caller using the
37990// `descriptor_source` attribute.
37991//
37992// Each output tensor is a dense tensor. This means that it is padded to hold
37993// the largest number of repeated elements seen in the input minibatch. (The
37994// shape is also padded by one to prevent zero-sized dimensions). The actual
37995// repeat counts for each example in the minibatch can be found in the `sizes`
37996// output. In many cases the output of `decode_proto` is fed immediately into
37997// tf.squeeze if missing values are not a concern. When using tf.squeeze, always
37998// pass the squeeze dimension explicitly to avoid surprises.
37999//
38000// For the most part, the mapping between Proto field types and TensorFlow dtypes
38001// is straightforward. However, there are a few special cases:
38002//
38003// - A proto field that contains a submessage or group can only be converted
38004// to `DT_STRING` (the serialized submessage). This is to reduce the complexity
38005// of the API. The resulting string can be used as input to another instance of
38006// the decode_proto op.
38007//
38008// - TensorFlow lacks support for unsigned integers. The ops represent uint64
38009// types as a `DT_INT64` with the same twos-complement bit pattern (the obvious
38010// way). Unsigned int32 values can be represented exactly by specifying type
38011// `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in
38012// the `output_types` attribute.
38013//
38014// Both binary and text proto serializations are supported, and can be
// chosen using the `message_format` attribute.
38016//
38017// The `descriptor_source` attribute selects the source of protocol
38018// descriptors to consult when looking up `message_type`. This may be:
38019//
// - An empty string or "local://", in which case protocol descriptors are
38021// created for C++ (not Python) proto definitions linked to the binary.
38022//
38023// - A file, in which case protocol descriptors are created from the file,
38024// which is expected to contain a `FileDescriptorSet` serialized as a string.
38025// NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out`
38026// and `--include_imports` options to the protocol compiler `protoc`.
38027//
38028// - A "bytes://<bytes>", in which protocol descriptors are created from `<bytes>`,
38029// which is expected to be a `FileDescriptorSet` serialized as a string.
38030//
38031// Arguments:
38032//	bytes: Tensor of serialized protos with shape `batch_shape`.
38033//	message_type: Name of the proto message type to decode.
38034//	field_names: List of strings containing proto field names. An extension field can be decoded
38035// by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME.
38036//	output_types: List of TF types to use for the respective field in field_names.
38037//
38038// Returns:
38039//	sizes: Tensor of int32 with shape `[batch_shape, len(field_names)]`.
38040// Each entry is the number of values found for the corresponding field.
38041// Optional fields may have 0 or 1 values.
38042//	values: List of tensors containing values for the corresponding field.
38043// `values[i]` has datatype `output_types[i]`
38044// and shape `[batch_shape, max(sizes[...,i])]`.
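//
// A minimal Go construction sketch (assuming this op package imported as `op`
// and the core package as `tf`; the message type and field name are
// hypothetical placeholders for a proto linked into the binary):
//
// ```go
// s := op.NewScope()
// msgs := op.Const(s, []string{""}) // serialized protos; empty here for brevity
// sizes, values := op.DecodeProtoV2(s, msgs, "mypackage.MyMessage",
// 	[]string{"some_int32_field"}, []tf.DataType{tf.Int32},
// 	op.DecodeProtoV2DescriptorSource("local://"))
// _, _ = sizes, values
// ```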
38045func DecodeProtoV2(scope *Scope, bytes tf.Output, message_type string, field_names []string, output_types []tf.DataType, optional ...DecodeProtoV2Attr) (sizes tf.Output, values []tf.Output) {
38046	if scope.Err() != nil {
38047		return
38048	}
38049	attrs := map[string]interface{}{"message_type": message_type, "field_names": field_names, "output_types": output_types}
38050	for _, a := range optional {
38051		a(attrs)
38052	}
38053	opspec := tf.OpSpec{
38054		Type: "DecodeProtoV2",
38055		Input: []tf.Input{
38056			bytes,
38057		},
38058		Attrs: attrs,
38059	}
38060	op := scope.AddOperation(opspec)
38061	if scope.Err() != nil {
38062		return
38063	}
38064	var idx int
38065	var err error
38066	sizes = op.Output(idx)
38067	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
38068		scope.UpdateErr("DecodeProtoV2", err)
38069		return
38070	}
38071	return sizes, values
38072}
38073
38074// Aggregates the summary of accumulated stats for the batch.
38075//
38076// The summary stats contains gradients and hessians accumulated for each node, bucket and dimension id.
38077//
38078// Arguments:
38079//	node_ids: int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].
38080//	gradients: float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.
38081//	hessians: float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.
//	feature_indices: int32; Rank 2 indices of feature sparse Tensors (shape=[number of sparse entries, 2]).
// The number of sparse entries is counted across all instances in the batch. In each row, the first
// value is the index of the instance and the second is the dimension of the feature. The second axis
// can only have 2 values, i.e., the dense version of the input Tensor can only be a matrix.
//	feature_values: int32; Rank 1 values of feature sparse Tensors (shape=[number of sparse entries]).
// The number of sparse entries is counted across all instances in the batch.
38089//	feature_shape: int32; Rank 1 dense shape of feature sparse Tensors (shape=[2]).
38090// The first axis can only have 2 values, [batch_size, feature_dimension].
38091//	max_splits: int; the maximum number of splits possible in the whole tree.
//	num_buckets: int; equal to the maximum possible value of the bucketized feature + 1.
38093//
38094// Returns:
38095//	stats_summary_indices: int32; Rank 2 indices of summary sparse Tensors (shape=[number of non zero statistics, 4])
// The second axis can only have size 4, including node id, feature dimension, bucket id, and statistics_dimension.
38097// statistics_dimension = logits_dimension + hessian_dimension.
38098//	stats_summary_values: output Rank 1 Tensor (shape=[number of non zero statistics])
38099//	stats_summary_shape: output Rank 1 Tensor (shape=[4])
// The tensor has the following 4 values: [max_splits, feature_dimension, num_buckets, statistics_dimension],
38101// where statistics_dimension = gradient_dimension + hessian_dimension. gradient_dimension
38102// is the same as label_dimension, i.e., the output space. hessian_dimension can be the same
38103// as logits dimension when diagonal hessian is used, or label_dimension^2 when full
38104// hessian is used.
38105func BoostedTreesSparseAggregateStats(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, feature_indices tf.Output, feature_values tf.Output, feature_shape tf.Output, max_splits int64, num_buckets int64) (stats_summary_indices tf.Output, stats_summary_values tf.Output, stats_summary_shape tf.Output) {
38106	if scope.Err() != nil {
38107		return
38108	}
38109	attrs := map[string]interface{}{"max_splits": max_splits, "num_buckets": num_buckets}
38110	opspec := tf.OpSpec{
38111		Type: "BoostedTreesSparseAggregateStats",
38112		Input: []tf.Input{
38113			node_ids, gradients, hessians, feature_indices, feature_values, feature_shape,
38114		},
38115		Attrs: attrs,
38116	}
38117	op := scope.AddOperation(opspec)
38118	return op.Output(0), op.Output(1), op.Output(2)
38119}
38120
38121// Makes the summary of accumulated stats for the batch.
38122//
38123// The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example.
38124//
38125// Arguments:
//	node_ids: int32 Rank 1 Tensor containing the node id that each example falls into for the requested layer.
38127//	gradients: float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients.
38128//	hessians: float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians.
38129//	bucketized_features_list: int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column).
38130//	max_splits: int; the maximum number of splits possible in the whole tree.
//	num_buckets: int; equal to the maximum possible value of the bucketized feature.
38132//
38133// Returns output Rank 4 Tensor (shape=[#features, #splits, #buckets, 2]) containing accumulated stats put into the corresponding node and bucket. The first index of 4th dimension refers to gradients, and the second to hessians.
38134func BoostedTreesMakeStatsSummary(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, bucketized_features_list []tf.Output, max_splits int64, num_buckets int64) (stats_summary tf.Output) {
38135	if scope.Err() != nil {
38136		return
38137	}
38138	attrs := map[string]interface{}{"max_splits": max_splits, "num_buckets": num_buckets}
38139	opspec := tf.OpSpec{
38140		Type: "BoostedTreesMakeStatsSummary",
38141		Input: []tf.Input{
38142			node_ids, gradients, hessians, tf.OutputList(bucketized_features_list),
38143		},
38144		Attrs: attrs,
38145	}
38146	op := scope.AddOperation(opspec)
38147	return op.Output(0)
38148}
38149
38150// ResourceScatterNdAddAttr is an optional argument to ResourceScatterNdAdd.
38151type ResourceScatterNdAddAttr func(optionalAttr)
38152
38153// ResourceScatterNdAddUseLocking sets the optional use_locking attribute to value.
38154//
38155// value: An optional bool. Defaults to True. If True, the assignment will
38156// be protected by a lock; otherwise the behavior is undefined,
38157// but may exhibit less contention.
38158// If not specified, defaults to true
38159func ResourceScatterNdAddUseLocking(value bool) ResourceScatterNdAddAttr {
38160	return func(m optionalAttr) {
38161		m["use_locking"] = value
38162	}
38163}
38164
38165// Applies sparse addition to individual values or slices in a Variable.
38166//
38167// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
38168//
38169// `indices` must be integer tensor, containing indices into `ref`.
38170// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
38171//
38172// The innermost dimension of `indices` (with length `K`) corresponds to
38173// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
38174// dimension of `ref`.
38175//
38176// `updates` is `Tensor` of rank `Q-1+P-K` with shape:
38177//
38178// ```
38179// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
38180// ```
38181//
// For example, say we want to add 4 scattered elements to a rank-1 tensor with
38183// 8 elements. In Python, that addition would look like this:
38184//
38185// ```python
38186// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
38187// indices = tf.constant([[4], [3], [1], [7]])
38188// updates = tf.constant([9, 10, 11, 12])
38189// add = tf.scatter_nd_add(ref, indices, updates)
38190// with tf.Session() as sess:
//   print(sess.run(add))
38192// ```
38193//
38194// The resulting update to ref would look like this:
38195//
38196//     [1, 13, 3, 14, 14, 6, 7, 20]
38197//
38198// See `tf.scatter_nd` for more details about how to make updates to
38199// slices.
38200//
38201// Arguments:
38202//	ref: A resource handle. Must be from a VarHandleOp.
38203//	indices: A Tensor. Must be one of the following types: int32, int64.
38204// A tensor of indices into ref.
38205//	updates: A Tensor. Must have the same type as ref. A tensor of
38206// values to add to ref.
38207//
38208// Returns the created operation.
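//
// A minimal Go construction sketch mirroring the Python example above
// (assuming this op package imported as `op` and the core package as `tf`;
// the variable must be initialized, e.g. via AssignVariableOp, before the
// scatter-add runs):
//
// ```go
// s := op.NewScope()
// ref := op.VarHandleOp(s, tf.Int32, tf.MakeShape(8))
// init := op.AssignVariableOp(s, ref,
// 	op.Const(s, []int32{1, 2, 3, 4, 5, 6, 7, 8}))
// indices := op.Const(s, [][]int32{{4}, {3}, {1}, {7}})
// updates := op.Const(s, []int32{9, 10, 11, 12})
// add := op.ResourceScatterNdAdd(s, ref, indices, updates)
// _, _ = init, add // run `init` before `add` in a session
// ```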
38209func ResourceScatterNdAdd(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdAddAttr) (o *tf.Operation) {
38210	if scope.Err() != nil {
38211		return
38212	}
38213	attrs := map[string]interface{}{}
38214	for _, a := range optional {
38215		a(attrs)
38216	}
38217	opspec := tf.OpSpec{
38218		Type: "ResourceScatterNdAdd",
38219		Input: []tf.Input{
38220			ref, indices, updates,
38221		},
38222		Attrs: attrs,
38223	}
38224	return scope.AddOperation(opspec)
38225}
38226
38227// Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.
38228//
38229// Arguments:
38230//	tree_ensemble_handle: Handle to the tree ensemble.
38231//
38232// Returns:
38233//	stamp_token: Stamp token of the tree ensemble resource.
38234//	num_trees: The number of trees in the tree ensemble resource.
38235//	num_finalized_trees: The number of trees that were finished successfully.
38236//	num_attempted_layers: The number of layers we attempted to build (but not necessarily succeeded).
//	last_layer_nodes_range: Rank 1, size 2 tensor that contains the start and end ids of the nodes in the latest
38238// layer.
38239func BoostedTreesGetEnsembleStates(scope *Scope, tree_ensemble_handle tf.Output) (stamp_token tf.Output, num_trees tf.Output, num_finalized_trees tf.Output, num_attempted_layers tf.Output, last_layer_nodes_range tf.Output) {
38240	if scope.Err() != nil {
38241		return
38242	}
38243	opspec := tf.OpSpec{
38244		Type: "BoostedTreesGetEnsembleStates",
38245		Input: []tf.Input{
38246			tree_ensemble_handle,
38247		},
38248	}
38249	op := scope.AddOperation(opspec)
38250	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
38251}
38252
38253// LeakyReluAttr is an optional argument to LeakyRelu.
38254type LeakyReluAttr func(optionalAttr)
38255
38256// LeakyReluAlpha sets the optional alpha attribute to value.
38257// If not specified, defaults to 0.2
38258func LeakyReluAlpha(value float32) LeakyReluAttr {
38259	return func(m optionalAttr) {
38260		m["alpha"] = value
38261	}
38262}
38263
38264// Computes rectified linear: `max(features, features * alpha)`.
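//
// A minimal Go usage sketch (assuming this op package imported as `op`):
//
// ```go
// s := op.NewScope()
// act := op.LeakyRelu(s, op.Const(s, []float32{-2, 0, 3}),
// 	op.LeakyReluAlpha(0.1)) // => [-0.2 0 3]
// _ = act
// ```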
38265func LeakyRelu(scope *Scope, features tf.Output, optional ...LeakyReluAttr) (activations tf.Output) {
38266	if scope.Err() != nil {
38267		return
38268	}
38269	attrs := map[string]interface{}{}
38270	for _, a := range optional {
38271		a(attrs)
38272	}
38273	opspec := tf.OpSpec{
38274		Type: "LeakyRelu",
38275		Input: []tf.Input{
38276			features,
38277		},
38278		Attrs: attrs,
38279	}
38280	op := scope.AddOperation(opspec)
38281	return op.Output(0)
38282}
38283
38284// Deserializes a serialized tree ensemble config and replaces current tree
38285//
38286// ensemble.
38287//
38288// Arguments:
38289//	tree_ensemble_handle: Handle to the tree ensemble.
38290//	stamp_token: Token to use as the new value of the resource stamp.
38291//	tree_ensemble_serialized: Serialized proto of the ensemble.
38292//
38293// Returns the created operation.
38294func BoostedTreesDeserializeEnsemble(scope *Scope, tree_ensemble_handle tf.Output, stamp_token tf.Output, tree_ensemble_serialized tf.Output) (o *tf.Operation) {
38295	if scope.Err() != nil {
38296		return
38297	}
38298	opspec := tf.OpSpec{
38299		Type: "BoostedTreesDeserializeEnsemble",
38300		Input: []tf.Input{
38301			tree_ensemble_handle, stamp_token, tree_ensemble_serialized,
38302		},
38303	}
38304	return scope.AddOperation(opspec)
38305}
38306
38307// SampleDistortedBoundingBoxAttr is an optional argument to SampleDistortedBoundingBox.
38308type SampleDistortedBoundingBoxAttr func(optionalAttr)
38309
38310// SampleDistortedBoundingBoxSeed sets the optional seed attribute to value.
38311//
38312// value: If either `seed` or `seed2` are set to non-zero, the random number
38313// generator is seeded by the given `seed`.  Otherwise, it is seeded by a random
38314// seed.
38315// If not specified, defaults to 0
38316func SampleDistortedBoundingBoxSeed(value int64) SampleDistortedBoundingBoxAttr {
38317	return func(m optionalAttr) {
38318		m["seed"] = value
38319	}
38320}
38321
38322// SampleDistortedBoundingBoxSeed2 sets the optional seed2 attribute to value.
38323//
38324// value: A second seed to avoid seed collision.
38325// If not specified, defaults to 0
38326func SampleDistortedBoundingBoxSeed2(value int64) SampleDistortedBoundingBoxAttr {
38327	return func(m optionalAttr) {
38328		m["seed2"] = value
38329	}
38330}
38331
38332// SampleDistortedBoundingBoxMinObjectCovered sets the optional min_object_covered attribute to value.
38333//
38334// value: The cropped area of the image must contain at least this
38335// fraction of any bounding box supplied. The value of this parameter should be
38336// non-negative. In the case of 0, the cropped area does not need to overlap
38337// any of the bounding boxes supplied.
38338// If not specified, defaults to 0.1
38339func SampleDistortedBoundingBoxMinObjectCovered(value float32) SampleDistortedBoundingBoxAttr {
38340	return func(m optionalAttr) {
38341		m["min_object_covered"] = value
38342	}
38343}
38344
38345// SampleDistortedBoundingBoxAspectRatioRange sets the optional aspect_ratio_range attribute to value.
38346//
38347// value: The cropped area of the image must have an aspect ratio =
38348// width / height within this range.
// If not specified, defaults to [0.75, 1.33]
38350func SampleDistortedBoundingBoxAspectRatioRange(value []float32) SampleDistortedBoundingBoxAttr {
38351	return func(m optionalAttr) {
38352		m["aspect_ratio_range"] = value
38353	}
38354}
38355
38356// SampleDistortedBoundingBoxAreaRange sets the optional area_range attribute to value.
38357//
38358// value: The cropped area of the image must contain a fraction of the
38359// supplied image within this range.
// If not specified, defaults to [0.05, 1]
38361func SampleDistortedBoundingBoxAreaRange(value []float32) SampleDistortedBoundingBoxAttr {
38362	return func(m optionalAttr) {
38363		m["area_range"] = value
38364	}
38365}
38366
38367// SampleDistortedBoundingBoxMaxAttempts sets the optional max_attempts attribute to value.
38368//
38369// value: Number of attempts at generating a cropped region of the image
38370// of the specified constraints. After `max_attempts` failures, return the entire
38371// image.
38372// If not specified, defaults to 100
38373func SampleDistortedBoundingBoxMaxAttempts(value int64) SampleDistortedBoundingBoxAttr {
38374	return func(m optionalAttr) {
38375		m["max_attempts"] = value
38376	}
38377}
38378
38379// SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
38380//
38381// value: Controls behavior if no bounding boxes supplied.
38382// If true, assume an implicit bounding box covering the whole input. If false,
38383// raise an error.
38384// If not specified, defaults to false
38385func SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxAttr {
38386	return func(m optionalAttr) {
38387		m["use_image_if_no_bounding_boxes"] = value
38388	}
38389}
38390
38391// Generate a single randomly distorted bounding box for an image.
38392//
38393// Bounding box annotations are often supplied in addition to ground-truth labels
38394// in image recognition or object localization tasks. A common technique for
38395// training such a system is to randomly distort an image while preserving
38396// its content, i.e. *data augmentation*. This Op outputs a randomly distorted
38397// localization of an object, i.e. bounding box, given an `image_size`,
38398// `bounding_boxes` and a series of constraints.
38399//
38400// The output of this Op is a single bounding box that may be used to crop the
38401// original image. The output is returned as 3 tensors: `begin`, `size` and
38402// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
38403// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
38404// what the bounding box looks like.
38405//
38406// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
38407// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
38408// height of the underlying image.
38409//
38410// For example,
38411//
38412// ```python
38413//     # Generate a single distorted bounding box.
38414//     begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
38415//         tf.shape(image),
38416//         bounding_boxes=bounding_boxes)
38417//
38418//     # Draw the bounding box in an image summary.
38419//     image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
38420//                                                   bbox_for_draw)
38421//     tf.summary.image('images_with_box', image_with_box)
38422//
38423//     # Employ the bounding box to distort the image.
38424//     distorted_image = tf.slice(image, begin, size)
38425// ```
38426//
38427// Note that if no bounding box information is available, setting
38428// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
38429// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
38430// false and no bounding boxes are supplied, an error is raised.
38431//
38432// Arguments:
38433//	image_size: 1-D, containing `[height, width, channels]`.
38434//	bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
38435// associated with the image.
38436//
38437// Returns:
38438//	begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
38439// `tf.slice`.
38440//	size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
38441// `tf.slice`.
38442//	bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
38443// Provide as input to `tf.image.draw_bounding_boxes`.
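//
// A minimal Go construction sketch mirroring the Python example above
// (assuming this op package imported as `op`; the image size and box are
// placeholders):
//
// ```go
// s := op.NewScope()
// imageSize := op.Const(s, []int32{480, 640, 3})
// boxes := op.Const(s, [][][]float32{{{0.1, 0.1, 0.9, 0.9}}})
// begin, size, bboxes := op.SampleDistortedBoundingBox(s, imageSize, boxes,
// 	op.SampleDistortedBoundingBoxMinObjectCovered(0.5))
// _, _, _ = begin, size, bboxes // feed begin/size into Slice to crop
// ```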
38444func SampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, optional ...SampleDistortedBoundingBoxAttr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
38445	if scope.Err() != nil {
38446		return
38447	}
38448	attrs := map[string]interface{}{}
38449	for _, a := range optional {
38450		a(attrs)
38451	}
38452	opspec := tf.OpSpec{
38453		Type: "SampleDistortedBoundingBox",
38454		Input: []tf.Input{
38455			image_size, bounding_boxes,
38456		},
38457		Attrs: attrs,
38458	}
38459	op := scope.AddOperation(opspec)
38460	return op.Output(0), op.Output(1), op.Output(2)
38461}
38462
38463// DecodeBmpAttr is an optional argument to DecodeBmp.
38464type DecodeBmpAttr func(optionalAttr)
38465
38466// DecodeBmpChannels sets the optional channels attribute to value.
38467// If not specified, defaults to 0
38468func DecodeBmpChannels(value int64) DecodeBmpAttr {
38469	return func(m optionalAttr) {
38470		m["channels"] = value
38471	}
38472}
38473
38474// Decode the first frame of a BMP-encoded image to a uint8 tensor.
38475//
38476// The attr `channels` indicates the desired number of color channels for the
38477// decoded image.
38478//
38479// Accepted values are:
38480//
38481// *   0: Use the number of channels in the BMP-encoded image.
38482// *   3: output an RGB image.
38483// *   4: output an RGBA image.
38484//
38485// Arguments:
38486//	contents: 0-D.  The BMP-encoded image.
38487//
// Returns 3-D with shape `[height, width, channels]`. RGB order.
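//
// A minimal Go usage sketch (assuming this op package imported as `op`; the
// file name is a placeholder):
//
// ```go
// s := op.NewScope()
// contents := op.ReadFile(s, op.Const(s, "input.bmp"))
// image := op.DecodeBmp(s, contents, op.DecodeBmpChannels(3))
// _ = image // 3-D uint8, RGB
// ```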
38489func DecodeBmp(scope *Scope, contents tf.Output, optional ...DecodeBmpAttr) (image tf.Output) {
38490	if scope.Err() != nil {
38491		return
38492	}
38493	attrs := map[string]interface{}{}
38494	for _, a := range optional {
38495		a(attrs)
38496	}
38497	opspec := tf.OpSpec{
38498		Type: "DecodeBmp",
38499		Input: []tf.Input{
38500			contents,
38501		},
38502		Attrs: attrs,
38503	}
38504	op := scope.AddOperation(opspec)
38505	return op.Output(0)
38506}
38507
38508// Elementwise computes the bitwise right-shift of `x` and `y`.
38509//
38510// Performs a logical shift for unsigned integer types, and an arithmetic shift
38511// for signed integer types.
38512//
// If `y` is negative, or greater than or equal to the width of `x` in bits,
// the result is implementation defined.
38515//
38516// Example:
38517//
38518// ```python
38519// import tensorflow as tf
38520// from tensorflow.python.ops import bitwise_ops
38521// import numpy as np
38522// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
38523//
38524// for dtype in dtype_list:
38525//   lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
38526//   rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
38527//
38528//   right_shift_result = bitwise_ops.right_shift(lhs, rhs)
38529//
38530//   print(right_shift_result)
38531//
38532// # This will print:
38533// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8)
38534// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16)
38535// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32)
38536// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64)
38537//
38538// lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
38539// rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
38540// bitwise_ops.right_shift(lhs, rhs)
38541// # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
38542// ```
38543//
38544func RightShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
38545	if scope.Err() != nil {
38546		return
38547	}
38548	opspec := tf.OpSpec{
38549		Type: "RightShift",
38550		Input: []tf.Input{
38551			x, y,
38552		},
38553	}
38554	op := scope.AddOperation(opspec)
38555	return op.Output(0)
38556}
38557
38558// UnbatchGradAttr is an optional argument to UnbatchGrad.
38559type UnbatchGradAttr func(optionalAttr)
38560
38561// UnbatchGradContainer sets the optional container attribute to value.
38562// If not specified, defaults to ""
38563func UnbatchGradContainer(value string) UnbatchGradAttr {
38564	return func(m optionalAttr) {
38565		m["container"] = value
38566	}
38567}
38568
38569// UnbatchGradSharedName sets the optional shared_name attribute to value.
38570// If not specified, defaults to ""
38571func UnbatchGradSharedName(value string) UnbatchGradAttr {
38572	return func(m optionalAttr) {
38573		m["shared_name"] = value
38574	}
38575}
38576
38577// Gradient of Unbatch.
38578//
// Acts like Batch but uses the given batch_index to index things as they
38580// become available. This ensures that the gradients are propagated back in the
38581// same session which did the forward pass.
38582//
38583// original_input: The input to the Unbatch operation this is the gradient of.
38584// batch_index: The batch_index given to the Unbatch operation this is the gradient
38585// of.
38586// grad: The downstream gradient.
38587// id: The id scalar emitted by Batch.
38588// batched_grad: The return value, either an empty tensor or the batched gradient.
38589// container: Container to control resource sharing.
38590// shared_name: Instances of UnbatchGrad with the same container and shared_name
38591//  are assumed to possibly belong to the same batch. If left empty, the op name
38592//  will be used as the shared name.
38593func UnbatchGrad(scope *Scope, original_input tf.Output, batch_index tf.Output, grad tf.Output, id tf.Output, optional ...UnbatchGradAttr) (batched_grad tf.Output) {
38594	if scope.Err() != nil {
38595		return
38596	}
38597	attrs := map[string]interface{}{}
38598	for _, a := range optional {
38599		a(attrs)
38600	}
38601	opspec := tf.OpSpec{
38602		Type: "UnbatchGrad",
38603		Input: []tf.Input{
38604			original_input, batch_index, grad, id,
38605		},
38606		Attrs: attrs,
38607	}
38608	op := scope.AddOperation(opspec)
38609	return op.Output(0)
38610}
38611
38612// Returns locations of nonzero / true values in a tensor.
38613//
38614// This operation returns the coordinates of true elements in `condition`. The
38615// coordinates are returned in a 2-D tensor where the first dimension (rows)
38616// represents the number of true elements, and the second dimension (columns)
38617// represents the coordinates of the true elements. Keep in mind, the shape of
38618// the output tensor can vary depending on how many true values there are in
38619// `condition`. Indices are output in row-major order.
38620//
38621// For example:
38622//
38623// ```
38624// # 'input' tensor is [[True, False]
38625// #                    [True, False]]
38626// # 'input' has two true values, so output has two coordinates.
38627// # 'input' has rank of 2, so coordinates have two indices.
38628// where(input) ==> [[0, 0],
38629//                   [1, 0]]
38630//
38631// # `condition` tensor is [[[True, False]
38632// #                     [True, False]]
38633// #                    [[False, True]
38634// #                     [False, True]]
38635// #                    [[False, False]
38636// #                     [False, True]]]
38637// # 'input' has 5 true values, so output has 5 coordinates.
38638// # 'input' has rank of 3, so coordinates have three indices.
38639// where(input) ==> [[0, 0, 0],
38640//                   [0, 1, 0],
38641//                   [1, 0, 1],
38642//                   [1, 1, 1],
38643//                   [2, 1, 1]]
38644//
38645// # `condition` tensor is [[[1.5,  0.0]
38646// #                     [-0.5, 0.0]]
38647// #                    [[0.0,  0.25]
38648// #                     [0.0,  0.75]]
38649// #                    [[0.0,  0.0]
38650// #                     [0.0,  0.01]]]
38651// # 'input' has 5 nonzero values, so output has 5 coordinates.
38652// # 'input' has rank of 3, so coordinates have three indices.
38653// where(input) ==> [[0, 0, 0],
38654//                   [0, 1, 0],
38655//                   [1, 0, 1],
38656//                   [1, 1, 1],
38657//                   [2, 1, 1]]
38658//
38659// # `condition` tensor is [[[1.5 + 0.0j, 0.0  + 0.0j]
38660// #                     [0.0 + 0.5j, 0.0  + 0.0j]]
38661// #                    [[0.0 + 0.0j, 0.25 + 1.5j]
38662// #                     [0.0 + 0.0j, 0.75 + 0.0j]]
38663// #                    [[0.0 + 0.0j, 0.0  + 0.0j]
38664// #                     [0.0 + 0.0j, 0.01 + 0.0j]]]
38665// # 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
38666// # 'input' has rank of 3, so coordinates have three indices.
38667// where(input) ==> [[0, 0, 0],
38668//                   [0, 1, 0],
38669//                   [1, 0, 1],
38670//                   [1, 1, 1],
38671//                   [2, 1, 1]]
38672// ```
38673func Where(scope *Scope, condition tf.Output) (index tf.Output) {
38674	if scope.Err() != nil {
38675		return
38676	}
38677	opspec := tf.OpSpec{
38678		Type: "Where",
38679		Input: []tf.Input{
38680			condition,
38681		},
38682	}
38683	op := scope.AddOperation(opspec)
38684	return op.Output(0)
38685}
38686
38687// MfccAttr is an optional argument to Mfcc.
38688type MfccAttr func(optionalAttr)
38689
38690// MfccUpperFrequencyLimit sets the optional upper_frequency_limit attribute to value.
38691//
38692// value: The highest frequency to use when calculating the
// cepstrum.
38694// If not specified, defaults to 4000
38695func MfccUpperFrequencyLimit(value float32) MfccAttr {
38696	return func(m optionalAttr) {
38697		m["upper_frequency_limit"] = value
38698	}
38699}
38700
38701// MfccLowerFrequencyLimit sets the optional lower_frequency_limit attribute to value.
38702//
38703// value: The lowest frequency to use when calculating the
// cepstrum.
38705// If not specified, defaults to 20
38706func MfccLowerFrequencyLimit(value float32) MfccAttr {
38707	return func(m optionalAttr) {
38708		m["lower_frequency_limit"] = value
38709	}
38710}
38711
38712// MfccFilterbankChannelCount sets the optional filterbank_channel_count attribute to value.
38713//
38714// value: Resolution of the Mel bank used internally.
38715// If not specified, defaults to 40
38716func MfccFilterbankChannelCount(value int64) MfccAttr {
38717	return func(m optionalAttr) {
38718		m["filterbank_channel_count"] = value
38719	}
38720}
38721
38722// MfccDctCoefficientCount sets the optional dct_coefficient_count attribute to value.
38723//
38724// value: How many output channels to produce per time slice.
38725// If not specified, defaults to 13
38726func MfccDctCoefficientCount(value int64) MfccAttr {
38727	return func(m optionalAttr) {
38728		m["dct_coefficient_count"] = value
38729	}
38730}
38731
38732// Transforms a spectrogram into a form that's useful for speech recognition.
38733//
38734// Mel Frequency Cepstral Coefficients are a way of representing audio data that's
38735// been effective as an input feature for machine learning. They are created by
38736// taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
38737// higher frequencies that are less significant to the human ear. They have a long
38738// history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
38739// is a good resource to learn more.
38740//
38741// Arguments:
38742//	spectrogram: Typically produced by the Spectrogram op, with magnitude_squared
38743// set to true.
//	sample_rate: How many samples per second the source audio uses.
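//
// A minimal Go construction sketch (assuming this op package imported as
// `op`; the waveform, window size, and stride are placeholders):
//
// ```go
// s := op.NewScope()
// samples := op.Const(s, [][]float32{{0}, {0.1}, {0.2}, {0.1}}) // [length, channels]
// spec := op.AudioSpectrogram(s, samples, 4, 2,
// 	op.AudioSpectrogramMagnitudeSquared(true))
// mfcc := op.Mfcc(s, spec, op.Const(s, int32(16000)),
// 	op.MfccDctCoefficientCount(13))
// _ = mfcc
// ```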
38745func Mfcc(scope *Scope, spectrogram tf.Output, sample_rate tf.Output, optional ...MfccAttr) (output tf.Output) {
38746	if scope.Err() != nil {
38747		return
38748	}
38749	attrs := map[string]interface{}{}
38750	for _, a := range optional {
38751		a(attrs)
38752	}
38753	opspec := tf.OpSpec{
38754		Type: "Mfcc",
38755		Input: []tf.Input{
38756			spectrogram, sample_rate,
38757		},
38758		Attrs: attrs,
38759	}
38760	op := scope.AddOperation(opspec)
38761	return op.Output(0)
38762}
38763
38764// Produces a summary of any statistics recorded by the given statistics manager.
38765func StatsAggregatorSummary(scope *Scope, iterator tf.Output) (summary tf.Output) {
38766	if scope.Err() != nil {
38767		return
38768	}
38769	opspec := tf.OpSpec{
38770		Type: "StatsAggregatorSummary",
38771		Input: []tf.Input{
38772			iterator,
38773		},
38774	}
38775	op := scope.AddOperation(opspec)
38776	return op.Output(0)
38777}
38778
38779// Converts an array of flat indices into a tuple of coordinate arrays.
38780//
38781//
38782// Example:
38783//
38784// ```
38785// y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])
38786// # 'dims' represent a hypothetical (3, 3) tensor of indices:
38787// # [[0, 1, *2*],
38788// #  [3, 4, *5*],
38789// #  [6, *7*, 8]]
38790// # For each entry from 'indices', this operation returns
38791// # its coordinates (marked with '*'), such as
38792// # 2 ==> (0, 2)
38793// # 5 ==> (1, 2)
38794// # 7 ==> (2, 1)
38795// y ==> [[0, 1, 2], [2, 2, 1]]
38796// ```
38797//
38798// @compatibility(numpy)
38799// Equivalent to np.unravel_index
38800// @end_compatibility
38801//
38802// Arguments:
//	indices: A 0-D or 1-D `int` Tensor whose elements are indices into the
// flattened version of an array of dimensions dims.
//	dims: A 1-D `int` Tensor. The shape of the array to use for unraveling
// indices.
//
// Returns A 2-D (or 1-D if indices is 0-D) tensor where each row has the
// same shape as the indices array.
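//
// A minimal Go usage sketch mirroring the example above (assuming this op
// package imported as `op`):
//
// ```go
// s := op.NewScope()
// coords := op.UnravelIndex(s,
// 	op.Const(s, []int32{2, 5, 7}), // indices
// 	op.Const(s, []int32{3, 3}))    // dims
// _ = coords // => [[0 1 2] [2 2 1]]
// ```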
38810func UnravelIndex(scope *Scope, indices tf.Output, dims tf.Output) (output tf.Output) {
38811	if scope.Err() != nil {
38812		return
38813	}
38814	opspec := tf.OpSpec{
38815		Type: "UnravelIndex",
38816		Input: []tf.Input{
38817			indices, dims,
38818		},
38819	}
38820	op := scope.AddOperation(opspec)
38821	return op.Output(0)
38822}
38823
38824// SparseReduceSumSparseAttr is an optional argument to SparseReduceSumSparse.
38825type SparseReduceSumSparseAttr func(optionalAttr)
38826
38827// SparseReduceSumSparseKeepDims sets the optional keep_dims attribute to value.
38828//
38829// value: If true, retain reduced dimensions with length 1.
38830// If not specified, defaults to false
38831func SparseReduceSumSparseKeepDims(value bool) SparseReduceSumSparseAttr {
38832	return func(m optionalAttr) {
38833		m["keep_dims"] = value
38834	}
38835}
38836
38837// Computes the sum of elements across dimensions of a SparseTensor.
38838//
38839// This Op takes a SparseTensor and is the sparse counterpart to
38840// `tf.reduce_sum()`.  In contrast to SparseReduceSum, this Op returns a
38841// SparseTensor.
38842//
38843// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
38844// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
38845// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
38846// with length 1.
38847//
38848// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
38849// with a single element is returned.  Additionally, the axes can be negative,
38850// which are interpreted according to the indexing rules in Python.
38851//
38852// Arguments:
38853//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
38854// SparseTensor, possibly not in canonical ordering.
38855//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
38856//	input_shape: 1-D.  Shape of the input SparseTensor.
38857//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
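//
// A minimal Go construction sketch (assuming this op package imported as
// `op`; the SparseTensor below has shape [2, 3] with two non-empty values):
//
// ```go
// s := op.NewScope()
// indices := op.Const(s, [][]int64{{0, 0}, {1, 2}})
// values := op.Const(s, []int32{1, 2})
// shape := op.Const(s, []int64{2, 3})
// axes := op.Const(s, []int32{1})
// oi, ov, os := op.SparseReduceSumSparse(s, indices, values, shape, axes,
// 	op.SparseReduceSumSparseKeepDims(true))
// _, _, _ = oi, ov, os
// ```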
38858func SparseReduceSumSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
38859	if scope.Err() != nil {
38860		return
38861	}
38862	attrs := map[string]interface{}{}
38863	for _, a := range optional {
38864		a(attrs)
38865	}
38866	opspec := tf.OpSpec{
38867		Type: "SparseReduceSumSparse",
38868		Input: []tf.Input{
38869			input_indices, input_values, input_shape, reduction_axes,
38870		},
38871		Attrs: attrs,
38872	}
38873	op := scope.AddOperation(opspec)
38874	return op.Output(0), op.Output(1), op.Output(2)
38875}
38876
38877// Computes rectified linear: `max(features, 0)`.
38878//
38879// See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
38880// Example usage:
38881// >>> tf.nn.relu([-2., 0., 3.]).numpy()
38882// array([0., 0., 3.], dtype=float32)
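//
// A minimal Go usage sketch (assuming this op package imported as `op`):
//
// ```go
// s := op.NewScope()
// act := op.Relu(s, op.Const(s, []float32{-2, 0, 3})) // => [0 0 3]
// _ = act
// ```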
38883func Relu(scope *Scope, features tf.Output) (activations tf.Output) {
38884	if scope.Err() != nil {
38885		return
38886	}
38887	opspec := tf.OpSpec{
38888		Type: "Relu",
38889		Input: []tf.Input{
38890			features,
38891		},
38892	}
38893	op := scope.AddOperation(opspec)
38894	return op.Output(0)
38895}
38896
38897// MatMulAttr is an optional argument to MatMul.
38898type MatMulAttr func(optionalAttr)
38899
38900// MatMulTransposeA sets the optional transpose_a attribute to value.
38901//
38902// value: If true, "a" is transposed before multiplication.
38903// If not specified, defaults to false
38904func MatMulTransposeA(value bool) MatMulAttr {
38905	return func(m optionalAttr) {
38906		m["transpose_a"] = value
38907	}
38908}
38909
38910// MatMulTransposeB sets the optional transpose_b attribute to value.
38911//
38912// value: If true, "b" is transposed before multiplication.
38913// If not specified, defaults to false
38914func MatMulTransposeB(value bool) MatMulAttr {
38915	return func(m optionalAttr) {
38916		m["transpose_b"] = value
38917	}
38918}
38919
38920// Multiply the matrix "a" by the matrix "b".
38921//
38922// The inputs must be two-dimensional matrices and the inner dimension of
38923// "a" (after being transposed if transpose_a is true) must match the
38924// outer dimension of "b" (after being transposed if transposed_b is
38925// true).
38926//
38927// *Note*: The default kernel implementation for MatMul on GPUs uses
38928// cublas.
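//
// A complete Go example (a sketch, assuming the standard import paths of this
// repository):
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	a := op.Const(s, [][]float32{{1, 2}, {3, 4}})
// 	b := op.Const(s, [][]float32{{5, 6}, {7, 8}})
// 	product := op.MatMul(s, a, b, op.MatMulTransposeB(true))
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	defer sess.Close()
// 	out, err := sess.Run(nil, []tf.Output{product}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value()) // [[17 23] [39 53]]
// }
// ```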
38929func MatMul(scope *Scope, a tf.Output, b tf.Output, optional ...MatMulAttr) (product tf.Output) {
38930	if scope.Err() != nil {
38931		return
38932	}
38933	attrs := map[string]interface{}{}
38934	for _, a := range optional {
38935		a(attrs)
38936	}
38937	opspec := tf.OpSpec{
38938		Type: "MatMul",
38939		Input: []tf.Input{
38940			a, b,
38941		},
38942		Attrs: attrs,
38943	}
38944	op := scope.AddOperation(opspec)
38945	return op.Output(0)
38946}
38947
38948// Compresses a dataset element.
38949func CompressElement(scope *Scope, components []tf.Output) (compressed tf.Output) {
38950	if scope.Err() != nil {
38951		return
38952	}
38953	opspec := tf.OpSpec{
38954		Type: "CompressElement",
38955		Input: []tf.Input{
38956			tf.OutputList(components),
38957		},
38958	}
38959	op := scope.AddOperation(opspec)
38960	return op.Output(0)
38961}
38962
38963// Calculates gains for each feature and returns the best possible split information for each node. However, if no split is found, then no split information is returned for that node.
38964//
38965// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
38966//
38967// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
38968//
// In this manner, the output is the best split per feature and per node, and it needs to be combined later to produce the best split for each node (among all possible features).
//
// The output shapes are compatible in a way that the first dimension of all tensors is the same and equal to the number of possible split nodes for each feature.
38972//
38973// Arguments:
//	node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as in `for node_id in range(node_id_range[0], node_id_range[1])` (note that the last index, node_id_range[1], is exclusive).
//	stats_summaries_list: A list of Rank 4 tensors (shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per bucket for each feature.
38976// The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
38977//	split_types: A Rank 1 tensor indicating if this Op should perform inequality split or equality split per feature.
38978//	candidate_feature_ids: Rank 1 tensor with ids for each feature. This is the real id of the feature.
38979//	l1: l1 regularization factor on leaf weights, per instance based.
38980//	l2: l2 regularization factor on leaf weights, per instance based.
38981//	tree_complexity: adjustment to the gain, per leaf based.
//	min_node_weight: minimum avg of hessians in a node required before the node is considered for splitting.
38983//	logits_dimension: The dimension of logit, i.e., number of classes.
38984//
38985// Returns:
//	node_ids: A Rank 1 tensor indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has a different size as each feature provides different possible nodes. See above for details like shapes and sizes.
//	gains: A Rank 1 tensor indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
//	feature_ids: A Rank 1 tensor indicating the best feature id for each node. See above for details like shapes and sizes.
//	feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimensional. See above for details like shapes and sizes.
//	thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
//	left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
//	right_node_contribs: A Rank 2 tensor with the same shape/conditions as left_node_contribs_list, but the value is for the right node.
//	split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing. See above for details like shapes and sizes.
38994// Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
38995func BoostedTreesCalculateBestFeatureSplitV2(scope *Scope, node_id_range tf.Output, stats_summaries_list []tf.Output, split_types tf.Output, candidate_feature_ids tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, logits_dimension int64) (node_ids tf.Output, gains tf.Output, feature_ids tf.Output, feature_dimensions tf.Output, thresholds tf.Output, left_node_contribs tf.Output, right_node_contribs tf.Output, split_with_default_directions tf.Output) {
38996	if scope.Err() != nil {
38997		return
38998	}
38999	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
39000	opspec := tf.OpSpec{
39001		Type: "BoostedTreesCalculateBestFeatureSplitV2",
39002		Input: []tf.Input{
39003			node_id_range, tf.OutputList(stats_summaries_list), split_types, candidate_feature_ids, l1, l2, tree_complexity, min_node_weight,
39004		},
39005		Attrs: attrs,
39006	}
39007	op := scope.AddOperation(opspec)
39008	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6), op.Output(7)
39009}
39010
39011// A placeholder op for a value that will be fed into the computation.
39012//
39013// Arguments:
39014//	dtype: The type of elements in the tensor.
39015//	shape: The shape of the tensor.
39016//
39017// Returns A tensor that will be provided using the infeed mechanism.
39018func InfeedDequeue(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
39019	if scope.Err() != nil {
39020		return
39021	}
39022	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
39023	opspec := tf.OpSpec{
39024		Type: "InfeedDequeue",
39025
39026		Attrs: attrs,
39027	}
39028	op := scope.AddOperation(opspec)
39029	return op.Output(0)
39030}
39031
39032// Encodes a `RaggedTensor` into a `variant` Tensor.
39033//
39034//
39035// Encodes the given `RaggedTensor` and returns a `variant` Tensor. If
39036// `batched_input` is True, then input `RaggedTensor` is unbatched along the
39037// zero-th dimension, each component `RaggedTensor` is encoded into a scalar
39038// `variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.
39039// If `batched_input` is False, then the input `RaggedTensor` is encoded as is and
39040// a scalar `variant` Tensor is returned. A `RaggedTensor` is encoded by first
39041// creating a 1-D `variant` Tensor with `ragged_rank + 1` elements, containing the
39042// splits and values Tensors of the `RaggedTensor`. Then the 1-D `variant` Tensor
39043// is wrapped in a scalar `variant` Tensor. See `RaggedTensorFromVariant` for the
39044// corresponding decoding logic.
39045//
39046//
39047// Arguments:
39048//	rt_nested_splits: A list of one or more Tensors representing the splits of the input
39049// `RaggedTensor`.
39050//	rt_dense_values: A Tensor representing the values of the input `RaggedTensor`.
39051//	batched_input: A `bool` denoting whether the input is a batched `RaggedTensor`.
39052//
// Returns A `variant` Tensor containing the encoded `RaggedTensor`.
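//
// A minimal Go construction sketch encoding the ragged value [[1, 2], [3, 4, 5]]
// (assuming this op package imported as `op` and the core package as `tf`):
//
// ```go
// s := op.NewScope()
// splits := op.Const(s, []int64{0, 2, 5})
// values := op.Const(s, []int32{1, 2, 3, 4, 5})
// encoded := op.RaggedTensorToVariant(s, []tf.Output{splits}, values, true)
// _ = encoded
// ```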
39054func RaggedTensorToVariant(scope *Scope, rt_nested_splits []tf.Output, rt_dense_values tf.Output, batched_input bool) (encoded_ragged tf.Output) {
39055	if scope.Err() != nil {
39056		return
39057	}
39058	attrs := map[string]interface{}{"batched_input": batched_input}
39059	opspec := tf.OpSpec{
39060		Type: "RaggedTensorToVariant",
39061		Input: []tf.Input{
39062			tf.OutputList(rt_nested_splits), rt_dense_values,
39063		},
39064		Attrs: attrs,
39065	}
39066	op := scope.AddOperation(opspec)
39067	return op.Output(0)
39068}
39069
39070// ResizeNearestNeighborAttr is an optional argument to ResizeNearestNeighbor.
39071type ResizeNearestNeighborAttr func(optionalAttr)
39072
39073// ResizeNearestNeighborAlignCorners sets the optional align_corners attribute to value.
39074//
39075// value: If true, the centers of the 4 corner pixels of the input and output tensors are
39076// aligned, preserving the values at the corner pixels. Defaults to false.
39077// If not specified, defaults to false
39078func ResizeNearestNeighborAlignCorners(value bool) ResizeNearestNeighborAttr {
39079	return func(m optionalAttr) {
39080		m["align_corners"] = value
39081	}
39082}
39083
39084// ResizeNearestNeighborHalfPixelCenters sets the optional half_pixel_centers attribute to value.
39085// If not specified, defaults to false
39086func ResizeNearestNeighborHalfPixelCenters(value bool) ResizeNearestNeighborAttr {
39087	return func(m optionalAttr) {
39088		m["half_pixel_centers"] = value
39089	}
39090}
39091
39092// Resize `images` to `size` using nearest neighbor interpolation.
39093//
39094// Arguments:
39095//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
39097// new size for the images.
39098//
39099// Returns 4-D with shape
39100// `[batch, new_height, new_width, channels]`.
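//
// A minimal Go construction sketch (assuming this op package imported as
// `op` and the core package as `tf`; the shapes are placeholders):
//
// ```go
// s := op.NewScope()
// images := op.Placeholder(s, tf.Float,
// 	op.PlaceholderShape(tf.MakeShape(1, 4, 4, 3)))
// resized := op.ResizeNearestNeighbor(s, images, op.Const(s, []int32{8, 8}),
// 	op.ResizeNearestNeighborHalfPixelCenters(true))
// _ = resized
// ```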
39101func ResizeNearestNeighbor(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeNearestNeighborAttr) (resized_images tf.Output) {
39102	if scope.Err() != nil {
39103		return
39104	}
39105	attrs := map[string]interface{}{}
39106	for _, a := range optional {
39107		a(attrs)
39108	}
39109	opspec := tf.OpSpec{
39110		Type: "ResizeNearestNeighbor",
39111		Input: []tf.Input{
39112			images, size,
39113		},
39114		Attrs: attrs,
39115	}
39116	op := scope.AddOperation(opspec)
39117	return op.Output(0)
39118}
39119
39120// An op to receive a tensor from the host.
39121//
39122// output: the tensor that will be received from the host.
39123// Toutput: element type for output.
39124// shape: shape for output.
39125// key: A unique identifier for this region used to match up host transfers.
39126func XlaRecvFromHost(scope *Scope, Toutput tf.DataType, shape tf.Shape, key string) (output tf.Output) {
39127	if scope.Err() != nil {
39128		return
39129	}
39130	attrs := map[string]interface{}{"Toutput": Toutput, "shape": shape, "key": key}
39131	opspec := tf.OpSpec{
39132		Type: "XlaRecvFromHost",
39133
39134		Attrs: attrs,
39135	}
39136	op := scope.AddOperation(opspec)
39137	return op.Output(0)
39138}
39139
39140// QuantizedDepthwiseConv2DWithBiasAndReluAttr is an optional argument to QuantizedDepthwiseConv2DWithBiasAndRelu.
39141type QuantizedDepthwiseConv2DWithBiasAndReluAttr func(optionalAttr)
39142
39143// QuantizedDepthwiseConv2DWithBiasAndReluOutType sets the optional out_type attribute to value.
39144//
39145// value: The type of the output.
39146// If not specified, defaults to DT_QINT32
39147func QuantizedDepthwiseConv2DWithBiasAndReluOutType(value tf.DataType) QuantizedDepthwiseConv2DWithBiasAndReluAttr {
39148	return func(m optionalAttr) {
39149		m["out_type"] = value
39150	}
39151}
39152
39153// QuantizedDepthwiseConv2DWithBiasAndReluDilations sets the optional dilations attribute to value.
39154//
39155// value: List of dilation values.
// If not specified, defaults to [1, 1, 1, 1]
39157func QuantizedDepthwiseConv2DWithBiasAndReluDilations(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAttr {
39158	return func(m optionalAttr) {
39159		m["dilations"] = value
39160	}
39161}
39162
39163// QuantizedDepthwiseConv2DWithBiasAndReluPaddingList sets the optional padding_list attribute to value.
// If not specified, defaults to the empty list
39165func QuantizedDepthwiseConv2DWithBiasAndReluPaddingList(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAttr {
39166	return func(m optionalAttr) {
39167		m["padding_list"] = value
39168	}
39169}
39170
39171// Computes quantized depthwise Conv2D with Bias and Relu.
39172//
39173// Arguments:
39174//	input: The original input tensor.
39175//	filter: The original filter tensor.
39176//	bias: The original bias tensor.
39177//	min_input: The float value that the minimum quantized input value represents.
39178//	max_input: The float value that the maximum quantized input value represents.
39179//	min_filter: The float value that the minimum quantized filter value represents.
39180//	max_filter: The float value that the maximum quantized filter value represents.
39181//	strides: List of stride values.
39182//
39183//
39184// Returns:
39185//	output: The output tensor.
39186//	min_output: The float value that the minimum quantized output value represents.
39187//	max_output: The float value that the maximum quantized output value represents.
39188func QuantizedDepthwiseConv2DWithBiasAndRelu(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DWithBiasAndReluAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
39189	if scope.Err() != nil {
39190		return
39191	}
39192	attrs := map[string]interface{}{"strides": strides, "padding": padding}
39193	for _, a := range optional {
39194		a(attrs)
39195	}
39196	opspec := tf.OpSpec{
39197		Type: "QuantizedDepthwiseConv2DWithBiasAndRelu",
39198		Input: []tf.Input{
39199			input, filter, bias, min_input, max_input, min_filter, max_filter,
39200		},
39201		Attrs: attrs,
39202	}
39203	op := scope.AddOperation(opspec)
39204	return op.Output(0), op.Output(1), op.Output(2)
39205}
39206
39207// OrderedMapUnstageNoKeyAttr is an optional argument to OrderedMapUnstageNoKey.
39208type OrderedMapUnstageNoKeyAttr func(optionalAttr)
39209
39210// OrderedMapUnstageNoKeyCapacity sets the optional capacity attribute to value.
39211// If not specified, defaults to 0
39212//
39213// REQUIRES: value >= 0
39214func OrderedMapUnstageNoKeyCapacity(value int64) OrderedMapUnstageNoKeyAttr {
39215	return func(m optionalAttr) {
39216		m["capacity"] = value
39217	}
39218}
39219
39220// OrderedMapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value.
39221// If not specified, defaults to 0
39222//
39223// REQUIRES: value >= 0
39224func OrderedMapUnstageNoKeyMemoryLimit(value int64) OrderedMapUnstageNoKeyAttr {
39225	return func(m optionalAttr) {
39226		m["memory_limit"] = value
39227	}
39228}
39229
39230// OrderedMapUnstageNoKeyContainer sets the optional container attribute to value.
39231// If not specified, defaults to ""
39232func OrderedMapUnstageNoKeyContainer(value string) OrderedMapUnstageNoKeyAttr {
39233	return func(m optionalAttr) {
39234		m["container"] = value
39235	}
39236}
39237
39238// OrderedMapUnstageNoKeySharedName sets the optional shared_name attribute to value.
39239// If not specified, defaults to ""
39240func OrderedMapUnstageNoKeySharedName(value string) OrderedMapUnstageNoKeyAttr {
39241	return func(m optionalAttr) {
39242		m["shared_name"] = value
39243	}
39244}
39245
39246// Op removes and returns the (key, value) element with the smallest
39247//
39248// key from the underlying container.   If the underlying container
39249// does not contain elements, the op will block until it does.
39250func OrderedMapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageNoKeyAttr) (key tf.Output, values []tf.Output) {
39251	if scope.Err() != nil {
39252		return
39253	}
39254	attrs := map[string]interface{}{"dtypes": dtypes}
39255	for _, a := range optional {
39256		a(attrs)
39257	}
39258	opspec := tf.OpSpec{
39259		Type: "OrderedMapUnstageNoKey",
39260		Input: []tf.Input{
39261			indices,
39262		},
39263		Attrs: attrs,
39264	}
39265	op := scope.AddOperation(opspec)
39266	if scope.Err() != nil {
39267		return
39268	}
39269	var idx int
39270	var err error
39271	key = op.Output(idx)
39272	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
39273		scope.UpdateErr("OrderedMapUnstageNoKey", err)
39274		return
39275	}
39276	return key, values
39277}
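
// A minimal usage sketch for OrderedMapUnstageNoKey (illustrative, not part of
// the generated API; it assumes a Scope `root` and that a matching
// OrderedMapStage op elsewhere has staged (key, value) pairs):
//
//	root := op.NewScope()
//	indices := op.Const(root, []int32{0})
//	key, values := op.OrderedMapUnstageNoKey(root, indices, []tf.DataType{tf.Float})
//	// key is the smallest staged key; values holds the tensors staged under it.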
39278
39279// Picks the best algorithm based on device, and scrambles seed into key and counter.
39280//
39281// This op picks the best counter-based RNG algorithm based on device, and scrambles a shape-[2] seed into a key and a counter, both needed by the counter-based algorithm. The scrambling is opaque but approximately satisfies the property that different seeds result in different key/counter pairs (which will in turn result in different random numbers).
39282//
39283// Arguments:
39284//	seed: 2 seeds (shape [2]).
39285//
39286// Returns:
39287//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
39288//	counter: Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).
39289//	alg: The RNG algorithm (shape int32[]).
39290func StatelessRandomGetKeyCounterAlg(scope *Scope, seed tf.Output) (key tf.Output, counter tf.Output, alg tf.Output) {
39291	if scope.Err() != nil {
39292		return
39293	}
39294	opspec := tf.OpSpec{
39295		Type: "StatelessRandomGetKeyCounterAlg",
39296		Input: []tf.Input{
39297			seed,
39298		},
39299	}
39300	op := scope.AddOperation(opspec)
39301	return op.Output(0), op.Output(1), op.Output(2)
39302}
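
// A minimal usage sketch (illustrative seed values; `root` is an op.NewScope()):
//
//	root := op.NewScope()
//	seed := op.Const(root, []int64{1, 2})
//	key, counter, alg := op.StatelessRandomGetKeyCounterAlg(root, seed)
//	// key (shape uint64[1]) and counter (shape uint64[2]) can then be passed,
//	// together with the algorithm id `alg`, to counter-based stateless RNG ops.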
39303
39304// Computes the sum along sparse segments of a tensor.
39305//
39306// Read
39307// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
39308// for an explanation of segments.
39309//
39310// Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
39311// dimension, selecting a subset of dimension 0, specified by `indices`.
39312//
39313// For example:
39314//
39315// ```python
39316// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
39317//
39318// # Select two rows, one segment.
39319// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
39320// # => [[0 0 0 0]]
39321//
39322// # Select two rows, two segments.
39323// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
39324// # => [[ 1  2  3  4]
39325// #     [-1 -2 -3 -4]]
39326//
39327// # Select all rows, two segments.
39328// tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
39329// # => [[0 0 0 0]
39330// #     [5 6 7 8]]
39331//
39332// # Which is equivalent to:
39333// tf.segment_sum(c, tf.constant([0, 0, 1]))
39334// ```
39335//
39336// Arguments:
39337//
39338//	indices: A 1-D tensor. Has same rank as `segment_ids`.
39339//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
39340//
39341// Returns Has same shape as data, except for dimension 0 which
39342// has size `k`, the number of segments.
39343func SparseSegmentSum(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
39344	if scope.Err() != nil {
39345		return
39346	}
39347	opspec := tf.OpSpec{
39348		Type: "SparseSegmentSum",
39349		Input: []tf.Input{
39350			data, indices, segment_ids,
39351		},
39352	}
39353	op := scope.AddOperation(opspec)
39354	return op.Output(0)
39355}
39356
39357// ShuffleDatasetAttr is an optional argument to ShuffleDataset.
39358type ShuffleDatasetAttr func(optionalAttr)
39359
39360// ShuffleDatasetReshuffleEachIteration sets the optional reshuffle_each_iteration attribute to value.
39361//
39362// value: If true, each iterator over this dataset will be given
39363// a different pseudorandomly generated seed, based on a sequence seeded by the
39364// `seed` and `seed2` inputs. If false, each iterator will be given the same
39365// seed, and repeated iteration over this dataset will yield the exact same
39366// sequence of results.
39367// If not specified, defaults to true
39368func ShuffleDatasetReshuffleEachIteration(value bool) ShuffleDatasetAttr {
39369	return func(m optionalAttr) {
39370		m["reshuffle_each_iteration"] = value
39371	}
39372}
39373
39374// Creates a dataset that shuffles elements from `input_dataset` pseudorandomly.
39375//
39376// Arguments:
39377//
39378//	buffer_size: The number of output elements to buffer in an iterator over
39379// this dataset. Compare with the `min_after_dequeue` attr when creating a
39380// `RandomShuffleQueue`.
39381//	seed: A scalar seed for the random number generator. If either `seed` or
39382// `seed2` is set to be non-zero, the random number generator is seeded
39383// by the given seed.  Otherwise, a random seed is used.
39384//	seed2: A second scalar seed to avoid seed collision.
39385//
39386//
39387func ShuffleDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShuffleDatasetAttr) (handle tf.Output) {
39388	if scope.Err() != nil {
39389		return
39390	}
39391	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
39392	for _, a := range optional {
39393		a(attrs)
39394	}
39395	opspec := tf.OpSpec{
39396		Type: "ShuffleDataset",
39397		Input: []tf.Input{
39398			input_dataset, buffer_size, seed, seed2,
39399		},
39400		Attrs: attrs,
39401	}
39402	op := scope.AddOperation(opspec)
39403	return op.Output(0)
39404}
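
// A hedged usage sketch: `inputDataset` is assumed to come from another
// dataset op in this package, and the scalar constants are illustrative.
//
//	root := op.NewScope()
//	bufferSize := op.Const(root, int64(1024))
//	seed := op.Const(root, int64(42))
//	seed2 := op.Const(root, int64(0))
//	shuffled := op.ShuffleDataset(root, inputDataset, bufferSize, seed, seed2,
//		[]tf.DataType{tf.Float}, []tf.Shape{tf.ScalarShape()},
//		op.ShuffleDatasetReshuffleEachIteration(true))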
39405
39406// Creates a dataset containing elements of first component of `input_dataset` having true in the last component.
39407func FilterByLastComponentDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (output tf.Output) {
39408	if scope.Err() != nil {
39409		return
39410	}
39411	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
39412	opspec := tf.OpSpec{
39413		Type: "FilterByLastComponentDataset",
39414		Input: []tf.Input{
39415			input_dataset,
39416		},
39417		Attrs: attrs,
39418	}
39419	op := scope.AddOperation(opspec)
39420	return op.Output(0)
39421}
39422
39423// Stops gradient computation.
39424//
39425// When executed in a graph, this op outputs its input tensor as-is.
39426//
39427// When building ops to compute gradients, this op prevents the contribution of
39428// its inputs to be taken into account.  Normally, the gradient generator adds ops
39429// to a graph to compute the derivatives of a specified 'loss' by recursively
39430// finding out inputs that contributed to its computation.  If you insert this op
39431// in the graph, its inputs are masked from the gradient generator.  They are not
39432// taken into account for computing gradients.
39433//
39434// This is useful any time you want to compute a value with TensorFlow but need
39435// to pretend that the value was a constant. For example, the softmax function
39436// for a vector x can be written as
39437//
39438// ```python
39439//
39440//   def softmax(x):
39441//     numerator = tf.exp(x)
39442//     denominator = tf.reduce_sum(numerator)
39443//     return numerator / denominator
39444// ```
39445//
39446// This however is susceptible to overflow if the values in x are large. An
39447// alternative more stable way is to subtract the maximum of x from each of the
39448// values.
39449//
39450// ```python
39451//
39452//   def stable_softmax(x):
39453//     z = x - tf.reduce_max(x)
39454//     numerator = tf.exp(z)
39455//     denominator = tf.reduce_sum(numerator)
39456//     return numerator / denominator
39457// ```
39458//
39459// However, when we backprop through the softmax to x, we don't want to backprop
39460// through the `tf.reduce_max(x)` calculation (if the max values are not unique,
39461// the gradient could flow to the wrong input); we want to treat it as a
39462// constant. Therefore, we should write this out as
39463//
39464// ```python
39465//
39466//   def stable_softmax(x):
39467//     z = x - tf.stop_gradient(tf.reduce_max(x))
39468//     numerator = tf.exp(z)
39469//     denominator = tf.reduce_sum(numerator)
39470//     return numerator / denominator
39471// ```
39472//
39473// Some other examples include:
39474//
39475// *  The *EM* algorithm where the *M-step* should not involve backpropagation
39476//    through the output of the *E-step*.
39477// *  Contrastive divergence training of Boltzmann machines where, when
39478//    differentiating the energy function, the training must not backpropagate
39479//    through the graph that generated the samples from the model.
39480// *  Adversarial training, where no backprop should happen through the adversarial
39481//    example generation process.
39482func StopGradient(scope *Scope, input tf.Output) (output tf.Output) {
39483	if scope.Err() != nil {
39484		return
39485	}
39486	opspec := tf.OpSpec{
39487		Type: "StopGradient",
39488		Input: []tf.Input{
39489			input,
39490		},
39491	}
39492	op := scope.AddOperation(opspec)
39493	return op.Output(0)
39494}
39495
39496// Helper operator for performing XLA-style broadcasts
39497//
39498// Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to
39499// whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules
39500// for binary operators.
39501//
39502// Arguments:
39503//	lhs: the LHS input tensor
39504//	rhs: the RHS input tensor
39505//	broadcast_dims: an XLA-style broadcast dimension specification
39506//
39507// Returns:
39508//	lhs_output: the broadcasted LHS tensor
39509//	rhs_output: the broadcasted RHS tensor
39510func XlaBroadcastHelper(scope *Scope, lhs tf.Output, rhs tf.Output, broadcast_dims tf.Output) (lhs_output tf.Output, rhs_output tf.Output) {
39511	if scope.Err() != nil {
39512		return
39513	}
39514	opspec := tf.OpSpec{
39515		Type: "XlaBroadcastHelper",
39516		Input: []tf.Input{
39517			lhs, rhs, broadcast_dims,
39518		},
39519	}
39520	op := scope.AddOperation(opspec)
39521	return op.Output(0), op.Output(1)
39522}
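
// A worked shape example (illustrative): with `lhs` of shape [2, 3], `rhs` of
// shape [3], and `broadcast_dims` = [1], the lower-rank `rhs` is expanded to
// shape [1, 3], so both outputs have rank 2 and broadcast against each other
// under XLA's binary-operator rules.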
39523
39524// Wraps the XLA Pad operator, documented at
39525//
39526//  https://www.tensorflow.org/performance/xla/operation_semantics#pad
39527// .
39528//
39529// Arguments:
39530//	input: A `Tensor` of type T.
39531//	padding_value: A scalar `Tensor` of type T.
39532//	padding_low: the padding to apply at the start of each input dimensions. Must
39533// be a compile-time constant 1D tensor of length equal to rank of input.
39534//	padding_high: the padding to apply at the end of each input dimension. Must
39535// be a compile-time constant 1D tensor of length equal to rank of input.
39536//	padding_interior: the padding to apply between each input element. Must
39537// be a compile-time constant 1D tensor of length equal to rank of input,
39538// containing only non-negative values.
39539//
39540// Returns A `Tensor` of type T.
39541func XlaPad(scope *Scope, input tf.Output, padding_value tf.Output, padding_low tf.Output, padding_high tf.Output, padding_interior tf.Output) (output tf.Output) {
39542	if scope.Err() != nil {
39543		return
39544	}
39545	opspec := tf.OpSpec{
39546		Type: "XlaPad",
39547		Input: []tf.Input{
39548			input, padding_value, padding_low, padding_high, padding_interior,
39549		},
39550	}
39551	op := scope.AddOperation(opspec)
39552	return op.Output(0)
39553}
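
// A worked 1-D example (illustrative): for `input` = [a, b, c] with
// padding_low = [1], padding_high = [2], and padding_interior = [1], the
// output is [p, a, p, b, p, c, p, p], where `p` is `padding_value`; its
// length is 3 + 1 + 2 + (3-1)*1 = 8.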
39554
39555// Updates the accumulator with a new value for global_step.
39556//
39557// Logs a warning if the accumulator's value is already higher than
39558// new_global_step.
39559//
39560// Arguments:
39561//	handle: The handle to an accumulator.
39562//	new_global_step: The new global_step value to set.
39563//
39564// Returns the created operation.
39565func ResourceAccumulatorSetGlobalStep(scope *Scope, handle tf.Output, new_global_step tf.Output) (o *tf.Operation) {
39566	if scope.Err() != nil {
39567		return
39568	}
39569	opspec := tf.OpSpec{
39570		Type: "ResourceAccumulatorSetGlobalStep",
39571		Input: []tf.Input{
39572			handle, new_global_step,
39573		},
39574	}
39575	return scope.AddOperation(opspec)
39576}
39577
39578// Returns a tensor of ones with the same shape and type as x.
39579//
39580// Arguments:
39581//	x: a tensor of type T.
39582//
39583// Returns a tensor of the same shape and type as x but filled with ones.
39584func OnesLike(scope *Scope, x tf.Output) (y tf.Output) {
39585	if scope.Err() != nil {
39586		return
39587	}
39588	opspec := tf.OpSpec{
39589		Type: "OnesLike",
39590		Input: []tf.Input{
39591			x,
39592		},
39593	}
39594	op := scope.AddOperation(opspec)
39595	return op.Output(0)
39596}
39597
39598// StatsAggregatorHandleAttr is an optional argument to StatsAggregatorHandle.
39599type StatsAggregatorHandleAttr func(optionalAttr)
39600
39601// StatsAggregatorHandleContainer sets the optional container attribute to value.
39602// If not specified, defaults to ""
39603func StatsAggregatorHandleContainer(value string) StatsAggregatorHandleAttr {
39604	return func(m optionalAttr) {
39605		m["container"] = value
39606	}
39607}
39608
39609// StatsAggregatorHandleSharedName sets the optional shared_name attribute to value.
39610// If not specified, defaults to ""
39611func StatsAggregatorHandleSharedName(value string) StatsAggregatorHandleAttr {
39612	return func(m optionalAttr) {
39613		m["shared_name"] = value
39614	}
39615}
39616
39617// Creates a statistics manager resource.
39618func StatsAggregatorHandle(scope *Scope, optional ...StatsAggregatorHandleAttr) (handle tf.Output) {
39619	if scope.Err() != nil {
39620		return
39621	}
39622	attrs := map[string]interface{}{}
39623	for _, a := range optional {
39624		a(attrs)
39625	}
39626	opspec := tf.OpSpec{
39627		Type: "StatsAggregatorHandle",
39628
39629		Attrs: attrs,
39630	}
39631	op := scope.AddOperation(opspec)
39632	return op.Output(0)
39633}
39634
39635// EuclideanNormAttr is an optional argument to EuclideanNorm.
39636type EuclideanNormAttr func(optionalAttr)
39637
39638// EuclideanNormKeepDims sets the optional keep_dims attribute to value.
39639//
39640// value: If true, retain reduced dimensions with length 1.
39641// If not specified, defaults to false
39642func EuclideanNormKeepDims(value bool) EuclideanNormAttr {
39643	return func(m optionalAttr) {
39644		m["keep_dims"] = value
39645	}
39646}
39647
39648// Computes the euclidean norm of elements across dimensions of a tensor.
39649//
39650// Reduces `input` along the dimensions given in `axis`. Unless
39651// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
39652// `axis`. If `keep_dims` is true, the reduced dimensions are
39653// retained with length 1.
39654//
39655// Arguments:
39656//	input: The tensor to reduce.
39657//	axis: The dimensions to reduce. Must be in the range
39658// `[-rank(input), rank(input))`.
39659//
39660// Returns The reduced tensor.
39661func EuclideanNorm(scope *Scope, input tf.Output, axis tf.Output, optional ...EuclideanNormAttr) (output tf.Output) {
39662	if scope.Err() != nil {
39663		return
39664	}
39665	attrs := map[string]interface{}{}
39666	for _, a := range optional {
39667		a(attrs)
39668	}
39669	opspec := tf.OpSpec{
39670		Type: "EuclideanNorm",
39671		Input: []tf.Input{
39672			input, axis,
39673		},
39674		Attrs: attrs,
39675	}
39676	op := scope.AddOperation(opspec)
39677	return op.Output(0)
39678}
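
// A minimal usage sketch (illustrative values):
//
//	root := op.NewScope()
//	x := op.Const(root, [][]float32{{1, 2}, {3, 4}})
//	axis := op.Const(root, int32(0))
//	norm := op.EuclideanNorm(root, x, axis)
//	// norm evaluates to [sqrt(1+9), sqrt(4+16)] ~= [3.162, 4.472]; with
//	// op.EuclideanNormKeepDims(true) the result would keep shape [1, 2].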
39679
39680// Encode audio data using the WAV file format.
39681//
39682// This operation will generate a string suitable to be saved out to create a .wav
39683// audio file. It will be encoded in the 16-bit PCM format. It takes in float
39684// values in the range -1.0f to 1.0f, and any values outside that range will be
39685// clamped to it.
39686//
39687// `audio` is a 2-D float Tensor of shape `[length, channels]`.
39688// `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).
39689//
39690// Arguments:
39691//	audio: 2-D with shape `[length, channels]`.
39692//	sample_rate: Scalar containing the sample frequency.
39693//
39694// Returns 0-D. WAV-encoded file contents.
39695func EncodeWav(scope *Scope, audio tf.Output, sample_rate tf.Output) (contents tf.Output) {
39696	if scope.Err() != nil {
39697		return
39698	}
39699	opspec := tf.OpSpec{
39700		Type: "EncodeWav",
39701		Input: []tf.Input{
39702			audio, sample_rate,
39703		},
39704	}
39705	op := scope.AddOperation(opspec)
39706	return op.Output(0)
39707}
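
// A minimal usage sketch (illustrative; the audio samples are arbitrary):
//
//	root := op.NewScope()
//	audio := op.Const(root, [][]float32{{0.0}, {0.5}, {-0.5}}) // [length=3, channels=1]
//	rate := op.Const(root, int32(44100))
//	wav := op.EncodeWav(root, audio, rate)
//	// wav is a scalar string tensor holding the 16-bit PCM WAV file bytes.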
39708
39709// MaxPoolGradV2Attr is an optional argument to MaxPoolGradV2.
39710type MaxPoolGradV2Attr func(optionalAttr)
39711
39712// MaxPoolGradV2DataFormat sets the optional data_format attribute to value.
39713//
39714// value: Specify the data format of the input and output data. With the
39715// default format "NHWC", the data is stored in the order of:
39716//     [batch, in_height, in_width, in_channels].
39717// Alternatively, the format could be "NCHW", the data storage order of:
39718//     [batch, in_channels, in_height, in_width].
39719// If not specified, defaults to "NHWC"
39720func MaxPoolGradV2DataFormat(value string) MaxPoolGradV2Attr {
39721	return func(m optionalAttr) {
39722		m["data_format"] = value
39723	}
39724}
39725
39726// Computes gradients of the maxpooling function.
39727//
39728// Arguments:
39729//	orig_input: The original input tensor.
39730//	orig_output: The original output tensor.
39731//	grad: 4-D.  Gradients w.r.t. the output of `max_pool`.
39732//	ksize: The size of the window for each dimension of the input tensor.
39733//	strides: The stride of the sliding window for each dimension of the
39734// input tensor.
39735//	padding: The type of padding algorithm to use.
39736//
39737// Returns Gradients w.r.t. the input to `max_pool`.
39738func MaxPoolGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradV2Attr) (output tf.Output) {
39739	if scope.Err() != nil {
39740		return
39741	}
39742	attrs := map[string]interface{}{"padding": padding}
39743	for _, a := range optional {
39744		a(attrs)
39745	}
39746	opspec := tf.OpSpec{
39747		Type: "MaxPoolGradV2",
39748		Input: []tf.Input{
39749			orig_input, orig_output, grad, ksize, strides,
39750		},
39751		Attrs: attrs,
39752	}
39753	op := scope.AddOperation(opspec)
39754	return op.Output(0)
39755}
39756
39757// Takes the packed uint32 input and unpacks the input to uint8 to do
39758//
39759// dequantization on device.
39760//
39761// Arguments:
39762//	input: Input tensors whose type is uint32, shape is [d0, ..., dn].
39763//	min_range: The minimum scalar value possibly produced for the input.
39764//	max_range: The maximum scalar value possibly produced for the input.
39765//	mode: String to determine the dequantize mode in {"MIN_COMBINED", "MIN_FIRST", "SCALED"}.
39766//	transpose_output: Boolean to determine if output is transposed. transpose_output
39767// is faster when input is large and rank of input is higher than 1.
39768//
39769// Returns Output tensors whose type is bfloat16. If transpose_output is true,
39770// output shape is [dn * 4, dn-1, ..., d1, d0]. If transpose_output
39771// is false, output shape is [d0, ..., dn * 4].
39772func XlaDequantize(scope *Scope, input tf.Output, min_range float32, max_range float32, mode string, transpose_output bool) (output tf.Output) {
39773	if scope.Err() != nil {
39774		return
39775	}
39776	attrs := map[string]interface{}{"min_range": min_range, "max_range": max_range, "mode": mode, "transpose_output": transpose_output}
39777	opspec := tf.OpSpec{
39778		Type: "XlaDequantize",
39779		Input: []tf.Input{
39780			input,
39781		},
39782		Attrs: attrs,
39783	}
39784	op := scope.AddOperation(opspec)
39785	return op.Output(0)
39786}
39787
39788// DenseCountSparseOutputAttr is an optional argument to DenseCountSparseOutput.
39789type DenseCountSparseOutputAttr func(optionalAttr)
39790
39791// DenseCountSparseOutputMinlength sets the optional minlength attribute to value.
39792//
39793// value: Minimum value to count. Can be set to -1 for no minimum.
39794// If not specified, defaults to -1
39795//
39796// REQUIRES: value >= -1
39797func DenseCountSparseOutputMinlength(value int64) DenseCountSparseOutputAttr {
39798	return func(m optionalAttr) {
39799		m["minlength"] = value
39800	}
39801}
39802
39803// DenseCountSparseOutputMaxlength sets the optional maxlength attribute to value.
39804//
39805// value: Maximum value to count. Can be set to -1 for no maximum.
39806// If not specified, defaults to -1
39807//
39808// REQUIRES: value >= -1
39809func DenseCountSparseOutputMaxlength(value int64) DenseCountSparseOutputAttr {
39810	return func(m optionalAttr) {
39811		m["maxlength"] = value
39812	}
39813}
39814
39815// Performs sparse-output bin counting for a tf.tensor input.
39816//
39817//   Counts the number of times each value occurs in the input.
39818//
39819// Arguments:
39820//	values: Tensor containing data to count.
39821//	weights: A Tensor of the same shape as indices containing per-index weight values. May
39822// also be the empty tensor if no weights are used.
39823//	binary_output: Whether to output the number of occurrences of each value or 1.
39824//
39825// Returns:
39826//	output_indices: Indices tensor for the resulting sparse tensor object.
39827//	output_values: Values tensor for the resulting sparse tensor object.
39828//	output_dense_shape: Shape tensor for the resulting sparse tensor object.
39829func DenseCountSparseOutput(scope *Scope, values tf.Output, weights tf.Output, binary_output bool, optional ...DenseCountSparseOutputAttr) (output_indices tf.Output, output_values tf.Output, output_dense_shape tf.Output) {
39830	if scope.Err() != nil {
39831		return
39832	}
39833	attrs := map[string]interface{}{"binary_output": binary_output}
39834	for _, a := range optional {
39835		a(attrs)
39836	}
39837	opspec := tf.OpSpec{
39838		Type: "DenseCountSparseOutput",
39839		Input: []tf.Input{
39840			values, weights,
39841		},
39842		Attrs: attrs,
39843	}
39844	op := scope.AddOperation(opspec)
39845	return op.Output(0), op.Output(1), op.Output(2)
39846}
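
// A worked example (illustrative): for values = [1, 2, 2, 3], an empty weights
// tensor, and binary_output = false, the op returns the sparse form of the
// per-value count vector:
//
//	output_indices     = [[1], [2], [3]]
//	output_values      = [1, 2, 1]
//	output_dense_shape = [4]   // max observed value + 1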
39847
39848// Makes a copy of `x`.
39849//
39850// Arguments:
39851//	x: The source tensor of type `T`.
39852//
39853// Returns y: A `Tensor` of type `T`. A copy of `x`. Guaranteed that `y`
39854// is not an alias of `x`.
39855func DeepCopy(scope *Scope, x tf.Output) (y tf.Output) {
39856	if scope.Err() != nil {
39857		return
39858	}
39859	opspec := tf.OpSpec{
39860		Type: "DeepCopy",
39861		Input: []tf.Input{
39862			x,
39863		},
39864	}
39865	op := scope.AddOperation(opspec)
39866	return op.Output(0)
39867}
39868
39869// WriteAudioSummaryAttr is an optional argument to WriteAudioSummary.
39870type WriteAudioSummaryAttr func(optionalAttr)
39871
39872// WriteAudioSummaryMaxOutputs sets the optional max_outputs attribute to value.
39873// If not specified, defaults to 3
39874//
39875// REQUIRES: value >= 1
39876func WriteAudioSummaryMaxOutputs(value int64) WriteAudioSummaryAttr {
39877	return func(m optionalAttr) {
39878		m["max_outputs"] = value
39879	}
39880}
39881
39882// Writes an audio summary.
39883//
39884// Writes encoded audio summary `tensor` at `step` with `tag` using summary `writer`.
39885// `sample_rate` is the audio sample rate in Hz.
39886//
39887// Returns the created operation.
39888func WriteAudioSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...WriteAudioSummaryAttr) (o *tf.Operation) {
39889	if scope.Err() != nil {
39890		return
39891	}
39892	attrs := map[string]interface{}{}
39893	for _, a := range optional {
39894		a(attrs)
39895	}
39896	opspec := tf.OpSpec{
39897		Type: "WriteAudioSummary",
39898		Input: []tf.Input{
39899			writer, step, tag, tensor, sample_rate,
39900		},
39901		Attrs: attrs,
39902	}
39903	return scope.AddOperation(opspec)
39904}
39905
39906// IteratorFromStringHandleAttr is an optional argument to IteratorFromStringHandle.
39907type IteratorFromStringHandleAttr func(optionalAttr)
39908
39909// IteratorFromStringHandleOutputTypes sets the optional output_types attribute to value.
39910//
39911// value: If specified, defines the type of each tuple component in an
39912// element produced by the resulting iterator.
39913// If not specified, defaults to {}
39914//
39915// REQUIRES: len(value) >= 0
39916func IteratorFromStringHandleOutputTypes(value []tf.DataType) IteratorFromStringHandleAttr {
39917	return func(m optionalAttr) {
39918		m["output_types"] = value
39919	}
39920}
39921
39922// IteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value.
39923//
39924// value: If specified, defines the shape of each tuple component in an
39925// element produced by the resulting iterator.
39926// If not specified, defaults to {}
39927//
39928// REQUIRES: len(value) >= 0
39929func IteratorFromStringHandleOutputShapes(value []tf.Shape) IteratorFromStringHandleAttr {
39930	return func(m optionalAttr) {
39931		m["output_shapes"] = value
39932	}
39933}
39934
39935// Converts the given string representing a handle to an iterator to a resource.
39936//
39937// Arguments:
39938//	string_handle: A string representation of the given handle.
39939//
39940// Returns A handle to an iterator resource.
39941func IteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...IteratorFromStringHandleAttr) (resource_handle tf.Output) {
39942	if scope.Err() != nil {
39943		return
39944	}
39945	attrs := map[string]interface{}{}
39946	for _, a := range optional {
39947		a(attrs)
39948	}
39949	opspec := tf.OpSpec{
39950		Type: "IteratorFromStringHandle",
39951		Input: []tf.Input{
39952			string_handle,
39953		},
39954		Attrs: attrs,
39955	}
39956	op := scope.AddOperation(opspec)
39957	return op.Output(0)
39958}
39959
39960// Checks whether a resource handle-based variable has been initialized.
39961//
39962// Arguments:
39963//	resource: the input resource handle.
39964//
39965// Returns a scalar boolean which is true if the variable has been
39966// initialized.
39967func VarIsInitializedOp(scope *Scope, resource tf.Output) (is_initialized tf.Output) {
39968	if scope.Err() != nil {
39969		return
39970	}
39971	opspec := tf.OpSpec{
39972		Type: "VarIsInitializedOp",
39973		Input: []tf.Input{
39974			resource,
39975		},
39976	}
39977	op := scope.AddOperation(opspec)
39978	return op.Output(0)
39979}
39980
39981// FakeQuantWithMinMaxVarsPerChannelAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannel.
39982type FakeQuantWithMinMaxVarsPerChannelAttr func(optionalAttr)
39983
39984// FakeQuantWithMinMaxVarsPerChannelNumBits sets the optional num_bits attribute to value.
39985// If not specified, defaults to 8
39986func FakeQuantWithMinMaxVarsPerChannelNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelAttr {
39987	return func(m optionalAttr) {
39988		m["num_bits"] = value
39989	}
39990}
39991
39992// FakeQuantWithMinMaxVarsPerChannelNarrowRange sets the optional narrow_range attribute to value.
39993// If not specified, defaults to false
39994func FakeQuantWithMinMaxVarsPerChannelNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelAttr {
39995	return func(m optionalAttr) {
39996		m["narrow_range"] = value
39997	}
39998}
39999
40000// Fake-quantize the 'inputs' tensor of type float via per-channel floats
40001//
40002// Fake-quantize the `inputs` tensor of type float per-channel and one of the
40003// shapes: `[d]`, `[b, d]`, or `[b, h, w, d]` via per-channel floats `min` and `max`
40004// of shape `[d]` to `outputs` tensor of same shape as `inputs`.
40005//
40006// Attributes
40007//
40008// *   `[min; max]` define the clamping range for the `inputs` data.
40009// *   `inputs` values are quantized into the quantization range (
40010// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
40011// when it is true) and then de-quantized and output as floats in `[min; max]`
40012// interval.
40013// *   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
40014//
40015// Before quantization, `min` and `max` values are adjusted with the following
40016// logic.
40017// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
40018// the behavior can be unexpected:
40019//
40020// *   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
40021// *   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
40022// *   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
40023// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
40024//
40025// This operation has a gradient and thus allows for training `min` and `max`
40026// values.
40027func FakeQuantWithMinMaxVarsPerChannel(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelAttr) (outputs tf.Output) {
40028	if scope.Err() != nil {
40029		return
40030	}
40031	attrs := map[string]interface{}{}
40032	for _, a := range optional {
40033		a(attrs)
40034	}
40035	opspec := tf.OpSpec{
40036		Type: "FakeQuantWithMinMaxVarsPerChannel",
40037		Input: []tf.Input{
40038			inputs, min, max,
40039		},
40040		Attrs: attrs,
40041	}
40042	op := scope.AddOperation(opspec)
40043	return op.Output(0)
40044}
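
// A worked example of the range adjustment above (illustrative): for a channel
// with min = 0.1 and max = 0.9 (the `0 < min < max` case), the adjusted range
// is min_adj = 0 and max_adj = max - min = 0.8, so quantization happens over
// [0, 0.8] rather than [0.1, 0.9].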
40045
40046// NonMaxSuppressionV4Attr is an optional argument to NonMaxSuppressionV4.
40047type NonMaxSuppressionV4Attr func(optionalAttr)
40048
40049// NonMaxSuppressionV4PadToMaxOutputSize sets the optional pad_to_max_output_size attribute to value.
40050//
40051// value: If true, the output `selected_indices` is padded to be of length
40052// `max_output_size`. Defaults to false.
40053// If not specified, defaults to false
40054func NonMaxSuppressionV4PadToMaxOutputSize(value bool) NonMaxSuppressionV4Attr {
40055	return func(m optionalAttr) {
40056		m["pad_to_max_output_size"] = value
40057	}
40058}
40059
40060// Greedily selects a subset of bounding boxes in descending order of score,
40061//
40062// pruning away boxes that have high intersection-over-union (IOU) overlap
40063// with previously selected boxes.  Bounding boxes with score less than
40064// `score_threshold` are removed.  Bounding boxes are supplied as
40065// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
40066// diagonal pair of box corners and the coordinates can be provided as normalized
40067// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
40068// is agnostic to where the origin is in the coordinate system and more
40069// generally is invariant to orthogonal transformations and translations
40070// of the coordinate system; thus translations or reflections of the coordinate
40071// system result in the same boxes being selected by the algorithm.
40072// The output of this operation is a set of integers indexing into the input
40073// collection of bounding boxes representing the selected boxes.  The bounding
40074// box coordinates corresponding to the selected indices can then be obtained
40075// using the `tf.gather` operation.  For example:
40076//   selected_indices = tf.image.non_max_suppression_v2(
40077//       boxes, scores, max_output_size, iou_threshold, score_threshold)
40078//   selected_boxes = tf.gather(boxes, selected_indices)
40079//
40080// Arguments:
40081//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
40082//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
40083// score corresponding to each box (each row of boxes).
40084//	max_output_size: A scalar integer tensor representing the maximum number of
40085// boxes to be selected by non max suppression.
40086//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
40087// boxes overlap too much with respect to IOU.
40088//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
40089// boxes based on score.
40090//
40091// Returns:
40092//	selected_indices: A 1-D integer tensor of shape `[M]` representing the selected
40093// indices from the boxes tensor, where `M <= max_output_size`.
40094//	valid_outputs: A 0-D integer tensor representing the number of valid elements in
40095// `selected_indices`, with the valid elements appearing first.
40096func NonMaxSuppressionV4(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, optional ...NonMaxSuppressionV4Attr) (selected_indices tf.Output, valid_outputs tf.Output) {
40097	if scope.Err() != nil {
40098		return
40099	}
40100	attrs := map[string]interface{}{}
40101	for _, a := range optional {
40102		a(attrs)
40103	}
40104	opspec := tf.OpSpec{
40105		Type: "NonMaxSuppressionV4",
40106		Input: []tf.Input{
40107			boxes, scores, max_output_size, iou_threshold, score_threshold,
40108		},
40109		Attrs: attrs,
40110	}
40111	op := scope.AddOperation(opspec)
40112	return op.Output(0), op.Output(1)
40113}
40114
40115// CudnnRNNBackpropAttr is an optional argument to CudnnRNNBackprop.
40116type CudnnRNNBackpropAttr func(optionalAttr)
40117
40118// CudnnRNNBackpropRnnMode sets the optional rnn_mode attribute to value.
40119// If not specified, defaults to "lstm"
40120func CudnnRNNBackpropRnnMode(value string) CudnnRNNBackpropAttr {
40121	return func(m optionalAttr) {
40122		m["rnn_mode"] = value
40123	}
40124}
40125
40126// CudnnRNNBackpropInputMode sets the optional input_mode attribute to value.
40127// If not specified, defaults to "linear_input"
40128func CudnnRNNBackpropInputMode(value string) CudnnRNNBackpropAttr {
40129	return func(m optionalAttr) {
40130		m["input_mode"] = value
40131	}
40132}
40133
40134// CudnnRNNBackpropDirection sets the optional direction attribute to value.
40135// If not specified, defaults to "unidirectional"
40136func CudnnRNNBackpropDirection(value string) CudnnRNNBackpropAttr {
40137	return func(m optionalAttr) {
40138		m["direction"] = value
40139	}
40140}
40141
40142// CudnnRNNBackpropDropout sets the optional dropout attribute to value.
40143// If not specified, defaults to 0
40144func CudnnRNNBackpropDropout(value float32) CudnnRNNBackpropAttr {
40145	return func(m optionalAttr) {
40146		m["dropout"] = value
40147	}
40148}
40149
40150// CudnnRNNBackpropSeed sets the optional seed attribute to value.
40151// If not specified, defaults to 0
40152func CudnnRNNBackpropSeed(value int64) CudnnRNNBackpropAttr {
40153	return func(m optionalAttr) {
40154		m["seed"] = value
40155	}
40156}
40157
40158// CudnnRNNBackpropSeed2 sets the optional seed2 attribute to value.
40159// If not specified, defaults to 0
40160func CudnnRNNBackpropSeed2(value int64) CudnnRNNBackpropAttr {
40161	return func(m optionalAttr) {
40162		m["seed2"] = value
40163	}
40164}
40165
40166// Backprop step of CudnnRNN.
40167//
40168// Compute the backprop of both data and weights in an RNN.
40169//
40170// rnn_mode: Indicates the type of the RNN model.
40171// input_mode: Indicate whether there is a linear projection between the input and
40172//     the actual computation before the first layer. 'skip_input' is only allowed
40173//     when input_size == num_units; 'auto_select' implies 'skip_input' when
40174//     input_size == num_units; otherwise, it implies 'linear_input'.
40175// direction: Indicates whether a bidirectional model will be used. Should be
40176//   "unidirectional" or "bidirectional".
40177// dropout: Dropout probability. When set to 0., dropout is disabled.
40178// seed: The 1st part of a seed to initialize dropout.
40179// seed2: The 2nd part of a seed to initialize dropout.
40180// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
40181// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
40182//     num_units].
40183// input_c: For LSTM, a 3-D tensor with the shape of
40184//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
40185// params: A 1-D tensor that contains the weights and biases in an opaque layout.
40186//     The size must be created through CudnnRNNParamsSize, and initialized
40187//     separately. Note that they might not be compatible across different
40188//     generations. So it is a good idea to save and restore
40189// output: A 3-D tensor with the shape of [seq_length, batch_size,
40190//     dir * num_units].
40191// output_h: The same shape as input_h.
40192// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
40193// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
40194// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
40195//     pass.
40196// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
40197//     pass.
40198// reserve_space: The same reserve_space produced in the forward operation.
40199// input_backprop: The backprop to input in the forward pass. Has the same shape
40200//     as input.
40201// input_h_backprop: The backprop to input_h in the forward pass. Has the same
40202//     shape as input_h.
40203// input_c_backprop: The backprop to input_c in the forward pass. Has the same
40204//     shape as input_c.
40205// params_backprop: The backprop to the params buffer in the forward pass. Has the
40206//     same shape as params.
40207func CudnnRNNBackprop(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, optional ...CudnnRNNBackpropAttr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output) {
40208	if scope.Err() != nil {
40209		return
40210	}
40211	attrs := map[string]interface{}{}
40212	for _, a := range optional {
40213		a(attrs)
40214	}
40215	opspec := tf.OpSpec{
40216		Type: "CudnnRNNBackprop",
40217		Input: []tf.Input{
40218			input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space,
40219		},
40220		Attrs: attrs,
40221	}
40222	op := scope.AddOperation(opspec)
40223	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
40224}
40225
40226// TPUReplicatedInputAttr is an optional argument to TPUReplicatedInput.
40227type TPUReplicatedInputAttr func(optionalAttr)
40228
40229// TPUReplicatedInputIsMirroredVariable sets the optional is_mirrored_variable attribute to value.
40230// If not specified, defaults to false
40231func TPUReplicatedInputIsMirroredVariable(value bool) TPUReplicatedInputAttr {
40232	return func(m optionalAttr) {
40233		m["is_mirrored_variable"] = value
40234	}
40235}
40236
40237// TPUReplicatedInputIndex sets the optional index attribute to value.
40238// If not specified, defaults to -1
40239func TPUReplicatedInputIndex(value int64) TPUReplicatedInputAttr {
40240	return func(m optionalAttr) {
40241		m["index"] = value
40242	}
40243}
40244
40245// TPUReplicatedInputIsPacked sets the optional is_packed attribute to value.
40246// If not specified, defaults to false
40247func TPUReplicatedInputIsPacked(value bool) TPUReplicatedInputAttr {
40248	return func(m optionalAttr) {
40249		m["is_packed"] = value
40250	}
40251}
40252
40253// Connects N inputs to an N-way replicated TPU computation.
40254//
40255// This operation holds a replicated input to a `tpu.replicate()` computation subgraph.
40256// Each replicated input has the same shape and type as the output.
40257//
40258// For example:
40259// ```
40260// %a = "tf.opA"()
40261// %b = "tf.opB"()
40262// %replicated_input = "tf.TPUReplicatedInput"(%a, %b)
40263// %computation = "tf.Computation"(%replicated_input)
40264// ```
40265// The above computation has a replicated input of two replicas.
40266func TPUReplicatedInput(scope *Scope, inputs []tf.Output, optional ...TPUReplicatedInputAttr) (output tf.Output) {
40267	if scope.Err() != nil {
40268		return
40269	}
40270	attrs := map[string]interface{}{}
40271	for _, a := range optional {
40272		a(attrs)
40273	}
40274	opspec := tf.OpSpec{
40275		Type: "TPUReplicatedInput",
40276		Input: []tf.Input{
40277			tf.OutputList(inputs),
40278		},
40279		Attrs: attrs,
40280	}
40281	op := scope.AddOperation(opspec)
40282	return op.Output(0)
40283}
40284
40285// SizeAttr is an optional argument to Size.
40286type SizeAttr func(optionalAttr)
40287
40288// SizeOutType sets the optional out_type attribute to value.
40289// If not specified, defaults to DT_INT32
40290func SizeOutType(value tf.DataType) SizeAttr {
40291	return func(m optionalAttr) {
40292		m["out_type"] = value
40293	}
40294}
40295
40296// Returns the size of a tensor.
40297//
40298// This operation returns an integer representing the number of elements in
40299// `input`.
40300//
40301// For example:
40302//
40303// ```
40304// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
40305// size(t) ==> 12
40306// ```
40307func Size(scope *Scope, input tf.Output, optional ...SizeAttr) (output tf.Output) {
40308	if scope.Err() != nil {
40309		return
40310	}
40311	attrs := map[string]interface{}{}
40312	for _, a := range optional {
40313		a(attrs)
40314	}
40315	opspec := tf.OpSpec{
40316		Type: "Size",
40317		Input: []tf.Input{
40318			input,
40319		},
40320		Attrs: attrs,
40321	}
40322	op := scope.AddOperation(opspec)
40323	return op.Output(0)
40324}
40325
40326// FakeQuantWithMinMaxVarsGradientAttr is an optional argument to FakeQuantWithMinMaxVarsGradient.
40327type FakeQuantWithMinMaxVarsGradientAttr func(optionalAttr)
40328
40329// FakeQuantWithMinMaxVarsGradientNumBits sets the optional num_bits attribute to value.
40330//
40331// value: The bitwidth of the quantization; between 2 and 8, inclusive.
40332// If not specified, defaults to 8
40333func FakeQuantWithMinMaxVarsGradientNumBits(value int64) FakeQuantWithMinMaxVarsGradientAttr {
40334	return func(m optionalAttr) {
40335		m["num_bits"] = value
40336	}
40337}
40338
40339// FakeQuantWithMinMaxVarsGradientNarrowRange sets the optional narrow_range attribute to value.
40340//
40341// value: Whether to quantize into 2^num_bits - 1 distinct values.
40342// If not specified, defaults to false
40343func FakeQuantWithMinMaxVarsGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsGradientAttr {
40344	return func(m optionalAttr) {
40345		m["narrow_range"] = value
40346	}
40347}
40348
40349// Compute gradients for a FakeQuantWithMinMaxVars operation.
40350//
40351// Arguments:
40352//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
40353//	inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation.
40354//	min, max: Quantization interval, scalar floats.
40355//
40356//
40357//
40358// Returns:
40359//	backprops_wrt_input: Backpropagated gradients w.r.t. inputs:
40360// `gradients * (inputs >= min && inputs <= max)`.
40361//	backprop_wrt_min: Backpropagated gradients w.r.t. min parameter:
40362// `sum(gradients * (inputs < min))`.
40363//	backprop_wrt_max: Backpropagated gradients w.r.t. max parameter:
40364// `sum(gradients * (inputs > max))`.
40365func FakeQuantWithMinMaxVarsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
40366	if scope.Err() != nil {
40367		return
40368	}
40369	attrs := map[string]interface{}{}
40370	for _, a := range optional {
40371		a(attrs)
40372	}
40373	opspec := tf.OpSpec{
40374		Type: "FakeQuantWithMinMaxVarsGradient",
40375		Input: []tf.Input{
40376			gradients, inputs, min, max,
40377		},
40378		Attrs: attrs,
40379	}
40380	op := scope.AddOperation(opspec)
40381	return op.Output(0), op.Output(1), op.Output(2)
40382}
40383
40384// Aggregates the summary of accumulated stats for the batch.
40385//
40386// The summary stats contains gradients and hessians accumulated for each node, feature dimension id and bucket.
40387//
40388// Arguments:
40389//	node_ids: int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].
40390//	gradients: float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.
40391//	hessians: float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.
40392//	feature: int32; Rank 2 feature Tensors (shape=[batch_size, feature_dimension]).
40393//	max_splits: int; the maximum number of splits possible in the whole tree.
40394//	num_buckets: int; equals to the maximum possible value of bucketized feature.
40395//
40396// Returns output Rank 4 Tensor (shape=[splits, feature_dimension, buckets, logits_dimension + hessian_dimension])
40397// containing accumulated stats for each node, feature dimension and bucket.
40398func BoostedTreesAggregateStats(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, feature tf.Output, max_splits int64, num_buckets int64) (stats_summary tf.Output) {
40399	if scope.Err() != nil {
40400		return
40401	}
40402	attrs := map[string]interface{}{"max_splits": max_splits, "num_buckets": num_buckets}
40403	opspec := tf.OpSpec{
40404		Type: "BoostedTreesAggregateStats",
40405		Input: []tf.Input{
40406			node_ids, gradients, hessians, feature,
40407		},
40408		Attrs: attrs,
40409	}
40410	op := scope.AddOperation(opspec)
40411	return op.Output(0)
40412}
40413
40414// Return a tensor with the same shape and contents as the input tensor or value.
40415func Identity(scope *Scope, input tf.Output) (output tf.Output) {
40416	if scope.Err() != nil {
40417		return
40418	}
40419	opspec := tf.OpSpec{
40420		Type: "Identity",
40421		Input: []tf.Input{
40422			input,
40423		},
40424	}
40425	op := scope.AddOperation(opspec)
40426	return op.Output(0)
40427}
40428
40429// Outputs a `Summary` protocol buffer with scalar values.
40430//
40431// The input `tags` and `values` must have the same shape.  The generated summary
40432// has a summary value for each tag-value pair in `tags` and `values`.
40433//
40434// Arguments:
40435//	tags: Tags for the summary.
40436//	values: Same shape as `tags`.  Values for the summary.
40437//
40438// Returns Scalar.  Serialized `Summary` protocol buffer.
40439func ScalarSummary(scope *Scope, tags tf.Output, values tf.Output) (summary tf.Output) {
40440	if scope.Err() != nil {
40441		return
40442	}
40443	opspec := tf.OpSpec{
40444		Type: "ScalarSummary",
40445		Input: []tf.Input{
40446			tags, values,
40447		},
40448	}
40449	op := scope.AddOperation(opspec)
40450	return op.Output(0)
40451}
40452
40453// Store the input tensor in the state of the current session.
40454//
40455// Arguments:
40456//	value: The tensor to be stored.
40457//
40458// Returns The handle for the tensor stored in the session state, represented
40459// as a string.
40460func GetSessionHandle(scope *Scope, value tf.Output) (handle tf.Output) {
40461	if scope.Err() != nil {
40462		return
40463	}
40464	opspec := tf.OpSpec{
40465		Type: "GetSessionHandle",
40466		Input: []tf.Input{
40467			value,
40468		},
40469	}
40470	op := scope.AddOperation(opspec)
40471	return op.Output(0)
40472}
40473
40474// TensorListConcatAttr is an optional argument to TensorListConcat.
40475type TensorListConcatAttr func(optionalAttr)
40476
40477// TensorListConcatElementShape sets the optional element_shape attribute to value.
40478// If not specified, defaults to {unknown_rank:true}
40479func TensorListConcatElementShape(value tf.Shape) TensorListConcatAttr {
40480	return func(m optionalAttr) {
40481		m["element_shape"] = value
40482	}
40483}
40484
40485// Concats all tensors in the list along the 0th dimension.
40486//
40487// Requires that all tensors have the same shape except the first dimension.
40488//
40489// input_handle: The input list.
40490// tensor: The concatenated result.
40491// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
40492//
40493func TensorListConcat(scope *Scope, input_handle tf.Output, element_dtype tf.DataType, optional ...TensorListConcatAttr) (tensor tf.Output, lengths tf.Output) {
40494	if scope.Err() != nil {
40495		return
40496	}
40497	attrs := map[string]interface{}{"element_dtype": element_dtype}
40498	for _, a := range optional {
40499		a(attrs)
40500	}
40501	opspec := tf.OpSpec{
40502		Type: "TensorListConcat",
40503		Input: []tf.Input{
40504			input_handle,
40505		},
40506		Attrs: attrs,
40507	}
40508	op := scope.AddOperation(opspec)
40509	return op.Output(0), op.Output(1)
40510}
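
// A worked shape example (illustrative): concatenating a list holding tensors
// of shapes [2, 3] and [4, 3] yields `tensor` of shape [6, 3] and
// `lengths` = [2, 4], the sizes of each element's 0th dimension.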
40511
40512// PrefetchDatasetAttr is an optional argument to PrefetchDataset.
40513type PrefetchDatasetAttr func(optionalAttr)
40514
40515// PrefetchDatasetSlackPeriod sets the optional slack_period attribute to value.
40516// If not specified, defaults to 0
40517func PrefetchDatasetSlackPeriod(value int64) PrefetchDatasetAttr {
40518	return func(m optionalAttr) {
40519		m["slack_period"] = value
40520	}
40521}
40522
40523// PrefetchDatasetLegacyAutotune sets the optional legacy_autotune attribute to value.
40524// If not specified, defaults to true
40525func PrefetchDatasetLegacyAutotune(value bool) PrefetchDatasetAttr {
40526	return func(m optionalAttr) {
40527		m["legacy_autotune"] = value
40528	}
40529}
40530
40531// PrefetchDatasetBufferSizeMin sets the optional buffer_size_min attribute to value.
40532// If not specified, defaults to 0
40533func PrefetchDatasetBufferSizeMin(value int64) PrefetchDatasetAttr {
40534	return func(m optionalAttr) {
40535		m["buffer_size_min"] = value
40536	}
40537}
40538
40539// Creates a dataset that asynchronously prefetches elements from `input_dataset`.
40540//
40541// Arguments:
40542//
40543//	buffer_size: The maximum number of elements to buffer in an iterator over
40544// this dataset.
40545//
40546//
40547func PrefetchDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...PrefetchDatasetAttr) (handle tf.Output) {
40548	if scope.Err() != nil {
40549		return
40550	}
40551	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
40552	for _, a := range optional {
40553		a(attrs)
40554	}
40555	opspec := tf.OpSpec{
40556		Type: "PrefetchDataset",
40557		Input: []tf.Input{
40558			input_dataset, buffer_size,
40559		},
40560		Attrs: attrs,
40561	}
40562	op := scope.AddOperation(opspec)
40563	return op.Output(0)
40564}
40565
40566// Computes inverse hyperbolic cosine of x element-wise.
40567//
40568// Given an input tensor, the function computes inverse hyperbolic cosine of every element.
40569// Input range is `[1, inf]`. It returns `nan` if the input lies outside the range.
40570//
40571// ```python
40572// x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")])
40573// tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf]
40574// ```
40575func Acosh(scope *Scope, x tf.Output) (y tf.Output) {
40576	if scope.Err() != nil {
40577		return
40578	}
40579	opspec := tf.OpSpec{
40580		Type: "Acosh",
40581		Input: []tf.Input{
40582			x,
40583		},
40584	}
40585	op := scope.AddOperation(opspec)
40586	return op.Output(0)
40587}
40588
40589// Serializes the tree ensemble to a proto.
40590//
40591// Arguments:
40592//	tree_ensemble_handle: Handle to the tree ensemble.
40593//
40594// Returns:
40595//	stamp_token: Stamp token of the tree ensemble resource.
40596//	tree_ensemble_serialized: Serialized proto of the ensemble.
40597func BoostedTreesSerializeEnsemble(scope *Scope, tree_ensemble_handle tf.Output) (stamp_token tf.Output, tree_ensemble_serialized tf.Output) {
40598	if scope.Err() != nil {
40599		return
40600	}
40601	opspec := tf.OpSpec{
40602		Type: "BoostedTreesSerializeEnsemble",
40603		Input: []tf.Input{
40604			tree_ensemble_handle,
40605		},
40606	}
40607	op := scope.AddOperation(opspec)
40608	return op.Output(0), op.Output(1)
40609}
40610
40611// ImageProjectiveTransformV2Attr is an optional argument to ImageProjectiveTransformV2.
40612type ImageProjectiveTransformV2Attr func(optionalAttr)
40613
40614// ImageProjectiveTransformV2FillMode sets the optional fill_mode attribute to value.
40615//
40616// value: Fill mode, "REFLECT", "WRAP", or "CONSTANT".
40617// If not specified, defaults to "CONSTANT"
40618func ImageProjectiveTransformV2FillMode(value string) ImageProjectiveTransformV2Attr {
40619	return func(m optionalAttr) {
40620		m["fill_mode"] = value
40621	}
40622}
40623
40624// Applies the given transform to each of the images.
40625//
40626// If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps
40627// the *output* point `(x, y)` to a transformed *input* point
40628// `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
40629// `k = c0 x + c1 y + 1`. If the transformed point lies outside of the input
40630// image, the output pixel is set to 0.
40631//
40632// Arguments:
40633//	images: 4-D with shape `[batch, height, width, channels]`.
40634//	transforms: 2-D Tensor, `[batch, 8]` or `[1, 8]` matrix, where each row corresponds to a 3 x 3
40635// projective transformation matrix, with the last entry assumed to be 1. If there
40636// is one row, the same transformation will be applied to all images.
40637//	output_shape: 1-D Tensor [new_height, new_width].
40638//	interpolation: Interpolation method, "NEAREST" or "BILINEAR".
40639//
40640// Returns 4-D with shape
40641// `[batch, new_height, new_width, channels]`.
40642func ImageProjectiveTransformV2(scope *Scope, images tf.Output, transforms tf.Output, output_shape tf.Output, interpolation string, optional ...ImageProjectiveTransformV2Attr) (transformed_images tf.Output) {
40643	if scope.Err() != nil {
40644		return
40645	}
40646	attrs := map[string]interface{}{"interpolation": interpolation}
40647	for _, a := range optional {
40648		a(attrs)
40649	}
40650	opspec := tf.OpSpec{
40651		Type: "ImageProjectiveTransformV2",
40652		Input: []tf.Input{
40653			images, transforms, output_shape,
40654		},
40655		Attrs: attrs,
40656	}
40657	op := scope.AddOperation(opspec)
40658	return op.Output(0)
40659}
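
// A worked example of the mapping above (illustrative): the transform row
// [1, 0, dx, 0, 1, dy, 0, 0] has k = 1, so output point (x, y) samples the
// input at (x + dx, y + dy); that is, the image content shifts by (-dx, -dy).
// The identity transform is [1, 0, 0, 0, 1, 0, 0, 0].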
40660
40661// Checks whether a quantile stream has been initialized.
40662//
40663// An Op that checks if quantile stream resource is initialized.
40664//
40665// Arguments:
40666//	quantile_stream_resource_handle: resource; The reference to quantile stream resource handle.
40667//
40668// Returns bool; True if the resource is initialized, False otherwise.
40669func IsBoostedTreesQuantileStreamResourceInitialized(scope *Scope, quantile_stream_resource_handle tf.Output) (is_initialized tf.Output) {
40670	if scope.Err() != nil {
40671		return
40672	}
40673	opspec := tf.OpSpec{
40674		Type: "IsBoostedTreesQuantileStreamResourceInitialized",
40675		Input: []tf.Input{
40676			quantile_stream_resource_handle,
40677		},
40678	}
40679	op := scope.AddOperation(opspec)
40680	return op.Output(0)
40681}
40682
40683// ExperimentalAutoShardDatasetAttr is an optional argument to ExperimentalAutoShardDataset.
40684type ExperimentalAutoShardDatasetAttr func(optionalAttr)
40685
40686// ExperimentalAutoShardDatasetAutoShardPolicy sets the optional auto_shard_policy attribute to value.
40687// If not specified, defaults to 0
40688func ExperimentalAutoShardDatasetAutoShardPolicy(value int64) ExperimentalAutoShardDatasetAttr {
40689	return func(m optionalAttr) {
40690		m["auto_shard_policy"] = value
40691	}
40692}
40693
40694// Creates a dataset that shards the input dataset.
40695//
40696// Creates a dataset that shards the input dataset by num_workers, returning a
40697// sharded dataset for the index-th worker. This attempts to automatically shard
40698// a dataset by examining the Dataset graph and inserting a shard op before the
40699// inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset).
40700//
40701// This dataset will throw a NotFound error if we cannot shard the dataset
40702// automatically.
40703//
40704// Arguments:
40705//	input_dataset: A variant tensor representing the input dataset.
40706//	num_workers: A scalar representing the number of workers to distribute this dataset across.
40707//	index: A scalar representing the index of the current worker out of num_workers.
40708//
40709//
40710func ExperimentalAutoShardDataset(scope *Scope, input_dataset tf.Output, num_workers tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalAutoShardDatasetAttr) (handle tf.Output) {
40711	if scope.Err() != nil {
40712		return
40713	}
40714	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
40715	for _, a := range optional {
40716		a(attrs)
40717	}
40718	opspec := tf.OpSpec{
40719		Type: "ExperimentalAutoShardDataset",
40720		Input: []tf.Input{
40721			input_dataset, num_workers, index,
40722		},
40723		Attrs: attrs,
40724	}
40725	op := scope.AddOperation(opspec)
40726	return op.Output(0)
40727}
40728
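// Example usage (a hand-written sketch, not machine generated). It assumes the
// TFRecordDataset wrapper generated elsewhere in this package (its exact
// signature is an assumption here) and illustrative file names:
//
// ```go
// s := op.NewScope()
// files := op.Const(s, []string{"/tmp/train.tfrecord"})
// ds := op.TFRecordDataset(s, files, op.Const(s, ""), op.Const(s, int64(0)))
// sharded := op.ExperimentalAutoShardDataset(s,
// 	ds,
// 	op.Const(s, int64(4)), // num_workers
// 	op.Const(s, int64(1)), // index of this worker
// 	[]tf.DataType{tf.String},
// 	[]tf.Shape{tf.ScalarShape()})
// _ = sharded
// ```
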
40729// Generates a feature cross from a list of tensors, and returns it as a
40730// RaggedTensor.  See `tf.ragged.cross` for more details.
40731//
40732// Arguments:
40733//	ragged_values: The values tensor for each RaggedTensor input.
40734//	ragged_row_splits: The row_splits tensor for each RaggedTensor input.
40735//	sparse_indices: The indices tensor for each SparseTensor input.
40736//	sparse_values: The values tensor for each SparseTensor input.
40737//	sparse_shape: The dense_shape tensor for each SparseTensor input.
40738//	dense_inputs: The tf.Tensor inputs.
40739//	input_order: String specifying the tensor type for each input.  The `i`th character in
40740// this string specifies the type of the `i`th input, and is one of: 'R' (ragged),
40741// 'D' (dense), or 'S' (sparse).  This attr is used to ensure that the crossed
40742// values are combined in the order of the inputs from the call to tf.ragged.cross.
40743//	hashed_output: If true, the crossed values are hashed rather than combined as strings.
40744//	num_buckets: The number of hash buckets to use; 0 means the hashed values are not bucketed.
40745//	hash_key: The key used to seed the FingerprintCat64 hash function.
40746//	out_values_type: The data type of the `output_values` tensor.
40747//	out_row_splits_type: The data type of the `output_row_splits` tensor.
40748//
40749// Returns:
40750//	output_values: The `values` for the returned `RaggedTensor`.
40751//	output_row_splits: The `row_splits` for the returned `RaggedTensor`.
40752func RaggedCross(scope *Scope, ragged_values []tf.Output, ragged_row_splits []tf.Output, sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shape []tf.Output, dense_inputs []tf.Output, input_order string, hashed_output bool, num_buckets int64, hash_key int64, out_values_type tf.DataType, out_row_splits_type tf.DataType) (output_values tf.Output, output_row_splits tf.Output) {
40753	if scope.Err() != nil {
40754		return
40755	}
40756	attrs := map[string]interface{}{"input_order": input_order, "hashed_output": hashed_output, "num_buckets": num_buckets, "hash_key": hash_key, "out_values_type": out_values_type, "out_row_splits_type": out_row_splits_type}
40757	opspec := tf.OpSpec{
40758		Type: "RaggedCross",
40759		Input: []tf.Input{
40760			tf.OutputList(ragged_values), tf.OutputList(ragged_row_splits), tf.OutputList(sparse_indices), tf.OutputList(sparse_values), tf.OutputList(sparse_shape), tf.OutputList(dense_inputs),
40761		},
40762		Attrs: attrs,
40763	}
40764	op := scope.AddOperation(opspec)
40765	return op.Output(0), op.Output(1)
40766}
40767
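// Example usage (a hand-written sketch, not machine generated): crossing one
// ragged input with one dense input. The attribute values are illustrative,
// with hashing disabled:
//
// ```go
// s := op.NewScope()
// vals := op.Const(s, []string{"a", "b", "c"})   // ragged values
// splits := op.Const(s, []int64{0, 2, 3})        // ragged row_splits: [[a, b], [c]]
// dense := op.Const(s, [][]string{{"x"}, {"y"}}) // one dense input, 2 rows
// none := []tf.Output{}                          // no sparse inputs
// outVals, outSplits := op.RaggedCross(s,
// 	[]tf.Output{vals}, []tf.Output{splits},
// 	none, none, none,
// 	[]tf.Output{dense},
// 	"RD",        // one ragged input, then one dense input
// 	false, 0, 0, // hashed_output, num_buckets, hash_key
// 	tf.String, tf.Int64)
// _, _ = outVals, outSplits
// ```
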
40768// Elementwise computes the bitwise left-shift of `x` and `y`.
40769//
40770// If `y` is negative, or greater than or equal to the width of `x` in bits, the
40771// result is implementation defined.
40772//
40773// Example:
40774//
40775// ```python
40776// import tensorflow as tf
40777// from tensorflow.python.ops import bitwise_ops
40778// import numpy as np
40779// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
40780//
40781// for dtype in dtype_list:
40782//   lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
40783//   rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
40784//
40785//   left_shift_result = bitwise_ops.left_shift(lhs, rhs)
40786//
40787//   print(left_shift_result)
40788//
40789// # This will print:
40790// # tf.Tensor([ -32   -5 -128    0], shape=(4,), dtype=int8)
40791// # tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int16)
40792// # tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int32)
40793// # tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int64)
40794//
40795// lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
40796// rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
40797// bitwise_ops.left_shift(lhs, rhs)
40798// # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
40799// ```
40800//
40801func LeftShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
40802	if scope.Err() != nil {
40803		return
40804	}
40805	opspec := tf.OpSpec{
40806		Type: "LeftShift",
40807		Input: []tf.Input{
40808			x, y,
40809		},
40810	}
40811	op := scope.AddOperation(opspec)
40812	return op.Output(0)
40813}
40814
40815// EncodePngAttr is an optional argument to EncodePng.
40816type EncodePngAttr func(optionalAttr)
40817
40818// EncodePngCompression sets the optional compression attribute to value.
40819//
40820// value: Compression level.
40821// If not specified, defaults to -1
40822func EncodePngCompression(value int64) EncodePngAttr {
40823	return func(m optionalAttr) {
40824		m["compression"] = value
40825	}
40826}
40827
40828// PNG-encode an image.
40829//
40830// `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
40831// where `channels` is:
40832//
40833// *   1: for grayscale.
40834// *   2: for grayscale + alpha.
40835// *   3: for RGB.
40836// *   4: for RGBA.
40837//
40838// The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
40839// default or a value from 0 to 9.  9 is the highest compression level, generating
40840// the smallest output, but is slower.
40841//
40842// Arguments:
40843//	image: 3-D with shape `[height, width, channels]`.
40844//
40845// Returns 0-D. PNG-encoded image.
40846func EncodePng(scope *Scope, image tf.Output, optional ...EncodePngAttr) (contents tf.Output) {
40847	if scope.Err() != nil {
40848		return
40849	}
40850	attrs := map[string]interface{}{}
40851	for _, a := range optional {
40852		a(attrs)
40853	}
40854	opspec := tf.OpSpec{
40855		Type: "EncodePng",
40856		Input: []tf.Input{
40857			image,
40858		},
40859		Attrs: attrs,
40860	}
40861	op := scope.AddOperation(opspec)
40862	return op.Output(0)
40863}
40864
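// Example usage (a hand-written sketch, not machine generated): build a small
// grayscale image, PNG-encode it at maximum compression, and write it out.
// WriteFile is another wrapper in this package; the output path is illustrative:
//
// ```go
// s := op.NewScope()
// img := op.Const(s, [][][]uint8{{{0}, {255}}, {{255}, {0}}}) // [2, 2, 1]
// png := op.EncodePng(s, img, op.EncodePngCompression(9))
// w := op.WriteFile(s, op.Const(s, "/tmp/checker.png"), png)
// _ = w // run this *tf.Operation as a session target
// ```
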
40865// EnterAttr is an optional argument to Enter.
40866type EnterAttr func(optionalAttr)
40867
40868// EnterIsConstant sets the optional is_constant attribute to value.
40869//
40870// value: If true, the output is constant within the child frame.
40871// If not specified, defaults to false
40872func EnterIsConstant(value bool) EnterAttr {
40873	return func(m optionalAttr) {
40874		m["is_constant"] = value
40875	}
40876}
40877
40878// EnterParallelIterations sets the optional parallel_iterations attribute to value.
40879//
40880// value: The number of iterations allowed to run in parallel.
40881// If not specified, defaults to 10
40882func EnterParallelIterations(value int64) EnterAttr {
40883	return func(m optionalAttr) {
40884		m["parallel_iterations"] = value
40885	}
40886}
40887
40888// Creates or finds a child frame, and makes `data` available to the child frame.
40889//
40890// This op is used together with `Exit` to create loops in the graph.
40891// The unique `frame_name` is used by the `Executor` to identify frames. If
40892// `is_constant` is true, `output` is a constant in the child frame; otherwise
40893// it may be changed in the child frame. At most `parallel_iterations` iterations
40894// are run in parallel in the child frame.
40895//
40896// Arguments:
40897//	data: The tensor to be made available to the child frame.
40898//	frame_name: The name of the child frame.
40899//
40900// Returns The same tensor as `data`.
40901func Enter(scope *Scope, data tf.Output, frame_name string, optional ...EnterAttr) (output tf.Output) {
40902	if scope.Err() != nil {
40903		return
40904	}
40905	attrs := map[string]interface{}{"frame_name": frame_name}
40906	for _, a := range optional {
40907		a(attrs)
40908	}
40909	opspec := tf.OpSpec{
40910		Type: "Enter",
40911		Input: []tf.Input{
40912			data,
40913		},
40914		Attrs: attrs,
40915	}
40916	op := scope.AddOperation(opspec)
40917	return op.Output(0)
40918}
40919
40920// StageSizeAttr is an optional argument to StageSize.
40921type StageSizeAttr func(optionalAttr)
40922
40923// StageSizeCapacity sets the optional capacity attribute to value.
40924// If not specified, defaults to 0
40925//
40926// REQUIRES: value >= 0
40927func StageSizeCapacity(value int64) StageSizeAttr {
40928	return func(m optionalAttr) {
40929		m["capacity"] = value
40930	}
40931}
40932
40933// StageSizeMemoryLimit sets the optional memory_limit attribute to value.
40934// If not specified, defaults to 0
40935//
40936// REQUIRES: value >= 0
40937func StageSizeMemoryLimit(value int64) StageSizeAttr {
40938	return func(m optionalAttr) {
40939		m["memory_limit"] = value
40940	}
40941}
40942
40943// StageSizeContainer sets the optional container attribute to value.
40944// If not specified, defaults to ""
40945func StageSizeContainer(value string) StageSizeAttr {
40946	return func(m optionalAttr) {
40947		m["container"] = value
40948	}
40949}
40950
40951// StageSizeSharedName sets the optional shared_name attribute to value.
40952// If not specified, defaults to ""
40953func StageSizeSharedName(value string) StageSizeAttr {
40954	return func(m optionalAttr) {
40955		m["shared_name"] = value
40956	}
40957}
40958
40959// Op returns the number of elements in the underlying container.
40960func StageSize(scope *Scope, dtypes []tf.DataType, optional ...StageSizeAttr) (size tf.Output) {
40961	if scope.Err() != nil {
40962		return
40963	}
40964	attrs := map[string]interface{}{"dtypes": dtypes}
40965	for _, a := range optional {
40966		a(attrs)
40967	}
40968	opspec := tf.OpSpec{
40969		Type: "StageSize",
40970
40971		Attrs: attrs,
40972	}
40973	op := scope.AddOperation(opspec)
40974	return op.Output(0)
40975}
40976
40977// Converts a `RaggedTensor` into a `SparseTensor` with the same values.
40978//
40979// input=ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits)
40980// output=SparseTensor(indices=sparse_indices, values=sparse_values,
40981//                     dense_shape=sparse_dense_shape)
40982//
40983// Arguments:
40984//	rt_nested_splits: The `row_splits` for the `RaggedTensor`.
40985//	rt_dense_values: The `flat_values` for the `RaggedTensor`.
40986//
40987// Returns:
40988//	sparse_indices: The indices for the `SparseTensor`.
40989//	sparse_values: The values of the `SparseTensor`.
40990//	sparse_dense_shape: `sparse_dense_shape` is a tight bounding box of the input `RaggedTensor`.
40991func RaggedTensorToSparse(scope *Scope, rt_nested_splits []tf.Output, rt_dense_values tf.Output) (sparse_indices tf.Output, sparse_values tf.Output, sparse_dense_shape tf.Output) {
40992	if scope.Err() != nil {
40993		return
40994	}
40995	opspec := tf.OpSpec{
40996		Type: "RaggedTensorToSparse",
40997		Input: []tf.Input{
40998			tf.OutputList(rt_nested_splits), rt_dense_values,
40999		},
41000	}
41001	op := scope.AddOperation(opspec)
41002	return op.Output(0), op.Output(1), op.Output(2)
41003}
41004
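// Example usage (a hand-written sketch, not machine generated): converting the
// ragged tensor [[1, 2], [], [3]], whose `row_splits` are [0, 2, 2, 3]:
//
// ```go
// s := op.NewScope()
// splits := op.Const(s, []int64{0, 2, 2, 3})
// values := op.Const(s, []int32{1, 2, 3})
// indices, vals, shape := op.RaggedTensorToSparse(s, []tf.Output{splits}, values)
// // indices -> [[0 0] [0 1] [2 0]], vals -> [1 2 3], shape -> [3 2]
// _, _, _ = indices, vals, shape
// ```
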
41005// UniqueAttr is an optional argument to Unique.
41006type UniqueAttr func(optionalAttr)
41007
41008// UniqueOutIdx sets the optional out_idx attribute to value.
41009// If not specified, defaults to DT_INT32
41010func UniqueOutIdx(value tf.DataType) UniqueAttr {
41011	return func(m optionalAttr) {
41012		m["out_idx"] = value
41013	}
41014}
41015
41016// Finds unique elements in a 1-D tensor.
41017//
41018// This operation returns a tensor `y` containing all of the unique elements of `x`
41019// sorted in the same order that they occur in `x`; `x` does not need to be sorted.
41020// This operation also returns a tensor `idx` the same size as `x` that contains
41021// the index of each value of `x` in the unique output `y`. In other words:
41022//
41023// `y[idx[i]] = x[i] for i in [0, 1, ..., len(x) - 1]`
41024//
41025// Examples:
41026//
41027// ```
41028// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
41029// y, idx = unique(x)
41030// y ==> [1, 2, 4, 7, 8]
41031// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
41032// ```
41033//
41034// ```
41035// # tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5]
41036// y, idx = unique(x)
41037// y ==> [4, 5, 1, 2, 3]
41038// idx ==> [0, 1, 2, 3, 4, 4, 0, 1]
41039// ```
41040//
41041// Arguments:
41042//	x: 1-D.
41043//
41044// Returns:
41045//	y: 1-D.
41046//	idx: 1-D.
41047func Unique(scope *Scope, x tf.Output, optional ...UniqueAttr) (y tf.Output, idx tf.Output) {
41048	if scope.Err() != nil {
41049		return
41050	}
41051	attrs := map[string]interface{}{}
41052	for _, a := range optional {
41053		a(attrs)
41054	}
41055	opspec := tf.OpSpec{
41056		Type: "Unique",
41057		Input: []tf.Input{
41058			x,
41059		},
41060		Attrs: attrs,
41061	}
41062	op := scope.AddOperation(opspec)
41063	return op.Output(0), op.Output(1)
41064}
41065
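// Example usage: a complete, runnable hand-written sketch that builds and
// executes the op on the first example input above (assumes the core bindings
// imported as `tf` and this package as `op`):
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	x := op.Const(s, []int32{1, 1, 2, 4, 4, 4, 7, 8, 8})
// 	y, idx := op.Unique(s, x)
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	out, err := sess.Run(nil, []tf.Output{y, idx}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value()) // [1 2 4 7 8]
// 	fmt.Println(out[1].Value()) // [0 0 1 2 2 2 3 4 4]
// }
// ```
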
41066// Scatters `updates` into a tensor of shape `shape` according to `indices`.
41067//
41068// Update the input tensor by scattering sparse `updates` according to individual values at the specified `indices`.
41069// This op returns an `output` tensor with the `shape` you specify. This op is the
41070// inverse of the `tf.gather_nd` operator which extracts values or slices from a
41071// given tensor.
41072//
41073// This operation is similar to `tf.tensor_scatter_add`, except that the tensor is
41074// zero-initialized. Calling `tf.scatter_nd(indices, values, shape)`
41075// is identical to calling
41076// `tf.tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)`.
41077//
41078// If `indices` contains duplicates, the duplicate `values` are accumulated
41079// (summed).
41080//
41081// **WARNING**: The order in which updates are applied is nondeterministic, so the
41082// output will be nondeterministic if `indices` contains duplicates;
41083// numbers summed in different order may yield different results because of some
41084// numerical approximation issues.
41085//
41086// `indices` is an integer tensor containing indices into the output tensor.
41087// The last dimension of `indices` can be at most the rank of `shape`:
41088//
41089//     indices.shape[-1] <= shape.rank
41090//
41091// The last dimension of `indices` corresponds to indices of elements
41092// (if `indices.shape[-1] = shape.rank`) or slices
41093// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
41094// `shape`.
41095//
41096// `updates` is a tensor with shape:
41097//
41098//     indices.shape[:-1] + shape[indices.shape[-1]:]
41099//
41100// The simplest form of the scatter op is to insert individual elements in
41101// a tensor by index. Consider an example where you want to insert 4 scattered
41102// elements in a rank-1 tensor with 8 elements.
41103//
41104// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
41105// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
41106// </div>
41107//
41108// In Python, this scatter operation would look like this:
41109//
41110// ```python
41111//     indices = tf.constant([[4], [3], [1], [7]])
41112//     updates = tf.constant([9, 10, 11, 12])
41113//     shape = tf.constant([8])
41114//     scatter = tf.scatter_nd(indices, updates, shape)
41115//     print(scatter)
41116// ```
41117//
41118// The resulting tensor would look like this:
41119//
41120//     [0, 11, 0, 10, 9, 0, 0, 12]
41121//
41122// You can also insert entire slices of a higher rank tensor all at once. For
41123// example, you can insert two slices in the first dimension of a rank-3 tensor
41124// with two matrices of new values.
41125//
41126// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
41127// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
41128// </div>
41129//
41130// In Python, this scatter operation would look like this:
41131//
41132// ```python
41133//     indices = tf.constant([[0], [2]])
41134//     updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
41135//                             [7, 7, 7, 7], [8, 8, 8, 8]],
41136//                            [[5, 5, 5, 5], [6, 6, 6, 6],
41137//                             [7, 7, 7, 7], [8, 8, 8, 8]]])
41138//     shape = tf.constant([4, 4, 4])
41139//     scatter = tf.scatter_nd(indices, updates, shape)
41140//     print(scatter)
41141// ```
41142//
41143// The resulting tensor would look like this:
41144//
41145//     [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
41146//      [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
41147//      [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
41148//      [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
41149//
41150// Note that on CPU, if an out of bound index is found, an error is returned.
41151// On GPU, if an out of bound index is found, the index is ignored.
41152//
41153// Arguments:
41154//	indices: Tensor of indices.
41155//	updates: Values to scatter into the output tensor.
41156//	shape: 1-D. The shape of the output tensor.
41157//
41158// Returns A new tensor with the given shape and updates applied according
41159// to the indices.
41160func ScatterNd(scope *Scope, indices tf.Output, updates tf.Output, shape tf.Output) (output tf.Output) {
41161	if scope.Err() != nil {
41162		return
41163	}
41164	opspec := tf.OpSpec{
41165		Type: "ScatterNd",
41166		Input: []tf.Input{
41167			indices, updates, shape,
41168		},
41169	}
41170	op := scope.AddOperation(opspec)
41171	return op.Output(0)
41172}
41173
41174// Returns the truth value of (x > y) element-wise.
41175//
41176// *NOTE*: `Greater` supports broadcasting. More about broadcasting
41177// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
41178//
41179// Example:
41180//
41181// ```python
41182// x = tf.constant([5, 4, 6])
41183// y = tf.constant([5, 2, 5])
41184// tf.math.greater(x, y) ==> [False, True, True]
41185//
41186// x = tf.constant([5, 4, 6])
41187// y = tf.constant([5])
41188// tf.math.greater(x, y) ==> [False, False, True]
41189// ```
41190func Greater(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
41191	if scope.Err() != nil {
41192		return
41193	}
41194	opspec := tf.OpSpec{
41195		Type: "Greater",
41196		Input: []tf.Input{
41197			x, y,
41198		},
41199	}
41200	op := scope.AddOperation(opspec)
41201	return op.Output(0)
41202}
41203
41204// LowerBoundAttr is an optional argument to LowerBound.
41205type LowerBoundAttr func(optionalAttr)
41206
41207// LowerBoundOutType sets the optional out_type attribute to value.
41208// If not specified, defaults to DT_INT32
41209func LowerBoundOutType(value tf.DataType) LowerBoundAttr {
41210	return func(m optionalAttr) {
41211		m["out_type"] = value
41212	}
41213}
41214
41215// Applies lower_bound(sorted_search_values, values) along each row.
41216//
41217// Each set of rows with the same index in (sorted_inputs, values) is treated
41218// independently.  The resulting row is the equivalent of calling
41219// `np.searchsorted(sorted_inputs, values, side='left')`.
41220//
41221// The result is not a global index to the entire
41222// `Tensor`, but rather just the index in the last dimension.
41223//
41224// A 2-D example:
41225//   sorted_sequence = [[0, 3, 9, 9, 10],
41226//                      [1, 2, 3, 4, 5]]
41227//   values = [[2, 4, 9],
41228//             [0, 2, 6]]
41229//
41230//   result = LowerBound(sorted_sequence, values)
41231//
41232//   result == [[1, 2, 2],
41233//              [0, 1, 5]]
41234//
41235// Arguments:
41236//	sorted_inputs: 2-D Tensor where each row is ordered.
41237//	values: 2-D Tensor with the same number of rows as `sorted_inputs`. Contains
41238// the values that will be searched for in `sorted_inputs`.
41239//
41240// Returns A `Tensor` with the same shape as `values`.  It contains the first scalar index
41241// into the last dimension where values can be inserted without changing the
41242// ordered property.
41243func LowerBound(scope *Scope, sorted_inputs tf.Output, values tf.Output, optional ...LowerBoundAttr) (output tf.Output) {
41244	if scope.Err() != nil {
41245		return
41246	}
41247	attrs := map[string]interface{}{}
41248	for _, a := range optional {
41249		a(attrs)
41250	}
41251	opspec := tf.OpSpec{
41252		Type: "LowerBound",
41253		Input: []tf.Input{
41254			sorted_inputs, values,
41255		},
41256		Attrs: attrs,
41257	}
41258	op := scope.AddOperation(opspec)
41259	return op.Output(0)
41260}
41261
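// Example usage (a hand-written sketch, not machine generated), reproducing
// the 2-D example above. UpperBound, defined later in this package, is used
// the same way with side='right' semantics:
//
// ```go
// s := op.NewScope()
// sorted := op.Const(s, [][]int32{{0, 3, 9, 9, 10}, {1, 2, 3, 4, 5}})
// vals := op.Const(s, [][]int32{{2, 4, 9}, {0, 2, 6}})
// lo := op.LowerBound(s, sorted, vals) // [[1 2 2] [0 1 5]]
// hi := op.UpperBound(s, sorted, vals,
// 	op.UpperBoundOutType(tf.Int64)) // [[1 2 4] [0 2 5]]
// _, _ = lo, hi
// ```
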
41262// TensorArrayConcatV3Attr is an optional argument to TensorArrayConcatV3.
41263type TensorArrayConcatV3Attr func(optionalAttr)
41264
41265// TensorArrayConcatV3ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
41266//
41267// value: The expected shape of an element, if known,
41268// excluding the first dimension. Used to validate the shapes of
41269// TensorArray elements. If this shape is not fully specified, concatenating
41270// zero-size TensorArrays is an error.
41271// If not specified, defaults to {unknown_rank:true}
41272func TensorArrayConcatV3ElementShapeExcept0(value tf.Shape) TensorArrayConcatV3Attr {
41273	return func(m optionalAttr) {
41274		m["element_shape_except0"] = value
41275	}
41276}
41277
41278// Concatenates the elements from the TensorArray into value `value`.
41279//
41280// Takes `T` elements of shapes
41281//
41282//   ```
41283//   (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
41284//   ```
41285//
41286// and concatenates them into a Tensor of shape:
41287//
41288//   ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
41289//
41290// All elements must have the same shape, except for the first dimension.
41291//
41292// Arguments:
41293//	handle: The handle to a TensorArray.
41294//	flow_in: A float scalar that enforces proper chaining of operations.
41295//	dtype: The type of the elem that is returned.
41296//
41297// Returns:
41298//	value: All of the elements in the TensorArray, concatenated along the first
41299// axis.
41300//	lengths: A vector of the row sizes of the original T elements in the
41301// value output.  In the example above, this would be the values:
41302// `(n0, n1, ..., n(T-1))`.
41303func TensorArrayConcatV3(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV3Attr) (value tf.Output, lengths tf.Output) {
41304	if scope.Err() != nil {
41305		return
41306	}
41307	attrs := map[string]interface{}{"dtype": dtype}
41308	for _, a := range optional {
41309		a(attrs)
41310	}
41311	opspec := tf.OpSpec{
41312		Type: "TensorArrayConcatV3",
41313		Input: []tf.Input{
41314			handle, flow_in,
41315		},
41316		Attrs: attrs,
41317	}
41318	op := scope.AddOperation(opspec)
41319	return op.Output(0), op.Output(1)
41320}
41321
41322// ResourceApplyFtrlV2Attr is an optional argument to ResourceApplyFtrlV2.
41323type ResourceApplyFtrlV2Attr func(optionalAttr)
41324
41325// ResourceApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
41326//
41327// value: If `True`, updating of the var and accum tensors will be protected
41328// by a lock; otherwise the behavior is undefined, but may exhibit less
41329// contention.
41330// If not specified, defaults to false
41331func ResourceApplyFtrlV2UseLocking(value bool) ResourceApplyFtrlV2Attr {
41332	return func(m optionalAttr) {
41333		m["use_locking"] = value
41334	}
41335}
41336
41337// ResourceApplyFtrlV2MultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value.
41338// If not specified, defaults to false
41339func ResourceApplyFtrlV2MultiplyLinearByLr(value bool) ResourceApplyFtrlV2Attr {
41340	return func(m optionalAttr) {
41341		m["multiply_linear_by_lr"] = value
41342	}
41343}
41344
41345// Update '*var' according to the Ftrl-proximal scheme.
41346//
41347// accum_new = accum + grad * grad
41348// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
41349// linear += grad_with_shrinkage +
41350//     (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
41351// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
41352// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
41353// accum = accum_new
41354//
41355// Arguments:
41356//	var_: Should be from a Variable().
41357//	accum: Should be from a Variable().
41358//	linear: Should be from a Variable().
41359//	grad: The gradient.
41360//	lr: Scaling factor. Must be a scalar.
41361//	l1: L1 regularization. Must be a scalar.
41362//	l2: L2 regularization. Must be a scalar.
41363//	l2_shrinkage: L2 shrinkage regularization. Must be a scalar.
41364//	lr_power: Scaling factor. Must be a scalar.
41365//
41366// Returns the created operation.
41367func ResourceApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlV2Attr) (o *tf.Operation) {
41368	if scope.Err() != nil {
41369		return
41370	}
41371	attrs := map[string]interface{}{}
41372	for _, a := range optional {
41373		a(attrs)
41374	}
41375	opspec := tf.OpSpec{
41376		Type: "ResourceApplyFtrlV2",
41377		Input: []tf.Input{
41378			var_, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power,
41379		},
41380		Attrs: attrs,
41381	}
41382	return scope.AddOperation(opspec)
41383}
41384
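// Example usage (a hand-written sketch, not machine generated): create the
// resource variables and apply one update step. Shapes, initial values, and
// hyperparameters are illustrative; the variables must be initialized (e.g.
// via AssignVariableOp) before the update op runs:
//
// ```go
// s := op.NewScope()
// newVar := func(name string) tf.Output {
// 	return op.VarHandleOp(s.SubScope(name), tf.Float, tf.MakeShape(2))
// }
// v, accum, linear := newVar("var"), newVar("accum"), newVar("linear")
// grad := op.Const(s, []float32{0.1, -0.2})
// one := op.Const(s, float32(1.0))
// step := op.ResourceApplyFtrlV2(s, v, accum, linear, grad,
// 	one, one, one, one, one, // lr, l1, l2, l2_shrinkage, lr_power
// 	op.ResourceApplyFtrlV2UseLocking(true))
// _ = step // run as a session target once per training step
// ```
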
41385// UpperBoundAttr is an optional argument to UpperBound.
41386type UpperBoundAttr func(optionalAttr)
41387
41388// UpperBoundOutType sets the optional out_type attribute to value.
41389// If not specified, defaults to DT_INT32
41390func UpperBoundOutType(value tf.DataType) UpperBoundAttr {
41391	return func(m optionalAttr) {
41392		m["out_type"] = value
41393	}
41394}
41395
41396// Applies upper_bound(sorted_search_values, values) along each row.
41397//
41398// Each set of rows with the same index in (sorted_inputs, values) is treated
41399// independently.  The resulting row is the equivalent of calling
41400// `np.searchsorted(sorted_inputs, values, side='right')`.
41401//
41402// The result is not a global index to the entire
41403// `Tensor`, but rather just the index in the last dimension.
41404//
41405// A 2-D example:
41406//   sorted_sequence = [[0, 3, 9, 9, 10],
41407//                      [1, 2, 3, 4, 5]]
41408//   values = [[2, 4, 9],
41409//             [0, 2, 6]]
41410//
41411//   result = UpperBound(sorted_sequence, values)
41412//
41413//   result == [[1, 2, 4],
41414//              [0, 2, 5]]
41415//
41416// Arguments:
41417//	sorted_inputs: 2-D Tensor where each row is ordered.
41418//	values: 2-D Tensor with the same number of rows as `sorted_inputs`. Contains
41419// the values that will be searched for in `sorted_inputs`.
41420//
41421// Returns A `Tensor` with the same shape as `values`.  It contains the last scalar index
41422// into the last dimension where values can be inserted without changing the
41423// ordered property.
41424func UpperBound(scope *Scope, sorted_inputs tf.Output, values tf.Output, optional ...UpperBoundAttr) (output tf.Output) {
41425	if scope.Err() != nil {
41426		return
41427	}
41428	attrs := map[string]interface{}{}
41429	for _, a := range optional {
41430		a(attrs)
41431	}
41432	opspec := tf.OpSpec{
41433		Type: "UpperBound",
41434		Input: []tf.Input{
41435			sorted_inputs, values,
41436		},
41437		Attrs: attrs,
41438	}
41439	op := scope.AddOperation(opspec)
41440	return op.Output(0)
41441}
41442
41443// BatchToSpace for N-D tensors of type T.
41444//
41445// This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
41446// `block_shape + [batch]`, interleaves these blocks back into the grid defined by
41447// the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
41448// the input.  The spatial dimensions of this intermediate result are then
41449// optionally cropped according to `crops` to produce the output.  This is the
41450// reverse of SpaceToBatch.  See below for a precise description.
41451//
41452// Arguments:
41453//	input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
41454// where spatial_shape has M dimensions.
41455//	block_shape: 1-D with shape `[M]`, all values must be >= 1.
41456//	crops: 2-D with shape `[M, 2]`, all values must be >= 0.
41457//   `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
41458//   dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
41459//   required that
41460//   `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
41461//
41462// This operation is equivalent to the following steps:
41463//
41464// 1. Reshape `input` to `reshaped` of shape:
41465//      [block_shape[0], ..., block_shape[M-1],
41466//       batch / prod(block_shape),
41467//       input_shape[1], ..., input_shape[N-1]]
41468//
41469// 2. Permute dimensions of `reshaped` to produce `permuted` of shape
41470//      [batch / prod(block_shape),
41471//
41472//       input_shape[1], block_shape[0],
41473//       ...,
41474//       input_shape[M], block_shape[M-1],
41475//
41476//       input_shape[M+1], ..., input_shape[N-1]]
41477//
41478// 3. Reshape `permuted` to produce `reshaped_permuted` of shape
41479//      [batch / prod(block_shape),
41480//
41481//       input_shape[1] * block_shape[0],
41482//       ...,
41483//       input_shape[M] * block_shape[M-1],
41484//
41485//       input_shape[M+1],
41486//       ...,
41487//       input_shape[N-1]]
41488//
41489// 4. Crop the start and end of dimensions `[1, ..., M]` of
41490//    `reshaped_permuted` according to `crops` to produce the output of shape:
41491//      [batch / prod(block_shape),
41492//
41493//       input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
41494//       ...,
41495//       input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
41496//
41497//       input_shape[M+1], ..., input_shape[N-1]]
41498//
41499// Some examples:
41500//
41501// (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
41502//     `crops = [[0, 0], [0, 0]]`:
41503//
41504// ```
41505// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
41506// ```
41507//
41508// The output tensor has shape `[1, 2, 2, 1]` and value:
41509//
41510// ```
41511// x = [[[[1], [2]], [[3], [4]]]]
41512// ```
41513//
41514// (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
41515//     `crops = [[0, 0], [0, 0]]`:
41516//
41517// ```
41518// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
41519// ```
41520//
41521// The output tensor has shape `[1, 2, 2, 3]` and value:
41522//
41523// ```
41524// x = [[[[1, 2, 3], [4, 5, 6]],
41525//       [[7, 8, 9], [10, 11, 12]]]]
41526// ```
41527//
41528// (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
41529//     `crops = [[0, 0], [0, 0]]`:
41530//
41531// ```
41532// x = [[[[1], [3]], [[9], [11]]],
41533//      [[[2], [4]], [[10], [12]]],
41534//      [[[5], [7]], [[13], [15]]],
41535//      [[[6], [8]], [[14], [16]]]]
41536// ```
41537//
41538// The output tensor has shape `[1, 4, 4, 1]` and value:
41539//
41540// ```
41541// x = [[[[1],   [2],  [3],  [4]],
41542//       [[5],   [6],  [7],  [8]],
41543//       [[9],  [10], [11],  [12]],
41544//       [[13], [14], [15],  [16]]]]
41545// ```
41546//
41547// (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
41548//     `crops = [[0, 0], [2, 0]]`:
41549//
41550// ```
41551// x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
41552//      [[[0], [2], [4]]], [[[0], [10], [12]]],
41553//      [[[0], [5], [7]]], [[[0], [13], [15]]],
41554//      [[[0], [6], [8]]], [[[0], [14], [16]]]]
41555// ```
41556//
41557// The output tensor has shape `[2, 2, 4, 1]` and value:
41558//
41559// ```
41560// x = [[[[1],   [2],  [3],  [4]],
41561//       [[5],   [6],  [7],  [8]]],
41562//      [[[9],  [10], [11],  [12]],
41563//       [[13], [14], [15],  [16]]]]
41564// ```
41565func BatchToSpaceND(scope *Scope, input tf.Output, block_shape tf.Output, crops tf.Output) (output tf.Output) {
41566	if scope.Err() != nil {
41567		return
41568	}
41569	opspec := tf.OpSpec{
41570		Type: "BatchToSpaceND",
41571		Input: []tf.Input{
41572			input, block_shape, crops,
41573		},
41574	}
41575	op := scope.AddOperation(opspec)
41576	return op.Output(0)
41577}
41578
41579// FIFOQueueV2Attr is an optional argument to FIFOQueueV2.
41580type FIFOQueueV2Attr func(optionalAttr)
41581
41582// FIFOQueueV2Shapes sets the optional shapes attribute to value.
41583//
41584// value: The shape of each component in a value. The length of this attr must
41585// be either 0 or the same as the length of component_types. If the length of
41586// this attr is 0, the shapes of queue elements are not constrained, and
41587// only one element may be dequeued at a time.
41588// If not specified, defaults to {}
41589//
41590// REQUIRES: len(value) >= 0
41591func FIFOQueueV2Shapes(value []tf.Shape) FIFOQueueV2Attr {
41592	return func(m optionalAttr) {
41593		m["shapes"] = value
41594	}
41595}
41596
41597// FIFOQueueV2Capacity sets the optional capacity attribute to value.
41598//
41599// value: The upper bound on the number of elements in this queue.
41600// Negative numbers mean no limit.
41601// If not specified, defaults to -1
41602func FIFOQueueV2Capacity(value int64) FIFOQueueV2Attr {
41603	return func(m optionalAttr) {
41604		m["capacity"] = value
41605	}
41606}
41607
41608// FIFOQueueV2Container sets the optional container attribute to value.
41609//
41610// value: If non-empty, this queue is placed in the given container.
41611// Otherwise, a default container is used.
41612// If not specified, defaults to ""
41613func FIFOQueueV2Container(value string) FIFOQueueV2Attr {
41614	return func(m optionalAttr) {
41615		m["container"] = value
41616	}
41617}
41618
41619// FIFOQueueV2SharedName sets the optional shared_name attribute to value.
41620//
41621// value: If non-empty, this queue will be shared under the given name
41622// across multiple sessions.
41623// If not specified, defaults to ""
41624func FIFOQueueV2SharedName(value string) FIFOQueueV2Attr {
41625	return func(m optionalAttr) {
41626		m["shared_name"] = value
41627	}
41628}
41629
41630// A queue that produces elements in first-in first-out order.
41631//
41632// Arguments:
41633//	component_types: The type of each component in a value.
41634//
41635// Returns The handle to the queue.
41636func FIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...FIFOQueueV2Attr) (handle tf.Output) {
41637	if scope.Err() != nil {
41638		return
41639	}
41640	attrs := map[string]interface{}{"component_types": component_types}
41641	for _, a := range optional {
41642		a(attrs)
41643	}
41644	opspec := tf.OpSpec{
41645		Type: "FIFOQueueV2",
41646
41647		Attrs: attrs,
41648	}
41649	op := scope.AddOperation(opspec)
41650	return op.Output(0)
41651}
41652
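// Example usage (a hand-written sketch, not machine generated), pairing the
// queue with its companion enqueue/dequeue wrappers from this package
// (QueueEnqueueV2, QueueDequeueV2); the capacity is illustrative:
//
// ```go
// s := op.NewScope()
// q := op.FIFOQueueV2(s, []tf.DataType{tf.Int32}, op.FIFOQueueV2Capacity(10))
// enq := op.QueueEnqueueV2(s, q, []tf.Output{op.Const(s, int32(7))})
// deq := op.QueueDequeueV2(s, q, []tf.DataType{tf.Int32})
// _, _ = enq, deq // run enq as a session target, then fetch deq[0]
// ```
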
41653// Returns the batched diagonal part of a batched tensor.
41654//
41655// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
41656// `input`.
41657//
41658// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
41659// Let `max_diag_len` be the maximum length among all diagonals to be extracted,
41660// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
41661// Let `num_diags` be the number of diagonals to extract,
41662// `num_diags = k[1] - k[0] + 1`.
41663//
41664// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
41665// `[I, J, ..., L, max_diag_len]` and values:
41666//
41667// ```
41668// diagonal[i, j, ..., l, n]
41669//   = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
41670//     padding_value                 ; otherwise.
41671// ```
41672// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
41673//
41674// Otherwise, the output tensor has rank `r` with dimensions
41675// `[I, J, ..., L, num_diags, max_diag_len]` with values:
41676//
41677// ```
41678// diagonal[i, j, ..., l, m, n]
41679//   = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
41680//     padding_value                 ; otherwise.
41681// ```
41682// where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`.
41683//
41684// The input must be at least a matrix.
41685//
41686// For example:
41687//
41688// ```
41689// input = np.array([[[1, 2, 3, 4],  # Input shape: (2, 3, 4)
41690//                    [5, 6, 7, 8],
41691//                    [9, 8, 7, 6]],
41692//                   [[5, 4, 3, 2],
41693//                    [1, 2, 3, 4],
41694//                    [5, 6, 7, 8]]])
41695//
41696// # A main diagonal from each batch.
41697// tf.matrix_diag_part(input) ==> [[1, 6, 7],  # Output shape: (2, 3)
41698//                                 [5, 2, 7]]
41699//
41700// # A superdiagonal from each batch.
41701// tf.matrix_diag_part(input, k = 1)
41702//   ==> [[2, 7, 6],  # Output shape: (2, 3)
41703//        [4, 3, 8]]
41704//
41705// # A tridiagonal band from each batch.
41706// tf.matrix_diag_part(input, k = (-1, 1))
41707//   ==> [[[2, 7, 6],  # Output shape: (2, 3, 3)
41708//         [1, 6, 7],
41709//         [5, 8, 0]],
41710//        [[4, 3, 8],
41711//         [5, 2, 7],
41712//         [1, 6, 0]]]
41713//
41714// # Padding value = 9
41715// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
41716//   ==> [[[4, 9, 9],  # Output shape: (2, 3, 3)
41717//         [3, 8, 9],
41718//         [2, 7, 6]],
41719//        [[2, 9, 9],
41720//         [3, 4, 9],
41721//         [4, 3, 8]]]
41722// ```
41723//
41724// Arguments:
41725//	input: Rank `r` tensor where `r >= 2`.
41726//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
41727// diagonal, and negative value means subdiagonals. `k` can be a single integer
41728// (for a single diagonal) or a pair of integers specifying the low and high ends
41729// of a matrix band. `k[0]` must not be larger than `k[1]`.
41730//	padding_value: The value to fill the area outside the specified diagonal band with.
41731// Default is 0.
41732//
41733// Returns The extracted diagonal(s).
41734func MatrixDiagPartV2(scope *Scope, input tf.Output, k tf.Output, padding_value tf.Output) (diagonal tf.Output) {
41735	if scope.Err() != nil {
41736		return
41737	}
41738	opspec := tf.OpSpec{
41739		Type: "MatrixDiagPartV2",
41740		Input: []tf.Input{
41741			input, k, padding_value,
41742		},
41743	}
41744	op := scope.AddOperation(opspec)
41745	return op.Output(0)
41746}
41747
41748// Concatenates quantized tensors along one dimension.
41749//
41750// Arguments:
41751//	concat_dim: 0-D.  The dimension along which to concatenate.  Must be in the
41752// range [0, rank(values)).
41753//	values: The `N` Tensors to concatenate. Their ranks and types must match,
41754// and their sizes must match in all dimensions except `concat_dim`.
41755//	input_mins: The minimum scalar values for each of the input tensors.
41756//	input_maxes: The maximum scalar values for each of the input tensors.
41757//
41758// Returns:
41759//	output: A `Tensor` with the concatenation of values stacked along the
41760// `concat_dim` dimension.  This tensor's shape matches that of `values` except
41761// in `concat_dim` where it has the sum of the sizes.
41762//	output_min: The float value that the minimum quantized output value represents.
41763//	output_max: The float value that the maximum quantized output value represents.
41764func QuantizedConcat(scope *Scope, concat_dim tf.Output, values []tf.Output, input_mins []tf.Output, input_maxes []tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
41765	if scope.Err() != nil {
41766		return
41767	}
41768	opspec := tf.OpSpec{
41769		Type: "QuantizedConcat",
41770		Input: []tf.Input{
41771			concat_dim, tf.OutputList(values), tf.OutputList(input_mins), tf.OutputList(input_maxes),
41772		},
41773	}
41774	op := scope.AddOperation(opspec)
41775	return op.Output(0), op.Output(1), op.Output(2)
41776}
41777
41778// QuantizeAndDequantizeV4GradAttr is an optional argument to QuantizeAndDequantizeV4Grad.
41779type QuantizeAndDequantizeV4GradAttr func(optionalAttr)
41780
41781// QuantizeAndDequantizeV4GradAxis sets the optional axis attribute to value.
41782// If not specified, defaults to -1
41783func QuantizeAndDequantizeV4GradAxis(value int64) QuantizeAndDequantizeV4GradAttr {
41784	return func(m optionalAttr) {
41785		m["axis"] = value
41786	}
41787}
41788
41789// Returns the gradient of `QuantizeAndDequantizeV4`.
41790//
41791// Returns a gradient of 1 for inputs that are within the quantization range,
41792// or 0 otherwise.
41793func QuantizeAndDequantizeV4Grad(scope *Scope, gradients tf.Output, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV4GradAttr) (input_backprop tf.Output, input_min_backprop tf.Output, input_max_backprop tf.Output) {
41794	if scope.Err() != nil {
41795		return
41796	}
41797	attrs := map[string]interface{}{}
41798	for _, a := range optional {
41799		a(attrs)
41800	}
41801	opspec := tf.OpSpec{
41802		Type: "QuantizeAndDequantizeV4Grad",
41803		Input: []tf.Input{
41804			gradients, input, input_min, input_max,
41805		},
41806		Attrs: attrs,
41807	}
41808	op := scope.AddOperation(opspec)
41809	return op.Output(0), op.Output(1), op.Output(2)
41810}
41811
41812// CollectiveBcastRecvV2Attr is an optional argument to CollectiveBcastRecvV2.
41813type CollectiveBcastRecvV2Attr func(optionalAttr)
41814
41815// CollectiveBcastRecvV2CommunicationHint sets the optional communication_hint attribute to value.
41816// If not specified, defaults to "auto"
41817func CollectiveBcastRecvV2CommunicationHint(value string) CollectiveBcastRecvV2Attr {
41818	return func(m optionalAttr) {
41819		m["communication_hint"] = value
41820	}
41821}
41822
41823// CollectiveBcastRecvV2TimeoutSeconds sets the optional timeout_seconds attribute to value.
41824// If not specified, defaults to 0
41825func CollectiveBcastRecvV2TimeoutSeconds(value float32) CollectiveBcastRecvV2Attr {
41826	return func(m optionalAttr) {
41827		m["timeout_seconds"] = value
41828	}
41829}
41830
41831// Receives a tensor value broadcast from another device.
41832func CollectiveBcastRecvV2(scope *Scope, group_size tf.Output, group_key tf.Output, instance_key tf.Output, shape tf.Output, T tf.DataType, optional ...CollectiveBcastRecvV2Attr) (data tf.Output) {
41833	if scope.Err() != nil {
41834		return
41835	}
41836	attrs := map[string]interface{}{"T": T}
41837	for _, a := range optional {
41838		a(attrs)
41839	}
41840	opspec := tf.OpSpec{
41841		Type: "CollectiveBcastRecvV2",
41842		Input: []tf.Input{
41843			group_size, group_key, instance_key, shape,
41844		},
41845		Attrs: attrs,
41846	}
41847	op := scope.AddOperation(opspec)
41848	return op.Output(0)
41849}
41850
41851// PrintAttr is an optional argument to Print.
41852type PrintAttr func(optionalAttr)
41853
41854// PrintMessage sets the optional message attribute to value.
41855//
41856// value: A string, prefix of the error message.
41857// If not specified, defaults to ""
41858func PrintMessage(value string) PrintAttr {
41859	return func(m optionalAttr) {
41860		m["message"] = value
41861	}
41862}
41863
41864// PrintFirstN sets the optional first_n attribute to value.
41865//
41866// value: Log only the first `first_n` evaluations. Negative values (including the default -1) log every time.
41867// If not specified, defaults to -1
41868func PrintFirstN(value int64) PrintAttr {
41869	return func(m optionalAttr) {
41870		m["first_n"] = value
41871	}
41872}
41873
41874// PrintSummarize sets the optional summarize attribute to value.
41875//
41876// value: Only print this many entries of each tensor.
41877// If not specified, defaults to 3
41878func PrintSummarize(value int64) PrintAttr {
41879	return func(m optionalAttr) {
41880		m["summarize"] = value
41881	}
41882}
41883
41884// Prints a list of tensors.
41885//
41886// Passes `input` through to `output` and prints `data` when evaluating.
41887//
41888// Arguments:
41889//	input: The tensor passed to `output`
41890//	data: A list of tensors to print out when op is evaluated.
41891//
41892// Returns The unmodified `input` tensor
41893func Print(scope *Scope, input tf.Output, data []tf.Output, optional ...PrintAttr) (output tf.Output) {
41894	if scope.Err() != nil {
41895		return
41896	}
41897	attrs := map[string]interface{}{}
41898	for _, a := range optional {
41899		a(attrs)
41900	}
41901	opspec := tf.OpSpec{
41902		Type: "Print",
41903		Input: []tf.Input{
41904			input, tf.OutputList(data),
41905		},
41906		Attrs: attrs,
41907	}
41908	op := scope.AddOperation(opspec)
41909	return op.Output(0)
41910}
41911
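// Example usage (a hand-written sketch, not machine generated): pass a value
// through while logging it; the message text is illustrative:
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{1, 2, 3})
// y := op.Print(s, x, []tf.Output{x},
// 	op.PrintMessage("x = "), op.PrintFirstN(10))
// _ = y // y carries the same value as x; printing happens when y is evaluated
// ```
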
41912// QuantizeAndDequantizeV2Attr is an optional argument to QuantizeAndDequantizeV2.
41913type QuantizeAndDequantizeV2Attr func(optionalAttr)
41914
41915// QuantizeAndDequantizeV2SignedInput sets the optional signed_input attribute to value.
41916//
41917// value: Whether the quantization is signed or unsigned. (actually this parameter should
41918// have been called `signed_output`)
41919// If not specified, defaults to true
41920func QuantizeAndDequantizeV2SignedInput(value bool) QuantizeAndDequantizeV2Attr {
41921	return func(m optionalAttr) {
41922		m["signed_input"] = value
41923	}
41924}
41925
41926// QuantizeAndDequantizeV2NumBits sets the optional num_bits attribute to value.
41927//
41928// value: The bitwidth of the quantization.
41929// If not specified, defaults to 8
41930func QuantizeAndDequantizeV2NumBits(value int64) QuantizeAndDequantizeV2Attr {
41931	return func(m optionalAttr) {
41932		m["num_bits"] = value
41933	}
41934}
41935
41936// QuantizeAndDequantizeV2RangeGiven sets the optional range_given attribute to value.
41937//
41938// value: Whether the range is given or should be determined from the `input` tensor.
41939// If not specified, defaults to false
41940func QuantizeAndDequantizeV2RangeGiven(value bool) QuantizeAndDequantizeV2Attr {
41941	return func(m optionalAttr) {
41942		m["range_given"] = value
41943	}
41944}
41945
41946// QuantizeAndDequantizeV2RoundMode sets the optional round_mode attribute to value.
41947//
41948// value: The 'round_mode' attribute controls which rounding tie-breaking algorithm is
41949// used when rounding float values to their quantized equivalents. The following
41950// rounding modes are currently supported:
41951//
41952// *   HALF_TO_EVEN: this is the default round_mode.
41953// *   HALF_UP: round ties towards positive infinity. In this mode 7.5 rounds up
41954//     to 8 and -7.5 rounds up to -7.
41955//
41956// If not specified, defaults to "HALF_TO_EVEN"
41957func QuantizeAndDequantizeV2RoundMode(value string) QuantizeAndDequantizeV2Attr {
41958	return func(m optionalAttr) {
41959		m["round_mode"] = value
41960	}
41961}
41962
41963// QuantizeAndDequantizeV2NarrowRange sets the optional narrow_range attribute to value.
41964//
41965// value: If True, then the absolute value of the quantized minimum value is the same as
41966// the quantized maximum value, instead of 1 greater.
41967// i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
41968// If not specified, defaults to false
41969func QuantizeAndDequantizeV2NarrowRange(value bool) QuantizeAndDequantizeV2Attr {
41970	return func(m optionalAttr) {
41971		m["narrow_range"] = value
41972	}
41973}
41974
41975// QuantizeAndDequantizeV2Axis sets the optional axis attribute to value.
41976//
41977// value: If specified, this axis is treated as a channel or slice axis, and a separate
41978// quantization range is used for each channel or slice along this axis.
41979// If not specified, defaults to -1
41980func QuantizeAndDequantizeV2Axis(value int64) QuantizeAndDequantizeV2Attr {
41981	return func(m optionalAttr) {
41982		m["axis"] = value
41983	}
41984}
41985
41986// Quantizes then dequantizes a tensor.
41987//
41988// This op simulates the precision loss from the quantized forward pass by:
41989//
41990// 1. Quantizing the tensor to fixed point numbers, which should match the target
41991//    quantization method when it is used in inference.
41992// 2. Dequantizing it back to floating point numbers for the following ops, most
41993//    likely matmul.
41994//
41995// There are different ways to quantize. This version uses only scaling, so 0.0
41996// maps to 0.
41997//
41998// From the specified 'num_bits' in the quantized output type, it determines
41999// minimum and maximum representable quantized values.
42000//
42001// e.g.
42002//
42003// *   [-128, 127] for signed, num_bits = 8, or
42004// *   [0, 255] for unsigned, num_bits = 8.
42005//
42006// If range_given == False, the initial input_min, input_max will be determined
42007// automatically as the minimum and maximum values in the input tensor, otherwise
42008// the specified values of input_min, input_max are used.
42009//
42010// Note: If the input_min, input_max are specified, they do not need to equal the
42011// actual minimum and maximum values in the tensor. e.g. in some cases it may be
42012// beneficial to specify these values such that the low probability extremes of the
42013// input distribution are clipped.
42014//
42015// This op determines the maximum scale_factor that would map the initial
42016// [input_min, input_max] range to a range that lies within the representable
42017// quantized range.
42018//
42019// It determines the scale from one of input_min and input_max, then updates the
42020// other one to maximize the representable range.
42021//
42022// e.g.
42023//
42024// *   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
42025//     5.0]: it would use a scale_factor of -128 / -10.0 = 12.8. In this case, it
42026//     would update input_max to be 127 / 12.8 = 9.921875
42027// *   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
42028//     10.0]: it would use a scale_factor of 127 / 10.0 = 12.7. In this case, it
42029//     would update input_min to be -128.0 / 12.7 = -10.07874
42030// *   if the output is unsigned, input_min is forced to be 0, and only the
42031//     specified input_max is used.
42032//
42033// After determining the scale_factor and updating the input range, it applies the
42034// following to each value in the 'input' tensor.
42035//
42036// output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.
42037//
42038// The above round function rounds the value based on the given round_mode.
42039//
42041// Arguments:
42042//	input: Tensor to quantize and then dequantize.
42043//	input_min: If `range_given == True`, this specifies the minimum input value that needs to
42044// be represented, otherwise it is determined from the min value of the `input`
42045// tensor.
42046//	input_max: If `range_given == True`, this specifies the maximum input value that needs to
42047// be represented, otherwise it is determined from the max value of the `input`
42048// tensor.
42049func QuantizeAndDequantizeV2(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV2Attr) (output tf.Output) {
42050	if scope.Err() != nil {
42051		return
42052	}
42053	attrs := map[string]interface{}{}
42054	for _, a := range optional {
42055		a(attrs)
42056	}
42057	opspec := tf.OpSpec{
42058		Type: "QuantizeAndDequantizeV2",
42059		Input: []tf.Input{
42060			input, input_min, input_max,
42061		},
42062		Attrs: attrs,
42063	}
42064	op := scope.AddOperation(opspec)
42065	return op.Output(0)
42066}
42067
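// Example usage (a hand-written sketch, not machine generated), using a fixed
// range as in the second worked example above; the input values are illustrative:
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{-10.0, -3.1, 0.0, 7.25, 10.0})
// out := op.QuantizeAndDequantizeV2(s,
// 	x, op.Const(s, float32(-10.0)), op.Const(s, float32(10.0)),
// 	op.QuantizeAndDequantizeV2RangeGiven(true),
// 	op.QuantizeAndDequantizeV2NumBits(8))
// _ = out
// ```
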
42068// MutableHashTableOfTensorsV2Attr is an optional argument to MutableHashTableOfTensorsV2.
42069type MutableHashTableOfTensorsV2Attr func(optionalAttr)
42070
42071// MutableHashTableOfTensorsV2Container sets the optional container attribute to value.
42072//
42073// value: If non-empty, this table is placed in the given container.
42074// Otherwise, a default container is used.
42075// If not specified, defaults to ""
42076func MutableHashTableOfTensorsV2Container(value string) MutableHashTableOfTensorsV2Attr {
42077	return func(m optionalAttr) {
42078		m["container"] = value
42079	}
42080}
42081
42082// MutableHashTableOfTensorsV2SharedName sets the optional shared_name attribute to value.
42083//
42084// value: If non-empty, this table is shared under the given name across
42085// multiple sessions.
42086// If not specified, defaults to ""
42087func MutableHashTableOfTensorsV2SharedName(value string) MutableHashTableOfTensorsV2Attr {
42088	return func(m optionalAttr) {
42089		m["shared_name"] = value
42090	}
42091}
42092
42093// MutableHashTableOfTensorsV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
42094// If not specified, defaults to false
42095func MutableHashTableOfTensorsV2UseNodeNameSharing(value bool) MutableHashTableOfTensorsV2Attr {
42096	return func(m optionalAttr) {
42097		m["use_node_name_sharing"] = value
42098	}
42099}
42100
42101// MutableHashTableOfTensorsV2ValueShape sets the optional value_shape attribute to value.
42102// If not specified, defaults to {}
42103func MutableHashTableOfTensorsV2ValueShape(value tf.Shape) MutableHashTableOfTensorsV2Attr {
42104	return func(m optionalAttr) {
42105		m["value_shape"] = value
42106	}
42107}
42108
42109// Creates an empty hash table.
42110//
42111// This op creates a mutable hash table, specifying the type of its keys and
42112// values. Each value must be a vector. Data can be inserted into the table using
42113// the insert operations. It does not support the initialization operation.
42114//
42115// Arguments:
42116//	key_dtype: Type of the table keys.
42117//	value_dtype: Type of the table values.
42118//
42119// Returns Handle to a table.
42120func MutableHashTableOfTensorsV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableOfTensorsV2Attr) (table_handle tf.Output) {
42121	if scope.Err() != nil {
42122		return
42123	}
42124	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
42125	for _, a := range optional {
42126		a(attrs)
42127	}
42128	opspec := tf.OpSpec{
42129		Type: "MutableHashTableOfTensorsV2",
42130
42131		Attrs: attrs,
42132	}
42133	op := scope.AddOperation(opspec)
42134	return op.Output(0)
42135}
42136
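// Example usage (a hand-written sketch, not machine generated): create the
// table and use the companion LookupTableInsertV2 / LookupTableFindV2 wrappers
// from this package; the keys and values are illustrative:
//
// ```go
// s := op.NewScope()
// table := op.MutableHashTableOfTensorsV2(s, tf.String, tf.Int64,
// 	op.MutableHashTableOfTensorsV2ValueShape(tf.MakeShape(2)))
// ins := op.LookupTableInsertV2(s, table,
// 	op.Const(s, []string{"a"}), op.Const(s, [][]int64{{1, 2}}))
// found := op.LookupTableFindV2(s, table,
// 	op.Const(s, []string{"a", "b"}), op.Const(s, []int64{0, 0})) // default row
// _, _ = ins, found // run ins as a target before fetching found
// ```
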
42137// OneHotAttr is an optional argument to OneHot.
42138type OneHotAttr func(optionalAttr)
42139
42140// OneHotAxis sets the optional axis attribute to value.
42141//
42142// value: The axis to fill (default: -1, a new inner-most axis).
42143// If not specified, defaults to -1
42144func OneHotAxis(value int64) OneHotAttr {
42145	return func(m optionalAttr) {
42146		m["axis"] = value
42147	}
42148}
42149
42150// Returns a one-hot tensor.
42151//
42152// The locations represented by indices in `indices` take value `on_value`,
42153// while all other locations take value `off_value`.
42154//
42155// If the input `indices` is rank `N`, the output will have rank `N+1`.
42156// The new axis is created at dimension `axis` (default: the new axis is
42157// appended at the end).
42158//
42159// If `indices` is a scalar the output shape will be a vector of length `depth`.
42160//
42161// If `indices` is a vector of length `features`, the output shape will be:
42162// ```
42163//   features x depth if axis == -1
42164//   depth x features if axis == 0
42165// ```
42166//
42167// If `indices` is a matrix (batch) with shape `[batch, features]`,
42168// the output shape will be:
42169// ```
42170//   batch x features x depth if axis == -1
42171//   batch x depth x features if axis == 1
42172//   depth x batch x features if axis == 0
42173// ```
42174//
42175//
42176// Examples
42177// =========
42178//
42179// Suppose that
42180// ```
42181//   indices = [0, 2, -1, 1]
42182//   depth = 3
42183//   on_value = 5.0
42184//   off_value = 0.0
42185//   axis = -1
42186// ```
42187//
42188// Then output is `[4 x 3]`:
42189// ```
42190// output =
42191//   [5.0 0.0 0.0]  // one_hot(0)
42192//   [0.0 0.0 5.0]  // one_hot(2)
42193//   [0.0 0.0 0.0]  // one_hot(-1)
42194//   [0.0 5.0 0.0]  // one_hot(1)
42195// ```
42196//
42197// Suppose that
42198// ```
42199//   indices = [0, 2, -1, 1]
42200//   depth = 3
42201//   on_value = 0.0
42202//   off_value = 3.0
42203//   axis = 0
42204// ```
42205//
42206// Then output is `[3 x 4]`:
42207// ```
42208// output =
42209//   [0.0 3.0 3.0 3.0]
42210//   [3.0 3.0 3.0 0.0]
42211//   [3.0 0.0 3.0 3.0]
// //  ^                one_hot(0)
// //      ^            one_hot(2)
// //          ^        one_hot(-1)
// //              ^    one_hot(1)
// ```
//
// Suppose that
// ```
//   indices = [[0, 2], [1, -1]]
//   depth = 3
//   on_value = 1.0
//   off_value = 0.0
//   axis = -1
// ```
//
// Then output is `[2 x 2 x 3]`:
// ```
// output =
//   [
//     [1.0, 0.0, 0.0]  // one_hot(0)
//     [0.0, 0.0, 1.0]  // one_hot(2)
//   ][
//     [0.0, 1.0, 0.0]  // one_hot(1)
//     [0.0, 0.0, 0.0]  // one_hot(-1)
//   ]
// ```
//
// Arguments:
//	indices: A tensor of indices.
//	depth: A scalar defining the depth of the one hot dimension.
//	on_value: A scalar defining the value to fill in output when `indices[j] = i`.
//	off_value: A scalar defining the value to fill in output when `indices[j] != i`.
//
// Returns The one-hot tensor.
func OneHot(scope *Scope, indices tf.Output, depth tf.Output, on_value tf.Output, off_value tf.Output, optional ...OneHotAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "OneHot",
		Input: []tf.Input{
			indices, depth, on_value, off_value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
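
// The following is an editor's illustrative sketch (not generated code) of
// building and running the first OneHot example from the comment above. It
// assumes the generated Const wrapper elsewhere in this package and a working
// TensorFlow runtime; error handling is abbreviated.
func exampleOneHot() ([][]float32, error) {
	s := NewScope()
	indices := Const(s, []int32{0, 2, -1, 1})
	depth := Const(s, int32(3))
	onValue := Const(s, float32(5.0))
	offValue := Const(s, float32(0.0))
	oneHot := OneHot(s, indices, depth, onValue, offValue, OneHotAxis(-1))
	graph, err := s.Finalize()
	if err != nil {
		return nil, err
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, err
	}
	defer sess.Close()
	fetched, err := sess.Run(nil, []tf.Output{oneHot}, nil)
	if err != nil {
		return nil, err
	}
	// Expected value, per the comment above:
	// [[5 0 0] [0 0 5] [0 0 0] [0 5 0]] with shape [4, 3].
	return fetched[0].Value().([][]float32), nil
}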

// Extract `patches` from `input` and put them in the `"depth"` output dimension. 3D extension of `extract_image_patches`.
//
// Arguments:
//	input: 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`.
//	ksizes: The size of the sliding window for each dimension of `input`.
//	strides: 1-D of length 5. How far the centers of two consecutive patches are in
// `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`.
//	padding: The type of padding algorithm to use.
//
// The size-related attributes are specified as follows:
//
// ```python
// ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
// strides = [1, stride_planes, stride_rows, stride_cols, 1]
// ```
//
// Returns 5-D Tensor with shape `[batch, out_planes, out_rows, out_cols,
// ksize_planes * ksize_rows * ksize_cols * depth]` containing patches
// with size `ksize_planes x ksize_rows x ksize_cols x depth` vectorized
// in the "depth" dimension. Note `out_planes`, `out_rows` and `out_cols`
// are the dimensions of the output patches.
func ExtractVolumePatches(scope *Scope, input tf.Output, ksizes []int64, strides []int64, padding string) (patches tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksizes": ksizes, "strides": strides, "padding": padding}
	opspec := tf.OpSpec{
		Type: "ExtractVolumePatches",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
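
// Editor's sketch (not generated code): wiring ExtractVolumePatches with the
// size-related attributes laid out as described above. The 5-D placeholder
// shape here is hypothetical; "VALID" is one of the accepted padding values.
func exampleExtractVolumePatches() tf.Output {
	s := NewScope()
	// Hypothetical volume: batch=1, 4x4x4 spatial extent, depth=1.
	input := Placeholder(s, tf.Float, PlaceholderShape(tf.MakeShape(1, 4, 4, 4, 1)))
	ksizes := []int64{1, 2, 2, 2, 1}  // [1, ksize_planes, ksize_rows, ksize_cols, 1]
	strides := []int64{1, 2, 2, 2, 1} // [1, stride_planes, stride_rows, stride_cols, 1]
	// Each output patch has depth ksize_planes*ksize_rows*ksize_cols*1 = 8.
	return ExtractVolumePatches(s, input, ksizes, strides, "VALID")
}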

// Decode the frame(s) of a GIF-encoded image to a uint8 tensor.
//
// GIF images with frame or transparency compression are not supported.
// On Linux and macOS systems, convert animated GIFs from compressed to
// uncompressed by running:
//
//     convert $src.gif -coalesce $dst.gif
//
// This op also supports decoding JPEGs and PNGs, though it is cleaner to use
// `tf.io.decode_image`.
//
// Arguments:
//	contents: 0-D.  The GIF-encoded image.
//
// Returns 4-D with shape `[num_frames, height, width, 3]`. RGB channel order.
func DecodeGif(scope *Scope, contents tf.Output) (image tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DecodeGif",
		Input: []tf.Input{
			contents,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
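
// Editor's sketch (not generated code): decoding a GIF read from disk. The
// file name is hypothetical; ReadFile is the generated wrapper for the
// ReadFile op elsewhere in this package.
func exampleDecodeGif() tf.Output {
	s := NewScope()
	contents := ReadFile(s, Const(s, "animation.gif")) // hypothetical path
	// image has shape [num_frames, height, width, 3] and dtype uint8.
	return DecodeGif(s, contents)
}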

// Calculates gains for each feature and returns the best possible split information for the feature.
//
// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
//
// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
//
// In this manner, the output is the best split per feature and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
//
// The output lists all have the same length, `num_features`.
// The output shapes are compatible in a way that the first dimension of all tensors of all lists is the same and equal to the number of possible split nodes for each feature.
//
// Arguments:
//	node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as in `for node_id in range(node_id_range[0], node_id_range[1])` (note that the last index, node_id_range[1], is exclusive).
//	stats_summary_list: A list of Rank 3 tensors (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
//	l1: l1 regularization factor on leaf weights, per instance based.
//	l2: l2 regularization factor on leaf weights, per instance based.
//	tree_complexity: adjustment to the gain, per leaf based.
//	min_node_weight: minimum average of hessians in a node required before the node is considered for splitting.
//	max_splits: the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.
//
// Returns:
//	node_ids_list: An output list of Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has a different size as each feature provides different possible nodes. See above for details like shapes and sizes.
//	gains_list: An output list of Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
//	thresholds_list: An output list of Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
//	left_node_contribs_list: A list of Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
//	right_node_contribs_list: A list of Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, except that the values are for the right node.
func BoostedTreesCalculateBestGainsPerFeature(scope *Scope, node_id_range tf.Output, stats_summary_list []tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, max_splits int64) (node_ids_list []tf.Output, gains_list []tf.Output, thresholds_list []tf.Output, left_node_contribs_list []tf.Output, right_node_contribs_list []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"max_splits": max_splits}
	opspec := tf.OpSpec{
		Type: "BoostedTreesCalculateBestGainsPerFeature",
		Input: []tf.Input{
			node_id_range, tf.OutputList(stats_summary_list), l1, l2, tree_complexity, min_node_weight,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if node_ids_list, idx, err = makeOutputList(op, idx, "node_ids_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	if gains_list, idx, err = makeOutputList(op, idx, "gains_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	if thresholds_list, idx, err = makeOutputList(op, idx, "thresholds_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	if left_node_contribs_list, idx, err = makeOutputList(op, idx, "left_node_contribs_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	if right_node_contribs_list, idx, err = makeOutputList(op, idx, "right_node_contribs_list"); err != nil {
		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
		return
	}
	return node_ids_list, gains_list, thresholds_list, left_node_contribs_list, right_node_contribs_list
}

// Computes offsets of concat inputs within its output.
//
// For example:
//
// ```
// # 'x' is [2, 2, 7]
// # 'y' is [2, 3, 7]
// # 'z' is [2, 5, 7]
// concat_offset(1, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
// ```
//
// This is typically used by gradient computations for a concat operation.
//
// Arguments:
//	concat_dim: The dimension along which to concatenate.
//	shape: The `N` int32 vectors representing shape of tensors being concatenated.
//
// Returns The `N` int32 vectors representing the starting offset
// of input tensors within the concatenated output.
func ConcatOffset(scope *Scope, concat_dim tf.Output, shape []tf.Output) (offset []tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ConcatOffset",
		Input: []tf.Input{
			concat_dim, tf.OutputList(shape),
		},
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if offset, idx, err = makeOutputList(op, idx, "offset"); err != nil {
		scope.UpdateErr("ConcatOffset", err)
		return
	}
	return offset
}
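
// Editor's sketch (not generated code): reproducing the example from the
// comment above. Each shape vector is fed as an int32 constant.
func exampleConcatOffset() []tf.Output {
	s := NewScope()
	x := Const(s, []int32{2, 2, 7})
	y := Const(s, []int32{2, 3, 7})
	z := Const(s, []int32{2, 5, 7})
	dim := Const(s, int32(1)) // concatenating along dimension 1
	// Evaluates to [0, 0, 0], [0, 2, 0], [0, 5, 0].
	return ConcatOffset(s, dim, []tf.Output{x, y, z})
}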

// Wraps the XLA DotGeneral operator, documented at
// https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral.
//
// Arguments:
//	lhs: the LHS tensor
//	rhs: the RHS tensor
//	dimension_numbers: a serialized xla::DotDimensionNumbers proto.
//	precision_config: a serialized xla::PrecisionConfig proto.
func XlaDot(scope *Scope, lhs tf.Output, rhs tf.Output, dimension_numbers string, precision_config string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dimension_numbers": dimension_numbers, "precision_config": precision_config}
	opspec := tf.OpSpec{
		Type: "XlaDot",
		Input: []tf.Input{
			lhs, rhs,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Receives the named tensor from another XLA computation. Wraps the XLA Recv
//
// operator documented at
// https://www.tensorflow.org/performance/xla/operation_semantics#recv.
//
// Arguments:
//	dtype: The type of the tensor.
//	tensor_name: A string key that identifies the channel.
//	shape: The shape of the tensor.
//
// Returns The tensor to receive.
func XlaRecv(scope *Scope, dtype tf.DataType, tensor_name string, shape tf.Shape) (tensor tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype, "tensor_name": tensor_name, "shape": shape}
	opspec := tf.OpSpec{
		Type: "XlaRecv",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Concatenates tensors along one dimension.
//
// Arguments:
//	values: List of `N` Tensors to concatenate. Their ranks and types must match,
// and their sizes must match in all dimensions except `axis`.
//	axis: 0-D.  The dimension along which to concatenate.  Must be in the
// range [-rank(values), rank(values)).
//
// Returns A `Tensor` with the concatenation of values stacked along the
// `axis` dimension.  This tensor's shape matches that of `values` except
// in `axis` where it has the sum of the sizes.
func ConcatV2(scope *Scope, values []tf.Output, axis tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ConcatV2",
		Input: []tf.Input{
			tf.OutputList(values), axis,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
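
// Editor's sketch (not generated code): concatenating two matrices along
// dimension 0, yielding a [4, 2] result.
func exampleConcatV2() tf.Output {
	s := NewScope()
	a := Const(s, [][]float32{{1, 2}, {3, 4}})
	b := Const(s, [][]float32{{5, 6}, {7, 8}})
	axis := Const(s, int32(0))
	return ConcatV2(s, []tf.Output{a, b}, axis)
}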

// Creates a dataset that changes the batch size.
//
// Creates a dataset that rebatches elements from `input_dataset` into new batch
// sizes.
//
// Arguments:
//	input_dataset: A variant tensor representing the input dataset.
//	batch_sizes: A vector of integers representing the size of batches to produce. These values
// are cycled through in order.
//	drop_remainder: A scalar boolean indicating whether a final batch may be
// dropped when it has fewer elements than the requested batch size.
func RebatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_sizes tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "RebatchDatasetV2",
		Input: []tf.Input{
			input_dataset, batch_sizes, drop_remainder,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatefulUniformAttr is an optional argument to StatefulUniform.
type StatefulUniformAttr func(optionalAttr)

// StatefulUniformDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatefulUniformDtype(value tf.DataType) StatefulUniformAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs random values from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[0, 1)`. The
// lower bound 0 is included in the range, while the upper bound 1 is excluded.
//
// Arguments:
//	resource: The handle of the resource variable that stores the state of the RNG.
//	algorithm: The RNG algorithm.
//	shape: The shape of the output tensor.
//
// Returns Random values with specified shape.
func StatefulUniform(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulUniformAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatefulUniform",
		Input: []tf.Input{
			resource, algorithm, shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Debugging/model interpretability outputs for each example.
//
// It traverses all the trees and computes debug metrics for individual examples,
// such as getting split feature ids and logits after each split along the decision
// path used to compute directional feature contributions.
//
// Arguments:
//
//	bucketized_features: A list of rank 1 Tensors containing bucket id for each
// feature.
//	logits_dimension: scalar, dimension of the logits, to be used for constructing the protos in
// examples_debug_outputs_serialized.
//
// Returns Output rank 1 Tensor containing a proto serialized as a string for each example.
func BoostedTreesExampleDebugOutputs(scope *Scope, tree_ensemble_handle tf.Output, bucketized_features []tf.Output, logits_dimension int64) (examples_debug_outputs_serialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
	opspec := tf.OpSpec{
		Type: "BoostedTreesExampleDebugOutputs",
		Input: []tf.Input{
			tree_ensemble_handle, tf.OutputList(bucketized_features),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AllCandidateSamplerAttr is an optional argument to AllCandidateSampler.
type AllCandidateSamplerAttr func(optionalAttr)

// AllCandidateSamplerSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 is set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func AllCandidateSamplerSeed(value int64) AllCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// AllCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func AllCandidateSamplerSeed2(value int64) AllCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Generates labels for candidate sampling with a learned unigram distribution.
//
// See explanations of candidate sampling and the data formats at
// go/candidate-sampling.
//
// For each batch, this op picks a single set of sampled candidate labels.
//
// The advantages of sampling candidates per-batch are simplicity and the
// possibility of efficient dense matrix multiplication. The disadvantage is that
// the sampled candidates must be chosen independently of the context and of the
// true labels.
//
// Arguments:
//	true_classes: A batch_size * num_true matrix, in which each row contains the
// IDs of the num_true target_classes in the corresponding original label.
//	num_true: Number of true labels per context.
//	num_sampled: Number of candidates to produce.
//	unique: If unique is true, we sample with rejection, so that all sampled
// candidates in a batch are unique. This requires some approximation to
// estimate the post-rejection sampling probabilities.
//
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
func AllCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, optional ...AllCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AllCandidateSampler",
		Input: []tf.Input{
			true_classes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Creates a dataset that emits the records from one or more TFRecord files.
//
// Arguments:
//	filenames: A scalar or vector containing the name(s) of the file(s) to be
// read.
//	compression_type: A scalar containing either (i) the empty string (no
// compression), (ii) "ZLIB", or (iii) "GZIP".
//	buffer_size: A scalar representing the number of bytes to buffer. A value of
// 0 means no buffering will be performed.
func TFRecordDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TFRecordDataset",
		Input: []tf.Input{
			filenames, compression_type, buffer_size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
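
// Editor's sketch (not generated code): constructing a TFRecord dataset from
// a hypothetical file, with no compression and no extra buffering.
func exampleTFRecordDataset() tf.Output {
	s := NewScope()
	filenames := Const(s, []string{"train.tfrecord"}) // hypothetical path
	compression := Const(s, "")                       // no compression
	bufferSize := Const(s, int64(0))                  // no buffering
	return TFRecordDataset(s, filenames, compression, bufferSize)
}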

// DepthToSpaceAttr is an optional argument to DepthToSpace.
type DepthToSpaceAttr func(optionalAttr)

// DepthToSpaceDataFormat sets the optional data_format attribute to value.
// If not specified, defaults to "NHWC"
func DepthToSpaceDataFormat(value string) DepthToSpaceAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// DepthToSpace for tensors of type T.
//
// Rearranges data from depth into blocks of spatial data.
// This is the reverse transformation of SpaceToDepth. More specifically,
// this op outputs a copy of the input tensor where values from the `depth`
// dimension are moved in spatial blocks to the `height` and `width` dimensions.
// The attr `block_size` indicates the input block size and how the data is moved.
//
//   * Chunks of data of size `block_size * block_size` from depth are rearranged
//     into non-overlapping blocks of size `block_size x block_size`
//   * The width of the output tensor is `input_width * block_size`, whereas the
//     height is `input_height * block_size`.
//   * The Y, X coordinates within each block of the output image are determined
//     by the high order component of the input channel index.
//   * The depth of the input tensor must be divisible by
//     `block_size * block_size`.
//
// The `data_format` attr specifies the layout of the input and output tensors
// with the following options:
//   "NHWC": `[ batch, height, width, channels ]`
//   "NCHW": `[ batch, channels, height, width ]`
//   "NCHW_VECT_C":
//       `qint8 [ batch, channels / 4, height, width, 4 ]`
//
// It is useful to consider the operation as transforming a 6-D Tensor.
// e.g. for data_format = NHWC,
//      Each element in the input tensor can be specified via 6 coordinates,
//      ordered by decreasing memory layout significance as:
//      n,iY,iX,bY,bX,oC  (where n=batch index, iX, iY means X or Y coordinates
//                         within the input image, bX, bY means coordinates
//                         within the output block, oC means output channels).
//      The output would be the input transposed to the following layout:
//      n,iY,bY,iX,bX,oC
//
// This operation is useful for resizing the activations between convolutions
// (but keeping all data), e.g. instead of pooling. It is also useful for training
// purely convolutional models.
//
// For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
// block_size = 2:
//
// ```
// x = [[[[1, 2, 3, 4]]]]
// ```
//
// This operation will output a tensor of shape `[1, 2, 2, 1]`:
//
// ```
//    [[[[1], [2]],
//      [[3], [4]]]]
// ```
//
// Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
// the corresponding output will have 2x2 elements and will have a depth of
// 1 channel (1 = `4 / (block_size * block_size)`).
// The output element shape is `[2, 2, 1]`.
//
// For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
//
// ```
// x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
// ```
//
// This operation, for block size of 2, will return the following tensor of shape
// `[1, 2, 2, 3]`
//
// ```
//    [[[[1, 2, 3], [4, 5, 6]],
//      [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:
//
// ```
// x =  [[[[1, 2, 3, 4],
//        [5, 6, 7, 8]],
//       [[9, 10, 11, 12],
//        [13, 14, 15, 16]]]]
// ```
//
// the operator will return the following tensor of shape `[1 4 4 1]`:
//
// ```
// x = [[[ [1],   [2],  [5],  [6]],
//       [ [3],   [4],  [7],  [8]],
//       [ [9],  [10], [13],  [14]],
//       [ [11], [12], [15],  [16]]]]
// ```
//
// Arguments:
//
//	block_size: The size of the spatial block, same as in Space2Depth.
func DepthToSpace(scope *Scope, input tf.Output, block_size int64, optional ...DepthToSpaceAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"block_size": block_size}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DepthToSpace",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
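
// Editor's sketch (not generated code): the first example from the comment
// above, mapping a [1, 1, 1, 4] tensor to [1, 2, 2, 1] with block_size = 2.
func exampleDepthToSpace() tf.Output {
	s := NewScope()
	x := Const(s, [][][][]float32{{{{1, 2, 3, 4}}}})
	// Evaluates to [[[[1], [2]], [[3], [4]]]].
	return DepthToSpace(s, x, 2)
}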

// Table initializer that takes two tensors for keys and values respectively.
//
// Arguments:
//	table_handle: Handle to a table which will be initialized.
//	keys: Keys of type Tkey.
//	values: Values of type Tval.
//
// Returns the created operation.
func InitializeTableV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "InitializeTableV2",
		Input: []tf.Input{
			table_handle, keys, values,
		},
	}
	return scope.AddOperation(opspec)
}

// Updates specified rows 'i' with values 'v'.
//
// Computes `x[i, :] = v; return x`.
//
// Originally this function was mutative; for compilation, however, this
// operation creates and operates on a copy of `x`.
//
// Arguments:
//	x: A tensor of type `T`.
//	i: A vector. Indices into the left-most dimension of `x`.
//	v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
//
// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
func InplaceUpdate(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "InplaceUpdate",
		Input: []tf.Input{
			x, i, v,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
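
// Editor's sketch (not generated code): overwriting row 0 of a [2, 3] matrix.
func exampleInplaceUpdate() tf.Output {
	s := NewScope()
	x := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
	i := Const(s, []int32{0})             // rows to update
	v := Const(s, [][]float32{{7, 8, 9}}) // one replacement row per index
	// Evaluates to [[7, 8, 9], [4, 5, 6]].
	return InplaceUpdate(s, x, i, v)
}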

// SpaceToDepthAttr is an optional argument to SpaceToDepth.
type SpaceToDepthAttr func(optionalAttr)

// SpaceToDepthDataFormat sets the optional data_format attribute to value.
// If not specified, defaults to "NHWC"
func SpaceToDepthDataFormat(value string) SpaceToDepthAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// SpaceToDepth for tensors of type T.
//
// Rearranges blocks of spatial data into depth. More specifically,
// this op outputs a copy of the input tensor where values from the `height`
// and `width` dimensions are moved to the `depth` dimension.
// The attr `block_size` indicates the input block size.
//
//   * Non-overlapping blocks of size `block_size x block_size` are rearranged
//     into depth at each location.
//   * The depth of the output tensor is `block_size * block_size * input_depth`.
//   * The Y, X coordinates within each block of the input become the high order
//     component of the output channel index.
//   * The input tensor's height and width must be divisible by block_size.
//
// The `data_format` attr specifies the layout of the input and output tensors
// with the following options:
//   "NHWC": `[ batch, height, width, channels ]`
//   "NCHW": `[ batch, channels, height, width ]`
//   "NCHW_VECT_C":
//       `qint8 [ batch, channels / 4, height, width, 4 ]`
//
// It is useful to consider the operation as transforming a 6-D Tensor.
// e.g. for data_format = NHWC,
//      Each element in the input tensor can be specified via 6 coordinates,
//      ordered by decreasing memory layout significance as:
//      n,oY,bY,oX,bX,iC  (where n=batch index, oX, oY means X or Y coordinates
//                         within the output image, bX, bY means coordinates
//                         within the input block, iC means input channels).
//      The output would be a transpose to the following layout:
//      n,oY,oX,bY,bX,iC
//
// This operation is useful for resizing the activations between convolutions
// (but keeping all data), e.g. instead of pooling. It is also useful for training
// purely convolutional models.
//
// For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
// block_size = 2:
//
// ```
// x = [[[[1], [2]],
//       [[3], [4]]]]
// ```
//
// This operation will output a tensor of shape `[1, 1, 1, 4]`:
//
// ```
// [[[[1, 2, 3, 4]]]]
// ```
//
// Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,
// the corresponding output will have a single element (i.e. width and height are
// both 1) and will have a depth of 4 channels (1 * block_size * block_size).
// The output element shape is `[1, 1, 4]`.
//
// For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
//
// ```
// x = [[[[1, 2, 3], [4, 5, 6]],
//       [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// This operation, for block_size of 2, will return the following tensor of shape
// `[1, 1, 1, 12]`
//
// ```
// [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
// ```
//
// Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:
//
// ```
// x = [[[[1],   [2],  [5],  [6]],
//       [[3],   [4],  [7],  [8]],
//       [[9],  [10], [13],  [14]],
//       [[11], [12], [15],  [16]]]]
// ```
//
// the operator will return the following tensor of shape `[1 2 2 4]`:
//
// ```
// x = [[[[1, 2, 3, 4],
//        [5, 6, 7, 8]],
//       [[9, 10, 11, 12],
//        [13, 14, 15, 16]]]]
// ```
//
// Arguments:
//
//	block_size: The size of the spatial block.
func SpaceToDepth(scope *Scope, input tf.Output, block_size int64, optional ...SpaceToDepthAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"block_size": block_size}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SpaceToDepth",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
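
// Editor's sketch (not generated code): the first example from the comment
// above; with block_size = 2 this inverts the DepthToSpace example earlier.
func exampleSpaceToDepth() tf.Output {
	s := NewScope()
	x := Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}})
	// Evaluates to [[[[1, 2, 3, 4]]]] with shape [1, 1, 1, 4].
	return SpaceToDepth(s, x, 2)
}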

// Returns x * y element-wise.
//
// *NOTE*: `Multiply` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func Mul(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Mul",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QuantizeAndDequantizeV3Attr is an optional argument to QuantizeAndDequantizeV3.
type QuantizeAndDequantizeV3Attr func(optionalAttr)

// QuantizeAndDequantizeV3SignedInput sets the optional signed_input attribute to value.
// If not specified, defaults to true
func QuantizeAndDequantizeV3SignedInput(value bool) QuantizeAndDequantizeV3Attr {
	return func(m optionalAttr) {
		m["signed_input"] = value
	}
}

// QuantizeAndDequantizeV3RangeGiven sets the optional range_given attribute to value.
// If not specified, defaults to true
func QuantizeAndDequantizeV3RangeGiven(value bool) QuantizeAndDequantizeV3Attr {
	return func(m optionalAttr) {
		m["range_given"] = value
	}
}

// QuantizeAndDequantizeV3NarrowRange sets the optional narrow_range attribute to value.
// If not specified, defaults to false
func QuantizeAndDequantizeV3NarrowRange(value bool) QuantizeAndDequantizeV3Attr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// QuantizeAndDequantizeV3Axis sets the optional axis attribute to value.
// If not specified, defaults to -1
func QuantizeAndDequantizeV3Axis(value int64) QuantizeAndDequantizeV3Attr {
	return func(m optionalAttr) {
		m["axis"] = value
	}
}

// Quantizes then dequantizes a tensor.
//
// This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
// tensor, so its value can change during training.
func QuantizeAndDequantizeV3(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, num_bits tf.Output, optional ...QuantizeAndDequantizeV3Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizeAndDequantizeV3",
		Input: []tf.Input{
			input, input_min, input_max, num_bits,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the `tf.data.Options` attached to `input_dataset`.
//
// Arguments:
//	input_dataset: A variant tensor representing the input dataset.
func GetOptions(scope *Scope, input_dataset tf.Output) (serialized_options tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "GetOptions",
		Input: []tf.Input{
			input_dataset,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Deserialize `SparseTensor` objects.
//
// The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
// the last dimension stores serialized `SparseTensor` objects and the other N
// dimensions (N >= 0) correspond to a batch. The ranks of the original
// `SparseTensor` objects must all match. When the final `SparseTensor` is
// created, its rank is the rank of the incoming `SparseTensor` objects plus N;
// the sparse tensors have been concatenated along new dimensions, one for each
// batch.
//
// The output `SparseTensor` object's shape values for the original dimensions
// are the max across the input `SparseTensor` objects' shape values for the
// corresponding dimensions. The new dimensions match the size of the batch.
//
// The input `SparseTensor` objects' indices are assumed ordered in
// standard lexicographic order.  If this is not the case, after this
// step run `SparseReorder` to restore index ordering.
//
// For example, if the serialized input is a `[2 x 3]` matrix representing two
// original `SparseTensor` objects:
//
//     index = [ 0]
//             [10]
//             [20]
//     values = [1, 2, 3]
//     shape = [50]
//
// and
//
//     index = [ 2]
//             [10]
//     values = [4, 5]
//     shape = [30]
//
// then the final deserialized `SparseTensor` will be:
//
//     index = [0  0]
//             [0 10]
//             [0 20]
//             [1  2]
//             [1 10]
//     values = [1, 2, 3, 4, 5]
//     shape = [2 50]
//
// Arguments:
//	serialized_sparse: The serialized `SparseTensor` objects. The last dimension
// must have 3 columns.
//	dtype: The `dtype` of the serialized `SparseTensor` objects.
func DeserializeSparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	opspec := tf.OpSpec{
		Type: "DeserializeSparse",
		Input: []tf.Input{
			serialized_sparse,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// MapClearAttr is an optional argument to MapClear.
type MapClearAttr func(optionalAttr)

// MapClearCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapClearCapacity(value int64) MapClearAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// MapClearMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapClearMemoryLimit(value int64) MapClearAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// MapClearContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func MapClearContainer(value string) MapClearAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MapClearSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func MapClearSharedName(value string) MapClearAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op removes all elements in the underlying container.
//
// Returns the created operation.
func MapClear(scope *Scope, dtypes []tf.DataType, optional ...MapClearAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MapClear",

		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// CropAndResizeGradBoxesAttr is an optional argument to CropAndResizeGradBoxes.
type CropAndResizeGradBoxesAttr func(optionalAttr)

// CropAndResizeGradBoxesMethod sets the optional method attribute to value.
//
// value: A string specifying the interpolation method. Only 'bilinear' is
// supported for now.
// If not specified, defaults to "bilinear"
func CropAndResizeGradBoxesMethod(value string) CropAndResizeGradBoxesAttr {
	return func(m optionalAttr) {
		m["method"] = value
	}
}

// Computes the gradient of the crop_and_resize op wrt the input boxes tensor.
//
// Arguments:
//	grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
//	image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
// Both `image_height` and `image_width` need to be positive.
//	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
// specifies the coordinates of a box in the `box_ind[i]` image and is specified
// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
// `[0, 1]` interval of normalized image height is mapped to
// `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
// which case the sampled crop is an up-down flipped version of the original
// image. The width dimension is treated similarly. Normalized coordinates
// outside the `[0, 1]` range are allowed, in which case we use
// `extrapolation_value` to extrapolate the input image values.
//	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
//
// Returns A 2-D tensor of shape `[num_boxes, 4]`.
func CropAndResizeGradBoxes(scope *Scope, grads tf.Output, image tf.Output, boxes tf.Output, box_ind tf.Output, optional ...CropAndResizeGradBoxesAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CropAndResizeGradBoxes",
		Input: []tf.Input{
			grads, image, boxes, box_ind,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the cardinality of `input_dataset`.
//
// Returns the cardinality of `input_dataset`.
//
// Arguments:
//	input_dataset: A variant tensor representing the dataset to return cardinality for.
//
// Returns The cardinality of `input_dataset`. Named constants are used to represent
// infinite and unknown cardinality.
func ExperimentalDatasetCardinality(scope *Scope, input_dataset tf.Output) (cardinality tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ExperimentalDatasetCardinality",
		Input: []tf.Input{
			input_dataset,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Delete the stack from its resource container.
//
// Arguments:
//	handle: The handle to a stack.
//
// Returns the created operation.
func StackCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StackCloseV2",
		Input: []tf.Input{
			handle,
		},
	}
	return scope.AddOperation(opspec)
}

// ThreadUnsafeUnigramCandidateSamplerAttr is an optional argument to ThreadUnsafeUnigramCandidateSampler.
type ThreadUnsafeUnigramCandidateSamplerAttr func(optionalAttr)

// ThreadUnsafeUnigramCandidateSamplerSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 is set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func ThreadUnsafeUnigramCandidateSamplerSeed(value int64) ThreadUnsafeUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// ThreadUnsafeUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func ThreadUnsafeUnigramCandidateSamplerSeed2(value int64) ThreadUnsafeUnigramCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Generates labels for candidate sampling with a learned unigram distribution.
//
// See explanations of candidate sampling and the data formats at
// go/candidate-sampling.
//
// For each batch, this op picks a single set of sampled candidate labels.
//
// The advantages of sampling candidates per-batch are simplicity and the
// possibility of efficient dense matrix multiplication. The disadvantage is that
// the sampled candidates must be chosen independently of the context and of the
// true labels.
//
// Arguments:
//	true_classes: A batch_size * num_true matrix, in which each row contains the
// IDs of the num_true target_classes in the corresponding original label.
//	num_true: Number of true labels per context.
//	num_sampled: Number of candidates to randomly sample.
//	unique: If unique is true, we sample with rejection, so that all sampled
// candidates in a batch are unique. This requires some approximation to
// estimate the post-rejection sampling probabilities.
//	range_max: The sampler will sample integers from the interval [0, range_max).
//
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
func ThreadUnsafeUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...ThreadUnsafeUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ThreadUnsafeUnigramCandidateSampler",
		Input: []tf.Input{
			true_classes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Computes the Kth order statistic of a data set. The current
//
// implementation uses a binary search requiring exactly 32 passes over
// the input data. The running time is linear with respect to input
// size. The median-of-medians algorithm is probably faster, but is
// difficult to implement efficiently in XLA. The implementation imposes
// a total ordering on floats. The ordering is consistent with the usual
// partial order.  Positive NaNs are greater than positive
// infinity. Negative NaNs are less than negative infinity. NaNs with
// distinct payloads are treated as distinct. Subnormal numbers are
// preserved (not flushed to zero). Positive infinity is greater than all
// numbers. Negative infinity is less than all numbers. Positive zero is
// greater than negative zero. There are fewer than k values greater than
// the Kth order statistic. There are at least k values greater than or
// equal to the Kth order statistic. The semantics are not the same as
// top_k_unique.
func KthOrderStatistic(scope *Scope, input tf.Output, k int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"k": k}
	opspec := tf.OpSpec{
		Type: "KthOrderStatistic",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AvgPool3DGradAttr is an optional argument to AvgPool3DGrad.
type AvgPool3DGradAttr func(optionalAttr)

// AvgPool3DGradDataFormat sets the optional data_format attribute to value.
//
// value: The data format of the input and output data. With the
// default format "NDHWC", the data is stored in the order of:
//     [batch, in_depth, in_height, in_width, in_channels].
// Alternatively, the format could be "NCDHW", the data storage order is:
//     [batch, in_channels, in_depth, in_height, in_width].
// If not specified, defaults to "NDHWC"
func AvgPool3DGradDataFormat(value string) AvgPool3DGradAttr {
	return func(m optionalAttr) {
		m["data_format"] = value
	}
}

// Computes gradients of average pooling function.
//
// Arguments:
//	orig_input_shape: The original input dimensions.
//	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
//	strides: 1-D tensor of length 5. The stride of the sliding window for each
// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
//	padding: The type of padding algorithm to use.
//
// Returns The backprop for input.
func AvgPool3DGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AvgPool3DGrad",
		Input: []tf.Input{
			orig_input_shape, grad,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Deprecated. Use TensorArrayCloseV3
//
// DEPRECATED at GraphDef version 26: Use TensorArrayCloseV3
//
// Returns the created operation.
func TensorArrayCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorArrayCloseV2",
		Input: []tf.Input{
			handle,
		},
	}
	return scope.AddOperation(opspec)
}

// Calculates the product with a tridiagonal matrix.
//
// Calculates the product of two matrices, where the left matrix is a tridiagonal matrix.
//
// Arguments:
//	superdiag: Tensor of shape `[..., 1, M]`, representing superdiagonals of
// tri-diagonal matrices to the left of multiplication. Last element is ignored.
//	maindiag: Tensor of shape `[..., 1, M]`, representing main diagonals of tri-diagonal
// matrices to the left of multiplication.
//	subdiag: Tensor of shape `[..., 1, M]`, representing subdiagonals of tri-diagonal
// matrices to the left of multiplication. First element is ignored.
//	rhs: Tensor of shape `[..., M, N]`, representing MxN matrices to the right of
// multiplication.
//
// Returns Tensor of shape `[..., M, N]` containing the product.
func TridiagonalMatMul(scope *Scope, superdiag tf.Output, maindiag tf.Output, subdiag tf.Output, rhs tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TridiagonalMatMul",
		Input: []tf.Input{
			superdiag, maindiag, subdiag, rhs,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QuantizedRelu6Attr is an optional argument to QuantizedRelu6.
type QuantizedRelu6Attr func(optionalAttr)

// QuantizedRelu6OutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_QUINT8
func QuantizedRelu6OutType(value tf.DataType) QuantizedRelu6Attr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
//
// Arguments:
//
//	min_features: The float value that the lowest quantized value represents.
//	max_features: The float value that the highest quantized value represents.
//
// Returns:
//	activations: Has the same output shape as "features".
//	min_activations: The float value that the lowest quantized value represents.
//	max_activations: The float value that the highest quantized value represents.
func QuantizedRelu6(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedRelu6Attr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedRelu6",
		Input: []tf.Input{
			features, min_features, max_features,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// ResourceStridedSliceAssignAttr is an optional argument to ResourceStridedSliceAssign.
type ResourceStridedSliceAssignAttr func(optionalAttr)

// ResourceStridedSliceAssignBeginMask sets the optional begin_mask attribute to value.
// If not specified, defaults to 0
func ResourceStridedSliceAssignBeginMask(value int64) ResourceStridedSliceAssignAttr {
	return func(m optionalAttr) {
		m["begin_mask"] = value
	}
}

// ResourceStridedSliceAssignEndMask sets the optional end_mask attribute to value.
// If not specified, defaults to 0
func ResourceStridedSliceAssignEndMask(value int64) ResourceStridedSliceAssignAttr {
	return func(m optionalAttr) {
		m["end_mask"] = value
	}
}

// ResourceStridedSliceAssignEllipsisMask sets the optional ellipsis_mask attribute to value.
// If not specified, defaults to 0
func ResourceStridedSliceAssignEllipsisMask(value int64) ResourceStridedSliceAssignAttr {
	return func(m optionalAttr) {
		m["ellipsis_mask"] = value
	}
}

// ResourceStridedSliceAssignNewAxisMask sets the optional new_axis_mask attribute to value.
// If not specified, defaults to 0
func ResourceStridedSliceAssignNewAxisMask(value int64) ResourceStridedSliceAssignAttr {
	return func(m optionalAttr) {
		m["new_axis_mask"] = value
	}
}

// ResourceStridedSliceAssignShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
// If not specified, defaults to 0
func ResourceStridedSliceAssignShrinkAxisMask(value int64) ResourceStridedSliceAssignAttr {
	return func(m optionalAttr) {
		m["shrink_axis_mask"] = value
	}
}

// Assign `value` to the sliced l-value reference of `ref`.
//
// The values of `value` are assigned to the positions in the variable
// `ref` that are selected by the slice parameters. The slice parameters
// `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
//
// NOTE this op currently does not support broadcasting and so `value`'s
// shape must be exactly the shape produced by the slice of `ref`.
//
// Returns the created operation.
func ResourceStridedSliceAssign(scope *Scope, ref tf.Output, begin tf.Output, end tf.Output, strides tf.Output, value tf.Output, optional ...ResourceStridedSliceAssignAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceStridedSliceAssign",
		Input: []tf.Input{
			ref, begin, end, strides, value,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
43633
43634// SqueezeAttr is an optional argument to Squeeze.
43635type SqueezeAttr func(optionalAttr)
43636
43637// SqueezeAxis sets the optional axis attribute to value.
43638//
43639// value: If specified, only squeezes the dimensions listed. The dimension
43640// index starts at 0. It is an error to squeeze a dimension that is not 1. Must
43641// be in the range `[-rank(input), rank(input))`.
43642// If not specified, defaults to {}
43643//
43644// REQUIRES: len(value) >= 0
43645func SqueezeAxis(value []int64) SqueezeAttr {
43646	return func(m optionalAttr) {
43647		m["squeeze_dims"] = value
43648	}
43649}
43650
43651// Removes dimensions of size 1 from the shape of a tensor.
43652//
43653// Given a tensor `input`, this operation returns a tensor of the same type with
43654// all dimensions of size 1 removed. If you don't want to remove all size 1
43655// dimensions, you can remove specific size 1 dimensions by specifying
43656// `axis`.
43657//
43658// For example:
43659//
43660// ```
43661// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
43662// shape(squeeze(t)) ==> [2, 3]
43663// ```
43664//
43665// Or, to remove specific size 1 dimensions:
43666//
43667// ```
43668// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
43669// shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
43670// ```
43671//
43672// Arguments:
43673//	input: The `input` to squeeze.
43674//
43675// Returns Contains the same data as `input`, but has one or more dimensions of
43676// size 1 removed.
43677func Squeeze(scope *Scope, input tf.Output, optional ...SqueezeAttr) (output tf.Output) {
43678	if scope.Err() != nil {
43679		return
43680	}
43681	attrs := map[string]interface{}{}
43682	for _, a := range optional {
43683		a(attrs)
43684	}
43685	opspec := tf.OpSpec{
43686		Type: "Squeeze",
43687		Input: []tf.Input{
43688			input,
43689		},
43690		Attrs: attrs,
43691	}
43692	op := scope.AddOperation(opspec)
43693	return op.Output(0)
43694}
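
// A minimal Go usage sketch (editorial addition, not generated code; assumes a
// fresh scope from op.NewScope and the Const helper from this package):
//
// ```
// s := op.NewScope()
// t := op.Const(s, [][][]int32{{{1, 2, 3}}, {{4, 5, 6}}}) // shape [2, 1, 3]
// out := op.Squeeze(s, t, op.SqueezeAxis([]int64{1}))     // shape [2, 3]
// ```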
43695
43696// StringLengthAttr is an optional argument to StringLength.
43697type StringLengthAttr func(optionalAttr)
43698
43699// StringLengthUnit sets the optional unit attribute to value.
43700//
43701// value: The unit that is counted to compute string length.  One of: `"BYTE"` (for
43702// the number of bytes in each string) or `"UTF8_CHAR"` (for the number of UTF-8
43703// encoded Unicode code points in each string).  Results are undefined
43704// if `unit=UTF8_CHAR` and the `input` strings do not contain structurally
43705// valid UTF-8.
43706// If not specified, defaults to "BYTE"
43707func StringLengthUnit(value string) StringLengthAttr {
43708	return func(m optionalAttr) {
43709		m["unit"] = value
43710	}
43711}
43712
43713// String lengths of `input`.
43714//
43715// Computes the length of each string given in the input tensor.
43716//
43717// >>> strings = tf.constant(['Hello','TensorFlow', '\U0001F642'])
43718// >>> tf.strings.length(strings).numpy() # default counts bytes
43719// array([ 5, 10, 4], dtype=int32)
43720// >>> tf.strings.length(strings, unit="UTF8_CHAR").numpy()
43721// array([ 5, 10, 1], dtype=int32)
43722//
43723//
43724// Arguments:
43725	//	input: The strings for which to compute the length of each element.
43726//
43727// Returns Integer tensor that has the same shape as `input`. The output contains the
43728// element-wise string lengths of `input`.
43729func StringLength(scope *Scope, input tf.Output, optional ...StringLengthAttr) (output tf.Output) {
43730	if scope.Err() != nil {
43731		return
43732	}
43733	attrs := map[string]interface{}{}
43734	for _, a := range optional {
43735		a(attrs)
43736	}
43737	opspec := tf.OpSpec{
43738		Type: "StringLength",
43739		Input: []tf.Input{
43740			input,
43741		},
43742		Attrs: attrs,
43743	}
43744	op := scope.AddOperation(opspec)
43745	return op.Output(0)
43746}
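
// A usage sketch (editorial, not generated; assumes op.NewScope and op.Const):
//
// ```
// s := op.NewScope()
// strs := op.Const(s, []string{"Hello", "TensorFlow"})
// inBytes := op.StringLength(s.SubScope("bytes"), strs) // [5, 10]
// inChars := op.StringLength(s.SubScope("chars"), strs,
// 	op.StringLengthUnit("UTF8_CHAR")) // also [5, 10] for ASCII-only input
// ```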
43747
43748// TensorSummaryAttr is an optional argument to TensorSummary.
43749type TensorSummaryAttr func(optionalAttr)
43750
43751// TensorSummaryDescription sets the optional description attribute to value.
43752//
43753// value: A json-encoded SummaryDescription proto.
43754// If not specified, defaults to ""
43755func TensorSummaryDescription(value string) TensorSummaryAttr {
43756	return func(m optionalAttr) {
43757		m["description"] = value
43758	}
43759}
43760
43761// TensorSummaryLabels sets the optional labels attribute to value.
43762//
43763// value: An unused list of strings.
43764// If not specified, defaults to {}
43765func TensorSummaryLabels(value []string) TensorSummaryAttr {
43766	return func(m optionalAttr) {
43767		m["labels"] = value
43768	}
43769}
43770
43771// TensorSummaryDisplayName sets the optional display_name attribute to value.
43772//
43773// value: An unused string.
43774// If not specified, defaults to ""
43775func TensorSummaryDisplayName(value string) TensorSummaryAttr {
43776	return func(m optionalAttr) {
43777		m["display_name"] = value
43778	}
43779}
43780
43781// Outputs a `Summary` protocol buffer with a tensor.
43782//
43783// This op is being phased out in favor of TensorSummaryV2, which lets callers pass
43784// a tag as well as a serialized SummaryMetadata proto string that contains
43785// plugin-specific data. We will keep this op to maintain backwards compatibility.
43786//
43787// Arguments:
43788//	tensor: A tensor to serialize.
43789func TensorSummary(scope *Scope, tensor tf.Output, optional ...TensorSummaryAttr) (summary tf.Output) {
43790	if scope.Err() != nil {
43791		return
43792	}
43793	attrs := map[string]interface{}{}
43794	for _, a := range optional {
43795		a(attrs)
43796	}
43797	opspec := tf.OpSpec{
43798		Type: "TensorSummary",
43799		Input: []tf.Input{
43800			tensor,
43801		},
43802		Attrs: attrs,
43803	}
43804	op := scope.AddOperation(opspec)
43805	return op.Output(0)
43806}
43807
43808// StringFormatAttr is an optional argument to StringFormat.
43809type StringFormatAttr func(optionalAttr)
43810
43811// StringFormatTemplate sets the optional template attribute to value.
43812//
43813// value: A string, the template to format tensor summaries into.
43814// If not specified, defaults to "%s"
43815func StringFormatTemplate(value string) StringFormatAttr {
43816	return func(m optionalAttr) {
43817		m["template"] = value
43818	}
43819}
43820
43821// StringFormatPlaceholder sets the optional placeholder attribute to value.
43822//
43823	// value: A string; at each occurrence of this placeholder in the template, the next tensor summary is inserted.
43824// If not specified, defaults to "%s"
43825func StringFormatPlaceholder(value string) StringFormatAttr {
43826	return func(m optionalAttr) {
43827		m["placeholder"] = value
43828	}
43829}
43830
43831// StringFormatSummarize sets the optional summarize attribute to value.
43832//
43833	// value: When formatting the tensor summaries, print the first and last `summarize` entries of each tensor dimension.
43834// If not specified, defaults to 3
43835func StringFormatSummarize(value int64) StringFormatAttr {
43836	return func(m optionalAttr) {
43837		m["summarize"] = value
43838	}
43839}
43840
43841// Formats a string template using a list of tensors.
43842//
43843// Formats a string template using a list of tensors, pretty-printing tensor summaries.
43844//
43845// Arguments:
43846//	inputs: The list of tensors to format into the placeholder string.
43847//
43848	// Returns The resulting string scalar.
43849func StringFormat(scope *Scope, inputs []tf.Output, optional ...StringFormatAttr) (output tf.Output) {
43850	if scope.Err() != nil {
43851		return
43852	}
43853	attrs := map[string]interface{}{}
43854	for _, a := range optional {
43855		a(attrs)
43856	}
43857	opspec := tf.OpSpec{
43858		Type: "StringFormat",
43859		Input: []tf.Input{
43860			tf.OutputList(inputs),
43861		},
43862		Attrs: attrs,
43863	}
43864	op := scope.AddOperation(opspec)
43865	return op.Output(0)
43866}
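
// A usage sketch (editorial, not generated; assumes op.NewScope and op.Const):
//
// ```
// s := op.NewScope()
// t := op.Const(s, []int32{1, 2, 3})
// msg := op.StringFormat(s, []tf.Output{t},
// 	op.StringFormatTemplate("tensor: %s"))
// // msg is a string scalar such as "tensor: [1 2 3]".
// ```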
43867
43868// Creates a dataset that contains the unique elements of `input_dataset`.
43869func ExperimentalUniqueDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
43870	if scope.Err() != nil {
43871		return
43872	}
43873	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
43874	opspec := tf.OpSpec{
43875		Type: "ExperimentalUniqueDataset",
43876		Input: []tf.Input{
43877			input_dataset,
43878		},
43879		Attrs: attrs,
43880	}
43881	op := scope.AddOperation(opspec)
43882	return op.Output(0)
43883}
43884
43885// Decodes a `variant` scalar Tensor into an `ExtensionType` value.
43886//
43887// Returns the Tensor components encoded in a `CompositeTensorVariant`.
43888//
43889// Raises an error if `type_spec_proto` doesn't match the TypeSpec
43890// in `encoded`.
43891//
43892// Arguments:
43893//	encoded: A scalar `variant` Tensor containing an encoded ExtensionType value.
43894//	metadata: String serialization for the TypeSpec.  Must be compatible with the
43895// `TypeSpec` contained in `encoded`.  (Note: the encoding for the TypeSpec
43896// may change in future versions of TensorFlow.)
43897//	Tcomponents: Expected dtypes for components.
43898//
43899// Returns The component tensors for the ExtensionType value in `encoded`.
43900func CompositeTensorVariantToComponents(scope *Scope, encoded tf.Output, metadata string, Tcomponents []tf.DataType) (components []tf.Output) {
43901	if scope.Err() != nil {
43902		return
43903	}
43904	attrs := map[string]interface{}{"metadata": metadata, "Tcomponents": Tcomponents}
43905	opspec := tf.OpSpec{
43906		Type: "CompositeTensorVariantToComponents",
43907		Input: []tf.Input{
43908			encoded,
43909		},
43910		Attrs: attrs,
43911	}
43912	op := scope.AddOperation(opspec)
43913	if scope.Err() != nil {
43914		return
43915	}
43916	var idx int
43917	var err error
43918	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
43919		scope.UpdateErr("CompositeTensorVariantToComponents", err)
43920		return
43921	}
43922	return components
43923}
43924
43925// Constructs a tensor by tiling a given tensor.
43926//
43927// This operation creates a new tensor by replicating `input` `multiples` times.
43928// The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
43929// and the values of `input` are replicated `multiples[i]` times along the 'i'th
43930// dimension. For example, tiling `[a b c d]` by `[2]` produces
43931// `[a b c d a b c d]`.
43932//
43933// >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32)
43934// >>> b = tf.constant([1,2], tf.int32)
43935// >>> tf.tile(a, b)
43936// <tf.Tensor: shape=(2, 6), dtype=int32, numpy=
43937// array([[1, 2, 3, 1, 2, 3],
43938//        [4, 5, 6, 4, 5, 6]], dtype=int32)>
43939// >>> c = tf.constant([2,1], tf.int32)
43940// >>> tf.tile(a, c)
43941// <tf.Tensor: shape=(4, 3), dtype=int32, numpy=
43942// array([[1, 2, 3],
43943//        [4, 5, 6],
43944//        [1, 2, 3],
43945//        [4, 5, 6]], dtype=int32)>
43946// >>> d = tf.constant([2,2], tf.int32)
43947// >>> tf.tile(a, d)
43948// <tf.Tensor: shape=(4, 6), dtype=int32, numpy=
43949// array([[1, 2, 3, 1, 2, 3],
43950//        [4, 5, 6, 4, 5, 6],
43951//        [1, 2, 3, 1, 2, 3],
43952//        [4, 5, 6, 4, 5, 6]], dtype=int32)>
43953//
43954// Arguments:
43955//	input: 1-D or higher.
43956//	multiples: 1-D. Length must be the same as the number of dimensions in `input`
43957func Tile(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output) {
43958	if scope.Err() != nil {
43959		return
43960	}
43961	opspec := tf.OpSpec{
43962		Type: "Tile",
43963		Input: []tf.Input{
43964			input, multiples,
43965		},
43966	}
43967	op := scope.AddOperation(opspec)
43968	return op.Output(0)
43969}
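
// A usage sketch mirroring the first example above (editorial, not generated;
// assumes op.NewScope and op.Const):
//
// ```
// s := op.NewScope()
// a := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})     // shape [2, 3]
// m := op.Const(s.SubScope("multiples"), []int32{1, 2})
// tiled := op.Tile(s, a, m)                             // shape [2, 6]
// ```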
43970
43971// Inserts a dimension of 1 into a tensor's shape.
43972//
43973// Given a tensor `input`, this operation inserts a dimension of 1 at the
43974// dimension index `axis` of `input`'s shape. The dimension index `axis` starts at
43975// zero; if you specify a negative number for `axis` it is counted backward from
43976// the end.
43977//
43978// This operation is useful if you want to add a batch dimension to a single
43979// element. For example, if you have a single image of shape `[height, width,
43980// channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
43981// which will make the shape `[1, height, width, channels]`.
43982//
43983// Other examples:
43984//
43985// ```
43986// # 't' is a tensor of shape [2]
43987// shape(expand_dims(t, 0)) ==> [1, 2]
43988// shape(expand_dims(t, 1)) ==> [2, 1]
43989// shape(expand_dims(t, -1)) ==> [2, 1]
43990//
43991// # 't2' is a tensor of shape [2, 3, 5]
43992// shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
43993// shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
43994// shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
43995// ```
43996//
43997// This operation requires that:
43998//
43999	// `-1 - input.dims() <= axis <= input.dims()`
44000//
44001// This operation is related to `squeeze()`, which removes dimensions of
44002// size 1.
44003//
44004// Arguments:
44005//
44006//	axis: 0-D (scalar). Specifies the dimension index at which to
44007// expand the shape of `input`. Must be in the range
44008// `[-rank(input) - 1, rank(input)]`.
44009//
44010// Returns Contains the same data as `input`, but its shape has an additional
44011// dimension of size 1 added.
44012func ExpandDims(scope *Scope, input tf.Output, axis tf.Output) (output tf.Output) {
44013	if scope.Err() != nil {
44014		return
44015	}
44016	opspec := tf.OpSpec{
44017		Type: "ExpandDims",
44018		Input: []tf.Input{
44019			input, axis,
44020		},
44021	}
44022	op := scope.AddOperation(opspec)
44023	return op.Output(0)
44024}
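
// A usage sketch (editorial, not generated; assumes op.NewScope and op.Const):
//
// ```
// s := op.NewScope()
// t := op.Const(s, []int32{1, 2})                // shape [2]
// axis := op.Const(s.SubScope("axis"), int32(0))
// batched := op.ExpandDims(s, t, axis)           // shape [1, 2]
// ```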
44025
44026// PlaceholderAttr is an optional argument to Placeholder.
44027type PlaceholderAttr func(optionalAttr)
44028
44029// PlaceholderShape sets the optional shape attribute to value.
44030//
44031// value: (Optional) The shape of the tensor. If the shape has 0 dimensions, the
44032// shape is unconstrained.
44033// If not specified, defaults to {unknown_rank:true}
44034func PlaceholderShape(value tf.Shape) PlaceholderAttr {
44035	return func(m optionalAttr) {
44036		m["shape"] = value
44037	}
44038}
44039
44040// A placeholder op for a value that will be fed into the computation.
44041//
44042// N.B. This operation will fail with an error if it is executed. It is
44043// intended as a way to represent a value that will always be fed, and to
44044// provide attrs that enable the fed value to be checked at runtime.
44045//
44046// Arguments:
44047//	dtype: The type of elements in the tensor.
44048//
44049// Returns A placeholder tensor that must be replaced using the feed mechanism.
44050func Placeholder(scope *Scope, dtype tf.DataType, optional ...PlaceholderAttr) (output tf.Output) {
44051	if scope.Err() != nil {
44052		return
44053	}
44054	attrs := map[string]interface{}{"dtype": dtype}
44055	for _, a := range optional {
44056		a(attrs)
44057	}
44058	opspec := tf.OpSpec{
44059		Type: "Placeholder",
44060
44061		Attrs: attrs,
44062	}
44063	op := scope.AddOperation(opspec)
44064	return op.Output(0)
44065}
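
// A feed-and-run sketch (editorial, not generated; error handling elided;
// assumes op.NewScope, op.Const, op.Mul, and the tf Graph/Session API):
//
// ```
// s := op.NewScope()
// ph := op.Placeholder(s, tf.Int32, op.PlaceholderShape(tf.MakeShape(2)))
// doubled := op.Mul(s, ph, op.Const(s.SubScope("two"), []int32{2, 2}))
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// in, _ := tf.NewTensor([]int32{3, 4})
// out, _ := sess.Run(map[tf.Output]*tf.Tensor{ph: in}, []tf.Output{doubled}, nil)
// // out[0].Value() is []int32{6, 8}
// ```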
44066
44067// MatrixSolveLsAttr is an optional argument to MatrixSolveLs.
44068type MatrixSolveLsAttr func(optionalAttr)
44069
44070// MatrixSolveLsFast sets the optional fast attribute to value.
44071// If not specified, defaults to true
44072func MatrixSolveLsFast(value bool) MatrixSolveLsAttr {
44073	return func(m optionalAttr) {
44074		m["fast"] = value
44075	}
44076}
44077
44078// Solves one or more linear least-squares problems.
44079//
44080// `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
44081	// form real or complex matrices of size `[M, N]`. `rhs` is a tensor of the same
44082	// type as `matrix` and shape `[..., M, K]`.
44083	// The output is a tensor of shape `[..., N, K]` where each output matrix solves
44084// each of the equations
44085// `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`
44086// in the least squares sense.
44087//
44088// We use the following notation for (complex) matrix and right-hand sides
44089// in the batch:
44090//
44091// `matrix`=\\(A \in \mathbb{C}^{m \times n}\\),
44092// `rhs`=\\(B  \in \mathbb{C}^{m \times k}\\),
44093// `output`=\\(X  \in \mathbb{C}^{n \times k}\\),
44094// `l2_regularizer`=\\(\lambda \in \mathbb{R}\\).
44095//
44096// If `fast` is `True`, then the solution is computed by solving the normal
44097// equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
44098// \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares
44099	// problem \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||A Z - B||_F^2 + \lambda ||Z||_F^2\\).
44100// If \\(m \lt n\\) then `output` is computed as
44101// \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
44102// minimum-norm solution to the under-determined linear system, i.e.
44103// \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\),
44104// subject to \\(A Z = B\\). Notice that the fast path is only numerically stable
44105// when \\(A\\) is numerically full rank and has a condition number
44106// \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is
44107// sufficiently large.
44108//
44109// If `fast` is `False` an algorithm based on the numerically robust complete
44110// orthogonal decomposition is used. This computes the minimum-norm
44111// least-squares solution, even when \\(A\\) is rank deficient. This path is
44112// typically 6-7 times slower than the fast path. If `fast` is `False` then
44113// `l2_regularizer` is ignored.
44114//
44115// Arguments:
44116//	matrix: Shape is `[..., M, N]`.
44117//	rhs: Shape is `[..., M, K]`.
44118//	l2_regularizer: Scalar tensor.
44119//
44120// @compatibility(numpy)
44121// Equivalent to np.linalg.lstsq
44122// @end_compatibility
44123//
44124// Returns Shape is `[..., N, K]`.
44125func MatrixSolveLs(scope *Scope, matrix tf.Output, rhs tf.Output, l2_regularizer tf.Output, optional ...MatrixSolveLsAttr) (output tf.Output) {
44126	if scope.Err() != nil {
44127		return
44128	}
44129	attrs := map[string]interface{}{}
44130	for _, a := range optional {
44131		a(attrs)
44132	}
44133	opspec := tf.OpSpec{
44134		Type: "MatrixSolveLs",
44135		Input: []tf.Input{
44136			matrix, rhs, l2_regularizer,
44137		},
44138		Attrs: attrs,
44139	}
44140	op := scope.AddOperation(opspec)
44141	return op.Output(0)
44142}
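
// A usage sketch (editorial, not generated; assumes op.NewScope and op.Const;
// note that `l2_regularizer` must be a float64 scalar):
//
// ```
// s := op.NewScope()
// a := op.Const(s, [][]float64{{1, 0}, {0, 1}, {1, 1}})        // [3, 2]
// b := op.Const(s.SubScope("rhs"), [][]float64{{1}, {2}, {3}}) // [3, 1]
// l2 := op.Const(s.SubScope("l2"), float64(0))
// x := op.MatrixSolveLs(s, a, b, l2, op.MatrixSolveLsFast(true)) // [2, 1]
// ```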
44143
44144// TensorArrayV3Attr is an optional argument to TensorArrayV3.
44145type TensorArrayV3Attr func(optionalAttr)
44146
44147// TensorArrayV3ElementShape sets the optional element_shape attribute to value.
44148//
44149// value: The expected shape of an element, if known. Used to
44150// validate the shapes of TensorArray elements. If this shape is not
44151// fully specified, gathering zero-size TensorArrays is an error.
44152// If not specified, defaults to {unknown_rank:true}
44153func TensorArrayV3ElementShape(value tf.Shape) TensorArrayV3Attr {
44154	return func(m optionalAttr) {
44155		m["element_shape"] = value
44156	}
44157}
44158
44159// TensorArrayV3DynamicSize sets the optional dynamic_size attribute to value.
44160//
44161// value: A boolean that determines whether writes to the TensorArray
44162// are allowed to grow the size.  By default, this is not allowed.
44163// If not specified, defaults to false
44164func TensorArrayV3DynamicSize(value bool) TensorArrayV3Attr {
44165	return func(m optionalAttr) {
44166		m["dynamic_size"] = value
44167	}
44168}
44169
44170// TensorArrayV3ClearAfterRead sets the optional clear_after_read attribute to value.
44171//
44172// value: If true (default), Tensors in the TensorArray are cleared
44173// after being read.  This disables multiple read semantics but allows early
44174// release of memory.
44175// If not specified, defaults to true
44176func TensorArrayV3ClearAfterRead(value bool) TensorArrayV3Attr {
44177	return func(m optionalAttr) {
44178		m["clear_after_read"] = value
44179	}
44180}
44181
44182// TensorArrayV3IdenticalElementShapes sets the optional identical_element_shapes attribute to value.
44183//
44184// value: If true (default is false), then all
44185// elements in the TensorArray will be expected to have identical shapes.
44186// This allows certain behaviors, like dynamically checking for
44187// consistent shapes on write, and being able to fill in properly
44188// shaped zero tensors on stack -- even if the element_shape attribute
44189// is not fully defined.
44190// If not specified, defaults to false
44191func TensorArrayV3IdenticalElementShapes(value bool) TensorArrayV3Attr {
44192	return func(m optionalAttr) {
44193		m["identical_element_shapes"] = value
44194	}
44195}
44196
44197// TensorArrayV3TensorArrayName sets the optional tensor_array_name attribute to value.
44198//
44199// value: Overrides the name used for the temporary tensor_array
44200// resource. Default value is the name of the 'TensorArray' op (which
44201// is guaranteed unique).
44202// If not specified, defaults to ""
44203func TensorArrayV3TensorArrayName(value string) TensorArrayV3Attr {
44204	return func(m optionalAttr) {
44205		m["tensor_array_name"] = value
44206	}
44207}
44208
44209// An array of Tensors of given size.
44210//
44211// Write data via Write and read via Read or Pack.
44212//
44213// Arguments:
44214//	size: The size of the array.
44215//	dtype: The type of the elements on the tensor_array.
44216//
44217// Returns:
44218//	handle: The handle to the TensorArray.
44219//	flow: A scalar used to control gradient flow.
44220func TensorArrayV3(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV3Attr) (handle tf.Output, flow tf.Output) {
44221	if scope.Err() != nil {
44222		return
44223	}
44224	attrs := map[string]interface{}{"dtype": dtype}
44225	for _, a := range optional {
44226		a(attrs)
44227	}
44228	opspec := tf.OpSpec{
44229		Type: "TensorArrayV3",
44230		Input: []tf.Input{
44231			size,
44232		},
44233		Attrs: attrs,
44234	}
44235	op := scope.AddOperation(opspec)
44236	return op.Output(0), op.Output(1)
44237}
44238
44239// Pads a tensor with mirrored values.
44240//
44241// This operation pads a `input` with mirrored values according to the `paddings`
44242// you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
44243// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
44244// how many values to add before the contents of `input` in that dimension, and
44245// `paddings[D, 1]` indicates how many values to add after the contents of `input`
44246	// in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
44247	// than `input.dim_size(D)` in `SYMMETRIC` mode (where the border is copied), and
44248	// no greater than `input.dim_size(D) - 1` in `REFLECT` mode.
44249//
44250// The padded size of each dimension D of the output is:
44251//
44252// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
44253//
44254// For example:
44255//
44256// ```
44257// # 't' is [[1, 2, 3], [4, 5, 6]].
44258	// # 'paddings' is [[1, 1], [2, 2]].
44259// # 'mode' is SYMMETRIC.
44260// # rank of 't' is 2.
44261// pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
44262//                       [2, 1, 1, 2, 3, 3, 2]
44263//                       [5, 4, 4, 5, 6, 6, 5]
44264//                       [5, 4, 4, 5, 6, 6, 5]]
44265// ```
44266//
44267// Arguments:
44268//	input: The input tensor to be padded.
44269//	paddings: A two-column matrix specifying the padding sizes. The number of
44270// rows must be the same as the rank of `input`.
44271//	mode: Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
44272// do not include the borders, while in symmetric mode the padded regions
44273// do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`
44274// is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and
44275// it is `[1, 2, 3, 3, 2]` in symmetric mode.
44276//
44277// Returns The padded tensor.
44278func MirrorPad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output) {
44279	if scope.Err() != nil {
44280		return
44281	}
44282	attrs := map[string]interface{}{"mode": mode}
44283	opspec := tf.OpSpec{
44284		Type: "MirrorPad",
44285		Input: []tf.Input{
44286			input, paddings,
44287		},
44288		Attrs: attrs,
44289	}
44290	op := scope.AddOperation(opspec)
44291	return op.Output(0)
44292}
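
// A usage sketch of the reflect-mode example above (editorial, not generated;
// assumes op.NewScope and op.Const):
//
// ```
// s := op.NewScope()
// t := op.Const(s, []int32{1, 2, 3})
// p := op.Const(s.SubScope("paddings"), [][]int32{{0, 2}})
// reflected := op.MirrorPad(s, t, p, "REFLECT") // [1, 2, 3, 2, 1]
// ```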
44293
44294// Return the shape of s0 op s1 with broadcast.
44295//
44296// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
44297// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
44298func BroadcastArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output) {
44299	if scope.Err() != nil {
44300		return
44301	}
44302	opspec := tf.OpSpec{
44303		Type: "BroadcastArgs",
44304		Input: []tf.Input{
44305			s0, s1,
44306		},
44307	}
44308	op := scope.AddOperation(opspec)
44309	return op.Output(0)
44310}
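
// A usage sketch (editorial, not generated; assumes op.NewScope and op.Const):
//
// ```
// s := op.NewScope()
// s0 := op.Const(s, []int32{8, 1, 6, 1})
// s1 := op.Const(s.SubScope("s1"), []int32{7, 1, 5})
// r0 := op.BroadcastArgs(s, s0, s1) // [8, 7, 6, 5]
// ```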
44311
44312// Returns the min of x and y (i.e. x < y ? x : y) element-wise.
44313//
44314// *NOTE*: `Minimum` supports broadcasting. More about broadcasting
44315// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
44316func Minimum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
44317	if scope.Err() != nil {
44318		return
44319	}
44320	opspec := tf.OpSpec{
44321		Type: "Minimum",
44322		Input: []tf.Input{
44323			x, y,
44324		},
44325	}
44326	op := scope.AddOperation(opspec)
44327	return op.Output(0)
44328}
44329
44330// A substitute for `InterleaveDataset` on a fixed list of `N` datasets.
44331//
44332// Arguments:
44333//	selector_input_dataset: A dataset of scalar `DT_INT64` elements that determines which of the
44334// `N` data inputs should produce the next output element.
44335//	data_input_datasets: `N` datasets with the same type that will be interleaved according to
44336// the values of `selector_input_dataset`.
44337//
44338//
44339func ExperimentalDirectedInterleaveDataset(scope *Scope, selector_input_dataset tf.Output, data_input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
44340	if scope.Err() != nil {
44341		return
44342	}
44343	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
44344	opspec := tf.OpSpec{
44345		Type: "ExperimentalDirectedInterleaveDataset",
44346		Input: []tf.Input{
44347			selector_input_dataset, tf.OutputList(data_input_datasets),
44348		},
44349		Attrs: attrs,
44350	}
44351	op := scope.AddOperation(opspec)
44352	return op.Output(0)
44353}
44354
44355// EmptyAttr is an optional argument to Empty.
44356type EmptyAttr func(optionalAttr)
44357
44358// EmptyInit sets the optional init attribute to value.
44359//
44360	// value: If True, initialize the returned tensor with the default value of dtype.  Otherwise, the implementation is free not to initialize the tensor's content.
44361// If not specified, defaults to false
44362func EmptyInit(value bool) EmptyAttr {
44363	return func(m optionalAttr) {
44364		m["init"] = value
44365	}
44366}
44367
44368// Creates a tensor with the given shape.
44369//
44370// This operation creates a tensor of `shape` and `dtype`.
44371//
44372// Arguments:
44373//	shape: 1-D. Represents the shape of the output tensor.
44374//
44375//
44376// Returns A `Tensor` of type `T`.
44377func Empty(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...EmptyAttr) (output tf.Output) {
44378	if scope.Err() != nil {
44379		return
44380	}
44381	attrs := map[string]interface{}{"dtype": dtype}
44382	for _, a := range optional {
44383		a(attrs)
44384	}
44385	opspec := tf.OpSpec{
44386		Type: "Empty",
44387		Input: []tf.Input{
44388			shape,
44389		},
44390		Attrs: attrs,
44391	}
44392	op := scope.AddOperation(opspec)
44393	return op.Output(0)
44394}
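
// A usage sketch (editorial, not generated; assumes op.NewScope and op.Const):
//
// ```
// s := op.NewScope()
// shape := op.Const(s, []int32{2, 3})
// buf := op.Empty(s, shape, tf.Float, op.EmptyInit(true)) // zero-filled [2, 3]
// ```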
44395
44396// Splits a tensor into `num_split` tensors along one dimension.
44397//
44398// Arguments:
44399//	axis: 0-D.  The dimension along which to split.  Must be in the range
44400// `[-rank(value), rank(value))`.
44401//	value: The tensor to split.
44402//	num_split: The number of ways to split.  Must evenly divide
44403	// `value.shape[axis]`.
44404//
44405// Returns They are identically shaped tensors, whose shape matches that of `value`
44406// except along `axis`, where their sizes are
44407	// `value.shape[axis] / num_split`.
44408func Split(scope *Scope, axis tf.Output, value tf.Output, num_split int64) (output []tf.Output) {
44409	if scope.Err() != nil {
44410		return
44411	}
44412	attrs := map[string]interface{}{"num_split": num_split}
44413	opspec := tf.OpSpec{
44414		Type: "Split",
44415		Input: []tf.Input{
44416			axis, value,
44417		},
44418		Attrs: attrs,
44419	}
44420	op := scope.AddOperation(opspec)
44421	if scope.Err() != nil {
44422		return
44423	}
44424	var idx int
44425	var err error
44426	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
44427		scope.UpdateErr("Split", err)
44428		return
44429	}
44430	return output
44431}
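
// A usage sketch (editorial, not generated; assumes op.NewScope and op.Const):
//
// ```
// s := op.NewScope()
// axis := op.Const(s, int32(1))
// value := op.Const(s.SubScope("value"), [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}})
// parts := op.Split(s, axis, value, 2) // two tensors, each of shape [2, 2]
// ```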
44432
44433// TensorStridedSliceUpdateAttr is an optional argument to TensorStridedSliceUpdate.
44434type TensorStridedSliceUpdateAttr func(optionalAttr)
44435
44436// TensorStridedSliceUpdateBeginMask sets the optional begin_mask attribute to value.
44437// If not specified, defaults to 0
44438func TensorStridedSliceUpdateBeginMask(value int64) TensorStridedSliceUpdateAttr {
44439	return func(m optionalAttr) {
44440		m["begin_mask"] = value
44441	}
44442}
44443
44444// TensorStridedSliceUpdateEndMask sets the optional end_mask attribute to value.
44445// If not specified, defaults to 0
44446func TensorStridedSliceUpdateEndMask(value int64) TensorStridedSliceUpdateAttr {
44447	return func(m optionalAttr) {
44448		m["end_mask"] = value
44449	}
44450}
44451
44452// TensorStridedSliceUpdateEllipsisMask sets the optional ellipsis_mask attribute to value.
44453// If not specified, defaults to 0
44454func TensorStridedSliceUpdateEllipsisMask(value int64) TensorStridedSliceUpdateAttr {
44455	return func(m optionalAttr) {
44456		m["ellipsis_mask"] = value
44457	}
44458}
44459
44460// TensorStridedSliceUpdateNewAxisMask sets the optional new_axis_mask attribute to value.
44461// If not specified, defaults to 0
44462func TensorStridedSliceUpdateNewAxisMask(value int64) TensorStridedSliceUpdateAttr {
44463	return func(m optionalAttr) {
44464		m["new_axis_mask"] = value
44465	}
44466}
44467
44468// TensorStridedSliceUpdateShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
44469// If not specified, defaults to 0
44470func TensorStridedSliceUpdateShrinkAxisMask(value int64) TensorStridedSliceUpdateAttr {
44471	return func(m optionalAttr) {
44472		m["shrink_axis_mask"] = value
44473	}
44474}
44475
44476// Assign `value` to the sliced l-value reference of `input`.
44477//
44478// The values of `value` are assigned to the positions in the tensor `input` that
44479	// are selected by the slice parameters. The slice parameters `begin`, `end`,
44480	// `strides`, etc. work exactly as in `StridedSlice`.
44481//
44482// NOTE this op currently does not support broadcasting and so `value`'s shape
44483// must be exactly the shape produced by the slice of `input`.
44484func TensorStridedSliceUpdate(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, strides tf.Output, value tf.Output, optional ...TensorStridedSliceUpdateAttr) (output tf.Output) {
44485	if scope.Err() != nil {
44486		return
44487	}
44488	attrs := map[string]interface{}{}
44489	for _, a := range optional {
44490		a(attrs)
44491	}
44492	opspec := tf.OpSpec{
44493		Type: "TensorStridedSliceUpdate",
44494		Input: []tf.Input{
44495			input, begin, end, strides, value,
44496		},
44497		Attrs: attrs,
44498	}
44499	op := scope.AddOperation(opspec)
44500	return op.Output(0)
44501}
44502
44503// Asserts that compilation succeeded.
44504//
44505	// This op produces no output and closes the device on failure to ensure all
44506// pending device interactions fail.
44507//
44508// 'compilation_status' is a serialized CompilationResultProto.
44509//
44510// Returns the created operation.
44511func TPUCompileSucceededAssert(scope *Scope, compilation_status tf.Output) (o *tf.Operation) {
44512	if scope.Err() != nil {
44513		return
44514	}
44515	opspec := tf.OpSpec{
44516		Type: "TPUCompileSucceededAssert",
44517		Input: []tf.Input{
44518			compilation_status,
44519		},
44520	}
44521	return scope.AddOperation(opspec)
44522}
44523
44524// Scrambles seed into key and counter, using the best algorithm based on device.
44525//
44526	// This op scrambles a shape-[2] seed into a key and a counter, both needed by counter-based RNG algorithms. The scrambling uses the best algorithm for the device. It is opaque but approximately satisfies the property that different seeds result in different key/counter pairs (which will in turn result in different random numbers).
44527//
44528// Arguments:
44529//	seed: 2 seeds (shape [2]).
44530//
44531// Returns:
44532//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
44533//	counter: Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).
44534func StatelessRandomGetKeyCounter(scope *Scope, seed tf.Output) (key tf.Output, counter tf.Output) {
44535	if scope.Err() != nil {
44536		return
44537	}
44538	opspec := tf.OpSpec{
44539		Type: "StatelessRandomGetKeyCounter",
44540		Input: []tf.Input{
44541			seed,
44542		},
44543	}
44544	op := scope.AddOperation(opspec)
44545	return op.Output(0), op.Output(1)
44546}
44547
44548// Wraps the XLA Sort operator, documented at
44549//
44550//  https://www.tensorflow.org/performance/xla/operation_semantics#sort
44552//
44553// Sorts a tensor. Currently only sorts in ascending order are supported.
44554//
44555// Arguments:
44556//	keys: A `Tensor` of type K.
44557//	values: A `Tensor` of type V.
44558//
44559// Returns:
44560//	sorted_keys: A `Tensor` of type K.
44561//	sorted_values: A `Tensor` of type V.
44562func XlaKeyValueSort(scope *Scope, keys tf.Output, values tf.Output) (sorted_keys tf.Output, sorted_values tf.Output) {
44563	if scope.Err() != nil {
44564		return
44565	}
44566	opspec := tf.OpSpec{
44567		Type: "XlaKeyValueSort",
44568		Input: []tf.Input{
44569			keys, values,
44570		},
44571	}
44572	op := scope.AddOperation(opspec)
44573	return op.Output(0), op.Output(1)
44574}
44575
44576// ResourceSparseApplyAdagradAttr is an optional argument to ResourceSparseApplyAdagrad.
44577type ResourceSparseApplyAdagradAttr func(optionalAttr)
44578
44579// ResourceSparseApplyAdagradUseLocking sets the optional use_locking attribute to value.
44580//
44581// value: If `True`, updating of the var and accum tensors will be protected
44582// by a lock; otherwise the behavior is undefined, but may exhibit less
44583// contention.
44584// If not specified, defaults to false
44585func ResourceSparseApplyAdagradUseLocking(value bool) ResourceSparseApplyAdagradAttr {
44586	return func(m optionalAttr) {
44587		m["use_locking"] = value
44588	}
44589}
44590
44591// ResourceSparseApplyAdagradUpdateSlots sets the optional update_slots attribute to value.
44592// If not specified, defaults to true
44593func ResourceSparseApplyAdagradUpdateSlots(value bool) ResourceSparseApplyAdagradAttr {
44594	return func(m optionalAttr) {
44595		m["update_slots"] = value
44596	}
44597}
44598
44599// Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
44600//
44601	// That is, for rows we have grad for, we update var and accum as follows:
44602// accum += grad * grad
44603// var -= lr * grad * (1 / sqrt(accum))
44604//
44605// Arguments:
44606//	var_: Should be from a Variable().
44607//	accum: Should be from a Variable().
44608//	lr: Learning rate. Must be a scalar.
44609//	grad: The gradient.
44610//	indices: A vector of indices into the first dimension of var and accum.
44611//
44612// Returns the created operation.
44613func ResourceSparseApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdagradAttr) (o *tf.Operation) {
44614	if scope.Err() != nil {
44615		return
44616	}
44617	attrs := map[string]interface{}{}
44618	for _, a := range optional {
44619		a(attrs)
44620	}
44621	opspec := tf.OpSpec{
44622		Type: "ResourceSparseApplyAdagrad",
44623		Input: []tf.Input{
44624			var_, accum, lr, grad, indices,
44625		},
44626		Attrs: attrs,
44627	}
44628	return scope.AddOperation(opspec)
44629}
44630
44631// SpaceToBatch for 4-D tensors of type T.
44632//
44633// This is a legacy version of the more general SpaceToBatchND.
44634//
44635// Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
44636// More specifically, this op outputs a copy of the input tensor where values from
44637// the `height` and `width` dimensions are moved to the `batch` dimension. After
44638// the zero-padding, both `height` and `width` of the input must be divisible by the
44639// block size.
44640//
44641// The attr `block_size` must be greater than one. It indicates the block size.
44642//
44643	//   * Non-overlapping blocks of size `block_size x block_size` in the height and
44644//     width dimensions are rearranged into the batch dimension at each location.
44645//   * The batch of the output tensor is `batch * block_size * block_size`.
44646//   * Both height_pad and width_pad must be divisible by block_size.
44647//
44648// The shape of the output will be:
44649//
44650//     [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
44651//      depth]
44652//
44653// Some examples:
44654//
44655// (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
44656//
44657// ```
44658// x = [[[[1], [2]], [[3], [4]]]]
44659// ```
44660//
44661// The output tensor has shape `[4, 1, 1, 1]` and value:
44662//
44663// ```
44664// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
44665// ```
44666//
44667// (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
44668//
44669// ```
44670// x = [[[[1, 2, 3], [4, 5, 6]],
44671//       [[7, 8, 9], [10, 11, 12]]]]
44672// ```
44673//
44674// The output tensor has shape `[4, 1, 1, 3]` and value:
44675//
44676// ```
44677// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
44678// ```
44679//
44680// (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
44681//
44682// ```
44683// x = [[[[1],   [2],  [3],  [4]],
44684//       [[5],   [6],  [7],  [8]],
44685//       [[9],  [10], [11],  [12]],
44686//       [[13], [14], [15],  [16]]]]
44687// ```
44688//
44689// The output tensor has shape `[4, 2, 2, 1]` and value:
44690//
44691// ```
44692// x = [[[[1], [3]], [[9], [11]]],
44693//      [[[2], [4]], [[10], [12]]],
44694//      [[[5], [7]], [[13], [15]]],
44695//      [[[6], [8]], [[14], [16]]]]
44696// ```
44697//
44698// (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
44699//
44700// ```
44701// x = [[[[1],   [2],  [3],  [4]],
44702//       [[5],   [6],  [7],  [8]]],
44703//      [[[9],  [10], [11],  [12]],
44704//       [[13], [14], [15],  [16]]]]
44705// ```
44706//
44707// The output tensor has shape `[8, 1, 2, 1]` and value:
44708//
44709// ```
44710// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
44711//      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
44712// ```
44713//
44714// Among others, this operation is useful for reducing atrous convolution into
44715// regular convolution.
44716//
44717// Arguments:
44718//	input: 4-D with shape `[batch, height, width, depth]`.
44719//	paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
44720//   the padding of the input with zeros across the spatial dimensions as follows:
44721//
44722//       paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
44723//
44724//   The effective spatial dimensions of the zero-padded input tensor will be:
44725//
44726//       height_pad = pad_top + height + pad_bottom
44727//       width_pad = pad_left + width + pad_right
44728//
44729func SpaceToBatch(scope *Scope, input tf.Output, paddings tf.Output, block_size int64) (output tf.Output) {
44730	if scope.Err() != nil {
44731		return
44732	}
44733	attrs := map[string]interface{}{"block_size": block_size}
44734	opspec := tf.OpSpec{
44735		Type: "SpaceToBatch",
44736		Input: []tf.Input{
44737			input, paddings,
44738		},
44739		Attrs: attrs,
44740	}
44741	op := scope.AddOperation(opspec)
44742	return op.Output(0)
44743}
44744
44745// Wraps the XLA Sort operator, documented at
44746//
44747//  https://www.tensorflow.org/performance/xla/operation_semantics#sort
44749//
44750// Sorts a tensor. Currently only sorts in ascending order are supported.
44751//
44752// Arguments:
44753//	input: A `Tensor` of type T.
44754//
44755// Returns A `Tensor` of type T.
44756func XlaSort(scope *Scope, input tf.Output) (output tf.Output) {
44757	if scope.Err() != nil {
44758		return
44759	}
44760	opspec := tf.OpSpec{
44761		Type: "XlaSort",
44762		Input: []tf.Input{
44763			input,
44764		},
44765	}
44766	op := scope.AddOperation(opspec)
44767	return op.Output(0)
44768}
44769
44770// ResourceApplyFtrlAttr is an optional argument to ResourceApplyFtrl.
44771type ResourceApplyFtrlAttr func(optionalAttr)
44772
44773// ResourceApplyFtrlUseLocking sets the optional use_locking attribute to value.
44774//
44775// value: If `True`, updating of the var and accum tensors will be protected
44776// by a lock; otherwise the behavior is undefined, but may exhibit less
44777// contention.
44778// If not specified, defaults to false
44779func ResourceApplyFtrlUseLocking(value bool) ResourceApplyFtrlAttr {
44780	return func(m optionalAttr) {
44781		m["use_locking"] = value
44782	}
44783}
44784
44785// ResourceApplyFtrlMultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value.
44786// If not specified, defaults to false
44787func ResourceApplyFtrlMultiplyLinearByLr(value bool) ResourceApplyFtrlAttr {
44788	return func(m optionalAttr) {
44789		m["multiply_linear_by_lr"] = value
44790	}
44791}
44792
44793// Update '*var' according to the Ftrl-proximal scheme.
44794//
44795// accum_new = accum + grad * grad
44796// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
44797// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
44798// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
44799// accum = accum_new
44800//
44801// Arguments:
44802//	var_: Should be from a Variable().
44803//	accum: Should be from a Variable().
44804//	linear: Should be from a Variable().
44805//	grad: The gradient.
44806//	lr: Scaling factor. Must be a scalar.
44807//	l1: L1 regularization. Must be a scalar.
44808//	l2: L2 regularization. Must be a scalar.
44809//	lr_power: Scaling factor. Must be a scalar.
44810//
44811// Returns the created operation.
44812func ResourceApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlAttr) (o *tf.Operation) {
44813	if scope.Err() != nil {
44814		return
44815	}
44816	attrs := map[string]interface{}{}
44817	for _, a := range optional {
44818		a(attrs)
44819	}
44820	opspec := tf.OpSpec{
44821		Type: "ResourceApplyFtrl",
44822		Input: []tf.Input{
44823			var_, accum, linear, grad, lr, l1, l2, lr_power,
44824		},
44825		Attrs: attrs,
44826	}
44827	return scope.AddOperation(opspec)
44828}
44829
44830// StridedSliceGradAttr is an optional argument to StridedSliceGrad.
44831type StridedSliceGradAttr func(optionalAttr)
44832
44833// StridedSliceGradBeginMask sets the optional begin_mask attribute to value.
44834// If not specified, defaults to 0
44835func StridedSliceGradBeginMask(value int64) StridedSliceGradAttr {
44836	return func(m optionalAttr) {
44837		m["begin_mask"] = value
44838	}
44839}
44840
44841// StridedSliceGradEndMask sets the optional end_mask attribute to value.
44842// If not specified, defaults to 0
44843func StridedSliceGradEndMask(value int64) StridedSliceGradAttr {
44844	return func(m optionalAttr) {
44845		m["end_mask"] = value
44846	}
44847}
44848
44849// StridedSliceGradEllipsisMask sets the optional ellipsis_mask attribute to value.
44850// If not specified, defaults to 0
44851func StridedSliceGradEllipsisMask(value int64) StridedSliceGradAttr {
44852	return func(m optionalAttr) {
44853		m["ellipsis_mask"] = value
44854	}
44855}
44856
44857// StridedSliceGradNewAxisMask sets the optional new_axis_mask attribute to value.
44858// If not specified, defaults to 0
44859func StridedSliceGradNewAxisMask(value int64) StridedSliceGradAttr {
44860	return func(m optionalAttr) {
44861		m["new_axis_mask"] = value
44862	}
44863}
44864
44865// StridedSliceGradShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
44866// If not specified, defaults to 0
44867func StridedSliceGradShrinkAxisMask(value int64) StridedSliceGradAttr {
44868	return func(m optionalAttr) {
44869		m["shrink_axis_mask"] = value
44870	}
44871}
44872
44873// Returns the gradient of `StridedSlice`.
44874//
44875// Since `StridedSlice` cuts out pieces of its `input` which is size
44876// `shape`, its gradient will have the same shape (which is passed here
44877// as `shape`). The gradient will be zero in any element that the slice
44878// does not select.
44879//
44880	// Arguments are the same as `StridedSlice` with the exception that
44881// `dy` is the input gradient to be propagated and `shape` is the
44882// shape of `StridedSlice`'s `input`.
44883func StridedSliceGrad(scope *Scope, shape tf.Output, begin tf.Output, end tf.Output, strides tf.Output, dy tf.Output, optional ...StridedSliceGradAttr) (output tf.Output) {
44884	if scope.Err() != nil {
44885		return
44886	}
44887	attrs := map[string]interface{}{}
44888	for _, a := range optional {
44889		a(attrs)
44890	}
44891	opspec := tf.OpSpec{
44892		Type: "StridedSliceGrad",
44893		Input: []tf.Input{
44894			shape, begin, end, strides, dy,
44895		},
44896		Attrs: attrs,
44897	}
44898	op := scope.AddOperation(opspec)
44899	return op.Output(0)
44900}
44901
44902// CudnnRNNBackpropV2Attr is an optional argument to CudnnRNNBackpropV2.
44903type CudnnRNNBackpropV2Attr func(optionalAttr)
44904
44905// CudnnRNNBackpropV2RnnMode sets the optional rnn_mode attribute to value.
44906// If not specified, defaults to "lstm"
44907func CudnnRNNBackpropV2RnnMode(value string) CudnnRNNBackpropV2Attr {
44908	return func(m optionalAttr) {
44909		m["rnn_mode"] = value
44910	}
44911}
44912
44913// CudnnRNNBackpropV2InputMode sets the optional input_mode attribute to value.
44914// If not specified, defaults to "linear_input"
44915func CudnnRNNBackpropV2InputMode(value string) CudnnRNNBackpropV2Attr {
44916	return func(m optionalAttr) {
44917		m["input_mode"] = value
44918	}
44919}
44920
44921// CudnnRNNBackpropV2Direction sets the optional direction attribute to value.
44922// If not specified, defaults to "unidirectional"
44923func CudnnRNNBackpropV2Direction(value string) CudnnRNNBackpropV2Attr {
44924	return func(m optionalAttr) {
44925		m["direction"] = value
44926	}
44927}
44928
44929// CudnnRNNBackpropV2Dropout sets the optional dropout attribute to value.
44930// If not specified, defaults to 0
44931func CudnnRNNBackpropV2Dropout(value float32) CudnnRNNBackpropV2Attr {
44932	return func(m optionalAttr) {
44933		m["dropout"] = value
44934	}
44935}
44936
44937// CudnnRNNBackpropV2Seed sets the optional seed attribute to value.
44938// If not specified, defaults to 0
44939func CudnnRNNBackpropV2Seed(value int64) CudnnRNNBackpropV2Attr {
44940	return func(m optionalAttr) {
44941		m["seed"] = value
44942	}
44943}
44944
44945// CudnnRNNBackpropV2Seed2 sets the optional seed2 attribute to value.
44946// If not specified, defaults to 0
44947func CudnnRNNBackpropV2Seed2(value int64) CudnnRNNBackpropV2Attr {
44948	return func(m optionalAttr) {
44949		m["seed2"] = value
44950	}
44951}
44952
44953// Backprop step of CudnnRNN.
44954//
44955	// Computes the backprop of both data and weights in an RNN. Takes an extra
44956	//     "host_reserved" input compared to CudnnRNNBackprop, which is used to determine
44957	//     the RNN cudnnRNNAlgo_t and cudnnMathType_t.
44958//
44959// rnn_mode: Indicates the type of the RNN model.
44960// input_mode: Indicates whether there is a linear projection between the input and
44961//     the actual computation before the first layer. 'skip_input' is only allowed
44962//     when input_size == num_units; 'auto_select' implies 'skip_input' when
44963//     input_size == num_units; otherwise, it implies 'linear_input'.
44964// direction: Indicates whether a bidirectional model will be used. Should be
44965//   "unidirectional" or "bidirectional".
44966// dropout: Dropout probability. When set to 0., dropout is disabled.
44967// seed: The 1st part of a seed to initialize dropout.
44968// seed2: The 2nd part of a seed to initialize dropout.
44969// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
44970// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
44971//     num_units].
44972// input_c: For LSTM, a 3-D tensor with the shape of
44973//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
44974// params: A 1-D tensor that contains the weights and biases in an opaque layout.
44975//     The size must be created through CudnnRNNParamsSize, and initialized
44976//     separately. Note that they might not be compatible across different
44977	//     generations. So it is a good idea to save and restore them.
44978// output: A 3-D tensor with the shape of [seq_length, batch_size,
44979//     dir * num_units].
44980	// output_h: The same shape as input_h.
44981// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
44982// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
44983// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
44984//     pass.
44985// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
44986//     pass.
44987// reserve_space: The same reserve_space produced in the forward operation.
44988// host_reserved: The same host_reserved produced in the forward operation.
44989// input_backprop: The backprop to input in the forward pass. Has the same shape
44990//     as input.
44991// input_h_backprop: The backprop to input_h in the forward pass. Has the same
44992//     shape as input_h.
44993// input_c_backprop: The backprop to input_c in the forward pass. Has the same
44994//     shape as input_c.
44995// params_backprop: The backprop to the params buffer in the forward pass. Has the
44996//     same shape as params.
44997func CudnnRNNBackpropV2(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, host_reserved tf.Output, optional ...CudnnRNNBackpropV2Attr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output) {
44998	if scope.Err() != nil {
44999		return
45000	}
45001	attrs := map[string]interface{}{}
45002	for _, a := range optional {
45003		a(attrs)
45004	}
45005	opspec := tf.OpSpec{
45006		Type: "CudnnRNNBackpropV2",
45007		Input: []tf.Input{
45008			input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved,
45009		},
45010		Attrs: attrs,
45011	}
45012	op := scope.AddOperation(opspec)
45013	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
45014}
45015
45016// DirectedInterleaveDatasetAttr is an optional argument to DirectedInterleaveDataset.
45017type DirectedInterleaveDatasetAttr func(optionalAttr)
45018
45019// DirectedInterleaveDatasetStopOnEmptyDataset sets the optional stop_on_empty_dataset attribute to value.
45020// If not specified, defaults to false
45021func DirectedInterleaveDatasetStopOnEmptyDataset(value bool) DirectedInterleaveDatasetAttr {
45022	return func(m optionalAttr) {
45023		m["stop_on_empty_dataset"] = value
45024	}
45025}
45026
45027// A substitute for `InterleaveDataset` on a fixed list of `N` datasets.
45028//
45029// Arguments:
45030//	selector_input_dataset: A dataset of scalar `DT_INT64` elements that determines which of the
45031// `N` data inputs should produce the next output element.
45032//	data_input_datasets: `N` datasets with the same type that will be interleaved according to
45033// the values of `selector_input_dataset`.
45034//
45035//
45036func DirectedInterleaveDataset(scope *Scope, selector_input_dataset tf.Output, data_input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...DirectedInterleaveDatasetAttr) (handle tf.Output) {
45037	if scope.Err() != nil {
45038		return
45039	}
45040	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
45041	for _, a := range optional {
45042		a(attrs)
45043	}
45044	opspec := tf.OpSpec{
45045		Type: "DirectedInterleaveDataset",
45046		Input: []tf.Input{
45047			selector_input_dataset, tf.OutputList(data_input_datasets),
45048		},
45049		Attrs: attrs,
45050	}
45051	op := scope.AddOperation(opspec)
45052	return op.Output(0)
45053}
45054
45055// Subtracts sparse `updates` from an existing tensor according to `indices`.
45056//
45057// This operation creates a new tensor by subtracting sparse `updates` from the
45058// passed in `tensor`.
45059// This operation is very similar to `tf.scatter_nd_sub`, except that the updates
45060// are subtracted from an existing tensor (as opposed to a variable). If the memory
45061// for the existing tensor cannot be re-used, a copy is made and updated.
45062//
45063// `indices` is an integer tensor containing indices into a new tensor of shape
45064// `shape`.  The last dimension of `indices` can be at most the rank of `shape`:
45065//
45066//     indices.shape[-1] <= shape.rank
45067//
45068// The last dimension of `indices` corresponds to indices into elements
45069// (if `indices.shape[-1] = shape.rank`) or slices
45070// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
45071// `shape`.  `updates` is a tensor with shape
45072//
45073//     indices.shape[:-1] + shape[indices.shape[-1]:]
45074//
45075// The simplest form of tensor_scatter_sub is to subtract individual elements
45076	// from a tensor by index. For example, say we want to subtract 4 scattered elements
45077	// from a rank-1 tensor with 8 elements.
45078//
45079// In Python, this scatter subtract operation would look like this:
45080//
45081// ```python
45082//     indices = tf.constant([[4], [3], [1], [7]])
45083//     updates = tf.constant([9, 10, 11, 12])
45084//     tensor = tf.ones([8], dtype=tf.int32)
45085//     updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
45086//     print(updated)
45087// ```
45088//
45089// The resulting tensor would look like this:
45090//
45091//     [1, -10, 1, -9, -8, 1, 1, -11]
45092//
45093	// We can also subtract entire slices of a higher-rank tensor all at once. For
45094	// example, we can update two slices in the first dimension of a
45095	// rank-3 tensor with two matrices of new values.
45096//
45097	// In Python, this scatter subtract operation would look like this:
45098//
45099// ```python
45100//     indices = tf.constant([[0], [2]])
45101//     updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
45102//                             [7, 7, 7, 7], [8, 8, 8, 8]],
45103//                            [[5, 5, 5, 5], [6, 6, 6, 6],
45104//                             [7, 7, 7, 7], [8, 8, 8, 8]]])
45105//     tensor = tf.ones([4, 4, 4],dtype=tf.int32)
45106//     updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
45107//     print(updated)
45108// ```
45109//
45110// The resulting tensor would look like this:
45111//
45112//     [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
45113//      [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
45114//      [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
45115//      [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
45116//
45117// Note that on CPU, if an out of bound index is found, an error is returned.
45118// On GPU, if an out of bound index is found, the index is ignored.
45119//
45120// Arguments:
45121//	tensor: Tensor to copy/update.
45122//	indices: Index tensor.
45123//	updates: Updates to scatter into output.
45124//
45125// Returns A new tensor copied from tensor and updates subtracted according to the indices.
45126func TensorScatterSub(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
45127	if scope.Err() != nil {
45128		return
45129	}
45130	opspec := tf.OpSpec{
45131		Type: "TensorScatterSub",
45132		Input: []tf.Input{
45133			tensor, indices, updates,
45134		},
45135	}
45136	op := scope.AddOperation(opspec)
45137	return op.Output(0)
45138}
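
// A Go sketch of the first example above (editorial, not generated; assumes
// op.NewScope and op.Const):
//
// ```
// s := op.NewScope()
// tensor := op.Const(s, []int32{1, 1, 1, 1, 1, 1, 1, 1})
// indices := op.Const(s.SubScope("indices"), [][]int32{{4}, {3}, {1}, {7}})
// updates := op.Const(s.SubScope("updates"), []int32{9, 10, 11, 12})
// out := op.TensorScatterSub(s, tensor, indices, updates)
// // out evaluates to [1, -10, 1, -9, -8, 1, 1, -11]
// ```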

// PreventGradientAttr is an optional argument to PreventGradient.
type PreventGradientAttr func(optionalAttr)

// PreventGradientMessage sets the optional message attribute to value.
//
// value: Will be printed in the error when anyone tries to differentiate
// this operation.
// If not specified, defaults to ""
func PreventGradientMessage(value string) PreventGradientAttr {
	return func(m optionalAttr) {
		m["message"] = value
	}
}

// An identity op that triggers an error if a gradient is requested.
//
// When executed in a graph, this op outputs its input tensor as-is.
//
// When building ops to compute gradients, the TensorFlow gradient system
// will return an error when trying to look up the gradient of this op,
// because no gradient should ever be registered for this op.  This
// op exists to prevent subtle bugs from silently returning unimplemented
// gradients in some corner cases.
//
// Arguments:
//	input: any tensor.
//
// Returns the same input tensor.
func PreventGradient(scope *Scope, input tf.Output, optional ...PreventGradientAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PreventGradient",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StridedSliceAttr is an optional argument to StridedSlice.
type StridedSliceAttr func(optionalAttr)

// StridedSliceBeginMask sets the optional begin_mask attribute to value.
//
// value: a bitmask where bit `i` being 1 means to ignore the begin
// value and instead use the largest interval possible. At runtime
// begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or
// `[-1, n-1]` if `stride[i] < 0`
// If not specified, defaults to 0
func StridedSliceBeginMask(value int64) StridedSliceAttr {
	return func(m optionalAttr) {
		m["begin_mask"] = value
	}
}

// StridedSliceEndMask sets the optional end_mask attribute to value.
//
// value: analogous to `begin_mask`
// If not specified, defaults to 0
func StridedSliceEndMask(value int64) StridedSliceAttr {
	return func(m optionalAttr) {
		m["end_mask"] = value
	}
}

// StridedSliceEllipsisMask sets the optional ellipsis_mask attribute to value.
//
// value: a bitmask where bit `i` being 1 means the `i`th
// position is actually an ellipsis. At most one bit can be 1.
// If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`
// is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis
// implicitly creates as many range specifications as necessary to fully
// specify the sliced range for every dimension. For example, for a 4-dimensional
// tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.
// If not specified, defaults to 0
func StridedSliceEllipsisMask(value int64) StridedSliceAttr {
	return func(m optionalAttr) {
		m["ellipsis_mask"] = value
	}
}

// StridedSliceNewAxisMask sets the optional new_axis_mask attribute to value.
//
// value: a bitmask where bit `i` being 1 means the `i`th
// specification creates a new size-1 dimension in the shape. For example
// `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
// If not specified, defaults to 0
func StridedSliceNewAxisMask(value int64) StridedSliceAttr {
	return func(m optionalAttr) {
		m["new_axis_mask"] = value
	}
}

// StridedSliceShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
//
// value: a bitmask where bit `i` implies that the `i`th
// specification should shrink the dimensionality. begin and end
// must imply a slice of size 1 in the dimension. For example in
// python one might do `foo[:, 3, :]` which would result in
// `shrink_axis_mask` being 2.
// If not specified, defaults to 0
func StridedSliceShrinkAxisMask(value int64) StridedSliceAttr {
	return func(m optionalAttr) {
		m["shrink_axis_mask"] = value
	}
}

// Return a strided slice from `input`.
//
// Note, most Python users will want to use the Python `Tensor.__getitem__`
// or `Variable.__getitem__` rather than this op directly.
//
// The goal of this op is to produce a new tensor with a subset of
// the elements from the `n` dimensional `input` tensor. The subset is chosen using
// a sequence of `m` sparse range specifications encoded into the arguments
// of this function. Note that in some cases
// `m` could be equal to `n`, but this need not be the case. Each
// range specification entry can be one of the following:
//
// - An ellipsis (...). Ellipses are used to imply zero or more
//   dimensions of full-dimension selection and are produced using
//   `ellipsis_mask`. For example, `foo[...]` is the identity slice.
//
// - A new axis. This is used to insert a new shape=1 dimension and is
//   produced using `new_axis_mask`. For example, `foo[tf.newaxis, ...]` where
//   `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
//
// - A range `begin:end:stride`. This is used to specify how much to choose from
//   a given dimension. `stride` can be any integer but 0.  `begin` is an integer
//   which represents the index of the first value to select while `end` represents
//   the index of the last value to select. The number of values selected in each
//   dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
//   `begin` and `end` can be negative where `-1` is the last element, `-2` is
//   the second to last. `begin_mask` controls whether to replace the explicitly
//   given `begin` with an implicit effective value of `0` if `stride > 0` and
//   `-1` if `stride < 0`. `end_mask` is analogous but produces the number
//   required to create the largest open interval. For example, given a shape
//   `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
//   not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
//   and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
//   first dimension of a tensor while dropping the last (in the original
//   order) element. For example `foo = [1,2,3,4]; foo[-2::-1]` is `[3,2,1]`.
//
// - A single index. This is used to keep only elements that have a given
//   index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a
//   shape `(6,)` tensor. This is encoded in `begin` and `end` and
//   `shrink_axis_mask`.
//
// Each conceptual range specification is encoded in the op's arguments. This
// encoding is best understood by considering a non-trivial example. In
// particular,
// `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
//
// ```
// begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
// end = [2, 4, x, x, -3, x]
// strides = [1, 1, x, x, -1, 1]
// begin_mask = 1<<4 | 1<<5 = 48
// end_mask = 1<<5 = 32
// ellipsis_mask = 1<<3 = 8
// new_axis_mask = 1<<2 = 4
// shrink_axis_mask = 1<<0 = 1
// ```
//
// In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
// the slice becomes (2, 1, 5, 5, 2, 5).
// Let us walk step by step through each argument specification.
//
// 1.  The first argument in the example slice is turned into `begin = 1` and
// `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
// also set the appropriate bit in `shrink_axis_mask`.
//
// 2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
// zero bits contributed.
//
// 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1
// into the final shape. Dummy values are contributed to begin,
// end and stride, while the new_axis_mask bit is set.
//
// 4. `...` grabs the full ranges from as many dimensions as needed to
// fully specify a slice for every dimension of the input shape.
//
// 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
// with a dimension that has shape `s` is converted to a positive index
// `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
// is done internally so begin, end and strides receive x, -3, and -1.
// The appropriate begin_mask bit is set to indicate the start range is the
// full range (ignoring the x).
//
// 6. `:` indicates that the entire contents of the corresponding dimension
// are selected. This is equivalent to `::` or `0::1`. begin, end, and strides
// receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
// `end_mask` are also set.
//
// *Requirements*:
//   `0 != strides[i] for i in [0, m)`
//   `ellipsis_mask must be a power of two (only one ellipsis)`
//
// Arguments:
//
//	begin: `begin[k]` specifies the offset into the `k`th range specification.
// The exact dimension this corresponds to will be determined by context.
// Out-of-bounds values will be silently clamped. If the `k`th bit of
// `begin_mask` is set then `begin[k]` is ignored and the full range of the
// appropriate dimension is used instead. Negative values cause indexing
// to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.
//	end: `end[i]` is like `begin` with the exception that `end_mask` is
// used to determine full ranges.
//	strides: `strides[i]` specifies the increment in the `i`th specification
// after extracting a given element. Negative indices will reverse
// the original order. Out-of-range values are
// clamped to `[0,dim[i]) if strides[i] > 0` or `[-1,dim[i]-1] if strides[i] < 0`
func StridedSlice(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, strides tf.Output, optional ...StridedSliceAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StridedSlice",
		Input: []tf.Input{
			input, begin, end, strides,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
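
// A minimal Go sketch of the mask encoding described above (not generated
// code; it assumes the usual op.NewScope/op.Const flow from this package).
// It extracts row 1 of a (3, 4) matrix as a rank-1 tensor, the Go analogue
// of Python's `x[1, :]`:
//
// ```go
// s := op.NewScope()
// x := op.Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}})
// row := op.StridedSlice(s, x,
// 	op.Const(s, []int32{1, 0}), // begin
// 	op.Const(s, []int32{2, 0}), // end (ignored for dim 1 via end_mask)
// 	op.Const(s, []int32{1, 1}), // strides
// 	op.StridedSliceShrinkAxisMask(1), // bit 0: drop the row dimension
// 	op.StridedSliceEndMask(2),        // bit 1: full range for dim 1
// )
// // row evaluates to [5 6 7 8], shape (4,)
// ```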

// Updates the tree ensemble by either adding a layer to the last tree being grown
//
// or by starting a new tree.
//
// Arguments:
//	tree_ensemble_handle: Handle to the ensemble variable.
//	feature_ids: Rank 1 tensor with ids for each feature. This is the real id of
// the feature that will be used in the split.
//	node_ids: List of rank 1 tensors representing the nodes for which this feature
// has a split.
//	gains: List of rank 1 tensors representing the gains for each of the feature's
// split.
//	thresholds: List of rank 1 tensors representing the thresholds for each of the
// feature's split.
//	left_node_contribs: List of rank 2 tensors with left leaf contribs for each of
// the feature's splits. Will be added to the previous node values to constitute
// the values of the left nodes.
//	right_node_contribs: List of rank 2 tensors with right leaf contribs for each
// of the feature's splits. Will be added to the previous node values to constitute
// the values of the right nodes.
//	max_depth: Max depth of the tree to build.
//	learning_rate: shrinkage constant for each new tree.
//	pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
//
// Returns the created operation.
func BoostedTreesUpdateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, feature_ids tf.Output, node_ids []tf.Output, gains []tf.Output, thresholds []tf.Output, left_node_contribs []tf.Output, right_node_contribs []tf.Output, max_depth tf.Output, learning_rate tf.Output, pruning_mode int64) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"pruning_mode": pruning_mode}
	opspec := tf.OpSpec{
		Type: "BoostedTreesUpdateEnsemble",
		Input: []tf.Input{
			tree_ensemble_handle, feature_ids, tf.OutputList(node_ids), tf.OutputList(gains), tf.OutputList(thresholds), tf.OutputList(left_node_contribs), tf.OutputList(right_node_contribs), max_depth, learning_rate,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// RestoreAttr is an optional argument to Restore.
type RestoreAttr func(optionalAttr)

// RestorePreferredShard sets the optional preferred_shard attribute to value.
//
// value: Index of file to open first if multiple files match
// `file_pattern`.
// If not specified, defaults to -1
func RestorePreferredShard(value int64) RestoreAttr {
	return func(m optionalAttr) {
		m["preferred_shard"] = value
	}
}

// Restores a tensor from checkpoint files.
//
// Reads a tensor stored in one or several files. If there are several files (for
// instance because a tensor was saved as slices), `file_pattern` may contain
// wildcard symbols (`*` and `?`) in the filename portion only, not in the
// directory portion.
//
// If a `file_pattern` matches several files, `preferred_shard` can be used to hint
// in which file the requested tensor is likely to be found. This op will first
// open the file at index `preferred_shard` in the list of matching files and try
// to restore tensors from that file.  Only if some tensors or tensor slices are
// not found in that first file does the Op open all the files. Setting
// `preferred_shard` to match the value passed as the `shard` input
// of a matching `Save` Op may speed up Restore.  This attribute only affects
// performance, not correctness.  The default value -1 means files are processed in
// order.
//
// See also `RestoreSlice`.
//
// Arguments:
//	file_pattern: Must have a single element. The pattern of the files from
// which we read the tensor.
//	tensor_name: Must have a single element. The name of the tensor to be
// restored.
//	dt: The type of the tensor to be restored.
//
// Returns The restored tensor.
func Restore(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, dt tf.DataType, optional ...RestoreAttr) (tensor tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dt": dt}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Restore",
		Input: []tf.Input{
			file_pattern, tensor_name,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
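
// A minimal Go sketch of using Restore with the preferred_shard hint (not
// generated code; the checkpoint path and tensor name below are hypothetical,
// and the usual op.NewScope/op.Const flow from this package is assumed):
//
// ```go
// s := op.NewScope()
// pattern := op.Const(s, "/tmp/ckpt/model-*") // hypothetical checkpoint path
// name := op.Const(s, "dense/kernel")         // hypothetical tensor name
// restored := op.Restore(s, pattern, name, tf.Float,
// 	op.RestorePreferredShard(0)) // try shard 0 first; -1 scans in order
// _ = restored // feed into the rest of the graph as usual
// ```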

// Reorders a SparseTensor into the canonical, row-major ordering.
//
// Note that by convention, all sparse ops preserve the canonical ordering along
// increasing dimension number. The only time ordering can be violated is during
// manual manipulation of the indices and values vectors to add entries.
//
// Reordering does not affect the shape of the SparseTensor.
//
// If the tensor has rank `R` and `N` non-empty values, `input_indices` has
// shape `[N, R]`, `input_values` has length `N`, and `input_shape` has length `R`.
//
// Arguments:
//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
// SparseTensor, possibly not in canonical ordering.
//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
//	input_shape: 1-D.  Shape of the input SparseTensor.
//
// Returns:
//	output_indices: 2-D.  `N x R` matrix with the same indices as input_indices, but
// in canonical row-major ordering.
//	output_values: 1-D.  `N` non-empty values corresponding to `output_indices`.
func SparseReorder(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseReorder",
		Input: []tf.Input{
			input_indices, input_values, input_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Inverse 3D fast Fourier transform.
//
// Computes the inverse 3-dimensional discrete Fourier transform over the
// inner-most 3 dimensions of `input`.
//
// Arguments:
//	input: A complex tensor.
//
// Returns A complex tensor of the same shape as `input`. The inner-most 3
//   dimensions of `input` are replaced with their inverse 3D Fourier transform.
//
// @compatibility(numpy)
// Equivalent to np.fft.ifftn with 3 dimensions.
// @end_compatibility
func IFFT3D(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IFFT3D",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a map that is the 'input_handle' with the given key-value pair inserted.
//
// input_handle: the original map
// output_handle: the map with key and value inserted
// key: the key to be inserted
// value: the value to be inserted
func TensorMapInsert(scope *Scope, input_handle tf.Output, key tf.Output, value tf.Output) (output_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "TensorMapInsert",
		Input: []tf.Input{
			input_handle, key, value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Checks whether a tree ensemble has been initialized.
//
// Arguments:
//	tree_ensemble_handle: Handle to the tree ensemble resource.
//
// Returns a boolean indicating whether the tree ensemble has been initialized.
func IsBoostedTreesEnsembleInitialized(scope *Scope, tree_ensemble_handle tf.Output) (is_initialized tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IsBoostedTreesEnsembleInitialized",
		Input: []tf.Input{
			tree_ensemble_handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ExperimentalStatsAggregatorHandleAttr is an optional argument to ExperimentalStatsAggregatorHandle.
type ExperimentalStatsAggregatorHandleAttr func(optionalAttr)

// ExperimentalStatsAggregatorHandleContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func ExperimentalStatsAggregatorHandleContainer(value string) ExperimentalStatsAggregatorHandleAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// ExperimentalStatsAggregatorHandleSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func ExperimentalStatsAggregatorHandleSharedName(value string) ExperimentalStatsAggregatorHandleAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Creates a statistics manager resource.
func ExperimentalStatsAggregatorHandle(scope *Scope, optional ...ExperimentalStatsAggregatorHandleAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ExperimentalStatsAggregatorHandle",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RetrieveTPUEmbeddingADAMParametersAttr is an optional argument to RetrieveTPUEmbeddingADAMParameters.
type RetrieveTPUEmbeddingADAMParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingADAMParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingADAMParametersTableId(value int64) RetrieveTPUEmbeddingADAMParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingADAMParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingADAMParametersTableName(value string) RetrieveTPUEmbeddingADAMParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingADAMParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingADAMParametersConfig(value string) RetrieveTPUEmbeddingADAMParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve ADAM embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the ADAM optimization algorithm.
//	momenta: Parameter momenta updated by the ADAM optimization algorithm.
//	velocities: Parameter velocities updated by the ADAM optimization algorithm.
func RetrieveTPUEmbeddingADAMParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingADAMParametersAttr) (parameters tf.Output, momenta tf.Output, velocities tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingADAMParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// StatelessRandomBinomialAttr is an optional argument to StatelessRandomBinomial.
type StatelessRandomBinomialAttr func(optionalAttr)

// StatelessRandomBinomialDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_INT64
func StatelessRandomBinomialDtype(value tf.DataType) StatelessRandomBinomialAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom numbers from a binomial distribution.
//
// Outputs random values from a binomial distribution.
//
// The outputs are a deterministic function of `shape`, `seed`, `counts`, and `probs`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//	counts: The counts of the binomial distribution. Must be broadcastable with `probs`,
// and broadcastable with the rightmost dimensions of `shape`.
//	probs: The probability of success for the binomial distribution. Must be broadcastable
// with `counts` and broadcastable with the rightmost dimensions of `shape`.
//
// Returns Random values with specified shape.
func StatelessRandomBinomial(scope *Scope, shape tf.Output, seed tf.Output, counts tf.Output, probs tf.Output, optional ...StatelessRandomBinomialAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomBinomial",
		Input: []tf.Input{
			shape, seed, counts, probs,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// DecodePaddedRawAttr is an optional argument to DecodePaddedRaw.
type DecodePaddedRawAttr func(optionalAttr)

// DecodePaddedRawLittleEndian sets the optional little_endian attribute to value.
//
// value: Whether the input `input_bytes` is in little-endian order. Ignored for
// `out_type` values that are stored in a single byte, like `uint8`.
// If not specified, defaults to true
func DecodePaddedRawLittleEndian(value bool) DecodePaddedRawAttr {
	return func(m optionalAttr) {
		m["little_endian"] = value
	}
}

// Reinterpret the bytes of a string as a vector of numbers.
//
// Arguments:
//	input_bytes: Tensor of strings to be decoded.
//	fixed_length: Length in bytes for each element of the decoded output. Must be a multiple
// of the size of the output type.
//
// Returns A Tensor with one more dimension than `input_bytes`. The added dimension
// will have size equal to the length of the elements of `input_bytes` divided by the
// number of bytes to represent `out_type`.
func DecodePaddedRaw(scope *Scope, input_bytes tf.Output, fixed_length tf.Output, out_type tf.DataType, optional ...DecodePaddedRawAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "DecodePaddedRaw",
		Input: []tf.Input{
			input_bytes, fixed_length,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// UniqueV2Attr is an optional argument to UniqueV2.
type UniqueV2Attr func(optionalAttr)

// UniqueV2OutIdx sets the optional out_idx attribute to value.
// If not specified, defaults to DT_INT32
func UniqueV2OutIdx(value tf.DataType) UniqueV2Attr {
	return func(m optionalAttr) {
		m["out_idx"] = value
	}
}

// Finds unique elements along an axis of a tensor.
//
// This operation returns a tensor `y` containing the unique elements
// along the `axis` of a tensor. The returned unique elements are sorted
// in the same order as they occur along `axis` in `x`.
// This operation also returns a tensor `idx` that is the same size as
// the number of elements in `x` along the `axis` dimension. It
// contains the index into the unique output `y`.
// In other words, for a `1-D` tensor `x` with `axis = None`:
//
// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
//
// For example:
//
// ```
// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
// y, idx = unique(x)
// y ==> [1, 2, 4, 7, 8]
// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
// ```
//
// For a `2-D` tensor `x` with `axis = 0`:
//
// ```
// # tensor 'x' is [[1, 0, 0],
// #                [1, 0, 0],
// #                [2, 0, 0]]
// y, idx = unique(x, axis=0)
// y ==> [[1, 0, 0],
//        [2, 0, 0]]
// idx ==> [0, 0, 1]
// ```
//
// For a `2-D` tensor `x` with `axis = 1`:
//
// ```
// # tensor 'x' is [[1, 0, 0],
// #                [1, 0, 0],
// #                [2, 0, 0]]
// y, idx = unique(x, axis=1)
// y ==> [[1, 0],
//        [1, 0],
//        [2, 0]]
// idx ==> [0, 1, 1]
// ```
//
// Arguments:
//	x: A `Tensor`.
//	axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor to
// find the unique elements.
//
// Returns:
//	y: A `Tensor`. Unique elements along the `axis` of `Tensor` x.
//	idx: A 1-D Tensor. Has the same type as x that contains the index of each
// value of x in the output y.
func UniqueV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueV2Attr) (y tf.Output, idx tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "UniqueV2",
		Input: []tf.Input{
			x, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
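
// A minimal Go sketch mirroring the first example above (not generated code;
// it assumes the usual op.NewScope/op.Const flow, and passes `axis` as a
// one-element tensor):
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []int32{1, 1, 2, 4, 4, 4, 7, 8, 8})
// y, idx := op.UniqueV2(s, x, op.Const(s, []int32{0}))
// // y evaluates to [1 2 4 7 8]
// // idx evaluates to [0 0 1 2 2 2 3 4 4]
// _, _ = y, idx
// ```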

// Returns the index of the device the op runs on.
//
// Given a list of device names, this operation returns the index of the device
// this op runs on. The length of the list is returned in two cases:
// (1) the device does not exist in the given device list;
// (2) the op is being compiled with XLA.
func DeviceIndex(scope *Scope, device_names []string) (index tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"device_names": device_names}
	opspec := tf.OpSpec{
		Type: "DeviceIndex",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Greedily selects a subset of bounding boxes in descending order of score,
//
// pruning away boxes that have high intersection-over-union (IOU) overlap
// with previously selected boxes.  Bounding boxes with score less than
// `score_threshold` are removed.  Bounding boxes are supplied as
// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
// diagonal pair of box corners and the coordinates can be provided as normalized
// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
// is agnostic to where the origin is in the coordinate system and more
// generally is invariant to orthogonal transformations and translations
// of the coordinate system; thus translations or reflections of the coordinate
// system result in the same boxes being selected by the algorithm.
// The output of this operation is a set of integers indexing into the input
// collection of bounding boxes representing the selected boxes.  The bounding
// box coordinates corresponding to the selected indices can then be obtained
// using the `tf.gather` operation.  For example:
//   selected_indices = tf.image.non_max_suppression_v2(
//       boxes, scores, max_output_size, iou_threshold, score_threshold)
//   selected_boxes = tf.gather(boxes, selected_indices)
//
// Arguments:
//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
// score corresponding to each box (each row of boxes).
//	max_output_size: A scalar integer tensor representing the maximum number of
// boxes to be selected by non max suppression.
//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
// boxes overlap too much with respect to IOU.
//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
// boxes based on score.
//
// Returns A 1-D integer tensor of shape `[M]` representing the selected
// indices from the boxes tensor, where `M <= max_output_size`.
func NonMaxSuppressionV3(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output) (selected_indices tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "NonMaxSuppressionV3",
		Input: []tf.Input{
			boxes, scores, max_output_size, iou_threshold, score_threshold,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Return the reduction indices for computing gradients of s0 op s1 with broadcast.
//
// This is typically used by gradient computations for a broadcasting operation.
func BroadcastGradientArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output, r1 tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BroadcastGradientArgs",
		Input: []tf.Input{
			s0, s1,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Make all elements in the non-batch dimension unique, but "close" to
//
// their initial value. Never returns a sub-normal number. Never returns
// zero. The sign of each input element is always identical to the sign
// of the corresponding output element. Behavior for infinite elements is
// undefined. Behavior for subnormal elements is undefined.
func MakeUnique(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "MakeUnique",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Broadcast an array for a compatible shape.
//
// Broadcasting is the process of making arrays have compatible shapes
// for arithmetic operations. Two shapes are compatible if for each
// dimension pair they are either equal or one of them is one. When trying
// to broadcast a Tensor to a shape, it starts with the trailing dimensions,
// and works its way forward.
//
// For example,
//
// >>> x = tf.constant([1, 2, 3])
// >>> y = tf.broadcast_to(x, [3, 3])
// >>> print(y)
// tf.Tensor(
//     [[1 2 3]
//      [1 2 3]
//      [1 2 3]], shape=(3, 3), dtype=int32)
//
// In the above example, the input Tensor of shape `[3]` (treated as
// `[1, 3]`) is broadcast to an output Tensor with shape `[3, 3]`.
//
// When doing broadcasted operations such as multiplying a tensor
// by a scalar, broadcasting (usually) confers some time or space
// benefit, as the broadcasted tensor is never materialized.
//
// However, `broadcast_to` does not carry with it any such benefits.
// The newly-created tensor takes the full memory of the broadcasted
// shape. (In a graph context, `broadcast_to` might be fused into a
// subsequent operation and then be optimized away, however.)
//
// Arguments:
//	input: A Tensor to broadcast.
//	shape: A 1-D `int` Tensor. The shape of the desired output.
//
// Returns A Tensor.
func BroadcastTo(scope *Scope, input tf.Output, shape tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BroadcastTo",
		Input: []tf.Input{
			input, shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
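
// The same example in Go (a sketch, not generated code; assumes the usual
// op.NewScope/op.Const flow and a tf.Session to evaluate):
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []int32{1, 2, 3})
// y := op.BroadcastTo(s, x, op.Const(s, []int32{3, 3}))
// // y evaluates to [[1 2 3] [1 2 3] [1 2 3]]
// ```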

// Checks a tensor for NaN and Inf values.
//
// When run, reports an `InvalidArgument` error if `tensor` has any values
// that are not a number (NaN) or infinity (Inf). Otherwise, returns the input
// tensor.
//
// Example usage:
//
// ``` python
// a = tf.Variable(1.0)
// tf.debugging.check_numerics(a, message='')
//
// b = tf.Variable(np.nan)
// try:
//   tf.debugging.check_numerics(b, message='Checking b')
// except Exception as e:
//   assert "Checking b : Tensor had NaN values" in e.message
//
// c = tf.Variable(np.inf)
// try:
//   tf.debugging.check_numerics(c, message='Checking c')
// except Exception as e:
//   assert "Checking c : Tensor had Inf values" in e.message
// ```
//
// Arguments:
//
//	message: Prefix of the error message.
func CheckNumerics(scope *Scope, tensor tf.Output, message string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"message": message}
	opspec := tf.OpSpec{
		Type: "CheckNumerics",
		Input: []tf.Input{
			tensor,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
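
// A minimal Go sketch (not generated code; assumes the usual graph-building
// flow and the standard library "math" package). From Go, the failure surfaces
// as a non-nil error from Session.Run rather than a Python exception:
//
// ```go
// s := op.NewScope()
// bad := op.Const(s, []float32{1, float32(math.NaN()), 3})
// checked := op.CheckNumerics(s, bad, "Checking bad")
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// _, err := sess.Run(nil, []tf.Output{checked}, nil)
// // err is non-nil and its message is prefixed with "Checking bad"
// ```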

// Looks up keys in a table, outputs the corresponding values.
//
// The tensor `keys` must be of the same type as the keys of the table.
// The output `values` is of the type of the table values.
//
// The scalar `default_value` is the value output for keys not present in the
// table. It must also be of the same type as the table values.
//
// Arguments:
//	table_handle: Handle to the table.
//	keys: Any shape.  Keys to look up.
//
// Returns Same shape as `keys`.  Values found in the table, or `default_value`
// for missing keys.
func LookupTableFindV2(scope *Scope, table_handle tf.Output, keys tf.Output, default_value tf.Output) (values tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LookupTableFindV2",
		Input: []tf.Input{
			table_handle, keys, default_value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes inverse hyperbolic sine of x element-wise.
//
//   Given an input tensor, this function computes inverse hyperbolic sine
//   for every element in the tensor. Both input and output have a range of
//   `[-inf, inf]`.
//
//   ```python
//   x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")])
//   tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf]
//   ```
func Asinh(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Asinh",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Wraps the XLA DynamicSlice operator, documented at
//
//  https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice
// .
//
// DynamicSlice extracts a sub-array from the input array at dynamic
// start_indices. The size of the slice in each dimension is passed in
// size_indices, which specify the end point of exclusive slice intervals in each
// dimension -- [start, start + size). The shape of start_indices must have rank 1,
// with dimension size equal to the rank of operand.
//
// Arguments:
//	input: A `Tensor` of type T.
//	start_indices: Rank 1 tensor of N integers containing the starting indices of
// the slice in each dimension.
//	size_indices: List of N integers containing the slice size for each
// dimension. Each value must be strictly greater than zero, and start + size
// must be less than or equal to the size of the dimension to avoid
// implementation defined behavior.
func XlaDynamicSlice(scope *Scope, input tf.Output, start_indices tf.Output, size_indices tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "XlaDynamicSlice",
		Input: []tf.Input{
			input, start_indices, size_indices,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that zips together `input_datasets`.
//
// The elements of the resulting dataset are created by zipping corresponding
// elements from each of the input datasets.
//
// The size of the resulting dataset will match the size of the smallest input
// dataset, and no error will be raised if input datasets have different sizes.
//
// Arguments:
//	input_datasets: List of `N` variant Tensors representing datasets to be zipped together.
//
func ZipDataset(scope *Scope, input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "ZipDataset",
		Input: []tf.Input{
			tf.OutputList(input_datasets),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Rounds the values of a tensor to the nearest integer, element-wise.
//
// Rounds half to even, also known as banker's rounding. If you want to round
// according to the current system rounding mode use std::rint.
func Round(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Round",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
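
// A short Go sketch of the half-to-even behavior (not generated code;
// assumes the usual op.NewScope/op.Const flow):
//
// ```go
// s := op.NewScope()
// r := op.Round(s, op.Const(s, []float32{0.5, 1.5, 2.5, -1.5}))
// // r evaluates to [0 2 2 -2]: ties go to the nearest even integer
// ```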

// Returns true if and only if the given Optional variant has a value.
func OptionalHasValue(scope *Scope, optional tf.Output) (has_value tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "OptionalHasValue",
		Input: []tf.Input{
			optional,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a list of tensors with the same shapes and contents as the input
//
// tensors.
//
// This op can be used to override the gradient for complicated functions. For
// example, suppose y = f(x) and we wish to apply a custom function g for backprop
// such that dx = g(dy). In Python,
//
// ```python
// with tf.get_default_graph().gradient_override_map(
//     {'IdentityN': 'OverrideGradientWithG'}):
//   y, _ = identity_n([f(x), x])
//
// @tf.RegisterGradient('OverrideGradientWithG')
// def ApplyG(op, dy, _):
//   return [None, g(dy)]  # Do not backprop to f(x).
// ```
func IdentityN(scope *Scope, input []tf.Output) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IdentityN",
		Input: []tf.Input{
			tf.OutputList(input),
		},
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("IdentityN", err)
		return
	}
	return output
}

// Reverses specific dimensions of a tensor.
//
// Given a `tensor` and an `int32` tensor `axis` representing the set of
// dimensions of `tensor` to reverse, this operation reverses each dimension
// `i` for which there exists `j` s.t. `axis[j] == i`.
//
// `tensor` can have up to 8 dimensions. `axis` may contain zero or more
// entries. If an index is specified more than once, an InvalidArgument
// error is raised.
//
// For example:
//
// ```
// # tensor 't' is [[[[ 0,  1,  2,  3],
// #                  [ 4,  5,  6,  7],
// #                  [ 8,  9, 10, 11]],
// #                 [[12, 13, 14, 15],
// #                  [16, 17, 18, 19],
// #                  [20, 21, 22, 23]]]]
// # tensor 't' shape is [1, 2, 3, 4]
//
// # 'dims' is [3] or 'dims' is [-1]
// reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
//                         [ 7,  6,  5,  4],
//                         [11, 10,  9,  8]],
//                        [[15, 14, 13, 12],
//                         [19, 18, 17, 16],
//                         [23, 22, 21, 20]]]]
//
// # 'dims' is '[1]' (or 'dims' is '[-3]')
// reverse(t, dims) ==> [[[[12, 13, 14, 15],
//                         [16, 17, 18, 19],
//                         [20, 21, 22, 23]],
//                        [[ 0,  1,  2,  3],
//                         [ 4,  5,  6,  7],
//                         [ 8,  9, 10, 11]]]]
//
// # 'dims' is '[2]' (or 'dims' is '[-2]')
// reverse(t, dims) ==> [[[[ 8,  9, 10, 11],
//                         [ 4,  5,  6,  7],
//                         [ 0,  1,  2,  3]],
//                        [[20, 21, 22, 23],
//                         [16, 17, 18, 19],
//                         [12, 13, 14, 15]]]]
// ```
//
// Arguments:
//	tensor: Up to 8-D.
//	axis: 1-D. The indices of the dimensions to reverse. Must be in the range
// `[-rank(tensor), rank(tensor))`.
//
// Returns The same shape as `tensor`.
func ReverseV2(scope *Scope, tensor tf.Output, axis tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReverseV2",
		Input: []tf.Input{
			tensor, axis,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
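
// A minimal Go sketch (not generated code; assumes the usual
// op.NewScope/op.Const flow):
//
// ```go
// s := op.NewScope()
// t := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
// r := op.ReverseV2(s, t, op.Const(s, []int32{1})) // reverse dimension 1
// // r evaluates to [[3 2 1] [6 5 4]]
// ```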

// RetrieveTPUEmbeddingFTRLParametersAttr is an optional argument to RetrieveTPUEmbeddingFTRLParameters.
type RetrieveTPUEmbeddingFTRLParametersAttr func(optionalAttr)

// RetrieveTPUEmbeddingFTRLParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingFTRLParametersTableId(value int64) RetrieveTPUEmbeddingFTRLParametersAttr {
	return func(m optionalAttr) {
		m["table_id"] = value
	}
}

// RetrieveTPUEmbeddingFTRLParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingFTRLParametersTableName(value string) RetrieveTPUEmbeddingFTRLParametersAttr {
	return func(m optionalAttr) {
		m["table_name"] = value
	}
}

// RetrieveTPUEmbeddingFTRLParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingFTRLParametersConfig(value string) RetrieveTPUEmbeddingFTRLParametersAttr {
	return func(m optionalAttr) {
		m["config"] = value
	}
}

// Retrieve FTRL embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
//	parameters: Parameter parameters updated by the FTRL optimization algorithm.
//	accumulators: Parameter accumulators updated by the FTRL optimization algorithm.
//	linears: Parameter linears updated by the FTRL optimization algorithm.
func RetrieveTPUEmbeddingFTRLParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingFTRLParametersAttr) (parameters tf.Output, accumulators tf.Output, linears tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RetrieveTPUEmbeddingFTRLParameters",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.
//
// This Op does not require `a_indices` be sorted in standard lexicographic order.
//
// Arguments:
//	a_indices: 2-D.  The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
//	a_values: 1-D.  The `values` of the `SparseTensor`, with shape `[nnz]`.
//	a_shape: 1-D.  The `shape` of the `SparseTensor`, with shape `[ndims]`.
//	b: `ndims`-D Tensor.  With shape `a_shape`.
func SparseTensorDenseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseTensorDenseAdd",
		Input: []tf.Input{
			a_indices, a_values, a_shape, b,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns a copy of the input tensor.
func Snapshot(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Snapshot",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a dataset that emits the outputs of `input_dataset` `count` times.
//
// Arguments:
//
//	count: A scalar representing the number of times that `input_dataset` should
// be repeated. A value of `-1` indicates that it should be repeated infinitely.
//
func RepeatDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "RepeatDataset",
		Input: []tf.Input{
			input_dataset, count,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Converts a dense tensor to a (possibly batched) CSRSparseMatrix.
//
// Arguments:
//	dense_input: A Dense tensor.
//	indices: Indices of nonzero elements.
//
// Returns A (possibly batched) CSRSparseMatrix.
func DenseToCSRSparseMatrix(scope *Scope, dense_input tf.Output, indices tf.Output) (sparse_output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DenseToCSRSparseMatrix",
		Input: []tf.Input{
			dense_input, indices,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates a tensor filled with a scalar value.
//
// This operation creates a tensor of shape `dims` and fills it with `value`.
//
// For example:
//
// ```
// # Output tensor has shape [2, 3].
// fill([2, 3], 9) ==> [[9, 9, 9],
//                      [9, 9, 9]]
// ```
//
// `tf.fill` differs from `tf.constant` in a few ways:
//
// *   `tf.fill` only supports scalar contents, whereas `tf.constant` supports
//     Tensor values.
// *   `tf.fill` creates an Op in the computation graph that constructs the actual
//     Tensor value at runtime. This is in contrast to `tf.constant` which embeds
//     the entire Tensor into the graph with a `Const` node.
// *   Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes
//     based on other runtime Tensors, unlike `tf.constant`.
//
// Arguments:
//	dims: 1-D. Represents the shape of the output tensor.
//	value: 0-D (scalar). Value to fill the returned tensor.
//
// @compatibility(numpy)
// Equivalent to np.full
// @end_compatibility
func Fill(scope *Scope, dims tf.Output, value tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Fill",
		Input: []tf.Input{
			dims, value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
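
// The same example in Go (a sketch, not generated code; assumes the usual
// op.NewScope/op.Const flow):
//
// ```go
// s := op.NewScope()
// filled := op.Fill(s, op.Const(s, []int32{2, 3}), op.Const(s, int32(9)))
// // filled evaluates to [[9 9 9] [9 9 9]]
// ```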

// Sends `input` to all devices that are connected to the output.
//
// Sends `input` to all devices that are connected to the output.
//
// The graph should be constructed so that all ops connected to the output have a
// valid device assignment, and the op itself is assigned one of these devices.
//
// input: The input to the broadcast.
// output: The same as input.
// shape: The shape of the input tensor.
//
func NcclBroadcast(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shape": shape}
	opspec := tf.OpSpec{
		Type: "NcclBroadcast",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes cos of x element-wise.
//
//   Given an input tensor, this function computes cosine of every
//   element in the tensor. Input range is `(-inf, inf)` and
//   output range is `[-1,1]`. For `±inf` inputs, `nan` is returned.
//
//   ```python
//   x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
//   tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan]
//   ```
func Cos(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Cos",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Pads a tensor.
//
// This operation pads `input` according to the `paddings` and `constant_values`
// you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
// how many padding values to add before the contents of `input` in that dimension,
// and `paddings[D, 1]` indicates how many padding values to add after the contents
// of `input` in that dimension. `constant_values` is a scalar tensor of the same
// type as `input` that indicates the value to use for padding `input`.
//
// The padded size of each dimension D of the output is:
//
// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
//
// For example:
//
// ```
// # 't' is [[1, 1], [2, 2]]
// # 'paddings' is [[1, 1], [2, 2]]
// # 'constant_values' is 0
// # rank of 't' is 2
// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
//                       [0, 0, 1, 1, 0, 0]
//                       [0, 0, 2, 2, 0, 0]
//                       [0, 0, 0, 0, 0, 0]]
// ```
func PadV2(scope *Scope, input tf.Output, paddings tf.Output, constant_values tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "PadV2",
		Input: []tf.Input{
			input, paddings, constant_values,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
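
// The same example in Go, with an explicit constant_values of 0 (a sketch,
// not generated code; assumes the usual op.NewScope/op.Const flow):
//
// ```go
// s := op.NewScope()
// t := op.Const(s, [][]int32{{1, 1}, {2, 2}})
// paddings := op.Const(s, [][]int32{{1, 1}, {2, 2}})
// padded := op.PadV2(s, t, paddings, op.Const(s, int32(0)))
// // padded evaluates to the 4x6 tensor shown above
// ```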

// ShapeAttr is an optional argument to Shape.
type ShapeAttr func(optionalAttr)

// ShapeOutType sets the optional out_type attribute to value.
// If not specified, defaults to DT_INT32
func ShapeOutType(value tf.DataType) ShapeAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Returns the shape of a tensor.
//
// This operation returns a 1-D integer tensor representing the shape of `input`.
//
// For example:
//
// ```
// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
// shape(t) ==> [2, 2, 3]
// ```
func Shape(scope *Scope, input tf.Output, optional ...ShapeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Shape",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
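
// A minimal Go sketch (not generated code; assumes the usual
// op.NewScope/op.Const flow). The out_type attr selects the integer type of
// the result:
//
// ```go
// s := op.NewScope()
// t := op.Const(s, [][][]int32{{{1, 1, 1}, {2, 2, 2}}, {{3, 3, 3}, {4, 4, 4}}})
// shp := op.Shape(s, t, op.ShapeOutType(tf.Int64))
// // shp evaluates to [2 2 3]
// ```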

// QueueEnqueueManyV2Attr is an optional argument to QueueEnqueueManyV2.
type QueueEnqueueManyV2Attr func(optionalAttr)

// QueueEnqueueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
//
// value: If the queue is too full, this operation will block for up
// to timeout_ms milliseconds.
// Note: This option is not supported yet.
// If not specified, defaults to -1
func QueueEnqueueManyV2TimeoutMs(value int64) QueueEnqueueManyV2Attr {
	return func(m optionalAttr) {
		m["timeout_ms"] = value
	}
}

// Enqueues zero or more tuples of one or more tensors in the given queue.
//
// This operation slices each component tensor along the 0th dimension to
// make multiple queue elements. All of the tuple components must have the
// same size in the 0th dimension.
//
// The components input has k elements, which correspond to the components of
// tuples stored in the given queue.
//
// N.B. If the queue is full, this operation will block until the given
// elements have been enqueued (or 'timeout_ms' elapses, if specified).
//
// Arguments:
//	handle: The handle to a queue.
//	components: One or more tensors from which the enqueued tensors should
// be taken.
//
// Returns the created operation.
func QueueEnqueueManyV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueManyV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QueueEnqueueManyV2",
		Input: []tf.Input{
			handle, tf.OutputList(components),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
46664
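// Example (editor's sketch, not generated): enqueueing a batch into a FIFO
// queue. FIFOQueueV2 is assumed to be the wrapper from this same package;
// each row of `batch` becomes one queue element.
//
// ```
// s := op.NewScope()
// q := op.FIFOQueueV2(s, []tf.DataType{tf.Int32})
// batch := op.Const(s, [][]int32{{1}, {2}, {3}}) // three elements of shape (1)
// enq := op.QueueEnqueueManyV2(s, q, []tf.Output{batch},
// 	op.QueueEnqueueManyV2TimeoutMs(1000)) // timeout_ms is not honored yet
// ```
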
46665// AbortAttr is an optional argument to Abort.
46666type AbortAttr func(optionalAttr)
46667
46668// AbortErrorMsg sets the optional error_msg attribute to value.
46669//
46670// value: A string which is the message associated with the exception.
46671// If not specified, defaults to ""
46672func AbortErrorMsg(value string) AbortAttr {
46673	return func(m optionalAttr) {
46674		m["error_msg"] = value
46675	}
46676}
46677
46678// AbortExitWithoutError sets the optional exit_without_error attribute to value.
46679// If not specified, defaults to false
46680func AbortExitWithoutError(value bool) AbortAttr {
46681	return func(m optionalAttr) {
46682		m["exit_without_error"] = value
46683	}
46684}
46685
// Raises an exception to abort the process when called.
//
// If exit_without_error is true, the process will exit normally;
// otherwise it will exit with a SIGABRT signal.
46690//
46691// Returns nothing but an exception.
46692//
46693// Returns the created operation.
46694func Abort(scope *Scope, optional ...AbortAttr) (o *tf.Operation) {
46695	if scope.Err() != nil {
46696		return
46697	}
46698	attrs := map[string]interface{}{}
46699	for _, a := range optional {
46700		a(attrs)
46701	}
46702	opspec := tf.OpSpec{
46703		Type: "Abort",
46704
46705		Attrs: attrs,
46706	}
46707	return scope.AddOperation(opspec)
46708}
46709
46710// Merges summaries.
46711//
46712// This op creates a
46713// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
46714// protocol buffer that contains the union of all the values in the input
46715// summaries.
46716//
46717// When the Op is run, it reports an `InvalidArgument` error if multiple values
46718// in the summaries to merge use the same tag.
46719//
46720// Arguments:
46721//	inputs: Can be of any shape.  Each must contain serialized `Summary` protocol
46722// buffers.
46723//
46724// Returns Scalar. Serialized `Summary` protocol buffer.
46725func MergeSummary(scope *Scope, inputs []tf.Output) (summary tf.Output) {
46726	if scope.Err() != nil {
46727		return
46728	}
46729	opspec := tf.OpSpec{
46730		Type: "MergeSummary",
46731		Input: []tf.Input{
46732			tf.OutputList(inputs),
46733		},
46734	}
46735	op := scope.AddOperation(opspec)
46736	return op.Output(0)
46737}
46738
46739// Uncompresses a compressed dataset element.
46740func UncompressElement(scope *Scope, compressed tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
46741	if scope.Err() != nil {
46742		return
46743	}
46744	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
46745	opspec := tf.OpSpec{
46746		Type: "UncompressElement",
46747		Input: []tf.Input{
46748			compressed,
46749		},
46750		Attrs: attrs,
46751	}
46752	op := scope.AddOperation(opspec)
46753	if scope.Err() != nil {
46754		return
46755	}
46756	var idx int
46757	var err error
46758	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
46759		scope.UpdateErr("UncompressElement", err)
46760		return
46761	}
46762	return components
46763}
46764
46765// ExperimentalParseExampleDatasetAttr is an optional argument to ExperimentalParseExampleDataset.
46766type ExperimentalParseExampleDatasetAttr func(optionalAttr)
46767
46768// ExperimentalParseExampleDatasetSloppy sets the optional sloppy attribute to value.
46769// If not specified, defaults to false
46770func ExperimentalParseExampleDatasetSloppy(value bool) ExperimentalParseExampleDatasetAttr {
46771	return func(m optionalAttr) {
46772		m["sloppy"] = value
46773	}
46774}
46775
46776// Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.
46777//
46778// Arguments:
46779//
46780//
46781//	dense_defaults: A dict mapping string keys to `Tensor`s.
46782// The keys of the dict must match the dense_keys of the feature.
46783//	sparse_keys: A list of string keys in the examples features.
46784// The results for these keys will be returned as `SparseTensor` objects.
46785//	dense_keys: A list of Ndense string Tensors (scalars).
46786// The keys expected in the Examples features associated with dense values.
46787//	sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
46788// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
46789// and `tf.string` (`BytesList`) are supported.
46790//	dense_shapes: List of tuples with the same length as `dense_keys`.
46791// The shape of the data for each dense feature referenced by `dense_keys`.
46792// Required for any input tensors identified by `dense_keys`.  Must be
46793// either fully defined, or may contain an unknown first dimension.
46794// An unknown first dimension means the feature is treated as having
46795// a variable number of blocks, and the output shape along this dimension
46796// is considered unknown at graph build time.  Padding is applied for
46797// minibatch elements smaller than the maximum number of blocks for the
46798// given feature along this dimension.
46799//	output_types: The type list for the return values.
46800//	output_shapes: The list of shapes being produced.
46801func ExperimentalParseExampleDataset(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, dense_defaults []tf.Output, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalParseExampleDatasetAttr) (handle tf.Output) {
46802	if scope.Err() != nil {
46803		return
46804	}
46805	attrs := map[string]interface{}{"sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes, "output_types": output_types, "output_shapes": output_shapes}
46806	for _, a := range optional {
46807		a(attrs)
46808	}
46809	opspec := tf.OpSpec{
46810		Type: "ExperimentalParseExampleDataset",
46811		Input: []tf.Input{
46812			input_dataset, num_parallel_calls, tf.OutputList(dense_defaults),
46813		},
46814		Attrs: attrs,
46815	}
46816	op := scope.AddOperation(opspec)
46817	return op.Output(0)
46818}
46819
46820// GatherV2Attr is an optional argument to GatherV2.
46821type GatherV2Attr func(optionalAttr)
46822
46823// GatherV2BatchDims sets the optional batch_dims attribute to value.
46824// If not specified, defaults to 0
46825func GatherV2BatchDims(value int64) GatherV2Attr {
46826	return func(m optionalAttr) {
46827		m["batch_dims"] = value
46828	}
46829}
46830
46831// Gather slices from `params` axis `axis` according to `indices`.
46832//
46833// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
46834// Produces an output tensor with shape `params.shape[:axis] +
46835// indices.shape[batch_dims:] + params.shape[axis + 1:]` where:
46836//
46837// ```python
46838//     # Scalar indices (output is rank(params) - 1).
46839//     output[a_0, ..., a_n, b_0, ..., b_n] =
46840//       params[a_0, ..., a_n, indices, b_0, ..., b_n]
46841//
46842//     # Vector indices (output is rank(params)).
46843//     output[a_0, ..., a_n, i, b_0, ..., b_n] =
46844//       params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
46845//
46846//     # Higher rank indices (output is rank(params) + rank(indices) - 1).
46847//     output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
46848//       params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
46849// ```
46850//
// (Illustration: https://www.tensorflow.org/images/Gather.png)
46854//
46855// Note that on CPU, if an out of bound index is found, an error is returned.
46856// On GPU, if an out of bound index is found, a 0 is stored in the
46857// corresponding output value.
46858//
46859// See also `tf.batch_gather` and `tf.gather_nd`.
46860//
46861// Arguments:
46862//	params: The tensor from which to gather values. Must be at least rank
46863// `axis + 1`.
46864//	indices: Index tensor. Must be in range `[0, params.shape[axis])`.
46865//	axis: The axis in `params` to gather `indices` from. Defaults to the first
46866// dimension. Supports negative indexes.
46867//
46868// Returns Values from `params` gathered from indices given by `indices`, with
46869// shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.
46870func GatherV2(scope *Scope, params tf.Output, indices tf.Output, axis tf.Output, optional ...GatherV2Attr) (output tf.Output) {
46871	if scope.Err() != nil {
46872		return
46873	}
46874	attrs := map[string]interface{}{}
46875	for _, a := range optional {
46876		a(attrs)
46877	}
46878	opspec := tf.OpSpec{
46879		Type: "GatherV2",
46880		Input: []tf.Input{
46881			params, indices, axis,
46882		},
46883		Attrs: attrs,
46884	}
46885	op := scope.AddOperation(opspec)
46886	return op.Output(0)
46887}
46888
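// Example (editor's sketch, not generated): gathering along axis 0.
//
// ```
// s := op.NewScope()
// params := op.Const(s, []float32{10, 20, 30, 40})
// indices := op.Const(s, []int32{3, 0})
// axis := op.Const(s, int32(0))
// out := op.GatherV2(s, params, indices, axis) // ==> [40, 10]
// ```
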
46889// Split the data from the input value into TensorArray elements.
46890//
46891// Assuming that `lengths` takes on values
46892//
46893//   ```(n0, n1, ..., n(T-1))```
46894//
46895// and that `value` has shape
46896//
46897//   ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,
46898//
46899// this splits values into a TensorArray with T tensors.
46900//
46901// TensorArray index t will be the subtensor of values with starting position
46902//
46903//   ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
46904//
46905// and having size
46906//
46907//   ```nt x d0 x d1 x ...```
46908//
46909// Arguments:
46910//	handle: The handle to a TensorArray.
46911//	value: The concatenated tensor to write to the TensorArray.
46912//	lengths: The vector of lengths, how to split the rows of value into the
46913// TensorArray.
46914//	flow_in: A float scalar that enforces proper chaining of operations.
46915//
46916// Returns A float scalar that enforces proper chaining of operations.
46917func TensorArraySplitV3(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output) {
46918	if scope.Err() != nil {
46919		return
46920	}
46921	opspec := tf.OpSpec{
46922		Type: "TensorArraySplitV3",
46923		Input: []tf.Input{
46924			handle, value, lengths, flow_in,
46925		},
46926	}
46927	op := scope.AddOperation(opspec)
46928	return op.Output(0)
46929}
46930
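// Example (editor's sketch, not generated): splitting a length-6 vector into
// a TensorArray of 3 elements with lengths (2, 3, 1). TensorArrayV3 is
// assumed to be the wrapper from this same package, returning a handle and an
// initial flow scalar.
//
// ```
// s := op.NewScope()
// ta, flow := op.TensorArrayV3(s, op.Const(s, int32(3)), tf.Float)
// value := op.Const(s, []float32{1, 2, 3, 4, 5, 6})
// lengths := op.Const(s, []int64{2, 3, 1})
// flowOut := op.TensorArraySplitV3(s, ta, value, lengths, flow)
// ```
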
46931// Creates a MultiDeviceIterator resource.
46932//
46933// Arguments:
46934//	devices: A list of devices the iterator works across.
46935//	shared_name: If non-empty, this resource will be shared under the given name
46936// across multiple sessions.
46937//	container: If non-empty, this resource is placed in the given container.
46938// Otherwise, a default container is used.
46939//	output_types: The type list for the return values.
46940//	output_shapes: The list of shapes being produced.
46941//
46942// Returns Handle to the resource created.
46943func MultiDeviceIterator(scope *Scope, devices []string, shared_name string, container string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
46944	if scope.Err() != nil {
46945		return
46946	}
46947	attrs := map[string]interface{}{"devices": devices, "shared_name": shared_name, "container": container, "output_types": output_types, "output_shapes": output_shapes}
46948	opspec := tf.OpSpec{
46949		Type: "MultiDeviceIterator",
46950
46951		Attrs: attrs,
46952	}
46953	op := scope.AddOperation(opspec)
46954	return op.Output(0)
46955}
46956
46957// CudnnRNNParamsSizeAttr is an optional argument to CudnnRNNParamsSize.
46958type CudnnRNNParamsSizeAttr func(optionalAttr)
46959
46960// CudnnRNNParamsSizeRnnMode sets the optional rnn_mode attribute to value.
46961// If not specified, defaults to "lstm"
46962func CudnnRNNParamsSizeRnnMode(value string) CudnnRNNParamsSizeAttr {
46963	return func(m optionalAttr) {
46964		m["rnn_mode"] = value
46965	}
46966}
46967
46968// CudnnRNNParamsSizeInputMode sets the optional input_mode attribute to value.
46969// If not specified, defaults to "linear_input"
46970func CudnnRNNParamsSizeInputMode(value string) CudnnRNNParamsSizeAttr {
46971	return func(m optionalAttr) {
46972		m["input_mode"] = value
46973	}
46974}
46975
46976// CudnnRNNParamsSizeDirection sets the optional direction attribute to value.
46977// If not specified, defaults to "unidirectional"
46978func CudnnRNNParamsSizeDirection(value string) CudnnRNNParamsSizeAttr {
46979	return func(m optionalAttr) {
46980		m["direction"] = value
46981	}
46982}
46983
46984// CudnnRNNParamsSizeDropout sets the optional dropout attribute to value.
46985// If not specified, defaults to 0
46986func CudnnRNNParamsSizeDropout(value float32) CudnnRNNParamsSizeAttr {
46987	return func(m optionalAttr) {
46988		m["dropout"] = value
46989	}
46990}
46991
46992// CudnnRNNParamsSizeSeed sets the optional seed attribute to value.
46993// If not specified, defaults to 0
46994func CudnnRNNParamsSizeSeed(value int64) CudnnRNNParamsSizeAttr {
46995	return func(m optionalAttr) {
46996		m["seed"] = value
46997	}
46998}
46999
47000// CudnnRNNParamsSizeSeed2 sets the optional seed2 attribute to value.
47001// If not specified, defaults to 0
47002func CudnnRNNParamsSizeSeed2(value int64) CudnnRNNParamsSizeAttr {
47003	return func(m optionalAttr) {
47004		m["seed2"] = value
47005	}
47006}
47007
47008// CudnnRNNParamsSizeNumProj sets the optional num_proj attribute to value.
47009// If not specified, defaults to 0
47010func CudnnRNNParamsSizeNumProj(value int64) CudnnRNNParamsSizeAttr {
47011	return func(m optionalAttr) {
47012		m["num_proj"] = value
47013	}
47014}
47015
47016// Computes size of weights that can be used by a Cudnn RNN model.
47017//
47018// Return the params size that can be used by the Cudnn RNN model. Subsequent
47019// weight allocation and initialization should use this size.
47020//
47021// num_layers: Specifies the number of layers in the RNN model.
47022// num_units: Specifies the size of the hidden state.
47023// input_size: Specifies the size of the input state.
47024// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicates whether there is a linear projection between the input and
//   the actual computation before the first layer. 'skip_input' is only allowed
47027//   when input_size == num_units; 'auto_select' implies 'skip_input' when
47028//   input_size == num_units; otherwise, it implies 'linear_input'.
47029// direction: Indicates whether a bidirectional model will be used.
47030//   dir = (direction == bidirectional) ? 2 : 1
47031// dropout: dropout probability. When set to 0., dropout is disabled.
47032// seed: the 1st part of a seed to initialize dropout.
47033// seed2: the 2nd part of a seed to initialize dropout.
47034// params_size: The size of the params buffer that should be allocated and
47035//   initialized for this RNN model. Note that this params buffer may not be
47036//   compatible across GPUs. Please use CudnnRNNParamsWeights and
47037//   CudnnRNNParamsBiases to save and restore them in a way that is compatible
47038//   across different runs.
47039func CudnnRNNParamsSize(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, T tf.DataType, S tf.DataType, optional ...CudnnRNNParamsSizeAttr) (params_size tf.Output) {
47040	if scope.Err() != nil {
47041		return
47042	}
47043	attrs := map[string]interface{}{"T": T, "S": S}
47044	for _, a := range optional {
47045		a(attrs)
47046	}
47047	opspec := tf.OpSpec{
47048		Type: "CudnnRNNParamsSize",
47049		Input: []tf.Input{
47050			num_layers, num_units, input_size,
47051		},
47052		Attrs: attrs,
47053	}
47054	op := scope.AddOperation(opspec)
47055	return op.Output(0)
47056}
47057
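// Example (editor's sketch, not generated): querying the parameter-buffer
// size for a 2-layer LSTM. Here T is assumed to be the weight dtype and S the
// dtype of the returned size.
//
// ```
// s := op.NewScope()
// size := op.CudnnRNNParamsSize(s,
// 	op.Const(s, int32(2)),   // num_layers
// 	op.Const(s, int32(128)), // num_units
// 	op.Const(s, int32(64)),  // input_size
// 	tf.Float, tf.Int64,
// 	op.CudnnRNNParamsSizeRnnMode("lstm"))
// ```
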
47058// ResourceGatherAttr is an optional argument to ResourceGather.
47059type ResourceGatherAttr func(optionalAttr)
47060
47061// ResourceGatherBatchDims sets the optional batch_dims attribute to value.
47062// If not specified, defaults to 0
47063func ResourceGatherBatchDims(value int64) ResourceGatherAttr {
47064	return func(m optionalAttr) {
47065		m["batch_dims"] = value
47066	}
47067}
47068
47069// ResourceGatherValidateIndices sets the optional validate_indices attribute to value.
47070// If not specified, defaults to true
47071func ResourceGatherValidateIndices(value bool) ResourceGatherAttr {
47072	return func(m optionalAttr) {
47073		m["validate_indices"] = value
47074	}
47075}
47076
47077// Gather slices from the variable pointed to by `resource` according to `indices`.
47078//
47079// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
47080// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
47081//
47082// ```python
47083//     # Scalar indices
47084//     output[:, ..., :] = params[indices, :, ... :]
47085//
47086//     # Vector indices
47087//     output[i, :, ..., :] = params[indices[i], :, ... :]
47088//
47089//     # Higher rank indices
47090//     output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
47091// ```
47092func ResourceGather(scope *Scope, resource tf.Output, indices tf.Output, dtype tf.DataType, optional ...ResourceGatherAttr) (output tf.Output) {
47093	if scope.Err() != nil {
47094		return
47095	}
47096	attrs := map[string]interface{}{"dtype": dtype}
47097	for _, a := range optional {
47098		a(attrs)
47099	}
47100	opspec := tf.OpSpec{
47101		Type: "ResourceGather",
47102		Input: []tf.Input{
47103			resource, indices,
47104		},
47105		Attrs: attrs,
47106	}
47107	op := scope.AddOperation(opspec)
47108	return op.Output(0)
47109}
47110
47111// Returns x + y element-wise.
47112//
47113// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
47114// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
47115//
47116// Given two input tensors, the `tf.add` operation computes the sum for every element in the tensor.
47117//
47118// Both input and output have a range `(-inf, inf)`.
47119//
47120func Add(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
47121	if scope.Err() != nil {
47122		return
47123	}
47124	opspec := tf.OpSpec{
47125		Type: "Add",
47126		Input: []tf.Input{
47127			x, y,
47128		},
47129	}
47130	op := scope.AddOperation(opspec)
47131	return op.Output(0)
47132}
47133
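// Example (editor's sketch, not generated): broadcasting a scalar over a
// vector.
//
// ```
// s := op.NewScope()
// z := op.Add(s, op.Const(s, []float32{1, 2, 3}), op.Const(s, float32(10))) // ==> [11, 12, 13]
// ```
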
47134// UniformCandidateSamplerAttr is an optional argument to UniformCandidateSampler.
47135type UniformCandidateSamplerAttr func(optionalAttr)
47136
47137// UniformCandidateSamplerSeed sets the optional seed attribute to value.
47138//
47139// value: If either seed or seed2 are set to be non-zero, the random number
47140// generator is seeded by the given seed.  Otherwise, it is seeded by a
47141// random seed.
47142// If not specified, defaults to 0
47143func UniformCandidateSamplerSeed(value int64) UniformCandidateSamplerAttr {
47144	return func(m optionalAttr) {
47145		m["seed"] = value
47146	}
47147}
47148
47149// UniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
47150//
// value: A second seed to avoid seed collision.
47152// If not specified, defaults to 0
47153func UniformCandidateSamplerSeed2(value int64) UniformCandidateSamplerAttr {
47154	return func(m optionalAttr) {
47155		m["seed2"] = value
47156	}
47157}
47158
47159// Generates labels for candidate sampling with a uniform distribution.
47160//
47161// See explanations of candidate sampling and the data formats at
47162// go/candidate-sampling.
47163//
47164// For each batch, this op picks a single set of sampled candidate labels.
47165//
47166// The advantages of sampling candidates per-batch are simplicity and the
47167// possibility of efficient dense matrix multiplication. The disadvantage is that
47168// the sampled candidates must be chosen independently of the context and of the
47169// true labels.
47170//
47171// Arguments:
47172//	true_classes: A batch_size * num_true matrix, in which each row contains the
47173// IDs of the num_true target_classes in the corresponding original label.
47174//	num_true: Number of true labels per context.
47175//	num_sampled: Number of candidates to randomly sample.
47176//	unique: If unique is true, we sample with rejection, so that all sampled
47177// candidates in a batch are unique. This requires some approximation to
47178// estimate the post-rejection sampling probabilities.
47179//	range_max: The sampler will sample integers from the interval [0, range_max).
47180//
47181// Returns:
47182//	sampled_candidates: A vector of length num_sampled, in which each element is
47183// the ID of a sampled candidate.
47184//	true_expected_count: A batch_size * num_true matrix, representing
47185// the number of times each candidate is expected to occur in a batch
47186// of sampled candidates. If unique=true, then this is a probability.
47187//	sampled_expected_count: A vector of length num_sampled, for each sampled
47188// candidate representing the number of times the candidate is expected
47189// to occur in a batch of sampled candidates.  If unique=true, then this is a
47190// probability.
47191func UniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...UniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
47192	if scope.Err() != nil {
47193		return
47194	}
47195	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
47196	for _, a := range optional {
47197		a(attrs)
47198	}
47199	opspec := tf.OpSpec{
47200		Type: "UniformCandidateSampler",
47201		Input: []tf.Input{
47202			true_classes,
47203		},
47204		Attrs: attrs,
47205	}
47206	op := scope.AddOperation(opspec)
47207	return op.Output(0), op.Output(1), op.Output(2)
47208}
47209
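// Example (editor's sketch, not generated): sampling 5 unique candidates from
// [0, 10) for a batch of two examples with one true label each.
//
// ```
// s := op.NewScope()
// trueClasses := op.Const(s, [][]int64{{3}, {7}})
// sampled, trueExp, sampledExp := op.UniformCandidateSampler(s, trueClasses,
// 	1, 5, true, 10, // num_true, num_sampled, unique, range_max
// 	op.UniformCandidateSamplerSeed(42))
// ```
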
47210// Read an element from the TensorArray into output `value`.
47211//
47212// Arguments:
47213//	handle: The handle to a TensorArray.
47214//
47215//	flow_in: A float scalar that enforces proper chaining of operations.
47216//	dtype: The type of the elem that is returned.
47217//
47218// Returns The tensor that is read from the TensorArray.
47219func TensorArrayReadV3(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output) {
47220	if scope.Err() != nil {
47221		return
47222	}
47223	attrs := map[string]interface{}{"dtype": dtype}
47224	opspec := tf.OpSpec{
47225		Type: "TensorArrayReadV3",
47226		Input: []tf.Input{
47227			handle, index, flow_in,
47228		},
47229		Attrs: attrs,
47230	}
47231	op := scope.AddOperation(opspec)
47232	return op.Output(0)
47233}
47234
47235// GatherAttr is an optional argument to Gather.
47236type GatherAttr func(optionalAttr)
47237
47238// GatherValidateIndices sets the optional validate_indices attribute to value.
47239// If not specified, defaults to true
47240func GatherValidateIndices(value bool) GatherAttr {
47241	return func(m optionalAttr) {
47242		m["validate_indices"] = value
47243	}
47244}
47245
47246// Gather slices from `params` according to `indices`.
47247//
47248// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
47249// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
47250//
47251// ```python
47252//     # Scalar indices
47253//     output[:, ..., :] = params[indices, :, ... :]
47254//
47255//     # Vector indices
47256//     output[i, :, ..., :] = params[indices[i], :, ... :]
47257//
47258//     # Higher rank indices
47259//     output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
47260// ```
47261//
47262// If `indices` is a permutation and `len(indices) == params.shape[0]` then
47263// this operation will permute `params` accordingly.
47264//
47265// `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
47266// `indices` are always validated to be within range. If assigned to GPU,
47267// out-of-bound indices result in safe but unspecified behavior, which may include
47268// raising an error.
47269//
// (Illustration: https://www.tensorflow.org/images/Gather.png)
47273func Gather(scope *Scope, params tf.Output, indices tf.Output, optional ...GatherAttr) (output tf.Output) {
47274	if scope.Err() != nil {
47275		return
47276	}
47277	attrs := map[string]interface{}{}
47278	for _, a := range optional {
47279		a(attrs)
47280	}
47281	opspec := tf.OpSpec{
47282		Type: "Gather",
47283		Input: []tf.Input{
47284			params, indices,
47285		},
47286		Attrs: attrs,
47287	}
47288	op := scope.AddOperation(opspec)
47289	return op.Output(0)
47290}
47291
47292// AvgPoolGradAttr is an optional argument to AvgPoolGrad.
47293type AvgPoolGradAttr func(optionalAttr)
47294
47295// AvgPoolGradDataFormat sets the optional data_format attribute to value.
47296//
47297// value: Specify the data format of the input and output data. With the
47298// default format "NHWC", the data is stored in the order of:
47299//     [batch, in_height, in_width, in_channels].
47300// Alternatively, the format could be "NCHW", the data storage order of:
47301//     [batch, in_channels, in_height, in_width].
47302// If not specified, defaults to "NHWC"
47303func AvgPoolGradDataFormat(value string) AvgPoolGradAttr {
47304	return func(m optionalAttr) {
47305		m["data_format"] = value
47306	}
47307}
47308
47309// Computes gradients of the average pooling function.
47310//
47311// Arguments:
47312//	orig_input_shape: 1-D.  Shape of the original input to `avg_pool`.
47313//	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.
47314// the output of `avg_pool`.
47315//	ksize: The size of the sliding window for each dimension of the input.
47316//	strides: The stride of the sliding window for each dimension of the input.
47317//	padding: The type of padding algorithm to use.
47318//
47319// Returns 4-D.  Gradients w.r.t. the input of `avg_pool`.
47320func AvgPoolGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolGradAttr) (output tf.Output) {
47321	if scope.Err() != nil {
47322		return
47323	}
47324	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
47325	for _, a := range optional {
47326		a(attrs)
47327	}
47328	opspec := tf.OpSpec{
47329		Type: "AvgPoolGrad",
47330		Input: []tf.Input{
47331			orig_input_shape, grad,
47332		},
47333		Attrs: attrs,
47334	}
47335	op := scope.AddOperation(opspec)
47336	return op.Output(0)
47337}
47338
47339// Concatenates a list of `N` tensors along the first dimension.
47340//
47341// The input tensors are all required to have size 1 in the first dimension.
47342//
47343// For example:
47344//
47345// ```
47346// # 'x' is [[1, 4]]
47347// # 'y' is [[2, 5]]
47348// # 'z' is [[3, 6]]
47349// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
47350// ```
47351//
47352// The difference between concat and parallel_concat is that concat requires all
47353// of the inputs be computed before the operation will begin but doesn't require
47354// that the input shapes be known during graph construction.  Parallel concat
47355// will copy pieces of the input into the output as they become available, in
47356// some situations this can provide a performance benefit.
47357//
47358// Arguments:
47359//	values: Tensors to be concatenated. All must have size 1 in the first dimension
47360// and same shape.
47361//	shape: the final shape of the result; should be equal to the shapes of any input
47362// but with the number of input values in the first dimension.
47363//
47364// Returns The concatenated tensor.
47365func ParallelConcat(scope *Scope, values []tf.Output, shape tf.Shape) (output tf.Output) {
47366	if scope.Err() != nil {
47367		return
47368	}
47369	attrs := map[string]interface{}{"shape": shape}
47370	opspec := tf.OpSpec{
47371		Type: "ParallelConcat",
47372		Input: []tf.Input{
47373			tf.OutputList(values),
47374		},
47375		Attrs: attrs,
47376	}
47377	op := scope.AddOperation(opspec)
47378	return op.Output(0)
47379}
47380
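// Example (editor's sketch, not generated): the x/y/z case above.
// tf.MakeShape builds the static `shape` attribute.
//
// ```
// s := op.NewScope()
// x := op.Const(s, [][]int32{{1, 4}})
// y := op.Const(s, [][]int32{{2, 5}})
// z := op.Const(s, [][]int32{{3, 6}})
// out := op.ParallelConcat(s, []tf.Output{x, y, z}, tf.MakeShape(3, 2))
// ```
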
47381// EditDistanceAttr is an optional argument to EditDistance.
47382type EditDistanceAttr func(optionalAttr)
47383
47384// EditDistanceNormalize sets the optional normalize attribute to value.
47385//
// value: boolean (if true, edit distances are normalized by the length of truth).
47389// If not specified, defaults to true
47390func EditDistanceNormalize(value bool) EditDistanceAttr {
47391	return func(m optionalAttr) {
47392		m["normalize"] = value
47393	}
47394}
47395
47396// Computes the (possibly normalized) Levenshtein Edit Distance.
47397//
47398// The inputs are variable-length sequences provided by SparseTensors
47399//   (hypothesis_indices, hypothesis_values, hypothesis_shape)
47400// and
47401//   (truth_indices, truth_values, truth_shape).
47402//
47403// The inputs are:
47404//
47405// Arguments:
47406//	hypothesis_indices: The indices of the hypothesis list SparseTensor.
47407// This is an N x R int64 matrix.
47408//	hypothesis_values: The values of the hypothesis list SparseTensor.
47409// This is an N-length vector.
47410//	hypothesis_shape: The shape of the hypothesis list SparseTensor.
47411// This is an R-length vector.
47412//	truth_indices: The indices of the truth list SparseTensor.
47413// This is an M x R int64 matrix.
47414//	truth_values: The values of the truth list SparseTensor.
47415// This is an M-length vector.
//	truth_shape: The shape of the truth list SparseTensor.
// This is an R-length vector.
47417//
47418// Returns A dense float tensor with rank R - 1.
47419//
47420// For the example input:
47421//
47422//     // hypothesis represents a 2x1 matrix with variable-length values:
47423//     //   (0,0) = ["a"]
47424//     //   (1,0) = ["b"]
47425//     hypothesis_indices = [[0, 0, 0],
47426//                           [1, 0, 0]]
47427//     hypothesis_values = ["a", "b"]
47428//     hypothesis_shape = [2, 1, 1]
47429//
47430//     // truth represents a 2x2 matrix with variable-length values:
47431//     //   (0,0) = []
47432//     //   (0,1) = ["a"]
47433//     //   (1,0) = ["b", "c"]
47434//     //   (1,1) = ["a"]
47435//     truth_indices = [[0, 1, 0],
47436//                      [1, 0, 0],
47437//                      [1, 0, 1],
47438//                      [1, 1, 0]]
47439//     truth_values = ["a", "b", "c", "a"]
47440//     truth_shape = [2, 2, 2]
47441//     normalize = true
47442//
47443// The output will be:
47444//
47445//     // output is a 2x2 matrix with edit distances normalized by truth lengths.
47446//     output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis
47447//               [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis
47448func EditDistance(scope *Scope, hypothesis_indices tf.Output, hypothesis_values tf.Output, hypothesis_shape tf.Output, truth_indices tf.Output, truth_values tf.Output, truth_shape tf.Output, optional ...EditDistanceAttr) (output tf.Output) {
47449	if scope.Err() != nil {
47450		return
47451	}
47452	attrs := map[string]interface{}{}
47453	for _, a := range optional {
47454		a(attrs)
47455	}
47456	opspec := tf.OpSpec{
47457		Type: "EditDistance",
47458		Input: []tf.Input{
47459			hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape,
47460		},
47461		Attrs: attrs,
47462	}
47463	op := scope.AddOperation(opspec)
47464	return op.Output(0)
47465}
47466
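// Example (editor's sketch, not generated): wiring the documented 2x1
// hypothesis and 2x2 truth SparseTensors through the wrapper.
//
// ```
// s := op.NewScope()
// hypIdx := op.Const(s, [][]int64{{0, 0, 0}, {1, 0, 0}})
// hypVal := op.Const(s, []string{"a", "b"})
// hypShape := op.Const(s, []int64{2, 1, 1})
// truIdx := op.Const(s, [][]int64{{0, 1, 0}, {1, 0, 0}, {1, 0, 1}, {1, 1, 0}})
// truVal := op.Const(s, []string{"a", "b", "c", "a"})
// truShape := op.Const(s, []int64{2, 2, 2})
// dist := op.EditDistance(s, hypIdx, hypVal, hypShape, truIdx, truVal, truShape,
// 	op.EditDistanceNormalize(true)) // ==> [[inf, 1.0], [0.5, 1.0]]
// ```
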
47467// Wraps an arbitrary MLIR computation expressed as a module with a main() function.
47468//
47469// This operation does not have an associated kernel and is not intended to be
47470// executed in a regular TensorFlow session. Instead it is intended to be used for
47471// testing or for special case where a user intends to pass custom MLIR computation
47472// through a TensorFlow graph with the intent of having custom tooling processing
47473// it downstream (when targeting a different environment, like TensorFlow lite for
47474// example).
47475// The MLIR module is expected to have a main() function that will be used as an
47476// entry point. The inputs to the operations will be passed as argument to the
47477// main() function and the returned values of the main function mapped to the
47478// outputs.
47479// Example usage:
47480//
47481// ```
47482// import tensorflow as tf
47483// from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op
47484//
// mlir_module = '''
// func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {
//    %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>
//    return %add : tensor<10x10xf32>
47489// }
47490// '''
47491//
47492// @tf.function
47493// def foo(x, y):
47494//   return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])
47495//
47496// graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()
47497// ```
47498func MlirPassthroughOp(scope *Scope, inputs []tf.Output, mlir_module string, Toutputs []tf.DataType) (outputs []tf.Output) {
47499	if scope.Err() != nil {
47500		return
47501	}
47502	attrs := map[string]interface{}{"mlir_module": mlir_module, "Toutputs": Toutputs}
47503	opspec := tf.OpSpec{
47504		Type: "MlirPassthroughOp",
47505		Input: []tf.Input{
47506			tf.OutputList(inputs),
47507		},
47508		Attrs: attrs,
47509	}
47510	op := scope.AddOperation(opspec)
47511	if scope.Err() != nil {
47512		return
47513	}
47514	var idx int
47515	var err error
47516	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
47517		scope.UpdateErr("MlirPassthroughOp", err)
47518		return
47519	}
47520	return outputs
47521}
47522
47523// StringLowerAttr is an optional argument to StringLower.
47524type StringLowerAttr func(optionalAttr)
47525
47526// StringLowerEncoding sets the optional encoding attribute to value.
47527//
47528// value: Character encoding of `input`. Allowed values are '' and 'utf-8'.
47529// Value '' is interpreted as ASCII.
47530// If not specified, defaults to ""
47531func StringLowerEncoding(value string) StringLowerAttr {
47532	return func(m optionalAttr) {
47533		m["encoding"] = value
47534	}
47535}
47536
47537// Converts all uppercase characters into their respective lowercase replacements.
47538//
47539// Example:
47540//
47541// >>> tf.strings.lower("CamelCase string and ALL CAPS")
47542// <tf.Tensor: shape=(), dtype=string, numpy=b'camelcase string and all caps'>
47543//
47544//
47545// Arguments:
47546//	input: The input to be lower-cased.
47547func StringLower(scope *Scope, input tf.Output, optional ...StringLowerAttr) (output tf.Output) {
47548	if scope.Err() != nil {
47549		return
47550	}
47551	attrs := map[string]interface{}{}
47552	for _, a := range optional {
47553		a(attrs)
47554	}
47555	opspec := tf.OpSpec{
47556		Type: "StringLower",
47557		Input: []tf.Input{
47558			input,
47559		},
47560		Attrs: attrs,
47561	}
47562	op := scope.AddOperation(opspec)
47563	return op.Output(0)
47564}
47565
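// Example (editor's sketch, not generated):
//
// ```
// s := op.NewScope()
// lower := op.StringLower(s, op.Const(s, "CamelCase string and ALL CAPS"))
// // ==> "camelcase string and all caps"
// ```
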
47566// Reverses specific dimensions of a tensor.
47567//
47568// Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
47569// of `tensor`, this operation reverses each dimension i of `tensor` where
47570// `dims[i]` is `True`.
47571//
47572// `tensor` can have up to 8 dimensions. The number of dimensions
47573// of `tensor` must equal the number of elements in `dims`. In other words:
47574//
47575// `rank(tensor) = size(dims)`
47576//
47577// For example:
47578//
47579// ```
47580// # tensor 't' is [[[[ 0,  1,  2,  3],
47581// #                  [ 4,  5,  6,  7],
47582// #                  [ 8,  9, 10, 11]],
47583// #                 [[12, 13, 14, 15],
47584// #                  [16, 17, 18, 19],
47585// #                  [20, 21, 22, 23]]]]
47586// # tensor 't' shape is [1, 2, 3, 4]
47587//
47588// # 'dims' is [False, False, False, True]
47589// reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
47590//                         [ 7,  6,  5,  4],
//                         [11, 10,  9,  8]],
47592//                        [[15, 14, 13, 12],
47593//                         [19, 18, 17, 16],
47594//                         [23, 22, 21, 20]]]]
47595//
47596// # 'dims' is [False, True, False, False]
47597// reverse(t, dims) ==> [[[[12, 13, 14, 15],
47598//                         [16, 17, 18, 19],
//                         [20, 21, 22, 23]],
47600//                        [[ 0,  1,  2,  3],
47601//                         [ 4,  5,  6,  7],
47602//                         [ 8,  9, 10, 11]]]]
47603//
47604// # 'dims' is [False, False, True, False]
47605// reverse(t, dims) ==> [[[[8, 9, 10, 11],
47606//                         [4, 5, 6, 7],
//                         [0, 1, 2, 3]],
47608//                        [[20, 21, 22, 23],
47609//                         [16, 17, 18, 19],
47610//                         [12, 13, 14, 15]]]]
47611// ```
47612//
47613// Arguments:
47614//	tensor: Up to 8-D.
47615//	dims: 1-D. The dimensions to reverse.
47616//
47617// Returns The same shape as `tensor`.
47618func Reverse(scope *Scope, tensor tf.Output, dims tf.Output) (output tf.Output) {
47619	if scope.Err() != nil {
47620		return
47621	}
47622	opspec := tf.OpSpec{
47623		Type: "Reverse",
47624		Input: []tf.Input{
47625			tensor, dims,
47626		},
47627	}
47628	op := scope.AddOperation(opspec)
47629	return op.Output(0)
47630}
47631
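// Example (editor's sketch, not generated): reversing the last dimension of
// the rank-4 tensor 't' from the first case above.
//
// ```
// s := op.NewScope()
// t := op.Const(s, [][][][]int32{{{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}},
// 	{{12, 13, 14, 15}, {16, 17, 18, 19}, {20, 21, 22, 23}}}})
// dims := op.Const(s, []bool{false, false, false, true})
// rev := op.Reverse(s, t, dims)
// ```
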
47632// NthElementAttr is an optional argument to NthElement.
47633type NthElementAttr func(optionalAttr)
47634
47635// NthElementReverse sets the optional reverse attribute to value.
47636//
// value: When set to True, finds the nth-largest value in the vector instead
// of the nth-smallest.
47639// If not specified, defaults to false
47640func NthElementReverse(value bool) NthElementAttr {
47641	return func(m optionalAttr) {
47642		m["reverse"] = value
47643	}
47644}
47645
47646// Finds values of the `n`-th order statistic for the last dimension.
47647//
// If the input is a vector (rank-1), finds the entry which is the nth-smallest
// value in the vector and outputs its value as a scalar tensor.
//
// For matrices (resp. higher rank input), computes the entry which is the
// nth-smallest value in each row (resp. vector along the last dimension). Thus,
47653//
47654//     values.shape = input.shape[:-1]
47655//
47656// Arguments:
47657//	input: 1-D or higher with last dimension at least `n+1`.
47658//	n: 0-D. Position of sorted vector to select along the last dimension (along
// each row for matrices). Valid range of n is `[0, input.shape[-1])`
47660//
47661// Returns The `n`-th order statistic along each last dimensional slice.
47662func NthElement(scope *Scope, input tf.Output, n tf.Output, optional ...NthElementAttr) (values tf.Output) {
47663	if scope.Err() != nil {
47664		return
47665	}
47666	attrs := map[string]interface{}{}
47667	for _, a := range optional {
47668		a(attrs)
47669	}
47670	opspec := tf.OpSpec{
47671		Type: "NthElement",
47672		Input: []tf.Input{
47673			input, n,
47674		},
47675		Attrs: attrs,
47676	}
47677	op := scope.AddOperation(opspec)
47678	return op.Output(0)
47679}
47680
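// Example (editor's sketch, not generated): n = 1 selects the second-smallest
// entry of each last-dimension slice.
//
// ```
// s := op.NewScope()
// vals := op.NthElement(s, op.Const(s, []float32{5, 1, 4}), op.Const(s, int32(1))) // ==> 4
// ```
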
47681// Wraps the XLA ConvGeneralDilated operator, documented at
47682//
47683//  https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
47684// .
47685//
47686// Arguments:
47687//	lhs: the input tensor
47688//	rhs: the kernel tensor
47689//	window_strides: the inter-window strides
47690//	padding: the padding to apply at the start and end of each input dimensions
47691//	lhs_dilation: dilation to apply between input elements
47692//	rhs_dilation: dilation to apply between kernel elements
47693//	feature_group_count: number of feature groups for grouped convolution.
47694//	dimension_numbers: a serialized xla::ConvolutionDimensionNumbers proto.
47695//	precision_config: a serialized xla::PrecisionConfig proto.
47696func XlaConv(scope *Scope, lhs tf.Output, rhs tf.Output, window_strides tf.Output, padding tf.Output, lhs_dilation tf.Output, rhs_dilation tf.Output, feature_group_count tf.Output, dimension_numbers string, precision_config string) (output tf.Output) {
47697	if scope.Err() != nil {
47698		return
47699	}
47700	attrs := map[string]interface{}{"dimension_numbers": dimension_numbers, "precision_config": precision_config}
47701	opspec := tf.OpSpec{
47702		Type: "XlaConv",
47703		Input: []tf.Input{
47704			lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count,
47705		},
47706		Attrs: attrs,
47707	}
47708	op := scope.AddOperation(opspec)
47709	return op.Output(0)
47710}
47711
47712// Returns the value from a given key in a tensor map.
47713//
47714// input_handle: the input map
47715// key: the key to be looked up
47716// value: the value found from the given key
47717func TensorMapLookup(scope *Scope, input_handle tf.Output, key tf.Output, value_dtype tf.DataType) (value tf.Output) {
47718	if scope.Err() != nil {
47719		return
47720	}
47721	attrs := map[string]interface{}{"value_dtype": value_dtype}
47722	opspec := tf.OpSpec{
47723		Type: "TensorMapLookup",
47724		Input: []tf.Input{
47725			input_handle, key,
47726		},
47727		Attrs: attrs,
47728	}
47729	op := scope.AddOperation(opspec)
47730	return op.Output(0)
47731}
47732
47733// OrderedMapPeekAttr is an optional argument to OrderedMapPeek.
47734type OrderedMapPeekAttr func(optionalAttr)
47735
47736// OrderedMapPeekCapacity sets the optional capacity attribute to value.
47737// If not specified, defaults to 0
47738//
47739// REQUIRES: value >= 0
47740func OrderedMapPeekCapacity(value int64) OrderedMapPeekAttr {
47741	return func(m optionalAttr) {
47742		m["capacity"] = value
47743	}
47744}
47745
47746// OrderedMapPeekMemoryLimit sets the optional memory_limit attribute to value.
47747// If not specified, defaults to 0
47748//
47749// REQUIRES: value >= 0
47750func OrderedMapPeekMemoryLimit(value int64) OrderedMapPeekAttr {
47751	return func(m optionalAttr) {
47752		m["memory_limit"] = value
47753	}
47754}
47755
47756// OrderedMapPeekContainer sets the optional container attribute to value.
47757// If not specified, defaults to ""
47758func OrderedMapPeekContainer(value string) OrderedMapPeekAttr {
47759	return func(m optionalAttr) {
47760		m["container"] = value
47761	}
47762}
47763
47764// OrderedMapPeekSharedName sets the optional shared_name attribute to value.
47765// If not specified, defaults to ""
47766func OrderedMapPeekSharedName(value string) OrderedMapPeekAttr {
47767	return func(m optionalAttr) {
47768		m["shared_name"] = value
47769	}
47770}
47771
// Op peeks at the values at the specified key.
//
// If the underlying container does not contain this key,
// this op will block until it does. This Op is optimized for
// performance.
47777func OrderedMapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapPeekAttr) (values []tf.Output) {
47778	if scope.Err() != nil {
47779		return
47780	}
47781	attrs := map[string]interface{}{"dtypes": dtypes}
47782	for _, a := range optional {
47783		a(attrs)
47784	}
47785	opspec := tf.OpSpec{
47786		Type: "OrderedMapPeek",
47787		Input: []tf.Input{
47788			key, indices,
47789		},
47790		Attrs: attrs,
47791	}
47792	op := scope.AddOperation(opspec)
47793	if scope.Err() != nil {
47794		return
47795	}
47796	var idx int
47797	var err error
47798	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
47799		scope.UpdateErr("OrderedMapPeek", err)
47800		return
47801	}
47802	return values
47803}
47804
47805// DecodeRawAttr is an optional argument to DecodeRaw.
47806type DecodeRawAttr func(optionalAttr)
47807
47808// DecodeRawLittleEndian sets the optional little_endian attribute to value.
47809//
47810// value: Whether the input `bytes` are in little-endian order.
47811// Ignored for `out_type` values that are stored in a single byte like
47812// `uint8`.
47813// If not specified, defaults to true
47814func DecodeRawLittleEndian(value bool) DecodeRawAttr {
47815	return func(m optionalAttr) {
47816		m["little_endian"] = value
47817	}
47818}
47819
47820// Reinterpret the bytes of a string as a vector of numbers.
47821//
47822// Arguments:
47823//	bytes: All the elements must have the same length.
47824//
47825//
47826// Returns A Tensor with one more dimension than the input `bytes`.  The
47827// added dimension will have size equal to the length of the elements
47828// of `bytes` divided by the number of bytes to represent `out_type`.
47829func DecodeRaw(scope *Scope, bytes tf.Output, out_type tf.DataType, optional ...DecodeRawAttr) (output tf.Output) {
47830	if scope.Err() != nil {
47831		return
47832	}
47833	attrs := map[string]interface{}{"out_type": out_type}
47834	for _, a := range optional {
47835		a(attrs)
47836	}
47837	opspec := tf.OpSpec{
47838		Type: "DecodeRaw",
47839		Input: []tf.Input{
47840			bytes,
47841		},
47842		Attrs: attrs,
47843	}
47844	op := scope.AddOperation(opspec)
47845	return op.Output(0)
47846}
47847
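// Example (editor's sketch, not generated): decoding four little-endian bytes
// into one int32 per input string.
//
// ```
// s := op.NewScope()
// raw := op.Const(s, []string{"\x01\x00\x00\x00"})
// ints := op.DecodeRaw(s, raw, tf.Int32) // ==> [[1]]
// ```
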
47848// Returns the number of records this Reader has produced.
47849//
47850// This is the same as the number of ReaderRead executions that have
47851// succeeded.
47852//
47853// Arguments:
47854//	reader_handle: Handle to a Reader.
47855func ReaderNumRecordsProducedV2(scope *Scope, reader_handle tf.Output) (records_produced tf.Output) {
47856	if scope.Err() != nil {
47857		return
47858	}
47859	opspec := tf.OpSpec{
47860		Type: "ReaderNumRecordsProducedV2",
47861		Input: []tf.Input{
47862			reader_handle,
47863		},
47864	}
47865	op := scope.AddOperation(opspec)
47866	return op.Output(0)
47867}
47868
47869// MatrixSetDiagV3Attr is an optional argument to MatrixSetDiagV3.
47870type MatrixSetDiagV3Attr func(optionalAttr)
47871
47872// MatrixSetDiagV3Align sets the optional align attribute to value.
47873//
47874// value: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is
47875// a string specifying how superdiagonals and subdiagonals should be aligned,
47876// respectively. There are four possible alignments: "RIGHT_LEFT" (default),
47877// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals
47878// to the right (left-pads the row) and subdiagonals to the left (right-pads the
47879// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is
47880// the opposite alignment.
47881// If not specified, defaults to "RIGHT_LEFT"
47882func MatrixSetDiagV3Align(value string) MatrixSetDiagV3Attr {
47883	return func(m optionalAttr) {
47884		m["align"] = value
47885	}
47886}
47887
47888// Returns a batched matrix tensor with new batched diagonal values.
47889//
47890// Given `input` and `diagonal`, this operation returns a tensor with the
47891// same shape and values as `input`, except for the specified diagonals of the
47892// innermost matrices. These will be overwritten by the values in `diagonal`.
47893//
47894// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
47895// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
47896// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
47897// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
47898// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
47899// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
47900//
// The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
47902// If `k` is scalar or `k[0] == k[1]`:
47903//
47904// ```
47905// output[i, j, ..., l, m, n]
47906//   = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
47907//     input[i, j, ..., l, m, n]              ; otherwise
47908// ```
47909//
47910// Otherwise,
47911//
47912// ```
47913// output[i, j, ..., l, m, n]
47914//   = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
47915//     input[i, j, ..., l, m, n]                         ; otherwise
47916// ```
47917// where `d = n - m`, `diag_index = k[1] - d`, and
47918// `index_in_diag = n - max(d, 0) + offset`.
47919//
47920// `offset` is zero except when the alignment of the diagonal is to the right.
47921// ```
47922// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
47923//                                            and `d >= 0`) or
47924//                                          (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
47925//                                            and `d <= 0`)
47926//          0                          ; otherwise
47927// ```
47928// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
47929//
47930// For example:
47931//
47932// ```
47933// # The main diagonal.
47934// input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
47935//                    [7, 7, 7, 7],
47936//                    [7, 7, 7, 7]],
47937//                   [[7, 7, 7, 7],
47938//                    [7, 7, 7, 7],
47939//                    [7, 7, 7, 7]]])
47940// diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
47941//                      [4, 5, 6]])
47942// tf.matrix_set_diag(input, diagonal)
47943//   ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
47944//         [7, 2, 7, 7],
47945//         [7, 7, 3, 7]],
47946//        [[4, 7, 7, 7],
47947//         [7, 5, 7, 7],
47948//         [7, 7, 6, 7]]]
47949//
47950// # A superdiagonal (per batch).
47951// tf.matrix_set_diag(input, diagonal, k = 1)
47952//   ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
47953//         [7, 7, 2, 7],
47954//         [7, 7, 7, 3]],
47955//        [[7, 4, 7, 7],
47956//         [7, 7, 5, 7],
47957//         [7, 7, 7, 6]]]
47958//
47959// # A band of diagonals.
47960// diagonals = np.array([[[0, 9, 1],  # Diagonal shape: (2, 4, 3)
47961//                        [6, 5, 8],
47962//                        [1, 2, 3],
47963//                        [4, 5, 0]],
47964//                       [[0, 1, 2],
47965//                        [5, 6, 4],
47966//                        [6, 1, 2],
47967//                        [3, 4, 0]]])
47968// tf.matrix_set_diag(input, diagonals, k = (-1, 2))
47969//   ==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
47970//         [4, 2, 5, 1],
47971//         [7, 5, 3, 8]],
47972//        [[6, 5, 1, 7],
47973//         [3, 1, 6, 2],
47974//         [7, 4, 2, 4]]]
47975//
47976// # LEFT_RIGHT alignment.
47977// diagonals = np.array([[[9, 1, 0],  # Diagonal shape: (2, 4, 3)
47978//                        [6, 5, 8],
47979//                        [1, 2, 3],
47980//                        [0, 4, 5]],
47981//                       [[1, 2, 0],
47982//                        [5, 6, 4],
47983//                        [6, 1, 2],
47984//                        [0, 3, 4]]])
47985// tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT")
47986//   ==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
47987//         [4, 2, 5, 1],
47988//         [7, 5, 3, 8]],
47989//        [[6, 5, 1, 7],
47990//         [3, 1, 6, 2],
47991//         [7, 4, 2, 4]]]
47992//
47993// ```
47994//
47995// Arguments:
47996//	input: Rank `r+1`, where `r >= 1`.
47997//	diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
// `r >= 1`.
47999//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
48000// diagonal, and negative value means subdiagonals. `k` can be a single integer
48001// (for a single diagonal) or a pair of integers specifying the low and high ends
48002// of a matrix band. `k[0]` must not be larger than `k[1]`.
48003//
48004// Returns Rank `r+1`, with `output.shape = input.shape`.
48005func MatrixSetDiagV3(scope *Scope, input tf.Output, diagonal tf.Output, k tf.Output, optional ...MatrixSetDiagV3Attr) (output tf.Output) {
48006	if scope.Err() != nil {
48007		return
48008	}
48009	attrs := map[string]interface{}{}
48010	for _, a := range optional {
48011		a(attrs)
48012	}
48013	opspec := tf.OpSpec{
48014		Type: "MatrixSetDiagV3",
48015		Input: []tf.Input{
48016			input, diagonal, k,
48017		},
48018		Attrs: attrs,
48019	}
48020	op := scope.AddOperation(opspec)
48021	return op.Output(0)
48022}
48023
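// Example (editor's sketch, not generated): overwriting the main diagonal
// (k = 0) of a single 3x4 matrix of sevens, as in the first example above.
//
// ```
// s := op.NewScope()
// input := op.Const(s, [][]int32{{7, 7, 7, 7}, {7, 7, 7, 7}, {7, 7, 7, 7}})
// diag := op.Const(s, []int32{1, 2, 3})
// out := op.MatrixSetDiagV3(s, input, diag, op.Const(s, int32(0)))
// // ==> [[1, 7, 7, 7], [7, 2, 7, 7], [7, 7, 3, 7]]
// ```
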
48024// RecordInputAttr is an optional argument to RecordInput.
48025type RecordInputAttr func(optionalAttr)
48026
48027// RecordInputFileRandomSeed sets the optional file_random_seed attribute to value.
48028//
48029// value: Random seeds used to produce randomized records.
48030// If not specified, defaults to 301
48031func RecordInputFileRandomSeed(value int64) RecordInputAttr {
48032	return func(m optionalAttr) {
48033		m["file_random_seed"] = value
48034	}
48035}
48036
48037// RecordInputFileShuffleShiftRatio sets the optional file_shuffle_shift_ratio attribute to value.
48038//
48039// value: Shifts the list of files after the list is randomly
48040// shuffled.
48041// If not specified, defaults to 0
48042func RecordInputFileShuffleShiftRatio(value float32) RecordInputAttr {
48043	return func(m optionalAttr) {
48044		m["file_shuffle_shift_ratio"] = value
48045	}
48046}
48047
48048// RecordInputFileBufferSize sets the optional file_buffer_size attribute to value.
48049//
48050// value: The randomization shuffling buffer.
48051// If not specified, defaults to 10000
48052func RecordInputFileBufferSize(value int64) RecordInputAttr {
48053	return func(m optionalAttr) {
48054		m["file_buffer_size"] = value
48055	}
48056}
48057
48058// RecordInputFileParallelism sets the optional file_parallelism attribute to value.
48059//
48060// value: How many sstables are opened and concurrently iterated over.
48061// If not specified, defaults to 16
48062func RecordInputFileParallelism(value int64) RecordInputAttr {
48063	return func(m optionalAttr) {
48064		m["file_parallelism"] = value
48065	}
48066}
48067
48068// RecordInputBatchSize sets the optional batch_size attribute to value.
48069//
48070// value: The batch size.
48071// If not specified, defaults to 32
48072func RecordInputBatchSize(value int64) RecordInputAttr {
48073	return func(m optionalAttr) {
48074		m["batch_size"] = value
48075	}
48076}
48077
48078// RecordInputCompressionType sets the optional compression_type attribute to value.
48079//
48080// value: The type of compression for the file. Currently ZLIB and
48081// GZIP are supported. Defaults to none.
48082// If not specified, defaults to ""
48083func RecordInputCompressionType(value string) RecordInputAttr {
48084	return func(m optionalAttr) {
48085		m["compression_type"] = value
48086	}
48087}
48088
48089// Emits randomized records.
48090//
48091// Arguments:
48092//	file_pattern: Glob pattern for the data files.
48093//
48094// Returns A tensor of shape [batch_size].
48095func RecordInput(scope *Scope, file_pattern string, optional ...RecordInputAttr) (records tf.Output) {
48096	if scope.Err() != nil {
48097		return
48098	}
48099	attrs := map[string]interface{}{"file_pattern": file_pattern}
48100	for _, a := range optional {
48101		a(attrs)
48102	}
48103	opspec := tf.OpSpec{
48104		Type: "RecordInput",
48105
48106		Attrs: attrs,
48107	}
48108	op := scope.AddOperation(opspec)
48109	return op.Output(0)
48110}
48111
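// Example (editor's sketch, not generated): reading shuffled batches of 8
// records from GZIP-compressed files matching a glob. The path is
// illustrative only.
//
// ```
// s := op.NewScope()
// recs := op.RecordInput(s, "/tmp/train-*.tfrecord",
// 	op.RecordInputBatchSize(8),
// 	op.RecordInputCompressionType("GZIP"))
// ```
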
48112// Elementwise computes the bitwise AND of `x` and `y`.
48113//
// The result will have those bits set that are set in both `x` and `y`. The
48115// computation is performed on the underlying representations of `x` and `y`.
48116//
48117// For example:
48118//
48119// ```python
48120// import tensorflow as tf
48121// from tensorflow.python.ops import bitwise_ops
48122// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
48123//               tf.uint8, tf.uint16, tf.uint32, tf.uint64]
48124//
48125// for dtype in dtype_list:
48126//   lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
48127//   rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
48128//   exp = tf.constant([0, 0, 3, 10], dtype=tf.float32)
48129//
48130//   res = bitwise_ops.bitwise_and(lhs, rhs)
48131//   tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
48132// ```
48133//
48134func BitwiseAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
48135	if scope.Err() != nil {
48136		return
48137	}
48138	opspec := tf.OpSpec{
48139		Type: "BitwiseAnd",
48140		Input: []tf.Input{
48141			x, y,
48142		},
48143	}
48144	op := scope.AddOperation(opspec)
48145	return op.Output(0)
48146}
48147
48148// Produces the max pool of the input tensor for quantized types.
48149//
48150// Arguments:
48151//	input: The 4D (batch x rows x cols x depth) Tensor to max-pool over.
48152//	min_input: The float value that the lowest quantized input value represents.
48153//	max_input: The float value that the highest quantized input value represents.
48154//	ksize: The size of the window for each dimension of the input tensor.
48155// The length must be 4 to match the number of dimensions of the input.
48156//	strides: The stride of the sliding window for each dimension of the input
48157// tensor. The length must be 4 to match the number of dimensions of the input.
48158//	padding: The type of padding algorithm to use.
48159//
48160// Returns:
48161//	output
48162//	min_output: The float value that the lowest quantized output value represents.
48163//	max_output: The float value that the highest quantized output value represents.
48164func QuantizedMaxPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output) {
48165	if scope.Err() != nil {
48166		return
48167	}
48168	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
48169	opspec := tf.OpSpec{
48170		Type: "QuantizedMaxPool",
48171		Input: []tf.Input{
48172			input, min_input, max_input,
48173		},
48174		Attrs: attrs,
48175	}
48176	op := scope.AddOperation(opspec)
48177	return op.Output(0), op.Output(1), op.Output(2)
48178}
48179
48180// Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
48181//
48182// This operation folds the padded areas of `input` by `MirrorPad` according to the
48183// `paddings` you specify. `paddings` must be the same as `paddings` argument
48184// given to the corresponding `MirrorPad` op.
48185//
48186// The folded size of each dimension D of the output is:
48187//
48188// `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
48189//
48190// For example:
48191//
48192// ```
48193// # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
48194// # 'paddings' is [[0, 1], [0, 1]].
48195// # 'mode' is SYMMETRIC.
48196// # rank of 't' is 2.
48197// pad(t, paddings) ==> [[ 1,  5]
48198//                       [11, 28]]
48199// ```
48200//
48201// Arguments:
48202//	input: The input tensor to be folded.
48203//	paddings: A two-column matrix specifying the padding sizes. The number of
48204// rows must be the same as the rank of `input`.
48205//	mode: The mode used in the `MirrorPad` op.
48206//
48207// Returns The folded tensor.
48208func MirrorPadGrad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output) {
48209	if scope.Err() != nil {
48210		return
48211	}
48212	attrs := map[string]interface{}{"mode": mode}
48213	opspec := tf.OpSpec{
48214		Type: "MirrorPadGrad",
48215		Input: []tf.Input{
48216			input, paddings,
48217		},
48218		Attrs: attrs,
48219	}
48220	op := scope.AddOperation(opspec)
48221	return op.Output(0)
48222}
48223
48224// NonMaxSuppressionV5Attr is an optional argument to NonMaxSuppressionV5.
48225type NonMaxSuppressionV5Attr func(optionalAttr)
48226
48227// NonMaxSuppressionV5PadToMaxOutputSize sets the optional pad_to_max_output_size attribute to value.
48228//
48229// value: If true, the output `selected_indices` is padded to be of length
48230// `max_output_size`. Defaults to false.
48231// If not specified, defaults to false
48232func NonMaxSuppressionV5PadToMaxOutputSize(value bool) NonMaxSuppressionV5Attr {
48233	return func(m optionalAttr) {
48234		m["pad_to_max_output_size"] = value
48235	}
48236}
48237
48238// Greedily selects a subset of bounding boxes in descending order of score,
48239//
48240// pruning away boxes that have high intersection-over-union (IOU) overlap
48241// with previously selected boxes.  Bounding boxes with score less than
48242// `score_threshold` are removed.  Bounding boxes are supplied as
48243// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
48244// diagonal pair of box corners and the coordinates can be provided as normalized
48245// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
48246// is agnostic to where the origin is in the coordinate system and more
48247// generally is invariant to orthogonal transformations and translations
48248// of the coordinate system; thus translating or reflecting the coordinate
48249// system results in the same boxes being selected by the algorithm.
48250// The output of this operation is a set of integers indexing into the input
48251// collection of bounding boxes representing the selected boxes.  The bounding
48252// box coordinates corresponding to the selected indices can then be obtained
48253// using the `tf.gather` operation.  For example:
48254//   selected_indices = tf.image.non_max_suppression_v2(
48255//       boxes, scores, max_output_size, iou_threshold, score_threshold)
48256//   selected_boxes = tf.gather(boxes, selected_indices)
48257// This op also supports a Soft-NMS (with Gaussian weighting) mode (c.f.
48258// Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score
48259// of other overlapping boxes instead of directly causing them to be pruned.
48260// To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be
48261// larger than 0.
48262//
48263// Arguments:
48264//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
48265//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
48266// score corresponding to each box (each row of boxes).
48267//	max_output_size: A scalar integer tensor representing the maximum number of
48268// boxes to be selected by non max suppression.
48269//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
48270// boxes overlap too much with respect to IOU.
48271//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
48272// boxes based on score.
48273//	soft_nms_sigma: A 0-D float tensor representing the sigma parameter for Soft NMS; see Bodla et
48274// al (c.f. https://arxiv.org/abs/1704.04503).  When `soft_nms_sigma=0.0` (which
48275// is the default), we fall back to standard (hard) NMS.
48276//
48277// Returns:
48278//	selected_indices: A 1-D integer tensor of shape `[M]` representing the selected
48279// indices from the boxes tensor, where `M <= max_output_size`.
48280//	selected_scores: A 1-D float tensor of shape `[M]` representing the corresponding
48281// scores for each selected box, where `M <= max_output_size`.  Scores only differ
48282// from corresponding input scores when using Soft NMS (i.e. when
48283// `soft_nms_sigma>0`)
48284//	valid_outputs: A 0-D integer tensor representing the number of valid elements in
48285// `selected_indices`, with the valid elements appearing first.
48286func NonMaxSuppressionV5(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, soft_nms_sigma tf.Output, optional ...NonMaxSuppressionV5Attr) (selected_indices tf.Output, selected_scores tf.Output, valid_outputs tf.Output) {
48287	if scope.Err() != nil {
48288		return
48289	}
48290	attrs := map[string]interface{}{}
48291	for _, a := range optional {
48292		a(attrs)
48293	}
48294	opspec := tf.OpSpec{
48295		Type: "NonMaxSuppressionV5",
48296		Input: []tf.Input{
48297			boxes, scores, max_output_size, iou_threshold, score_threshold, soft_nms_sigma,
48298		},
48299		Attrs: attrs,
48300	}
48301	op := scope.AddOperation(opspec)
48302	return op.Output(0), op.Output(1), op.Output(2)
48303}
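
// Example: a client-side sketch of hard NMS with padded output; the box and
// score constants are illustrative only, and session plumbing is omitted:
//
// ```go
// s := op.NewScope()
// boxes := op.Const(s, [][]float32{{0, 0, 1, 1}, {0, 0.1, 1, 1.1}})
// scores := op.Const(s, []float32{0.9, 0.75})
// maxOutputSize := op.Const(s, int32(10))
// iouThreshold := op.Const(s, float32(0.5))
// scoreThreshold := op.Const(s, float32(0.0))
// softNmsSigma := op.Const(s, float32(0.0)) // 0.0 selects standard (hard) NMS
// indices, selScores, valid := op.NonMaxSuppressionV5(s,
// 	boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma,
// 	op.NonMaxSuppressionV5PadToMaxOutputSize(true))
// _, _, _ = indices, selScores, valid
// ```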
48304
48305// LogUniformCandidateSamplerAttr is an optional argument to LogUniformCandidateSampler.
48306type LogUniformCandidateSamplerAttr func(optionalAttr)
48307
48308// LogUniformCandidateSamplerSeed sets the optional seed attribute to value.
48309//
48310// value: If either seed or seed2 are set to be non-zero, the random number
48311// generator is seeded by the given seed.  Otherwise, it is seeded by a
48312// random seed.
48313// If not specified, defaults to 0
48314func LogUniformCandidateSamplerSeed(value int64) LogUniformCandidateSamplerAttr {
48315	return func(m optionalAttr) {
48316		m["seed"] = value
48317	}
48318}
48319
48320// LogUniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
48321//
48322// value: A second seed to avoid seed collision.
48323// If not specified, defaults to 0
48324func LogUniformCandidateSamplerSeed2(value int64) LogUniformCandidateSamplerAttr {
48325	return func(m optionalAttr) {
48326		m["seed2"] = value
48327	}
48328}
48329
48330// Generates labels for candidate sampling with a log-uniform distribution.
48331//
48332// See explanations of candidate sampling and the data formats at
48333// go/candidate-sampling.
48334//
48335// For each batch, this op picks a single set of sampled candidate labels.
48336//
48337// The advantages of sampling candidates per-batch are simplicity and the
48338// possibility of efficient dense matrix multiplication. The disadvantage is that
48339// the sampled candidates must be chosen independently of the context and of the
48340// true labels.
48341//
48342// Arguments:
48343//	true_classes: A batch_size * num_true matrix, in which each row contains the
48344// IDs of the num_true target_classes in the corresponding original label.
48345//	num_true: Number of true labels per context.
48346//	num_sampled: Number of candidates to randomly sample.
48347//	unique: If unique is true, we sample with rejection, so that all sampled
48348// candidates in a batch are unique. This requires some approximation to
48349// estimate the post-rejection sampling probabilities.
48350//	range_max: The sampler will sample integers from the interval [0, range_max).
48351//
48352// Returns:
48353//	sampled_candidates: A vector of length num_sampled, in which each element is
48354// the ID of a sampled candidate.
48355//	true_expected_count: A batch_size * num_true matrix, representing
48356// the number of times each candidate is expected to occur in a batch
48357// of sampled candidates. If unique=true, then this is a probability.
48358//	sampled_expected_count: A vector of length num_sampled, for each sampled
48359// candidate representing the number of times the candidate is expected
48360// to occur in a batch of sampled candidates.  If unique=true, then this is a
48361// probability.
48362func LogUniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LogUniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
48363	if scope.Err() != nil {
48364		return
48365	}
48366	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
48367	for _, a := range optional {
48368		a(attrs)
48369	}
48370	opspec := tf.OpSpec{
48371		Type: "LogUniformCandidateSampler",
48372		Input: []tf.Input{
48373			true_classes,
48374		},
48375		Attrs: attrs,
48376	}
48377	op := scope.AddOperation(opspec)
48378	return op.Output(0), op.Output(1), op.Output(2)
48379}
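
// Example: a client-side sketch for sampling 64 negative classes per batch
// from [0, 100000); the class IDs and seed value are illustrative only:
//
// ```go
// s := op.NewScope()
// trueClasses := op.Const(s, [][]int64{{12}, {7}}) // batch_size=2, num_true=1
// sampled, trueExpected, sampledExpected := op.LogUniformCandidateSampler(s,
// 	trueClasses, 1, 64, true, 100000,
// 	op.LogUniformCandidateSamplerSeed(42))
// _, _, _ = sampled, trueExpected, sampledExpected
// ```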
48380
48381// Computes the eigen decomposition of a batch of self-adjoint matrices
48382//
48383// (Note: Only real inputs are supported).
48384//
48385// Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in
48386// tensor such that tensor[...,:,:] * v[..., :,i] = w[..., i] * v[...,:,i], for
48387// i=0...N-1.
48388//
48389// Arguments:
48390//	a: the input tensor.
48391//	lower: a boolean specifying whether the calculation is done with the lower
48392// triangular part or the upper triangular part.
48393//	max_iter: maximum number of sweep updates, i.e., passes over the whole lower
48394// or upper triangular part selected by `lower`. Heuristically, it has been
48395// argued that approximately log(N) sweeps are needed in practice (Ref: Golub &
48396// van Loan, "Matrix Computations").
48397//	epsilon: the tolerance ratio.
48398//
48399// Returns:
48400//	w: The eigenvalues in ascending order, each repeated according to its
48401// multiplicity.
48402//	v: The column v[..., :, i] is the normalized eigenvector corresponding to the
48403// eigenvalue w[..., i].
48404func XlaSelfAdjointEig(scope *Scope, a tf.Output, lower bool, max_iter int64, epsilon float32) (w tf.Output, v tf.Output) {
48405	if scope.Err() != nil {
48406		return
48407	}
48408	attrs := map[string]interface{}{"lower": lower, "max_iter": max_iter, "epsilon": epsilon}
48409	opspec := tf.OpSpec{
48410		Type: "XlaSelfAdjointEig",
48411		Input: []tf.Input{
48412			a,
48413		},
48414		Attrs: attrs,
48415	}
48416	op := scope.AddOperation(opspec)
48417	return op.Output(0), op.Output(1)
48418}
48419
48420// Ensures that the tensor's shape matches the expected shape.
48421//
48422// Raises an error if the input tensor's shape does not match the specified shape.
48423// Returns the input tensor otherwise.
48424//
48425// Arguments:
48426//	input: A tensor, whose shape is to be validated.
48427//	shape: The expected (possibly partially specified) shape of the input tensor.
48428//
48429// Returns A tensor with the same shape and contents as the input tensor.
48430func EnsureShape(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
48431	if scope.Err() != nil {
48432		return
48433	}
48434	attrs := map[string]interface{}{"shape": shape}
48435	opspec := tf.OpSpec{
48436		Type: "EnsureShape",
48437		Input: []tf.Input{
48438			input,
48439		},
48440		Attrs: attrs,
48441	}
48442	op := scope.AddOperation(opspec)
48443	return op.Output(0)
48444}
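
// Example: a client-side sketch that asserts a partially known shape at graph
// build time; `Placeholder` is the wrapper generated elsewhere in this
// package, and the 28x28 image shape is hypothetical:
//
// ```go
// s := op.NewScope()
// images := op.Placeholder(s, tf.Float) // shape unknown when the graph is built
// checked := op.EnsureShape(s, images, tf.MakeShape(-1, 28, 28)) // -1: any batch size
// _ = checked
// ```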
48445
48446// StatelessTruncatedNormalAttr is an optional argument to StatelessTruncatedNormal.
48447type StatelessTruncatedNormalAttr func(optionalAttr)
48448
48449// StatelessTruncatedNormalDtype sets the optional dtype attribute to value.
48450//
48451// value: The type of the output.
48452// If not specified, defaults to DT_FLOAT
48453func StatelessTruncatedNormalDtype(value tf.DataType) StatelessTruncatedNormalAttr {
48454	return func(m optionalAttr) {
48455		m["dtype"] = value
48456	}
48457}
48458
48459// Outputs deterministic pseudorandom values from a truncated normal distribution.
48460//
48461// The generated values follow a normal distribution with mean 0 and standard
48462// deviation 1, except that values whose magnitude is more than 2 standard
48463// deviations from the mean are dropped and re-picked.
48464//
48465// The outputs are a deterministic function of `shape` and `seed`.
48466//
48467// Arguments:
48468//	shape: The shape of the output tensor.
48469//	seed: 2 seeds (shape [2]).
48470//
48471// Returns Random values with specified shape.
48472func StatelessTruncatedNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessTruncatedNormalAttr) (output tf.Output) {
48473	if scope.Err() != nil {
48474		return
48475	}
48476	attrs := map[string]interface{}{}
48477	for _, a := range optional {
48478		a(attrs)
48479	}
48480	opspec := tf.OpSpec{
48481		Type: "StatelessTruncatedNormal",
48482		Input: []tf.Input{
48483			shape, seed,
48484		},
48485		Attrs: attrs,
48486	}
48487	op := scope.AddOperation(opspec)
48488	return op.Output(0)
48489}
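
// Example: a client-side sketch; the seed values are illustrative, and the
// same (shape, seed) pair always yields the same tensor:
//
// ```go
// s := op.NewScope()
// shape := op.Const(s, []int32{2, 3})
// seed := op.Const(s, []int64{1, 2}) // 2 seeds, shape [2]
// vals := op.StatelessTruncatedNormal(s, shape, seed,
// 	op.StatelessTruncatedNormalDtype(tf.Double))
// _ = vals
// ```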
48490
48491// Returns a batched diagonal tensor with given batched diagonal values.
48492//
48493// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
48494// everything else padded with zeros. The diagonal is computed as follows:
48495//
48496// Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`; then the output is a
48497// tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
48498//
48499// `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
48500//
48501// For example:
48502//
48503// ```
48504// # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
48505//
48506// and diagonal.shape = (2, 4)
48507//
48508// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
48509//                                      [0, 2, 0, 0]
48510//                                      [0, 0, 3, 0]
48511//                                      [0, 0, 0, 4]],
48512//                                     [[5, 0, 0, 0]
48513//                                      [0, 6, 0, 0]
48514//                                      [0, 0, 7, 0]
48515//                                      [0, 0, 0, 8]]]
48516//
48517// which has shape (2, 4, 4)
48518// ```
48519//
48520// Arguments:
48521//	diagonal: Rank `k`, where `k >= 1`.
48522//
48523// Returns Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.
48524func MatrixDiag(scope *Scope, diagonal tf.Output) (output tf.Output) {
48525	if scope.Err() != nil {
48526		return
48527	}
48528	opspec := tf.OpSpec{
48529		Type: "MatrixDiag",
48530		Input: []tf.Input{
48531			diagonal,
48532		},
48533	}
48534	op := scope.AddOperation(opspec)
48535	return op.Output(0)
48536}
48537
48538// Transforms a tf.Example proto (as a string) into typed tensors.
48539//
48540// Arguments:
48541//	serialized: A vector containing a batch of binary serialized Example protos.
48542//	dense_defaults: A list of Tensors (some may be empty), whose length matches
48543// the length of `dense_keys`. dense_defaults[j] provides default values
48544// when the example's feature_map lacks dense_key[j].  If an empty Tensor is
48545// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
48546// The input type is inferred from dense_defaults[j], even when it's empty.
48547// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
48548// then the shape of dense_defaults[j] must match that of dense_shapes[j].
48549// If dense_shapes[j] has an undefined major dimension (variable strides dense
48550// feature), dense_defaults[j] must contain a single element:
48551// the padding element.
48552//	num_sparse: The number of sparse features to be parsed from the example. This
48553// must match the lengths of `sparse_keys` and `sparse_types`.
48554//	sparse_keys: A list of `num_sparse` strings.
48555// The keys expected in the Examples' features associated with sparse values.
48556//	dense_keys: The keys expected in the Examples' features associated with dense
48557// values.
48558//	sparse_types: A list of `num_sparse` types; the data types of data in each
48559// Feature given in sparse_keys.
48560// Currently the ParseSingleExample op supports DT_FLOAT (FloatList),
48561// DT_INT64 (Int64List), and DT_STRING (BytesList).
48562//	dense_shapes: The shapes of data in each Feature given in dense_keys.
48563// The length of this list must match the length of `dense_keys`.  The
48564// number of elements in the Feature corresponding to dense_key[j] must
48565// always equal dense_shapes[j].NumEntries().  If dense_shapes[j] ==
48566// (D0, D1, ..., DN) then the shape of output Tensor dense_values[j]
48567// will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1,
48568// ..., DN), the shape of the output Tensor dense_values[j] will be (M,
48569// D1, .., DN), where M is the number of blocks of elements of length
48570// D1 * .... * DN, in the input.
48571func ParseSingleExample(scope *Scope, serialized tf.Output, dense_defaults []tf.Output, num_sparse int64, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output) {
48572	if scope.Err() != nil {
48573		return
48574	}
48575	attrs := map[string]interface{}{"num_sparse": num_sparse, "sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes}
48576	opspec := tf.OpSpec{
48577		Type: "ParseSingleExample",
48578		Input: []tf.Input{
48579			serialized, tf.OutputList(dense_defaults),
48580		},
48581		Attrs: attrs,
48582	}
48583	op := scope.AddOperation(opspec)
48584	if scope.Err() != nil {
48585		return
48586	}
48587	var idx int
48588	var err error
48589	if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
48590		scope.UpdateErr("ParseSingleExample", err)
48591		return
48592	}
48593	if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
48594		scope.UpdateErr("ParseSingleExample", err)
48595		return
48596	}
48597	if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
48598		scope.UpdateErr("ParseSingleExample", err)
48599		return
48600	}
48601	if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
48602		scope.UpdateErr("ParseSingleExample", err)
48603		return
48604	}
48605	return sparse_indices, sparse_values, sparse_shapes, dense_values
48606}
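
// Example: a client-side sketch parsing a single dense int64 feature; the
// feature name "label" is hypothetical, the non-empty default makes the
// feature optional with default 0, and sparse features are left empty here:
//
// ```go
// s := op.NewScope()
// serialized := op.Placeholder(s, tf.String)
// defaults := []tf.Output{op.Const(s, []int64{0})}
// _, _, _, dense := op.ParseSingleExample(s, serialized, defaults,
// 	0,                 // num_sparse
// 	[]string{},        // sparse_keys
// 	[]string{"label"}, // dense_keys
// 	[]tf.DataType{},   // sparse_types
// 	[]tf.Shape{tf.MakeShape(1)})
// label := dense[0]
// _ = label
// ```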
48607
48608// Gives a guarantee to the TF runtime that the input tensor is a constant.
48609//
48610// The runtime is then free to make optimizations based on this.
48611//
48612// Only accepts value-typed tensors as inputs and rejects resource variable handles
48613// as input.
48614//
48615// Returns the input tensor without modification.
48616func GuaranteeConst(scope *Scope, input tf.Output) (output tf.Output) {
48617	if scope.Err() != nil {
48618		return
48619	}
48620	opspec := tf.OpSpec{
48621		Type: "GuaranteeConst",
48622		Input: []tf.Input{
48623			input,
48624		},
48625	}
48626	op := scope.AddOperation(opspec)
48627	return op.Output(0)
48628}
48629
48630// Converts the given `resource_handle` representing an iterator to a string.
48631//
48632// Arguments:
48633//	resource_handle: A handle to an iterator resource.
48634//
48635// Returns A string representation of the given handle.
48636func IteratorToStringHandle(scope *Scope, resource_handle tf.Output) (string_handle tf.Output) {
48637	if scope.Err() != nil {
48638		return
48639	}
48640	opspec := tf.OpSpec{
48641		Type: "IteratorToStringHandle",
48642		Input: []tf.Input{
48643			resource_handle,
48644		},
48645	}
48646	op := scope.AddOperation(opspec)
48647	return op.Output(0)
48648}
48649
48650// Outputs a `Summary` protocol buffer with a histogram.
48651//
48652// The generated
48653// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
48654// has one summary value containing a histogram for `values`.
48655//
48656// This op reports an `InvalidArgument` error if any value is not finite.
48657//
48658// Arguments:
48659//	tag: Scalar.  Tag to use for the `Summary.Value`.
48660//	values: Any shape. Values to use to build the histogram.
48661//
48662// Returns Scalar. Serialized `Summary` protocol buffer.
48663func HistogramSummary(scope *Scope, tag tf.Output, values tf.Output) (summary tf.Output) {
48664	if scope.Err() != nil {
48665		return
48666	}
48667	opspec := tf.OpSpec{
48668		Type: "HistogramSummary",
48669		Input: []tf.Input{
48670			tag, values,
48671		},
48672	}
48673	op := scope.AddOperation(opspec)
48674	return op.Output(0)
48675}
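
// Example: a client-side sketch; the tag and values are illustrative, and the
// resulting scalar string can then be written out by summary tooling:
//
// ```go
// s := op.NewScope()
// tag := op.Const(s, "activations")
// values := op.Const(s, []float32{0.1, 0.5, 0.9, 0.5})
// summary := op.HistogramSummary(s, tag, values) // serialized Summary proto
// _ = summary
// ```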
48676
48677// ResourceApplyPowerSignAttr is an optional argument to ResourceApplyPowerSign.
48678type ResourceApplyPowerSignAttr func(optionalAttr)
48679
48680// ResourceApplyPowerSignUseLocking sets the optional use_locking attribute to value.
48681//
48682// value: If `True`, updating of the var and m tensors is
48683// protected by a lock; otherwise the behavior is undefined, but may exhibit less
48684// contention.
48685// If not specified, defaults to false
48686func ResourceApplyPowerSignUseLocking(value bool) ResourceApplyPowerSignAttr {
48687	return func(m optionalAttr) {
48688		m["use_locking"] = value
48689	}
48690}
48691
48692// Update '*var' according to the PowerSign update.
48693//
48694// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
48695// update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g
48696// variable <- variable - lr_t * update
48697//
48698// Arguments:
48699//	var_: Should be from a Variable().
48700//	m: Should be from a Variable().
48701//	lr: Scaling factor. Must be a scalar.
48702//	logbase: Must be a scalar.
48703//	sign_decay: Must be a scalar.
48704//	beta: Must be a scalar.
48705//	grad: The gradient.
48706//
48707// Returns the created operation.
48708func ResourceApplyPowerSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, logbase tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyPowerSignAttr) (o *tf.Operation) {
48709	if scope.Err() != nil {
48710		return
48711	}
48712	attrs := map[string]interface{}{}
48713	for _, a := range optional {
48714		a(attrs)
48715	}
48716	opspec := tf.OpSpec{
48717		Type: "ResourceApplyPowerSign",
48718		Input: []tf.Input{
48719			var_, m, lr, logbase, sign_decay, beta, grad,
48720		},
48721		Attrs: attrs,
48722	}
48723	return scope.AddOperation(opspec)
48724}
48725
48726// Concatenates tensors along one dimension.
48727//
48728// Arguments:
48729//	concat_dim: 0-D.  The dimension along which to concatenate.  Must be in the
48730// range [0, rank(values)).
48731//	values: The `N` Tensors to concatenate. Their ranks and types must match,
48732// and their sizes must match in all dimensions except `concat_dim`.
48733//
48734// Returns A `Tensor` with the concatenation of values stacked along the
48735// `concat_dim` dimension.  This tensor's shape matches that of `values` except
48736// in `concat_dim` where it has the sum of the sizes.
48737func Concat(scope *Scope, concat_dim tf.Output, values []tf.Output) (output tf.Output) {
48738	if scope.Err() != nil {
48739		return
48740	}
48741	opspec := tf.OpSpec{
48742		Type: "Concat",
48743		Input: []tf.Input{
48744			concat_dim, tf.OutputList(values),
48745		},
48746	}
48747	op := scope.AddOperation(opspec)
48748	return op.Output(0)
48749}
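
// Example: a client-side sketch concatenating two matrices along dimension 0;
// the constants are illustrative only:
//
// ```go
// s := op.NewScope()
// a := op.Const(s, [][]float32{{1, 2}, {3, 4}})
// b := op.Const(s, [][]float32{{5, 6}})
// dim := op.Const(s, int32(0))
// joined := op.Concat(s, dim, []tf.Output{a, b}) // shape [3, 2]
// _ = joined
// ```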
48750
48751// Inverse of XlaSetDynamicDimensionSize.
48752//
48753// Makes an XLA bounded dynamic dimension into a static dimension. The bound of the
48754// size of dimension `dim_index` becomes the static dimension size.
48755func XlaRemoveDynamicDimensionSize(scope *Scope, input tf.Output, dim_index tf.Output) (output tf.Output) {
48756	if scope.Err() != nil {
48757		return
48758	}
48759	opspec := tf.OpSpec{
48760		Type: "XlaRemoveDynamicDimensionSize",
48761		Input: []tf.Input{
48762			input, dim_index,
48763		},
48764	}
48765	op := scope.AddOperation(opspec)
48766	return op.Output(0)
48767}
48768
48769// RetrieveTPUEmbeddingAdadeltaParametersAttr is an optional argument to RetrieveTPUEmbeddingAdadeltaParameters.
48770type RetrieveTPUEmbeddingAdadeltaParametersAttr func(optionalAttr)
48771
48772// RetrieveTPUEmbeddingAdadeltaParametersTableId sets the optional table_id attribute to value.
48773// If not specified, defaults to -1
48774func RetrieveTPUEmbeddingAdadeltaParametersTableId(value int64) RetrieveTPUEmbeddingAdadeltaParametersAttr {
48775	return func(m optionalAttr) {
48776		m["table_id"] = value
48777	}
48778}
48779
48780// RetrieveTPUEmbeddingAdadeltaParametersTableName sets the optional table_name attribute to value.
48781// If not specified, defaults to ""
48782func RetrieveTPUEmbeddingAdadeltaParametersTableName(value string) RetrieveTPUEmbeddingAdadeltaParametersAttr {
48783	return func(m optionalAttr) {
48784		m["table_name"] = value
48785	}
48786}
48787
48788// RetrieveTPUEmbeddingAdadeltaParametersConfig sets the optional config attribute to value.
48789// If not specified, defaults to ""
48790func RetrieveTPUEmbeddingAdadeltaParametersConfig(value string) RetrieveTPUEmbeddingAdadeltaParametersAttr {
48791	return func(m optionalAttr) {
48792		m["config"] = value
48793	}
48794}
48795
48796// Retrieve Adadelta embedding parameters.
48797//
48798// An op that retrieves optimization parameters from embedding to host
48799// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
48800// the correct embedding table configuration. For example, this op is
48801// used to retrieve updated parameters before saving a checkpoint.
48802//
48803// Returns:
48804//	parameters: Parameter parameters updated by the Adadelta optimization algorithm.
48805//	accumulators: Parameter accumulators updated by the Adadelta optimization algorithm.
48806//	updates: Parameter updates updated by the Adadelta optimization algorithm.
48807func RetrieveTPUEmbeddingAdadeltaParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdadeltaParametersAttr) (parameters tf.Output, accumulators tf.Output, updates tf.Output) {
48808	if scope.Err() != nil {
48809		return
48810	}
48811	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
48812	for _, a := range optional {
48813		a(attrs)
48814	}
48815	opspec := tf.OpSpec{
48816		Type: "RetrieveTPUEmbeddingAdadeltaParameters",
48817
48818		Attrs: attrs,
48819	}
48820	op := scope.AddOperation(opspec)
48821	return op.Output(0), op.Output(1), op.Output(2)
48822}
48823
48824// Returns a tensor of zeros with the same shape and type as x.
48825//
48826// Arguments:
48827//	x: a tensor of type T.
48828//
48829// Returns a tensor of the same shape and type as x but filled with zeros.
48830func ZerosLike(scope *Scope, x tf.Output) (y tf.Output) {
48831	if scope.Err() != nil {
48832		return
48833	}
48834	opspec := tf.OpSpec{
48835		Type: "ZerosLike",
48836		Input: []tf.Input{
48837			x,
48838		},
48839	}
48840	op := scope.AddOperation(opspec)
48841	return op.Output(0)
48842}
48843
48844// Returns a constant tensor on the host. Only for writing C++ tests.
48845//
48846// Arguments:
48847//	value: Attr `value` is the tensor to return.
48848//
48849func HostConst(scope *Scope, value tf.Tensor, dtype tf.DataType) (output tf.Output) {
48850	if scope.Err() != nil {
48851		return
48852	}
48853	attrs := map[string]interface{}{"value": value, "dtype": dtype}
48854	opspec := tf.OpSpec{
48855		Type: "HostConst",
48856
48857		Attrs: attrs,
48858	}
48859	op := scope.AddOperation(opspec)
48860	return op.Output(0)
48861}
48862
48863// A placeholder op that passes through `input` when its output is not fed.
48864//
48865// Arguments:
48866//	input: The default value to produce when `output` is not fed.
48867//	shape: The (possibly partial) shape of the tensor.
48868//
48869// Returns A placeholder tensor that defaults to `input` if it is not fed.
48870func PlaceholderWithDefault(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
48871	if scope.Err() != nil {
48872		return
48873	}
48874	attrs := map[string]interface{}{"shape": shape}
48875	opspec := tf.OpSpec{
48876		Type: "PlaceholderWithDefault",
48877		Input: []tf.Input{
48878			input,
48879		},
48880		Attrs: attrs,
48881	}
48882	op := scope.AddOperation(opspec)
48883	return op.Output(0)
48884}
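
// Example: a client-side sketch; running the graph without feeding `x` yields
// the default value, while feeding `x` overrides it:
//
// ```go
// s := op.NewScope()
// def := op.Const(s, []float32{1, 2, 3})
// x := op.PlaceholderWithDefault(s, def, tf.MakeShape(3))
// _ = x // fetch with or without a feed for x
// ```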
48885
48886// NotEqualAttr is an optional argument to NotEqual.
48887type NotEqualAttr func(optionalAttr)
48888
48889// NotEqualIncompatibleShapeError sets the optional incompatible_shape_error attribute to value.
48890// If not specified, defaults to true
48891func NotEqualIncompatibleShapeError(value bool) NotEqualAttr {
48892	return func(m optionalAttr) {
48893		m["incompatible_shape_error"] = value
48894	}
48895}
48896
48897// Returns the truth value of (x != y) element-wise.
48898//
48899// *NOTE*: `NotEqual` supports broadcasting. More about broadcasting
48900// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
48901func NotEqual(scope *Scope, x tf.Output, y tf.Output, optional ...NotEqualAttr) (z tf.Output) {
48902	if scope.Err() != nil {
48903		return
48904	}
48905	attrs := map[string]interface{}{}
48906	for _, a := range optional {
48907		a(attrs)
48908	}
48909	opspec := tf.OpSpec{
48910		Type: "NotEqual",
48911		Input: []tf.Input{
48912			x, y,
48913		},
48914		Attrs: attrs,
48915	}
48916	op := scope.AddOperation(opspec)
48917	return op.Output(0)
48918}
48919
48920// Adds v into specified rows of x.
48921//
48922//     Computes y = x; y[i, :] += v; return y.
48923//
48924// Arguments:
48925//	x: A `Tensor` of type T.
48926//	i: A vector. Indices into the left-most dimension of `x`.
48927//	v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
48928//
48929// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
48930func InplaceAdd(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
48931	if scope.Err() != nil {
48932		return
48933	}
48934	opspec := tf.OpSpec{
48935		Type: "InplaceAdd",
48936		Input: []tf.Input{
48937			x, i, v,
48938		},
48939	}
48940	op := scope.AddOperation(opspec)
48941	return op.Output(0)
48942}
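
// Example: a client-side sketch adding two row vectors into rows 0 and 2; the
// constants are illustrative only:
//
// ```go
// s := op.NewScope()
// x := op.Const(s, [][]int32{{1, 1}, {2, 2}, {3, 3}})
// i := op.Const(s, []int32{0, 2})
// v := op.Const(s, [][]int32{{10, 10}, {30, 30}})
// y := op.InplaceAdd(s, x, i, v) // rows 0 and 2 become [11 11] and [33 33]
// _ = y
// ```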
48943
48944// An op used by XLA SPMD partitioner to switch from manual partitioning to
48945//
48946// automatic partitioning. It converts the shard-shaped, manually partitioned input
48947// into a full-shaped tensor to be partitioned automatically with the same sharding
48948// used by manual partitioning.
48949func XlaSpmdShardToFullShape(scope *Scope, input tf.Output, manual_sharding string, full_shape tf.Shape) (output tf.Output) {
48950	if scope.Err() != nil {
48951		return
48952	}
48953	attrs := map[string]interface{}{"manual_sharding": manual_sharding, "full_shape": full_shape}
48954	opspec := tf.OpSpec{
48955		Type: "XlaSpmdShardToFullShape",
48956		Input: []tf.Input{
48957			input,
48958		},
48959		Attrs: attrs,
48960	}
48961	op := scope.AddOperation(opspec)
48962	return op.Output(0)
48963}
48964
48965// Updates the table to associate keys with values.
48966//
48967// The tensor `keys` must be of the same type as the keys of the table.
48968// The tensor `values` must be of the type of the table values.
48969//
48970// Arguments:
48971//	table_handle: Handle to the table.
48972//	keys: Any shape.  Keys to look up.
48973//	values: Values to associate with keys.
48974//
48975// Returns the created operation.
48976func LookupTableInsertV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
48977	if scope.Err() != nil {
48978		return
48979	}
48980	opspec := tf.OpSpec{
48981		Type: "LookupTableInsertV2",
48982		Input: []tf.Input{
48983			table_handle, keys, values,
48984		},
48985	}
48986	return scope.AddOperation(opspec)
48987}
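
// Example: ops that return only a *tf.Operation produce no fetchable output
// and must be passed to Session.Run as targets. A sketch, assuming `table`,
// `keys`, and `values` were built by other wrappers (hypothetical names) and
// `sess` is an existing tf.Session:
//
// ```go
// insert := op.LookupTableInsertV2(s, table, keys, values)
// if _, err := sess.Run(nil, nil, []*tf.Operation{insert}); err != nil {
// 	// handle the error
// }
// ```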
48988
48989// UnsortedSegmentJoinAttr is an optional argument to UnsortedSegmentJoin.
48990type UnsortedSegmentJoinAttr func(optionalAttr)
48991
48992// UnsortedSegmentJoinSeparator sets the optional separator attribute to value.
48993//
48994// value: The separator to use when joining.
48995// If not specified, defaults to ""
48996func UnsortedSegmentJoinSeparator(value string) UnsortedSegmentJoinAttr {
48997	return func(m optionalAttr) {
48998		m["separator"] = value
48999	}
49000}
49001
49002// Joins the elements of `inputs` based on `segment_ids`.
49003//
49004// Computes the string join along segments of a tensor.
49005// Given `segment_ids` with rank `N` and `data` with rank `N+M`:
49006//
49007//     `output[i, k1...kM] = strings.join([data[j1...jN, k1...kM]])`
49008//
49009// where the join is over all [j1...jN] such that segment_ids[j1...jN] = i.
49010// Strings are joined in row-major order.
49011//
49012// For example:
49013//
49014// ```python
49015// inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
49016// output_array = string_ops.unsorted_segment_join(inputs=inputs,
49017//                                                 segment_ids=[1, 0, 1],
49018//                                                 num_segments=2,
49019//                                                 separator=':')
49020// # output_array ==> [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']]
49021//
49022//
49023// inputs = ['this', 'is', 'a', 'test']
49024// output_array = string_ops.unsorted_segment_join(inputs=inputs,
49025//                                                 segment_ids=[0, 0, 0, 0],
49026//                                                 num_segments=1,
49027//                                                 separator=':')
49028// # output_array ==> ['this:is:a:test']
49029// ```
49030//
49031// Arguments:
49032//	inputs: The input to be joined.
49033//	segment_ids: A tensor whose shape is a prefix of data.shape.  Negative segment ids are not
49034// supported.
49035//	num_segments: A scalar.
49036func UnsortedSegmentJoin(scope *Scope, inputs tf.Output, segment_ids tf.Output, num_segments tf.Output, optional ...UnsortedSegmentJoinAttr) (output tf.Output) {
49037	if scope.Err() != nil {
49038		return
49039	}
49040	attrs := map[string]interface{}{}
49041	for _, a := range optional {
49042		a(attrs)
49043	}
49044	opspec := tf.OpSpec{
49045		Type: "UnsortedSegmentJoin",
49046		Input: []tf.Input{
49047			inputs, segment_ids, num_segments,
49048		},
49049		Attrs: attrs,
49050	}
49051	op := scope.AddOperation(opspec)
49052	return op.Output(0)
49053}
49054
49055// MapUnstageNoKeyAttr is an optional argument to MapUnstageNoKey.
49056type MapUnstageNoKeyAttr func(optionalAttr)
49057
49058// MapUnstageNoKeyCapacity sets the optional capacity attribute to value.
49059// If not specified, defaults to 0
49060//
49061// REQUIRES: value >= 0
49062func MapUnstageNoKeyCapacity(value int64) MapUnstageNoKeyAttr {
49063	return func(m optionalAttr) {
49064		m["capacity"] = value
49065	}
49066}
49067
49068// MapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value.
49069// If not specified, defaults to 0
49070//
49071// REQUIRES: value >= 0
49072func MapUnstageNoKeyMemoryLimit(value int64) MapUnstageNoKeyAttr {
49073	return func(m optionalAttr) {
49074		m["memory_limit"] = value
49075	}
49076}
49077
49078// MapUnstageNoKeyContainer sets the optional container attribute to value.
49079// If not specified, defaults to ""
49080func MapUnstageNoKeyContainer(value string) MapUnstageNoKeyAttr {
49081	return func(m optionalAttr) {
49082		m["container"] = value
49083	}
49084}
49085
49086// MapUnstageNoKeySharedName sets the optional shared_name attribute to value.
49087// If not specified, defaults to ""
49088func MapUnstageNoKeySharedName(value string) MapUnstageNoKeyAttr {
49089	return func(m optionalAttr) {
49090		m["shared_name"] = value
49091	}
49092}
49093
49094// Op removes and returns a random (key, value)
49095//
49096// from the underlying container.   If the underlying container
49097// does not contain elements, the op will block until it does.
49098func MapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageNoKeyAttr) (key tf.Output, values []tf.Output) {
49099	if scope.Err() != nil {
49100		return
49101	}
49102	attrs := map[string]interface{}{"dtypes": dtypes}
49103	for _, a := range optional {
49104		a(attrs)
49105	}
49106	opspec := tf.OpSpec{
49107		Type: "MapUnstageNoKey",
49108		Input: []tf.Input{
49109			indices,
49110		},
49111		Attrs: attrs,
49112	}
49113	op := scope.AddOperation(opspec)
49114	if scope.Err() != nil {
49115		return
49116	}
49117	var idx int
49118	var err error
49119	key = op.Output(idx)
49120	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
49121		scope.UpdateErr("MapUnstageNoKey", err)
49122		return
49123	}
49124	return key, values
49125}
49126
49127// BoostedTreesQuantileStreamResourceFlushAttr is an optional argument to BoostedTreesQuantileStreamResourceFlush.
49128type BoostedTreesQuantileStreamResourceFlushAttr func(optionalAttr)
49129
49130// BoostedTreesQuantileStreamResourceFlushGenerateQuantiles sets the optional generate_quantiles attribute to value.
49131//
49132// value: bool; if True, the output will be `num_quantiles` quantiles for each
49133// stream, where the ith entry is the ith quantile of the input with an
49134// approximation error of epsilon. Duplicate values may be present.
49135// If False, the output will be the points in the histogram that were collected,
49136// which roughly translates to 1/epsilon boundaries, without any duplicates.
49137// Defaults to False.
49138// If not specified, defaults to false
49139func BoostedTreesQuantileStreamResourceFlushGenerateQuantiles(value bool) BoostedTreesQuantileStreamResourceFlushAttr {
49140	return func(m optionalAttr) {
49141		m["generate_quantiles"] = value
49142	}
49143}
49144
49145// Flush the summaries for a quantile stream resource.
49146//
49147// An op that flushes the summaries for a quantile stream resource.
49148//
49149// Arguments:
49150//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
49151//	num_buckets: int; approximate number of buckets unless using generate_quantiles.
49152//
49153// Returns the created operation.
49154func BoostedTreesQuantileStreamResourceFlush(scope *Scope, quantile_stream_resource_handle tf.Output, num_buckets tf.Output, optional ...BoostedTreesQuantileStreamResourceFlushAttr) (o *tf.Operation) {
49155	if scope.Err() != nil {
49156		return
49157	}
49158	attrs := map[string]interface{}{}
49159	for _, a := range optional {
49160		a(attrs)
49161	}
49162	opspec := tf.OpSpec{
49163		Type: "BoostedTreesQuantileStreamResourceFlush",
49164		Input: []tf.Input{
49165			quantile_stream_resource_handle, num_buckets,
49166		},
49167		Attrs: attrs,
49168	}
49169	return scope.AddOperation(opspec)
49170}
49171
49172// WholeFileReaderV2Attr is an optional argument to WholeFileReaderV2.
49173type WholeFileReaderV2Attr func(optionalAttr)
49174
49175// WholeFileReaderV2Container sets the optional container attribute to value.
49176//
49177// value: If non-empty, this reader is placed in the given container.
49178// Otherwise, a default container is used.
49179// If not specified, defaults to ""
49180func WholeFileReaderV2Container(value string) WholeFileReaderV2Attr {
49181	return func(m optionalAttr) {
49182		m["container"] = value
49183	}
49184}
49185
49186// WholeFileReaderV2SharedName sets the optional shared_name attribute to value.
49187//
49188// value: If non-empty, this reader is named in the given bucket
49189// with this shared_name. Otherwise, the node name is used instead.
49190// If not specified, defaults to ""
49191func WholeFileReaderV2SharedName(value string) WholeFileReaderV2Attr {
49192	return func(m optionalAttr) {
49193		m["shared_name"] = value
49194	}
49195}
49196
49197// A Reader that outputs the entire contents of a file as a value.
49198//
49199// To use, enqueue filenames in a Queue.  The output of ReaderRead will
49200// be a filename (key) and the contents of that file (value).
49201//
49202// Returns The handle to reference the Reader.
49203func WholeFileReaderV2(scope *Scope, optional ...WholeFileReaderV2Attr) (reader_handle tf.Output) {
49204	if scope.Err() != nil {
49205		return
49206	}
49207	attrs := map[string]interface{}{}
49208	for _, a := range optional {
49209		a(attrs)
49210	}
49211	opspec := tf.OpSpec{
49212		Type: "WholeFileReaderV2",
49213
49214		Attrs: attrs,
49215	}
49216	op := scope.AddOperation(opspec)
49217	return op.Output(0)
49218}
49219
49220// Adds sparse `updates` to an existing tensor according to `indices`.
49221//
49222// This operation creates a new tensor by adding sparse `updates` to the passed
49223// in `tensor`.
49224// This operation is very similar to `tf.compat.v1.scatter_nd_add`, except that the updates
49225// are added onto an existing tensor (as opposed to a variable). If the memory
49226// for the existing tensor cannot be re-used, a copy is made and updated.
49227//
49228// `indices` is an integer tensor containing indices into a new tensor of shape
49229// `tensor.shape`.  The last dimension of `indices` can be at most the rank of
49230// `tensor.shape`:
49231//
49232//     indices.shape[-1] <= tensor.shape.rank
49233//
49234// The last dimension of `indices` corresponds to indices into elements
49235// (if `indices.shape[-1] = tensor.shape.rank`) or slices
49236// (if `indices.shape[-1] < tensor.shape.rank`) along dimension
49237// `indices.shape[-1]` of `tensor.shape`.  `updates` is a tensor with shape
49238//
49239//     indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
49240//
49241// The simplest form of tensor_scatter_add is to add individual elements to a
49242// tensor by index. For example, say we want to add 4 elements in a rank-1
49243// tensor with 8 elements.
49244//
49245// In Python, this scatter add operation would look like this:
49246//
49247// ```python
49248//     indices = tf.constant([[4], [3], [1], [7]])
49249//     updates = tf.constant([9, 10, 11, 12])
49250//     tensor = tf.ones([8], dtype=tf.int32)
49251//     updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
49252//     print(updated)
49253// ```
49254//
49255// The resulting tensor would look like this:
49256//
49257//     [1, 12, 1, 11, 10, 1, 1, 13]
49258//
49259// We can also insert entire slices of a higher-rank tensor all at once: for
49260// example, inserting two slices in the first dimension of a rank-3 tensor
49261// with two matrices of new values.
49262//
49263// In Python, this scatter add operation would look like this:
49264//
49265// ```python
49266//     indices = tf.constant([[0], [2]])
49267//     updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
49268//                             [7, 7, 7, 7], [8, 8, 8, 8]],
49269//                            [[5, 5, 5, 5], [6, 6, 6, 6],
49270//                             [7, 7, 7, 7], [8, 8, 8, 8]]])
49271//     tensor = tf.ones([4, 4, 4],dtype=tf.int32)
49272//     updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
49273//     print(updated)
49274// ```
49275//
49276// The resulting tensor would look like this:
49277//
49278//     [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
49279//      [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
49280//      [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
49281//      [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
49282//
49283// Note that on CPU, if an out of bound index is found, an error is returned.
49284// On GPU, if an out of bound index is found, the index is ignored.
49285//
49286// Arguments:
49287//	tensor: Tensor to copy/update.
49288//	indices: Index tensor.
49289//	updates: Updates to scatter into output.
49290//
49291// Returns A new tensor copied from tensor and updates added according to the indices.
49292func TensorScatterAdd(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
49293	if scope.Err() != nil {
49294		return
49295	}
49296	opspec := tf.OpSpec{
49297		Type: "TensorScatterAdd",
49298		Input: []tf.Input{
49299			tensor, indices, updates,
49300		},
49301	}
49302	op := scope.AddOperation(opspec)
49303	return op.Output(0)
49304}
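
// Example: a client-side Go equivalent of the first Python snippet above;
// session plumbing is omitted:
//
// ```go
// s := op.NewScope()
// indices := op.Const(s, [][]int32{{4}, {3}, {1}, {7}})
// updates := op.Const(s, []int32{9, 10, 11, 12})
// tensor := op.Const(s, []int32{1, 1, 1, 1, 1, 1, 1, 1})
// updated := op.TensorScatterAdd(s, tensor, indices, updates)
// _ = updated // fetched value: [1 12 1 11 10 1 1 13]
// ```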
49305
49306// QuantizedConv2DAttr is an optional argument to QuantizedConv2D.
49307type QuantizedConv2DAttr func(optionalAttr)
49308
49309// QuantizedConv2DOutType sets the optional out_type attribute to value.
49310// If not specified, defaults to DT_QINT32
49311func QuantizedConv2DOutType(value tf.DataType) QuantizedConv2DAttr {
49312	return func(m optionalAttr) {
49313		m["out_type"] = value
49314	}
49315}
49316
49317// QuantizedConv2DDilations sets the optional dilations attribute to value.
49318//
49319// value: 1-D tensor of length 4.  The dilation factor for each dimension of
49320// `input`. If set to k > 1, there will be k-1 skipped cells between each
49321// filter element on that dimension. The dimension order is determined by the
49322// value of `data_format`, see above for details. Dilations in the batch and
49323// depth dimensions must be 1.
49324// If not specified, defaults to [1, 1, 1, 1]
49325func QuantizedConv2DDilations(value []int64) QuantizedConv2DAttr {
49326	return func(m optionalAttr) {
49327		m["dilations"] = value
49328	}
49329}
49330
49331// Computes a 2D convolution given quantized 4D input and filter tensors.
49332//
49333// The inputs are quantized tensors where the lowest value represents the real
49334// number of the associated minimum, and the highest represents the maximum.
49335// This means that you can only interpret the quantized output in the same way, by
49336// taking the returned minimum and maximum values into account.
49337//
49338// Arguments:
49339//
49340//	filter: filter's input_depth dimension must match input's depth dimension.
49341//	min_input: The float value that the lowest quantized input value represents.
49342//	max_input: The float value that the highest quantized input value represents.
49343//	min_filter: The float value that the lowest quantized filter value represents.
49344//	max_filter: The float value that the highest quantized filter value represents.
49345//	strides: The stride of the sliding window for each dimension of the input
49346// tensor.
49347//	padding: The type of padding algorithm to use.
49348//
49349// Returns:
49350//	output
49351//	min_output: The float value that the lowest quantized output value represents.
49352//	max_output: The float value that the highest quantized output value represents.
49353func QuantizedConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedConv2DAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
49354	if scope.Err() != nil {
49355		return
49356	}
49357	attrs := map[string]interface{}{"strides": strides, "padding": padding}
49358	for _, a := range optional {
49359		a(attrs)
49360	}
49361	opspec := tf.OpSpec{
49362		Type: "QuantizedConv2D",
49363		Input: []tf.Input{
49364			input, filter, min_input, max_input, min_filter, max_filter,
49365		},
49366		Attrs: attrs,
49367	}
49368	op := scope.AddOperation(opspec)
49369	return op.Output(0), op.Output(1), op.Output(2)
49370}
49371
49372// Wraps the XLA Gather operator documented at
49373//
49374//   https://www.tensorflow.org/xla/operation_semantics#gather
49375//
49376// Arguments:
49377//	operand: The array we're gathering from.
49378//	start_indices: Array containing the starting indices of the slices we gather.
49379//	slice_sizes: slice_sizes[i] is the bounds for the slice on dimension i.
49380//	dimension_numbers: A serialized xla::GatherDimensionNumbers proto.
49381//	indices_are_sorted: Boolean indicating if the indices are sorted.
49382func XlaGather(scope *Scope, operand tf.Output, start_indices tf.Output, slice_sizes tf.Output, dimension_numbers string, indices_are_sorted bool) (output tf.Output) {
49383	if scope.Err() != nil {
49384		return
49385	}
49386	attrs := map[string]interface{}{"dimension_numbers": dimension_numbers, "indices_are_sorted": indices_are_sorted}
49387	opspec := tf.OpSpec{
49388		Type: "XlaGather",
49389		Input: []tf.Input{
49390			operand, start_indices, slice_sizes,
49391		},
49392		Attrs: attrs,
49393	}
49394	op := scope.AddOperation(opspec)
49395	return op.Output(0)
49396}
49397
49398// QueueEnqueueV2Attr is an optional argument to QueueEnqueueV2.
49399type QueueEnqueueV2Attr func(optionalAttr)
49400
49401// QueueEnqueueV2TimeoutMs sets the optional timeout_ms attribute to value.
49402//
49403// value: If the queue is full, this operation will block for up to
49404// timeout_ms milliseconds.
49405// Note: This option is not supported yet.
49406// If not specified, defaults to -1
49407func QueueEnqueueV2TimeoutMs(value int64) QueueEnqueueV2Attr {
49408	return func(m optionalAttr) {
49409		m["timeout_ms"] = value
49410	}
49411}
49412
49413// Enqueues a tuple of one or more tensors in the given queue.
49414//
49415// The components input has k elements, which correspond to the components of
49416// tuples stored in the given queue.
49417//
49418// N.B. If the queue is full, this operation will block until the given
49419// element has been enqueued (or 'timeout_ms' elapses, if specified).
49420//
49421// Arguments:
49422//	handle: The handle to a queue.
49423//	components: One or more tensors from which the enqueued tensors should be taken.
49424//
49425// Returns the created operation.
49426func QueueEnqueueV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueV2Attr) (o *tf.Operation) {
49427	if scope.Err() != nil {
49428		return
49429	}
49430	attrs := map[string]interface{}{}
49431	for _, a := range optional {
49432		a(attrs)
49433	}
49434	opspec := tf.OpSpec{
49435		Type: "QueueEnqueueV2",
49436		Input: []tf.Input{
49437			handle, tf.OutputList(components),
49438		},
49439		Attrs: attrs,
49440	}
49441	return scope.AddOperation(opspec)
49442}
49443
49444// BoostedTreesQuantileStreamResourceHandleOpAttr is an optional argument to BoostedTreesQuantileStreamResourceHandleOp.
49445type BoostedTreesQuantileStreamResourceHandleOpAttr func(optionalAttr)
49446
49447// BoostedTreesQuantileStreamResourceHandleOpContainer sets the optional container attribute to value.
49448// If not specified, defaults to ""
49449func BoostedTreesQuantileStreamResourceHandleOpContainer(value string) BoostedTreesQuantileStreamResourceHandleOpAttr {
49450	return func(m optionalAttr) {
49451		m["container"] = value
49452	}
49453}
49454
49455// BoostedTreesQuantileStreamResourceHandleOpSharedName sets the optional shared_name attribute to value.
49456// If not specified, defaults to ""
49457func BoostedTreesQuantileStreamResourceHandleOpSharedName(value string) BoostedTreesQuantileStreamResourceHandleOpAttr {
49458	return func(m optionalAttr) {
49459		m["shared_name"] = value
49460	}
49461}
49462
49463// Creates a handle to a BoostedTreesQuantileStreamResource.
49464func BoostedTreesQuantileStreamResourceHandleOp(scope *Scope, optional ...BoostedTreesQuantileStreamResourceHandleOpAttr) (resource tf.Output) {
49465	if scope.Err() != nil {
49466		return
49467	}
49468	attrs := map[string]interface{}{}
49469	for _, a := range optional {
49470		a(attrs)
49471	}
49472	opspec := tf.OpSpec{
49473		Type: "BoostedTreesQuantileStreamResourceHandleOp",
49474
49475		Attrs: attrs,
49476	}
49477	op := scope.AddOperation(opspec)
49478	return op.Output(0)
49479}
49480
49481// Subtracts `v` from specified rows of `x`.
49482//
49483//     Computes y = x; y[i, :] -= v; return y.
49484//
49485// Arguments:
49486//	x: A `Tensor` of type T.
49487//	i: A vector. Indices into the left-most dimension of `x`.
49488//	v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
49489//
49490// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
49491func InplaceSub(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
49492	if scope.Err() != nil {
49493		return
49494	}
49495	opspec := tf.OpSpec{
49496		Type: "InplaceSub",
49497		Input: []tf.Input{
49498			x, i, v,
49499		},
49500	}
49501	op := scope.AddOperation(opspec)
49502	return op.Output(0)
49503}
49504
49505// Creates a dataset with a range of values. Corresponds to python's xrange.
49506//
49507// Arguments:
49508//	start: corresponds to start in python's xrange().
49509//	stop: corresponds to stop in python's xrange().
49510//	step: corresponds to step in python's xrange().
49511//
49512//
49513func RangeDataset(scope *Scope, start tf.Output, stop tf.Output, step tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
49514	if scope.Err() != nil {
49515		return
49516	}
49517	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
49518	opspec := tf.OpSpec{
49519		Type: "RangeDataset",
49520		Input: []tf.Input{
49521			start, stop, step,
49522		},
49523		Attrs: attrs,
49524	}
49525	op := scope.AddOperation(opspec)
49526	return op.Output(0)
49527}
49528
49529// Computes the absolute value of a tensor.
49530//
49531// Given a tensor `x`, this operation returns a tensor containing the absolute
49532// value of each element in `x`. For example, if x is an input element and y is
49533// an output element, this operation computes \\(y = |x|\\).
49534func Abs(scope *Scope, x tf.Output) (y tf.Output) {
49535	if scope.Err() != nil {
49536		return
49537	}
49538	opspec := tf.OpSpec{
49539		Type: "Abs",
49540		Input: []tf.Input{
49541			x,
49542		},
49543	}
49544	op := scope.AddOperation(opspec)
49545	return op.Output(0)
49546}
49547
49548// Wraps the XLA DotGeneral operator, documented at
49549//
49550//  https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
49551// .
49552//
49553// Arguments:
49554//	lhs: the LHS tensor
49555//	rhs: the RHS tensor
49556//	dimension_numbers: a serialized xla::DotDimensionNumbers proto.
49557//	precision_config: a serialized xla::PrecisionConfig proto.
49558//	preferred_element_type: The type of the tensor.
49559func XlaDotV2(scope *Scope, lhs tf.Output, rhs tf.Output, dimension_numbers string, precision_config string, preferred_element_type tf.DataType) (output tf.Output) {
49560	if scope.Err() != nil {
49561		return
49562	}
49563	attrs := map[string]interface{}{"dimension_numbers": dimension_numbers, "precision_config": precision_config, "preferred_element_type": preferred_element_type}
49564	opspec := tf.OpSpec{
49565		Type: "XlaDotV2",
49566		Input: []tf.Input{
49567			lhs, rhs,
49568		},
49569		Attrs: attrs,
49570	}
49571	op := scope.AddOperation(opspec)
49572	return op.Output(0)
49573}
49574
49575// A container for an iterator resource.
49576//
49577// Returns A handle to the iterator that can be passed to a "MakeIterator" or
49578// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
49579// resource sharing by name, and does not keep a reference to the resource
49580// container.
49581func AnonymousIterator(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
49582	if scope.Err() != nil {
49583		return
49584	}
49585	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
49586	opspec := tf.OpSpec{
49587		Type: "AnonymousIterator",
49588
49589		Attrs: attrs,
49590	}
49591	op := scope.AddOperation(opspec)
49592	return op.Output(0)
49593}
49594
49595// UniqueWithCountsV2Attr is an optional argument to UniqueWithCountsV2.
49596type UniqueWithCountsV2Attr func(optionalAttr)
49597
49598// UniqueWithCountsV2OutIdx sets the optional out_idx attribute to value.
49599// If not specified, defaults to DT_INT32
49600func UniqueWithCountsV2OutIdx(value tf.DataType) UniqueWithCountsV2Attr {
49601	return func(m optionalAttr) {
49602		m["out_idx"] = value
49603	}
49604}
49605
49606// Finds unique elements along an axis of a tensor.
49607//
49608// This operation returns a tensor `y` containing the unique elements
49609// along the `axis` of a tensor. The returned unique elements are sorted
49610// in the same order as they occur along `axis` in `x`.
49611// This operation also returns a tensor `idx` and a tensor `count`
49612// that are the same size as the number of elements in `x` along the
49613// `axis` dimension. The `idx` contains the index in the unique output `y`,
49614// and the `count` contains the number of occurrences of each unique value in `y`.
49615// In other words, for a `1-D` tensor `x` with `axis = None`:
49616//
49617// `y[idx[i]] = x[i] for i in [0, 1,...,len(x) - 1]`
49618//
49619// For example:
49620//
49621// ```
49622// x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
49623// y, idx, count = UniqueWithCountsV2(x, axis = [0])
49624// y ==> [1, 2, 4, 7, 8]
49625// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
49626// count ==> [2, 1, 3, 1, 2]
49627// ```
49628//
49629// For a `2-D` tensor `x` with `axis = 0`:
49630//
49631// ```
49632// x = tf.constant([[1, 0, 0],
49633//                 [1, 0, 0],
49634//                 [2, 0, 0]])
49635// y, idx, count = UniqueWithCountsV2(x, axis=[0])
49636// y ==> [[1, 0, 0],
49637//        [2, 0, 0]]
49638// idx ==> [0, 0, 1]
49639// count ==> [2, 1]
49640// ```
49641//
49642// For a `2-D` tensor `x` with `axis = 1`:
49643//
49644// ```
49645// x = tf.constant([[1, 0, 0],
49646//                 [1, 0, 0],
49647//                 [2, 0, 0]])
49648// y, idx, count = UniqueWithCountsV2(x, axis=[1])
49649// y ==> [[1, 0],
49650//        [1, 0],
49651//        [2, 0]]
49652// idx ==> [0, 1, 1]
49653// count ==> [1, 2]
49654// ```
49655//
49656// Arguments:
49657//	x: A `Tensor`.
49658//	axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor
49659// along which to find the unique elements.
49660//
49661// Returns:
49662//	y: A `Tensor`. Unique elements along the `axis` of `Tensor` x.
49663//	idx: A 1-D Tensor of type `out_idx`, containing the index of each
49664// value of x in the output y.
49665//	count: A 1-D Tensor. The count of each value of x in the output y.
49666func UniqueWithCountsV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueWithCountsV2Attr) (y tf.Output, idx tf.Output, count tf.Output) {
49667	if scope.Err() != nil {
49668		return
49669	}
49670	attrs := map[string]interface{}{}
49671	for _, a := range optional {
49672		a(attrs)
49673	}
49674	opspec := tf.OpSpec{
49675		Type: "UniqueWithCountsV2",
49676		Input: []tf.Input{
49677			x, axis,
49678		},
49679		Attrs: attrs,
49680	}
49681	op := scope.AddOperation(opspec)
49682	return op.Output(0), op.Output(1), op.Output(2)
49683}
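
// exampleUniqueWithCountsV2 is a hand-written, illustrative sketch (not
// generated): the 1-D example from the documentation above, with the optional
// out_idx attribute widened to int64.
func exampleUniqueWithCountsV2() (y, idx, count tf.Output) {
	s := NewScope()
	x := Const(s, []int32{1, 1, 2, 4, 4, 4, 7, 8, 8})
	return UniqueWithCountsV2(s, x, Const(s, []int32{0}),
		UniqueWithCountsV2OutIdx(tf.Int64))
}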
49684
49685// Draw bounding boxes on a batch of images.
49686//
49687// Outputs a copy of `images` but draws on top of the pixels zero or more bounding
49688// boxes specified by the locations in `boxes`. The coordinates of each
49689// bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
49690// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
49691// height of the underlying image.
49692//
49693// For example, if an image is 100 x 200 pixels (height x width) and the bounding
49694// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
49695// the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
49696//
49697// Parts of the bounding box may fall outside the image.
49698//
49699// Arguments:
49700//	images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
49701//	boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
49702// boxes.
49703//
49704// Returns 4-D with the same shape as `images`. The batch of input images with
49705// bounding boxes drawn on the images.
49706func DrawBoundingBoxes(scope *Scope, images tf.Output, boxes tf.Output) (output tf.Output) {
49707	if scope.Err() != nil {
49708		return
49709	}
49710	opspec := tf.OpSpec{
49711		Type: "DrawBoundingBoxes",
49712		Input: []tf.Input{
49713			images, boxes,
49714		},
49715	}
49716	op := scope.AddOperation(opspec)
49717	return op.Output(0)
49718}
49719
49720// ComplexAttr is an optional argument to Complex.
49721type ComplexAttr func(optionalAttr)
49722
49723// ComplexTout sets the optional Tout attribute to value.
49724// If not specified, defaults to DT_COMPLEX64
49725func ComplexTout(value tf.DataType) ComplexAttr {
49726	return func(m optionalAttr) {
49727		m["Tout"] = value
49728	}
49729}
49730
49731// Converts two real numbers to a complex number.
49732//
49733// Given a tensor `real` representing the real part of a complex number, and a
49734// tensor `imag` representing the imaginary part of a complex number, this
49735// operation returns complex numbers elementwise of the form \\(a + bj\\), where
49736// *a* represents the `real` part and *b* represents the `imag` part.
49737//
49738// The input tensors `real` and `imag` must have the same shape.
49739//
49740// For example:
49741//
49742// ```
49743// # tensor 'real' is [2.25, 3.25]
49744// # tensor `imag` is [4.75, 5.75]
49745// tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
49746// ```
49747func Complex(scope *Scope, real tf.Output, imag tf.Output, optional ...ComplexAttr) (out tf.Output) {
49748	if scope.Err() != nil {
49749		return
49750	}
49751	attrs := map[string]interface{}{}
49752	for _, a := range optional {
49753		a(attrs)
49754	}
49755	opspec := tf.OpSpec{
49756		Type: "Complex",
49757		Input: []tf.Input{
49758			real, imag,
49759		},
49760		Attrs: attrs,
49761	}
49762	op := scope.AddOperation(opspec)
49763	return op.Output(0)
49764}
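
// exampleComplex is a hand-written, illustrative sketch (not generated):
// combining float64 parts into complex128 values. The Tout attribute should
// be paired with a matching input precision (float64 for complex128).
func exampleComplex() tf.Output {
	s := NewScope()
	re := Const(s, []float64{2.25, 3.25})
	im := Const(s, []float64{4.75, 5.75})
	return Complex(s, re, im, ComplexTout(tf.Complex128))
}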
49765
49766// DequantizeAttr is an optional argument to Dequantize.
49767type DequantizeAttr func(optionalAttr)
49768
49769// DequantizeMode sets the optional mode attribute to value.
49770// If not specified, defaults to "MIN_COMBINED"
49771func DequantizeMode(value string) DequantizeAttr {
49772	return func(m optionalAttr) {
49773		m["mode"] = value
49774	}
49775}
49776
49777// DequantizeNarrowRange sets the optional narrow_range attribute to value.
49778// If not specified, defaults to false
49779func DequantizeNarrowRange(value bool) DequantizeAttr {
49780	return func(m optionalAttr) {
49781		m["narrow_range"] = value
49782	}
49783}
49784
49785// DequantizeAxis sets the optional axis attribute to value.
49786// If not specified, defaults to -1
49787func DequantizeAxis(value int64) DequantizeAttr {
49788	return func(m optionalAttr) {
49789		m["axis"] = value
49790	}
49791}
49792
49793// DequantizeDtype sets the optional dtype attribute to value.
49794//
49795// value: Type of the output tensor. Currently Dequantize supports float and bfloat16.
49796// If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode.
49797// If not specified, defaults to DT_FLOAT
49798func DequantizeDtype(value tf.DataType) DequantizeAttr {
49799	return func(m optionalAttr) {
49800		m["dtype"] = value
49801	}
49802}
49803
49804// Dequantize the 'input' tensor into a float or bfloat16 Tensor.
49805//
49806// [min_range, max_range] are scalar floats that specify the range for
49807// the output. The 'mode' attribute controls exactly which calculations are
49808// used to convert the float values to their quantized equivalents.
49809//
49810// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
49811//
49812// ```
49813// if T == qint8: in[i] += (range(T) + 1)/ 2.0
49814// out[i] = min_range + (in[i]* (max_range - min_range) / range(T))
49815// ```
49816// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
49817//
49818// *MIN_COMBINED Mode Example*
49819//
49820// If the input comes from a QuantizedRelu6, the output type is
49821// quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
49822// 0-6.  The min_range and max_range values are therefore 0.0 and 6.0.
49823// Dequantize on quint8 will take each value, cast to float, and multiply
49824// by 6 / 255.
49825// Note that if the quantized type is qint8, the operation will additionally
49826// add 128 to each value prior to casting.
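//
// As a worked example (illustrative, not from the op definition): for quint8
// with min_range = 0.0 and max_range = 6.0, range(T) = 255, so a stored value
// of 128 dequantizes to 0.0 + 128 * (6.0 - 0.0) / 255 ≈ 3.01.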
49827//
49828// If the mode is 'MIN_FIRST', then this approach is used:
49829//
49830// ```c++
49831// num_discrete_values = 1 << (# of bits in T)
49832// range_adjust = num_discrete_values / (num_discrete_values - 1)
49833// range = (range_max - range_min) * range_adjust
49834// range_scale = range / num_discrete_values
49835// const double offset_input = static_cast<double>(input) - lowest_quantized;  // lowest_quantized = numeric_limits<T>::min()
49836// result = range_min + (offset_input * range_scale)
49837// ```
49838//
49839// If the mode is `SCALED`, dequantization is performed by multiplying each
49840// input value by a scaling_factor. (Thus an input of 0 always maps to 0.0).
49841//
49842// The scaling_factor is determined from `min_range`, `max_range`, and
49843// `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3}`
49844// and `QuantizeV2`, using the following algorithm:
49845//
49846// ```c++
49847//
49848//   const int min_expected_T = std::numeric_limits<T>::min() +
49849//     (narrow_range ? 1 : 0);
49850//   const int max_expected_T = std::numeric_limits<T>::max();
49852//
49853//   const float scale_factor =
49854//     (std::numeric_limits<T>::min() == 0) ? (max_range / max_expected_T)
49855//                                          : std::max(min_range / min_expected_T,
49856//                                                     max_range / max_expected_T);
49857// ```
49858//
49859// Arguments:
49860//
49861//	min_range: The minimum scalar value possibly produced for the input.
49862//	max_range: The maximum scalar value possibly produced for the input.
49863func Dequantize(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, optional ...DequantizeAttr) (output tf.Output) {
49864	if scope.Err() != nil {
49865		return
49866	}
49867	attrs := map[string]interface{}{}
49868	for _, a := range optional {
49869		a(attrs)
49870	}
49871	opspec := tf.OpSpec{
49872		Type: "Dequantize",
49873		Input: []tf.Input{
49874			input, min_range, max_range,
49875		},
49876		Attrs: attrs,
49877	}
49878	op := scope.AddOperation(opspec)
49879	return op.Output(0)
49880}
49881
49882// Returns the item in the list with the given index.
49883//
49884// input_handle: the list
49885// index: the position in the list from which an element will be retrieved
49886// item: the element at that position
49887//
49888//
49889func TensorListGetItem(scope *Scope, input_handle tf.Output, index tf.Output, element_shape tf.Output, element_dtype tf.DataType) (item tf.Output) {
49890	if scope.Err() != nil {
49891		return
49892	}
49893	attrs := map[string]interface{}{"element_dtype": element_dtype}
49894	opspec := tf.OpSpec{
49895		Type: "TensorListGetItem",
49896		Input: []tf.Input{
49897			input_handle, index, element_shape,
49898		},
49899		Attrs: attrs,
49900	}
49901	op := scope.AddOperation(opspec)
49902	return op.Output(0)
49903}
49904
49905// Returns a diagonal tensor with given diagonal values.
49906//
49907// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
49908// everything else padded with zeros. The diagonal is computed as follows:
49909//
49910// Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
49911// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
49912//
49913// `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
49914//
49915// For example:
49916//
49917// ```
49918// # 'diagonal' is [1, 2, 3, 4]
49919// tf.diag(diagonal) ==> [[1, 0, 0, 0]
49920//                        [0, 2, 0, 0]
49921//                        [0, 0, 3, 0]
49922//                        [0, 0, 0, 4]]
49923// ```
49924//
49925// Arguments:
49926//	diagonal: Rank k tensor where k is at most 1.
49927func Diag(scope *Scope, diagonal tf.Output) (output tf.Output) {
49928	if scope.Err() != nil {
49929		return
49930	}
49931	opspec := tf.OpSpec{
49932		Type: "Diag",
49933		Input: []tf.Input{
49934			diagonal,
49935		},
49936	}
49937	op := scope.AddOperation(opspec)
49938	return op.Output(0)
49939}
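
// exampleDiag is a hand-written, illustrative sketch (not generated): the
// documentation's example, producing the 4x4 matrix with [1, 2, 3, 4] on the
// diagonal.
func exampleDiag() tf.Output {
	s := NewScope()
	return Diag(s, Const(s, []int32{1, 2, 3, 4}))
}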
49940
49941// Partitions `data` into `num_partitions` tensors using indices from `partitions`.
49942//
49943// For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
49944// becomes part of `outputs[partitions[js]]`.  The slices with `partitions[js] = i`
49945// are placed in `outputs[i]` in lexicographic order of `js`, and the first
49946// dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
49947// In detail,
49948//
49949// ```python
49950//     outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
49951//
49952//     outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
49953// ```
49954//
49955// `data.shape` must start with `partitions.shape`.
49956//
49957// For example:
49958//
49959// ```python
49960//     # Scalar partitions.
49961//     partitions = 1
49962//     num_partitions = 2
49963//     data = [10, 20]
49964//     outputs[0] = []  # Empty with shape [0, 2]
49965//     outputs[1] = [[10, 20]]
49966//
49967//     # Vector partitions.
49968//     partitions = [0, 0, 1, 1, 0]
49969//     num_partitions = 2
49970//     data = [10, 20, 30, 40, 50]
49971//     outputs[0] = [10, 20, 50]
49972//     outputs[1] = [30, 40]
49973// ```
49974//
49975// See `dynamic_stitch` for an example on how to merge partitions back.
49976//
49977// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
49978// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
49979// </div>
49980//
49981// Arguments:
49982//
49983//	partitions: Any shape.  Indices in the range `[0, num_partitions)`.
49984//	num_partitions: The number of partitions to output.
49985func DynamicPartition(scope *Scope, data tf.Output, partitions tf.Output, num_partitions int64) (outputs []tf.Output) {
49986	if scope.Err() != nil {
49987		return
49988	}
49989	attrs := map[string]interface{}{"num_partitions": num_partitions}
49990	opspec := tf.OpSpec{
49991		Type: "DynamicPartition",
49992		Input: []tf.Input{
49993			data, partitions,
49994		},
49995		Attrs: attrs,
49996	}
49997	op := scope.AddOperation(opspec)
49998	if scope.Err() != nil {
49999		return
50000	}
50001	var idx int
50002	var err error
50003	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
50004		scope.UpdateErr("DynamicPartition", err)
50005		return
50006	}
50007	return outputs
50008}
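
// exampleDynamicPartition is a hand-written, illustrative sketch (not
// generated): the vector-partitions example from the documentation above;
// outputs[0] is [10, 20, 50] and outputs[1] is [30, 40].
func exampleDynamicPartition() []tf.Output {
	s := NewScope()
	data := Const(s, []int32{10, 20, 30, 40, 50})
	partitions := Const(s, []int32{0, 0, 1, 1, 0})
	return DynamicPartition(s, data, partitions, 2)
}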
50009
50010// BoostedTreesEnsembleResourceHandleOpAttr is an optional argument to BoostedTreesEnsembleResourceHandleOp.
50011type BoostedTreesEnsembleResourceHandleOpAttr func(optionalAttr)
50012
50013// BoostedTreesEnsembleResourceHandleOpContainer sets the optional container attribute to value.
50014// If not specified, defaults to ""
50015func BoostedTreesEnsembleResourceHandleOpContainer(value string) BoostedTreesEnsembleResourceHandleOpAttr {
50016	return func(m optionalAttr) {
50017		m["container"] = value
50018	}
50019}
50020
50021// BoostedTreesEnsembleResourceHandleOpSharedName sets the optional shared_name attribute to value.
50022// If not specified, defaults to ""
50023func BoostedTreesEnsembleResourceHandleOpSharedName(value string) BoostedTreesEnsembleResourceHandleOpAttr {
50024	return func(m optionalAttr) {
50025		m["shared_name"] = value
50026	}
50027}
50028
50029// Creates a handle to a BoostedTreesEnsembleResource
50030func BoostedTreesEnsembleResourceHandleOp(scope *Scope, optional ...BoostedTreesEnsembleResourceHandleOpAttr) (resource tf.Output) {
50031	if scope.Err() != nil {
50032		return
50033	}
50034	attrs := map[string]interface{}{}
50035	for _, a := range optional {
50036		a(attrs)
50037	}
50038	opspec := tf.OpSpec{
50039		Type: "BoostedTreesEnsembleResourceHandleOp",
50040
50041		Attrs: attrs,
50042	}
50043	op := scope.AddOperation(opspec)
50044	return op.Output(0)
50045}
50046
50047// CollectiveGatherV2Attr is an optional argument to CollectiveGatherV2.
50048type CollectiveGatherV2Attr func(optionalAttr)
50049
50050// CollectiveGatherV2CommunicationHint sets the optional communication_hint attribute to value.
50051// If not specified, defaults to "auto"
50052func CollectiveGatherV2CommunicationHint(value string) CollectiveGatherV2Attr {
50053	return func(m optionalAttr) {
50054		m["communication_hint"] = value
50055	}
50056}
50057
50058// CollectiveGatherV2TimeoutSeconds sets the optional timeout_seconds attribute to value.
50059// If not specified, defaults to 0
50060func CollectiveGatherV2TimeoutSeconds(value float32) CollectiveGatherV2Attr {
50061	return func(m optionalAttr) {
50062		m["timeout_seconds"] = value
50063	}
50064}
50065
50066// Mutually accumulates multiple tensors of identical type and shape.
50067func CollectiveGatherV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, instance_key tf.Output, ordering_token []tf.Output, optional ...CollectiveGatherV2Attr) (data tf.Output) {
50068	if scope.Err() != nil {
50069		return
50070	}
50071	attrs := map[string]interface{}{}
50072	for _, a := range optional {
50073		a(attrs)
50074	}
50075	opspec := tf.OpSpec{
50076		Type: "CollectiveGatherV2",
50077		Input: []tf.Input{
50078			input, group_size, group_key, instance_key, tf.OutputList(ordering_token),
50079		},
50080		Attrs: attrs,
50081	}
50082	op := scope.AddOperation(opspec)
50083	return op.Output(0)
50084}
50085
50086// Returns the number of nonzeroes of `sparse_matrix`.
50087//
50088// Arguments:
50089//	sparse_matrix: A CSRSparseMatrix.
50090//
50091// Returns The number of nonzeroes of `sparse_matrix`.
50092func SparseMatrixNNZ(scope *Scope, sparse_matrix tf.Output) (nnz tf.Output) {
50093	if scope.Err() != nil {
50094		return
50095	}
50096	opspec := tf.OpSpec{
50097		Type: "SparseMatrixNNZ",
50098		Input: []tf.Input{
50099			sparse_matrix,
50100		},
50101	}
50102	op := scope.AddOperation(opspec)
50103	return op.Output(0)
50104}
50105
50106// DecodeJpegAttr is an optional argument to DecodeJpeg.
50107type DecodeJpegAttr func(optionalAttr)
50108
50109// DecodeJpegChannels sets the optional channels attribute to value.
50110//
50111// value: Number of color channels for the decoded image.
50112// If not specified, defaults to 0
50113func DecodeJpegChannels(value int64) DecodeJpegAttr {
50114	return func(m optionalAttr) {
50115		m["channels"] = value
50116	}
50117}
50118
50119// DecodeJpegRatio sets the optional ratio attribute to value.
50120//
50121// value: Downscaling ratio.
50122// If not specified, defaults to 1
50123func DecodeJpegRatio(value int64) DecodeJpegAttr {
50124	return func(m optionalAttr) {
50125		m["ratio"] = value
50126	}
50127}
50128
50129// DecodeJpegFancyUpscaling sets the optional fancy_upscaling attribute to value.
50130//
50131// value: If true use a slower but nicer upscaling of the
50132// chroma planes (yuv420/422 only).
50133// If not specified, defaults to true
50134func DecodeJpegFancyUpscaling(value bool) DecodeJpegAttr {
50135	return func(m optionalAttr) {
50136		m["fancy_upscaling"] = value
50137	}
50138}
50139
50140// DecodeJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value.
50141//
50142// value: If true try to recover an image from truncated input.
50143// If not specified, defaults to false
50144func DecodeJpegTryRecoverTruncated(value bool) DecodeJpegAttr {
50145	return func(m optionalAttr) {
50146		m["try_recover_truncated"] = value
50147	}
50148}
50149
50150// DecodeJpegAcceptableFraction sets the optional acceptable_fraction attribute to value.
50151//
50152// value: The minimum required fraction of lines before a truncated
50153// input is accepted.
50154// If not specified, defaults to 1
50155func DecodeJpegAcceptableFraction(value float32) DecodeJpegAttr {
50156	return func(m optionalAttr) {
50157		m["acceptable_fraction"] = value
50158	}
50159}
50160
50161// DecodeJpegDctMethod sets the optional dct_method attribute to value.
50162//
50163// value: string specifying a hint about the algorithm used for
50164// decompression.  Defaults to "" which maps to a system-specific
50165// default.  Currently valid values are ["INTEGER_FAST",
50166// "INTEGER_ACCURATE"].  The hint may be ignored (e.g., if the internal
50167// JPEG library is changed to a version that does not have that specific
50168// option).
50169// If not specified, defaults to ""
50170func DecodeJpegDctMethod(value string) DecodeJpegAttr {
50171	return func(m optionalAttr) {
50172		m["dct_method"] = value
50173	}
50174}
50175
50176// Decode a JPEG-encoded image to a uint8 tensor.
50177//
50178// The attr `channels` indicates the desired number of color channels for the
50179// decoded image.
50180//
50181// Accepted values are:
50182//
50183// *   0: Use the number of channels in the JPEG-encoded image.
50184// *   1: output a grayscale image.
50185// *   3: output an RGB image.
50186//
50187// If needed, the JPEG-encoded image is transformed to match the requested number
50188// of color channels.
50189//
50190// The attr `ratio` allows downscaling the image by an integer factor during
50191// decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
50192// downscaling the image later.
50193//
50195// This op also supports decoding PNGs and non-animated GIFs since the interface is
50196// the same, though it is cleaner to use `tf.io.decode_image`.
50197//
50198// Arguments:
50199//	contents: 0-D.  The JPEG-encoded image.
50200//
50201// Returns 3-D with shape `[height, width, channels]`.
50202func DecodeJpeg(scope *Scope, contents tf.Output, optional ...DecodeJpegAttr) (image tf.Output) {
50203	if scope.Err() != nil {
50204		return
50205	}
50206	attrs := map[string]interface{}{}
50207	for _, a := range optional {
50208		a(attrs)
50209	}
50210	opspec := tf.OpSpec{
50211		Type: "DecodeJpeg",
50212		Input: []tf.Input{
50213			contents,
50214		},
50215		Attrs: attrs,
50216	}
50217	op := scope.AddOperation(opspec)
50218	return op.Output(0)
50219}
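
// exampleDecodeJpeg is a hand-written, illustrative sketch (not generated):
// decoding JPEG bytes fed in at run time, forcing RGB output and a 2x
// downscale during decode. It assumes the generated Placeholder wrapper from
// elsewhere in this package.
func exampleDecodeJpeg() tf.Output {
	s := NewScope()
	contents := Placeholder(s, tf.String) // fed with the raw JPEG file contents
	return DecodeJpeg(s, contents,
		DecodeJpegChannels(3),
		DecodeJpegRatio(2))
}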
50220
50221// Returns conj(x - y)(x - y) element-wise.
50222//
50223// *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
50224// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
50225func SquaredDifference(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
50226	if scope.Err() != nil {
50227		return
50228	}
50229	opspec := tf.OpSpec{
50230		Type: "SquaredDifference",
50231		Input: []tf.Input{
50232			x, y,
50233		},
50234	}
50235	op := scope.AddOperation(opspec)
50236	return op.Output(0)
50237}
50238
50239// Replica ID.
50240func XlaReplicaId(scope *Scope) (id tf.Output) {
50241	if scope.Err() != nil {
50242		return
50243	}
50244	opspec := tf.OpSpec{
50245		Type: "XlaReplicaId",
50246	}
50247	op := scope.AddOperation(opspec)
50248	return op.Output(0)
50249}
50250
50251// Output a fact about factorials.
50252func Fact(scope *Scope) (fact tf.Output) {
50253	if scope.Err() != nil {
50254		return
50255	}
50256	opspec := tf.OpSpec{
50257		Type: "Fact",
50258	}
50259	op := scope.AddOperation(opspec)
50260	return op.Output(0)
50261}
50262
50263// ModelDatasetAttr is an optional argument to ModelDataset.
50264type ModelDatasetAttr func(optionalAttr)
50265
50266// ModelDatasetAlgorithm sets the optional algorithm attribute to value.
50267// If not specified, defaults to 0
50268func ModelDatasetAlgorithm(value int64) ModelDatasetAttr {
50269	return func(m optionalAttr) {
50270		m["algorithm"] = value
50271	}
50272}
50273
50274// ModelDatasetCpuBudget sets the optional cpu_budget attribute to value.
50275// If not specified, defaults to 0
50276func ModelDatasetCpuBudget(value int64) ModelDatasetAttr {
50277	return func(m optionalAttr) {
50278		m["cpu_budget"] = value
50279	}
50280}
50281
50282// ModelDatasetRamBudget sets the optional ram_budget attribute to value.
50283// If not specified, defaults to 0
50284func ModelDatasetRamBudget(value int64) ModelDatasetAttr {
50285	return func(m optionalAttr) {
50286		m["ram_budget"] = value
50287	}
50288}
50289
50290// Identity transformation that models performance.
50291//
50294// Arguments:
50295//	input_dataset: A variant tensor representing the input dataset.
50296//
50297//
50298func ModelDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ModelDatasetAttr) (handle tf.Output) {
50299	if scope.Err() != nil {
50300		return
50301	}
50302	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
50303	for _, a := range optional {
50304		a(attrs)
50305	}
50306	opspec := tf.OpSpec{
50307		Type: "ModelDataset",
50308		Input: []tf.Input{
50309			input_dataset,
50310		},
50311		Attrs: attrs,
50312	}
50313	op := scope.AddOperation(opspec)
50314	return op.Output(0)
50315}
50316
50317// Returns up to `num_records` (key, value) pairs produced by a Reader.
50318//
50319// Will dequeue from the input queue if necessary (e.g. when the
50320// Reader needs to start reading from a new file since it has finished
50321// with the previous file).
50322// It may return fewer than `num_records` pairs, even before the last batch.
50323//
50324// Arguments:
50325//	reader_handle: Handle to a `Reader`.
50326//	queue_handle: Handle to a `Queue`, with string work items.
50327//	num_records: number of records to read from `Reader`.
50328//
50329// Returns:
50330//	keys: A 1-D tensor.
50331//	values: A 1-D tensor.
50332func ReaderReadUpToV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output, num_records tf.Output) (keys tf.Output, values tf.Output) {
50333	if scope.Err() != nil {
50334		return
50335	}
50336	opspec := tf.OpSpec{
50337		Type: "ReaderReadUpToV2",
50338		Input: []tf.Input{
50339			reader_handle, queue_handle, num_records,
50340		},
50341	}
50342	op := scope.AddOperation(opspec)
50343	return op.Output(0), op.Output(1)
50344}
50345
50346// UnpackAttr is an optional argument to Unpack.
50347type UnpackAttr func(optionalAttr)
50348
50349// UnpackAxis sets the optional axis attribute to value.
50350//
50351// value: Dimension along which to unpack.  Negative values wrap around, so the
50352// valid range is `[-R, R)`.
50353// If not specified, defaults to 0
50354func UnpackAxis(value int64) UnpackAttr {
50355	return func(m optionalAttr) {
50356		m["axis"] = value
50357	}
50358}
50359
50360// Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
50361//
50362// Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
50363// For example, given a tensor of shape `(A, B, C, D)`;
50364//
50365// If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
50366//   and each tensor in `output` will have shape `(B, C, D)`. (Note that the
50367//   dimension unpacked along is gone, unlike `split`).
50368//
50369// If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
50370//   and each tensor in `output` will have shape `(A, C, D)`.
50371// Etc.
50372//
50373// This is the opposite of `pack`.
50374//
50375// Arguments:
50376//	value: 1-D or higher, with `axis` dimension size equal to `num`.
50377//
50378//
50379// Returns The list of tensors unpacked from `value`.
50380func Unpack(scope *Scope, value tf.Output, num int64, optional ...UnpackAttr) (output []tf.Output) {
50381	if scope.Err() != nil {
50382		return
50383	}
50384	attrs := map[string]interface{}{"num": num}
50385	for _, a := range optional {
50386		a(attrs)
50387	}
50388	opspec := tf.OpSpec{
50389		Type: "Unpack",
50390		Input: []tf.Input{
50391			value,
50392		},
50393		Attrs: attrs,
50394	}
50395	op := scope.AddOperation(opspec)
50396	if scope.Err() != nil {
50397		return
50398	}
50399	var idx int
50400	var err error
50401	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
50402		scope.UpdateErr("Unpack", err)
50403		return
50404	}
50405	return output
50406}
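
// exampleUnpack is a hand-written, illustrative sketch (not generated):
// unpacking a (2, 3) tensor along axis 0 into two tensors of shape (3).
func exampleUnpack() []tf.Output {
	s := NewScope()
	value := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
	return Unpack(s, value, 2, UnpackAxis(0))
}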
50407
50408// Creates a dataset that concatenates `input_dataset` with `another_dataset`.
50409func ConcatenateDataset(scope *Scope, input_dataset tf.Output, another_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
50410	if scope.Err() != nil {
50411		return
50412	}
50413	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
50414	opspec := tf.OpSpec{
50415		Type: "ConcatenateDataset",
50416		Input: []tf.Input{
50417			input_dataset, another_dataset,
50418		},
50419		Attrs: attrs,
50420	}
50421	op := scope.AddOperation(opspec)
50422	return op.Output(0)
50423}
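
// exampleConcatenateDataset is a hand-written, illustrative sketch (not
// generated): concatenating two range datasets into one that yields 0 through
// 9, with both inputs built from the same element type and shape metadata.
func exampleConcatenateDataset() tf.Output {
	s := NewScope()
	types := []tf.DataType{tf.Int64}
	shapes := []tf.Shape{tf.ScalarShape()}
	one := Const(s, int64(1))
	a := RangeDataset(s, Const(s, int64(0)), Const(s, int64(5)), one, types, shapes)
	b := RangeDataset(s, Const(s, int64(5)), Const(s, int64(10)), one, types, shapes)
	return ConcatenateDataset(s, a, b, types, shapes)
}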
50424
50425// TextLineReaderV2Attr is an optional argument to TextLineReaderV2.
50426type TextLineReaderV2Attr func(optionalAttr)
50427
50428// TextLineReaderV2SkipHeaderLines sets the optional skip_header_lines attribute to value.
50429//
50430// value: Number of lines to skip from the beginning of every file.
50431// If not specified, defaults to 0
50432func TextLineReaderV2SkipHeaderLines(value int64) TextLineReaderV2Attr {
50433	return func(m optionalAttr) {
50434		m["skip_header_lines"] = value
50435	}
50436}
50437
50438// TextLineReaderV2Container sets the optional container attribute to value.
50439//
50440// value: If non-empty, this reader is placed in the given container.
50441// Otherwise, a default container is used.
50442// If not specified, defaults to ""
50443func TextLineReaderV2Container(value string) TextLineReaderV2Attr {
50444	return func(m optionalAttr) {
50445		m["container"] = value
50446	}
50447}
50448
50449// TextLineReaderV2SharedName sets the optional shared_name attribute to value.
50450//
50451// value: If non-empty, this reader is named in the given bucket
50452// with this shared_name. Otherwise, the node name is used instead.
50453// If not specified, defaults to ""
50454func TextLineReaderV2SharedName(value string) TextLineReaderV2Attr {
50455	return func(m optionalAttr) {
50456		m["shared_name"] = value
50457	}
50458}
50459
50460// A Reader that outputs the lines of a file delimited by '\n'.
50461//
50462// Returns The handle to reference the Reader.
50463func TextLineReaderV2(scope *Scope, optional ...TextLineReaderV2Attr) (reader_handle tf.Output) {
50464	if scope.Err() != nil {
50465		return
50466	}
50467	attrs := map[string]interface{}{}
50468	for _, a := range optional {
50469		a(attrs)
50470	}
50471	opspec := tf.OpSpec{
50472		Type: "TextLineReaderV2",
50473
50474		Attrs: attrs,
50475	}
50476	op := scope.AddOperation(opspec)
50477	return op.Output(0)
50478}
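
// exampleTextLineReaderV2 is a hand-written, illustrative sketch (not
// generated): a text-line reader that skips a one-line header in every file
// and can be shared by name; "csv_reader" is an arbitrary example name.
func exampleTextLineReaderV2() tf.Output {
	s := NewScope()
	return TextLineReaderV2(s,
		TextLineReaderV2SkipHeaderLines(1),
		TextLineReaderV2SharedName("csv_reader"))
}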
50479
50480// EagerPyFuncAttr is an optional argument to EagerPyFunc.
50481type EagerPyFuncAttr func(optionalAttr)
50482
50483// EagerPyFuncIsAsync sets the optional is_async attribute to value.
50484// If not specified, defaults to false
50485func EagerPyFuncIsAsync(value bool) EagerPyFuncAttr {
50486	return func(m optionalAttr) {
50487		m["is_async"] = value
50488	}
50489}
50490
50491// Eagerly executes a Python function to compute func(input)->output. The
50492//
50493// semantics of the input, output, and attributes are the same as those for
50494// PyFunc.
50495func EagerPyFunc(scope *Scope, input []tf.Output, token string, Tout []tf.DataType, optional ...EagerPyFuncAttr) (output []tf.Output) {
50496	if scope.Err() != nil {
50497		return
50498	}
50499	attrs := map[string]interface{}{"token": token, "Tout": Tout}
50500	for _, a := range optional {
50501		a(attrs)
50502	}
50503	opspec := tf.OpSpec{
50504		Type: "EagerPyFunc",
50505		Input: []tf.Input{
50506			tf.OutputList(input),
50507		},
50508		Attrs: attrs,
50509	}
50510	op := scope.AddOperation(opspec)
50511	if scope.Err() != nil {
50512		return
50513	}
50514	var idx int
50515	var err error
50516	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
50517		scope.UpdateErr("EagerPyFunc", err)
50518		return
50519	}
50520	return output
50521}
50522
50523// XlaShardingAttr is an optional argument to XlaSharding.
50524type XlaShardingAttr func(optionalAttr)
50525
50526// XlaShardingSharding sets the optional sharding attribute to value.
50527// If not specified, defaults to ""
50528func XlaShardingSharding(value string) XlaShardingAttr {
50529	return func(m optionalAttr) {
50530		m["sharding"] = value
50531	}
50532}
50533
50534// An op which shards the input based on the given sharding attribute.
50535func XlaSharding(scope *Scope, input tf.Output, optional ...XlaShardingAttr) (output tf.Output) {
50536	if scope.Err() != nil {
50537		return
50538	}
50539	attrs := map[string]interface{}{}
50540	for _, a := range optional {
50541		a(attrs)
50542	}
50543	opspec := tf.OpSpec{
50544		Type: "XlaSharding",
50545		Input: []tf.Input{
50546			input,
50547		},
50548		Attrs: attrs,
50549	}
50550	op := scope.AddOperation(opspec)
50551	return op.Output(0)
50552}
50553
50554// StatelessRandomUniformFullIntV2Attr is an optional argument to StatelessRandomUniformFullIntV2.
50555type StatelessRandomUniformFullIntV2Attr func(optionalAttr)
50556
50557// StatelessRandomUniformFullIntV2Dtype sets the optional dtype attribute to value.
50558//
50559// value: The type of the output.
50560// If not specified, defaults to DT_UINT64
50561func StatelessRandomUniformFullIntV2Dtype(value tf.DataType) StatelessRandomUniformFullIntV2Attr {
50562	return func(m optionalAttr) {
50563		m["dtype"] = value
50564	}
50565}
50566
50567// Outputs deterministic pseudorandom integers from a uniform distribution.
50568//
50569// The generated values are uniform integers covering the whole range of `dtype`.
50570//
50571// The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
50572//
50573// Arguments:
50574//	shape: The shape of the output tensor.
50575//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
50576//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
50577//	alg: The RNG algorithm (shape int32[]).
50578//
50579// Returns Random values with specified shape.
50580func StatelessRandomUniformFullIntV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessRandomUniformFullIntV2Attr) (output tf.Output) {
50581	if scope.Err() != nil {
50582		return
50583	}
50584	attrs := map[string]interface{}{}
50585	for _, a := range optional {
50586		a(attrs)
50587	}
50588	opspec := tf.OpSpec{
50589		Type: "StatelessRandomUniformFullIntV2",
50590		Input: []tf.Input{
50591			shape, key, counter, alg,
50592		},
50593		Attrs: attrs,
50594	}
50595	op := scope.AddOperation(opspec)
50596	return op.Output(0)
50597}
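
// exampleStatelessRandomUniformFullIntV2 is a hand-written, illustrative
// sketch (not generated): a deterministic (2, 3) tensor of int32 values drawn
// from the full int32 range. The alg value 1 is assumed here to select the
// Philox algorithm, whose counter has shape uint64[2]; uint64 tensor support
// in the Go Const helper is likewise assumed.
func exampleStatelessRandomUniformFullIntV2() tf.Output {
	s := NewScope()
	return StatelessRandomUniformFullIntV2(s,
		Const(s, []int32{2, 3}),  // shape
		Const(s, []uint64{42}),   // key
		Const(s, []uint64{0, 0}), // counter
		Const(s, int32(1)),       // alg (assumed: 1 = Philox)
		StatelessRandomUniformFullIntV2Dtype(tf.Int32))
}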
50598
50599// FinalizeDatasetAttr is an optional argument to FinalizeDataset.
50600type FinalizeDatasetAttr func(optionalAttr)
50601
50602// FinalizeDatasetHasCapturedRef sets the optional has_captured_ref attribute to value.
50603// If not specified, defaults to false
50604func FinalizeDatasetHasCapturedRef(value bool) FinalizeDatasetAttr {
50605	return func(m optionalAttr) {
50606		m["has_captured_ref"] = value
50607	}
50608}
50609
50610// Creates a dataset by applying `tf.data.Options` to `input_dataset`.
50611//
50612// Arguments:
50613//	input_dataset: A variant tensor representing the input dataset.
50614//
50615//
50616func FinalizeDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...FinalizeDatasetAttr) (handle tf.Output) {
50617	if scope.Err() != nil {
50618		return
50619	}
50620	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
50621	for _, a := range optional {
50622		a(attrs)
50623	}
50624	opspec := tf.OpSpec{
50625		Type: "FinalizeDataset",
50626		Input: []tf.Input{
50627			input_dataset,
50628		},
50629		Attrs: attrs,
50630	}
50631	op := scope.AddOperation(opspec)
50632	return op.Output(0)
50633}
50634
50635// QuantizeAndDequantizeAttr is an optional argument to QuantizeAndDequantize.
50636type QuantizeAndDequantizeAttr func(optionalAttr)
50637
50638// QuantizeAndDequantizeSignedInput sets the optional signed_input attribute to value.
50639// If not specified, defaults to true
50640func QuantizeAndDequantizeSignedInput(value bool) QuantizeAndDequantizeAttr {
50641	return func(m optionalAttr) {
50642		m["signed_input"] = value
50643	}
50644}
50645
50646// QuantizeAndDequantizeNumBits sets the optional num_bits attribute to value.
50647// If not specified, defaults to 8
50648func QuantizeAndDequantizeNumBits(value int64) QuantizeAndDequantizeAttr {
50649	return func(m optionalAttr) {
50650		m["num_bits"] = value
50651	}
50652}
50653
50654// QuantizeAndDequantizeRangeGiven sets the optional range_given attribute to value.
50655// If not specified, defaults to false
50656func QuantizeAndDequantizeRangeGiven(value bool) QuantizeAndDequantizeAttr {
50657	return func(m optionalAttr) {
50658		m["range_given"] = value
50659	}
50660}
50661
50662// QuantizeAndDequantizeInputMin sets the optional input_min attribute to value.
50663// If not specified, defaults to 0
50664func QuantizeAndDequantizeInputMin(value float32) QuantizeAndDequantizeAttr {
50665	return func(m optionalAttr) {
50666		m["input_min"] = value
50667	}
50668}
50669
50670// QuantizeAndDequantizeInputMax sets the optional input_max attribute to value.
50671// If not specified, defaults to 0
50672func QuantizeAndDequantizeInputMax(value float32) QuantizeAndDequantizeAttr {
50673	return func(m optionalAttr) {
50674		m["input_max"] = value
50675	}
50676}
50677
50678// Use QuantizeAndDequantizeV2 instead.
50679//
50680// DEPRECATED at GraphDef version 22: Replaced by QuantizeAndDequantizeV2
50681func QuantizeAndDequantize(scope *Scope, input tf.Output, optional ...QuantizeAndDequantizeAttr) (output tf.Output) {
50682	if scope.Err() != nil {
50683		return
50684	}
50685	attrs := map[string]interface{}{}
50686	for _, a := range optional {
50687		a(attrs)
50688	}
50689	opspec := tf.OpSpec{
50690		Type: "QuantizeAndDequantize",
50691		Input: []tf.Input{
50692			input,
50693		},
50694		Attrs: attrs,
50695	}
50696	op := scope.AddOperation(opspec)
50697	return op.Output(0)
50698}
50699
50700// Sends the named tensor to another XLA computation. Wraps the XLA Send operator
50701//
50702// documented at
50703//  https://www.tensorflow.org/performance/xla/operation_semantics#send .
50704//
50705// Arguments:
50706//	tensor: The tensor to send.
50707//	tensor_name: A string key that identifies the channel.
50708//
50709// Returns the created operation.
50710func XlaSend(scope *Scope, tensor tf.Output, tensor_name string) (o *tf.Operation) {
50711	if scope.Err() != nil {
50712		return
50713	}
50714	attrs := map[string]interface{}{"tensor_name": tensor_name}
50715	opspec := tf.OpSpec{
50716		Type: "XlaSend",
50717		Input: []tf.Input{
50718			tensor,
50719		},
50720		Attrs: attrs,
50721	}
50722	return scope.AddOperation(opspec)
50723}
50724
50725// Returns the index of a data point that should be added to the seed set.
50726//
50727// Entries in distances are assumed to be squared distances of candidate points to
50728// the already sampled centers in the seed set. The op constructs one Markov chain
50729// of the k-MC^2 algorithm and returns the index of one candidate point to be added
50730// as an additional cluster center.
50731//
50732// Arguments:
50733//	distances: Vector with squared distances to the closest previously sampled cluster center
50734// for each candidate point.
50735//	seed: Scalar. Seed for initializing the random number generator.
50736//
50737// Returns Scalar with the index of the sampled point.
50738func KMC2ChainInitialization(scope *Scope, distances tf.Output, seed tf.Output) (index tf.Output) {
50739	if scope.Err() != nil {
50740		return
50741	}
50742	opspec := tf.OpSpec{
50743		Type: "KMC2ChainInitialization",
50744		Input: []tf.Input{
50745			distances, seed,
50746		},
50747	}
50748	op := scope.AddOperation(opspec)
50749	return op.Output(0)
50750}
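
// exampleKMC2ChainInitialization is a hand-written, illustrative sketch (not
// generated): choosing one candidate index given squared distances of the
// candidate points to the current seed set.
func exampleKMC2ChainInitialization() tf.Output {
	s := NewScope()
	distances := Const(s, []float32{0.5, 2.0, 0.1, 3.5})
	return KMC2ChainInitialization(s, distances, Const(s, int64(7)))
}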
50751
50752// TakeManySparseFromTensorsMapAttr is an optional argument to TakeManySparseFromTensorsMap.
50753type TakeManySparseFromTensorsMapAttr func(optionalAttr)
50754
50755// TakeManySparseFromTensorsMapContainer sets the optional container attribute to value.
50756//
50757// value: The container name for the `SparseTensorsMap` read by this op.
50758// If not specified, defaults to ""
50759func TakeManySparseFromTensorsMapContainer(value string) TakeManySparseFromTensorsMapAttr {
50760	return func(m optionalAttr) {
50761		m["container"] = value
50762	}
50763}
50764
50765// TakeManySparseFromTensorsMapSharedName sets the optional shared_name attribute to value.
50766//
50767// value: The shared name for the `SparseTensorsMap` read by this op.
50768// It should not be blank; rather the `shared_name` or unique Operation name
50769// of the Op that created the original `SparseTensorsMap` should be used.
50770// If not specified, defaults to ""
50771func TakeManySparseFromTensorsMapSharedName(value string) TakeManySparseFromTensorsMapAttr {
50772	return func(m optionalAttr) {
50773		m["shared_name"] = value
50774	}
50775}
50776
50777// Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
50778//
50779// The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where
50780// `N` is the minibatch size and the rows correspond to the output handles of
50781// `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`.  The ranks of the
50782// original `SparseTensor` objects that went into the given input ops must all
50783// match.  When the final `SparseTensor` is created, it has rank one
50784// higher than the ranks of the incoming `SparseTensor` objects
50785// (they have been concatenated along a new row dimension on the left).
50786//
50787// The output `SparseTensor` object's shape values for all dimensions but the
50788// first are the max across the input `SparseTensor` objects' shape values
50789// for the corresponding dimensions.  Its first shape value is `N`, the minibatch
50790// size.
50791//
50792// The input `SparseTensor` objects' indices are assumed ordered in
50793// standard lexicographic order.  If this is not the case, after this
50794// step run `SparseReorder` to restore index ordering.
50795//
50796// For example, if the handles represent an input, which is a `[2, 3]` matrix
50797// representing two original `SparseTensor` objects:
50798//
50799// ```
50800//     index = [ 0]
50801//             [10]
50802//             [20]
50803//     values = [1, 2, 3]
50804//     shape = [50]
50805// ```
50806//
50807// and
50808//
50809// ```
50810//     index = [ 2]
50811//             [10]
50812//     values = [4, 5]
50813//     shape = [30]
50814// ```
50815//
50816// then the final `SparseTensor` will be:
50817//
50818// ```
50819//     index = [0  0]
50820//             [0 10]
50821//             [0 20]
50822//             [1  2]
50823//             [1 10]
50824//     values = [1, 2, 3, 4, 5]
50825//     shape = [2 50]
50826// ```
50827//
50828// Arguments:
50829//	sparse_handles: 1-D, The `N` serialized `SparseTensor` objects.
50830// Shape: `[N]`.
50831//	dtype: The `dtype` of the `SparseTensor` objects stored in the
50832// `SparseTensorsMap`.
50833//
50834// Returns:
50835//	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
50836//	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
50837//	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
50838func TakeManySparseFromTensorsMap(scope *Scope, sparse_handles tf.Output, dtype tf.DataType, optional ...TakeManySparseFromTensorsMapAttr) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
50839	if scope.Err() != nil {
50840		return
50841	}
50842	attrs := map[string]interface{}{"dtype": dtype}
50843	for _, a := range optional {
50844		a(attrs)
50845	}
50846	opspec := tf.OpSpec{
50847		Type: "TakeManySparseFromTensorsMap",
50848		Input: []tf.Input{
50849			sparse_handles,
50850		},
50851		Attrs: attrs,
50852	}
50853	op := scope.AddOperation(opspec)
50854	return op.Output(0), op.Output(1), op.Output(2)
50855}
50856
50857// Makes the summary of quantiles for the batch.
50858//
50859// An op that takes a list of tensors (one tensor per feature) and outputs the
50860// quantile summaries for each tensor.
50861//
50862// Arguments:
50863//	float_values: float; List of Rank 1 Tensors each containing values for a single feature.
50864//	example_weights: float; Rank 1 Tensor with weights per instance.
50865//	epsilon: float; The required maximum approximation error.
50866//
50867// Returns float; List of Rank 2 Tensors each containing the quantile summary
50868// (value, weight, min_rank, max_rank) of a single feature.
50869func BoostedTreesMakeQuantileSummaries(scope *Scope, float_values []tf.Output, example_weights tf.Output, epsilon tf.Output) (summaries []tf.Output) {
50870	if scope.Err() != nil {
50871		return
50872	}
50873	opspec := tf.OpSpec{
50874		Type: "BoostedTreesMakeQuantileSummaries",
50875		Input: []tf.Input{
50876			tf.OutputList(float_values), example_weights, epsilon,
50877		},
50878	}
50879	op := scope.AddOperation(opspec)
50880	if scope.Err() != nil {
50881		return
50882	}
50883	var idx int
50884	var err error
50885	if summaries, idx, err = makeOutputList(op, idx, "summaries"); err != nil {
50886		scope.UpdateErr("BoostedTreesMakeQuantileSummaries", err)
50887		return
50888	}
50889	return summaries
50890}
50891
50892// Computes the inverse permutation of a tensor.
50893//
50894// This operation computes the inverse of an index permutation. It takes a 1-D
50895// integer tensor `x`, which represents the indices of a zero-based array, and
50896// swaps each value with its index position. In other words, for an output tensor
50897// `y` and an input tensor `x`, this operation computes the following:
50898//
50899// `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
50900//
50901// The values must include 0. There can be no duplicate values or negative values.
50902//
50903// For example:
50904//
50905// ```
50906// # tensor `x` is [3, 4, 0, 2, 1]
50907// invert_permutation(x) ==> [2, 4, 3, 0, 1]
50908// ```
50909//
50910// Arguments:
50911//	x: 1-D.
50912//
50913// Returns 1-D.
50914func InvertPermutation(scope *Scope, x tf.Output) (y tf.Output) {
50915	if scope.Err() != nil {
50916		return
50917	}
50918	opspec := tf.OpSpec{
50919		Type: "InvertPermutation",
50920		Input: []tf.Input{
50921			x,
50922		},
50923	}
50924	op := scope.AddOperation(opspec)
50925	return op.Output(0)
50926}
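
// exampleInvertPermutation is a hand-written, illustrative sketch (not
// generated): the documentation's example, mapping [3, 4, 0, 2, 1] to
// [2, 4, 3, 0, 1].
func exampleInvertPermutation() tf.Output {
	s := NewScope()
	return InvertPermutation(s, Const(s, []int32{3, 4, 0, 2, 1}))
}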
50927
50928// Locks a mutex resource.  The output is the lock.  So long as the lock tensor
50929//
50930// is alive, any other request to use `MutexLock` with this mutex will wait.
50931//
50932// This is particularly useful for creating a critical section when used in
50933// conjunction with `MutexLockIdentity`:
50934//
50935// ```python
50936//
50937// mutex = mutex_v2(
50938//   shared_name=handle_name, container=container, name=name)
50939//
50940// def execute_in_critical_section(fn, *args, **kwargs):
50941//   lock = gen_resource_variable_ops.mutex_lock(mutex)
50942//
50943//   with ops.control_dependencies([lock]):
50944//     r = fn(*args, **kwargs)
50945//
50946//   with ops.control_dependencies(nest.flatten(r)):
50947//     with ops.colocate_with(mutex):
50948//       ensure_lock_exists = mutex_lock_identity(lock)
50949//
50950//     # Make sure that if any element of r is accessed, all of
50951//     # them are executed together.
50952//     r = nest.map_structure(tf.identity, r)
50953//
50954//   with ops.control_dependencies([ensure_lock_exists]):
50955//     return nest.map_structure(tf.identity, r)
50956// ```
50957//
50958// While `fn` is running in the critical section, no other functions which wish to
50959// use this critical section may run.
50960//
50961// Often the use case is that two executions of the same graph, in parallel,
50962// wish to run `fn`; and we wish to ensure that only one of them executes
50963// at a time.  This is especially important if `fn` modifies one or more
50964// variables at a time.
50965//
50966// It is also useful if two separate functions must share a resource, but we
50967// wish to ensure the usage is exclusive.
50968//
50969// Arguments:
50970//	mutex: The mutex resource to lock.
50971//
50972// Returns A tensor that keeps a shared pointer to a lock on the mutex;
50973// when the Tensor is destroyed, the use count on the shared pointer is decreased
50974// by 1.  When it reaches 0, the lock is released.
50975func MutexLock(scope *Scope, mutex tf.Output) (mutex_lock tf.Output) {
50976	if scope.Err() != nil {
50977		return
50978	}
50979	opspec := tf.OpSpec{
50980		Type: "MutexLock",
50981		Input: []tf.Input{
50982			mutex,
50983		},
50984	}
50985	op := scope.AddOperation(opspec)
50986	return op.Output(0)
50987}
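
// exampleMutexLock is a hand-written, illustrative sketch (not generated):
// acquiring a lock on a shared mutex. It assumes the generated MutexV2
// wrapper and its MutexV2SharedName attribute from elsewhere in this package.
func exampleMutexLock() tf.Output {
	s := NewScope()
	mutex := MutexV2(s, MutexV2SharedName("my_mutex"))
	return MutexLock(s, mutex)
}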
50988
50989// Creates a dataset that passes a sliding window over `input_dataset`.
50990//
50991// Arguments:
50992//
50993//	window_size: A scalar representing the number of elements in the
50994// sliding window.
50995//	window_shift: A scalar representing the steps moving the sliding window
50996// forward in one iteration. It must be positive.
50997//	window_stride: A scalar representing the stride of the input elements of the sliding window.
50998// It must be positive.
50999//
51000//
51001func SlidingWindowDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output, window_shift tf.Output, window_stride tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
51002	if scope.Err() != nil {
51003		return
51004	}
51005	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
51006	opspec := tf.OpSpec{
51007		Type: "SlidingWindowDataset",
51008		Input: []tf.Input{
51009			input_dataset, window_size, window_shift, window_stride,
51010		},
51011		Attrs: attrs,
51012	}
51013	op := scope.AddOperation(opspec)
51014	return op.Output(0)
51015}
51016
51017// Returns the batched diagonal part of a batched tensor.
51018//
51019// This operation returns a tensor with the `diagonal` part
51020// of the batched `input`. The `diagonal` part is computed as follows:
51021//
51022// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
51023// tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
51024//
51025// `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
51026//
51027// The input must be at least a matrix.
51028//
51029// For example:
51030//
51031// ```
51032// # 'input' is [[[1, 0, 0, 0]
51033//                [0, 2, 0, 0]
51034//                [0, 0, 3, 0]
51035//                [0, 0, 0, 4]],
51036//               [[5, 0, 0, 0]
51037//                [0, 6, 0, 0]
51038//                [0, 0, 7, 0]
51039//                [0, 0, 0, 8]]]
51040//
51041// and input.shape = (2, 4, 4)
51042//
51043// tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
51044//
51045// which has shape (2, 4)
51046// ```
51047//
51048// Arguments:
51049//	input: Rank `k` tensor where `k >= 2`.
51050//
51051// Returns The extracted diagonal(s) having shape
51052// `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.
51053func MatrixDiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
51054	if scope.Err() != nil {
51055		return
51056	}
51057	opspec := tf.OpSpec{
51058		Type: "MatrixDiagPart",
51059		Input: []tf.Input{
51060			input,
51061		},
51062	}
51063	op := scope.AddOperation(opspec)
51064	return op.Output(0)
51065}
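
// exampleMatrixDiagPart is a hand-written, illustrative sketch (not
// generated): extracting the diagonal [1, 2] from a 2x2 diagonal matrix.
func exampleMatrixDiagPart() tf.Output {
	s := NewScope()
	return MatrixDiagPart(s, Const(s, [][]float32{{1, 0}, {0, 2}}))
}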
51066
51067// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
51068//
51069// Arguments:
51070//
51071//	num_threads: Identifies the number of threads to use for the private threadpool.
51072//
51073//
51074func ExperimentalPrivateThreadPoolDataset(scope *Scope, input_dataset tf.Output, num_threads tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
51075	if scope.Err() != nil {
51076		return
51077	}
51078	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
51079	opspec := tf.OpSpec{
51080		Type: "ExperimentalPrivateThreadPoolDataset",
51081		Input: []tf.Input{
51082			input_dataset, num_threads,
51083		},
51084		Attrs: attrs,
51085	}
51086	op := scope.AddOperation(opspec)
51087	return op.Output(0)
51088}

// BatchToSpace for 4-D tensors of type T.
//
// This is a legacy version of the more general BatchToSpaceND.
//
// Rearranges (permutes) data from batch into blocks of spatial data, followed by
// cropping. This is the reverse transformation of SpaceToBatch. More specifically,
// this op outputs a copy of the input tensor where values from the `batch`
// dimension are moved in spatial blocks to the `height` and `width` dimensions,
// followed by cropping along the `height` and `width` dimensions.
//
// Arguments:
//	input: 4-D tensor with shape
// `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
//   depth]`. Note that the batch size of the input tensor must be divisible by
// `block_size * block_size`.
//	crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
// how many elements to crop from the intermediate result across the spatial
// dimensions as follows:
//
//     crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
//
//
// Returns 4-D with shape `[batch, height, width, depth]`, where:
//
//       height = height_pad - crop_top - crop_bottom
//       width = width_pad - crop_left - crop_right
//
// The attr `block_size` must be greater than one. It indicates the block size.
//
// Some examples:
//
// (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
//
// ```
// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
// ```
//
// The output tensor has shape `[1, 2, 2, 1]` and value:
//
// ```
// x = [[[[1], [2]], [[3], [4]]]]
// ```
//
// (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
//
// ```
// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
// ```
//
// The output tensor has shape `[1, 2, 2, 3]` and value:
//
// ```
// x = [[[[1, 2, 3], [4, 5, 6]],
//       [[7, 8, 9], [10, 11, 12]]]]
// ```
//
// (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
//
// ```
// x = [[[[1], [3]], [[9], [11]]],
//      [[[2], [4]], [[10], [12]]],
//      [[[5], [7]], [[13], [15]]],
//      [[[6], [8]], [[14], [16]]]]
// ```
//
// The output tensor has shape `[1, 4, 4, 1]` and value:
//
// ```
// x = [[[[1],   [2],  [3],  [4]],
//       [[5],   [6],  [7],  [8]],
//       [[9],  [10], [11],  [12]],
//       [[13], [14], [15],  [16]]]]
// ```
//
// (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
//
// ```
// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
//      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
// ```
//
// The output tensor has shape `[2, 2, 4, 1]` and value:
//
// ```
// x = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]],
//      [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
// ```
func BatchToSpace(scope *Scope, input tf.Output, crops tf.Output, block_size int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"block_size": block_size}
	opspec := tf.OpSpec{
		Type: "BatchToSpace",
		Input: []tf.Input{
			input, crops,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
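
// Editorial sketch, not generated code: reproducing example (1) above. A
// [4, 1, 1, 1] batch with block_size 2 and no cropping becomes a single
// [1, 2, 2, 1] spatial block.
func exampleBatchToSpace(s *Scope) tf.Output {
	input := Const(s, [][][][]int32{{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}})
	crops := Const(s, [][]int32{{0, 0}, {0, 0}})
	return BatchToSpace(s, input, crops, 2)
}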

// CTCGreedyDecoderAttr is an optional argument to CTCGreedyDecoder.
type CTCGreedyDecoderAttr func(optionalAttr)

// CTCGreedyDecoderMergeRepeated sets the optional merge_repeated attribute to value.
//
// value: If True, merge repeated classes in output.
// If not specified, defaults to false
func CTCGreedyDecoderMergeRepeated(value bool) CTCGreedyDecoderAttr {
	return func(m optionalAttr) {
		m["merge_repeated"] = value
	}
}

// CTCGreedyDecoderBlankIndex sets the optional blank_index attribute to value.
// If not specified, defaults to -1
func CTCGreedyDecoderBlankIndex(value int64) CTCGreedyDecoderAttr {
	return func(m optionalAttr) {
		m["blank_index"] = value
	}
}

// Performs greedy decoding on the logits given in inputs.
//
// A note about the attribute merge_repeated: if enabled, when
// consecutive logits' maximum indices are the same, only the first of
// these is emitted.  Labeling the blank '*', the sequence "A B B * B B"
// becomes "A B B" if merge_repeated = True and "A B B B B" if
// merge_repeated = False.
//
// Regardless of the value of merge_repeated, if the maximum index of a given
// time and batch corresponds to the blank, index `(num_classes - 1)`, no new
// element is emitted.
//
// Arguments:
//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
//	sequence_length: A vector containing sequence lengths, size `(batch_size)`.
//
// Returns:
//	decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`,
// of a `SparseTensor<int64, 2>`.  The rows store: [batch, time].
//	decoded_values: Values vector, size: `(total_decoded_outputs)`,
// of a `SparseTensor<int64, 2>`.  The vector stores the decoded classes.
//	decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor.
// Values are: `[batch_size, max_decoded_length]`.
//	log_probability: Matrix, size `(batch_size x 1)`, containing sequence
// log-probabilities.
func CTCGreedyDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, optional ...CTCGreedyDecoderAttr) (decoded_indices tf.Output, decoded_values tf.Output, decoded_shape tf.Output, log_probability tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CTCGreedyDecoder",
		Input: []tf.Input{
			inputs, sequence_length,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}
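
// Editorial sketch, not generated code: greedy decoding with repeated classes
// merged, so the logit sequence "A B B * B B" decodes to "A B B" as described
// above. The logits and sequence lengths are assumed to be built elsewhere.
func exampleCTCGreedyDecoder(s *Scope, logits, seqLen tf.Output) (indices, values, shape, logProb tf.Output) {
	return CTCGreedyDecoder(s, logits, seqLen, CTCGreedyDecoderMergeRepeated(true))
}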

// Outputs deterministic pseudorandom integers from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[minval, maxval)`.
//
// The outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxval`.
//
// Arguments:
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//	minval: Minimum value (inclusive, scalar).
//	maxval: Maximum value (exclusive, scalar).
//
// Returns Random values with specified shape.
func StatelessRandomUniformInt(scope *Scope, shape tf.Output, seed tf.Output, minval tf.Output, maxval tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniformInt",
		Input: []tf.Input{
			shape, seed, minval, maxval,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
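
// Editorial sketch, not generated code: because the output depends only on
// shape, seed, minval, and maxval, building this subgraph twice with the same
// constants yields identical values.
func exampleStatelessRandomUniformInt(s *Scope) tf.Output {
	shape := Const(s, []int32{3})
	seed := Const(s, []int64{42, 7}) // 2 seeds, shape [2]
	minval := Const(s, int64(0))
	maxval := Const(s, int64(100))
	return StatelessRandomUniformInt(s, shape, seed, minval, maxval)
}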

// Creates a dataset that executes a SQL query and emits rows of the result set.
//
// Arguments:
//	driver_name: The database type. Currently, the only supported type is 'sqlite'.
//	data_source_name: A connection string to connect to the database.
//	query: A SQL query to execute.
//
//
func SqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output, query tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "SqlDataset",
		Input: []tf.Input{
			driver_name, data_source_name, query,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
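
// Editorial sketch, not generated code: reading two string columns from a
// hypothetical SQLite file; the connection string and query are illustrative.
func exampleSqlDataset(s *Scope) tf.Output {
	driver := Const(s, "sqlite")
	source := Const(s, "file:data.db")
	query := Const(s, "SELECT name, city FROM users")
	types := []tf.DataType{tf.String, tf.String}
	shapes := []tf.Shape{tf.ScalarShape(), tf.ScalarShape()}
	return SqlDataset(s, driver, source, query, types, shapes)
}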

// IsotonicRegressionAttr is an optional argument to IsotonicRegression.
type IsotonicRegressionAttr func(optionalAttr)

// IsotonicRegressionOutputDtype sets the optional output_dtype attribute to value.
//
// value: Dtype of output.
// If not specified, defaults to DT_FLOAT
func IsotonicRegressionOutputDtype(value tf.DataType) IsotonicRegressionAttr {
	return func(m optionalAttr) {
		m["output_dtype"] = value
	}
}

// Solves a batch of isotonic regression problems.
//
// Arguments:
//	input: A (batch_size, dim)-tensor holding a batch of inputs.
//
// Returns:
//	output: A (batch_size, dim)-tensor holding the per-batch element solutions.
//	segments: An int32 (batch_size, dim)-tensor with the segments.
func IsotonicRegression(scope *Scope, input tf.Output, optional ...IsotonicRegressionAttr) (output tf.Output, segments tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "IsotonicRegression",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
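
// Editorial sketch, not generated code: solving a batch of isotonic
// regression problems while requesting float64 solutions through the
// optional output_dtype attribute.
func exampleIsotonicRegression(s *Scope, batch tf.Output) (solutions, segments tf.Output) {
	return IsotonicRegression(s, batch, IsotonicRegressionOutputDtype(tf.Double))
}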

// Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.
//
// The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
// `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
// input channel is processed independently of the others with its own structuring
// function. The `output` tensor has shape
// `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
// tensor depend on the `padding` algorithm. We currently only support the default
// "NHWC" `data_format`.
//
// In detail, the grayscale morphological 2-D dilation is the max-sum correlation
// (for consistency with `conv2d`, we use unmirrored filters):
//
//     output[b, y, x, c] =
//        max_{dy, dx} input[b,
//                           strides[1] * y + rates[1] * dy,
//                           strides[2] * x + rates[2] * dx,
//                           c] +
//                     filter[dy, dx, c]
//
// Max-pooling is a special case when the filter has size equal to the pooling
// kernel size and contains all zeros.
//
// Note on duality: The dilation of `input` by the `filter` is equal to the
// negation of the erosion of `-input` by the reflected `filter`.
//
// Arguments:
//	input: 4-D with shape `[batch, in_height, in_width, depth]`.
//	filter: 3-D with shape `[filter_height, filter_width, depth]`.
//	strides: The stride of the sliding window for each dimension of the input
// tensor. Must be: `[1, stride_height, stride_width, 1]`.
//	rates: The input stride for atrous morphological dilation. Must be:
// `[1, rate_height, rate_width, 1]`.
//	padding: The type of padding algorithm to use.
//
// Returns 4-D with shape `[batch, out_height, out_width, depth]`.
func Dilation2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, rates []int64, padding string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
	opspec := tf.OpSpec{
		Type: "Dilation2D",
		Input: []tf.Input{
			input, filter,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
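
// Editorial sketch, not generated code: plain (non-atrous) grayscale dilation
// with unit strides and rates; "SAME" padding keeps the input's spatial size.
// Per the note above, an all-zero filter of the pooling kernel's size would
// reduce this to max-pooling.
func exampleDilation2D(s *Scope, images, filter tf.Output) tf.Output {
	return Dilation2D(s, images, filter,
		[]int64{1, 1, 1, 1}, []int64{1, 1, 1, 1}, "SAME")
}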

// Adjust the hue of one or more images.
//
// `images` is a tensor of at least 3 dimensions.  The last dimension is
// interpreted as channels, and must be three.
//
// The input image is considered in the RGB colorspace. Conceptually, the RGB
// colors are first mapped into HSV, a delta is then applied to all the hue
// values, and the result is mapped back to the RGB colorspace.
//
// Arguments:
//	images: Images to adjust.  At least 3-D.
//	delta: A float delta to add to the hue.
//
// Returns The hue-adjusted image or images.
func AdjustHue(scope *Scope, images tf.Output, delta tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AdjustHue",
		Input: []tf.Input{
			images, delta,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
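
// Editorial sketch, not generated code: shifting the hue of a float RGB image
// tensor by a small positive delta.
func exampleAdjustHue(s *Scope, images tf.Output) tf.Output {
	delta := Const(s, float32(0.1))
	return AdjustHue(s, images, delta)
}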

// StageAttr is an optional argument to Stage.
type StageAttr func(optionalAttr)

// StageCapacity sets the optional capacity attribute to value.
//
// value: Maximum number of elements in the Staging Area. If > 0, inserts
// on the container will block when the capacity is reached.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageCapacity(value int64) StageAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// StageMemoryLimit sets the optional memory_limit attribute to value.
//
// value: The maximum number of bytes allowed for Tensors in the Staging Area.
// If > 0, inserts will block until sufficient space is available.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageMemoryLimit(value int64) StageAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// StageContainer sets the optional container attribute to value.
//
// value: If non-empty, this queue is placed in the given container. Otherwise,
// a default container is used.
// If not specified, defaults to ""
func StageContainer(value string) StageAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// StageSharedName sets the optional shared_name attribute to value.
//
// value: It is necessary to match this name to the matching Unstage Op.
// If not specified, defaults to ""
func StageSharedName(value string) StageAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Stage values similar to a lightweight Enqueue.
//
// The basic functionality of this Op is similar to a queue with many
// fewer capabilities and options.  This Op is optimized for performance.
//
// Arguments:
//	values: a list of tensors
//	dtypes: A list of data types that inserted values should adhere to.
//
// Returns the created operation.
func Stage(scope *Scope, values []tf.Output, optional ...StageAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Stage",
		Input: []tf.Input{
			tf.OutputList(values),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
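
// Editorial sketch, not generated code: staging two tensors into a bounded
// staging area. A matching Unstage op elsewhere would use the same
// shared_name; the capacity of 16 is illustrative.
func exampleStage(s *Scope, a, b tf.Output) *tf.Operation {
	return Stage(s, []tf.Output{a, b},
		StageCapacity(16),
		StageSharedName("example_stage"))
}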

// CudnnRNNBackpropV3Attr is an optional argument to CudnnRNNBackpropV3.
type CudnnRNNBackpropV3Attr func(optionalAttr)

// CudnnRNNBackpropV3RnnMode sets the optional rnn_mode attribute to value.
// If not specified, defaults to "lstm"
func CudnnRNNBackpropV3RnnMode(value string) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["rnn_mode"] = value
	}
}

// CudnnRNNBackpropV3InputMode sets the optional input_mode attribute to value.
// If not specified, defaults to "linear_input"
func CudnnRNNBackpropV3InputMode(value string) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["input_mode"] = value
	}
}

// CudnnRNNBackpropV3Direction sets the optional direction attribute to value.
// If not specified, defaults to "unidirectional"
func CudnnRNNBackpropV3Direction(value string) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["direction"] = value
	}
}

// CudnnRNNBackpropV3Dropout sets the optional dropout attribute to value.
// If not specified, defaults to 0
func CudnnRNNBackpropV3Dropout(value float32) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["dropout"] = value
	}
}

// CudnnRNNBackpropV3Seed sets the optional seed attribute to value.
// If not specified, defaults to 0
func CudnnRNNBackpropV3Seed(value int64) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// CudnnRNNBackpropV3Seed2 sets the optional seed2 attribute to value.
// If not specified, defaults to 0
func CudnnRNNBackpropV3Seed2(value int64) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// CudnnRNNBackpropV3NumProj sets the optional num_proj attribute to value.
// If not specified, defaults to 0
func CudnnRNNBackpropV3NumProj(value int64) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["num_proj"] = value
	}
}

// CudnnRNNBackpropV3TimeMajor sets the optional time_major attribute to value.
// If not specified, defaults to true
func CudnnRNNBackpropV3TimeMajor(value bool) CudnnRNNBackpropV3Attr {
	return func(m optionalAttr) {
		m["time_major"] = value
	}
}
// Backprop step of CudnnRNNV3.
//
// Computes the backprop of both data and weights in an RNN. Takes an extra
//     "sequence_lengths" input compared to CudnnRNNBackprop.
//
// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicates whether there is a linear projection between the input and
//     the actual computation before the first layer. 'skip_input' is only allowed
//     when input_size == num_units; 'auto_select' implies 'skip_input' when
//     input_size == num_units; otherwise, it implies 'linear_input'.
// direction: Indicates whether a bidirectional model will be used. Should be
//   "unidirectional" or "bidirectional".
// dropout: Dropout probability. When set to 0., dropout is disabled.
// seed: The 1st part of a seed to initialize dropout.
// seed2: The 2nd part of a seed to initialize dropout.
// input: If time_major is true, this is a 3-D tensor with the shape of
//     [seq_length, batch_size, input_size]. If time_major is false, the shape is
//     [batch_size, seq_length, input_size].
// input_h: If time_major is true, this is a 3-D tensor with the shape of
//     [num_layer * dir, batch_size, num_units]. If time_major is false, the shape
//     is [batch_size, num_layer * dir, num_units].
// input_c: For LSTM, a 3-D tensor with the shape of
//     [num_layer * dir, batch, num_units]. For other models, it is ignored.
// params: A 1-D tensor that contains the weights and biases in an opaque layout.
//     The size must be created through CudnnRNNParamsSize, and initialized
//     separately. Note that they might not be compatible across different
//     generations. So it is a good idea to save and restore them.
// sequence_lengths: A vector of lengths of each input sequence.
// output: If time_major is true, this is a 3-D tensor with the shape of
//     [seq_length, batch_size, dir * num_units]. If time_major is false, the
//     shape is [batch_size, seq_length, dir * num_units].
// output_h: The same shape as input_h.
// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
//     pass.
// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
//     pass.
// time_major: Indicates whether the input/output format is time major or batch
//     major.
// reserve_space: The same reserve_space produced in the forward operation.
// input_backprop: The backprop to input in the forward pass. Has the same shape
//     as input.
// input_h_backprop: The backprop to input_h in the forward pass. Has the same
//     shape as input_h.
// input_c_backprop: The backprop to input_c in the forward pass. Has the same
//     shape as input_c.
// params_backprop: The backprop to the params buffer in the forward pass. Has the
//     same shape as params.
func CudnnRNNBackpropV3(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, sequence_lengths tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, host_reserved tf.Output, optional ...CudnnRNNBackpropV3Attr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "CudnnRNNBackpropV3",
		Input: []tf.Input{
			input, input_h, input_c, params, sequence_lengths, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}

// Identity op for gradient debugging.
//
// This op is hidden from the public API in Python. It is used by TensorFlow
// Debugger to register gradient tensors for gradient debugging.
// This op operates on non-reference-type tensors.
func DebugGradientIdentity(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "DebugGradientIdentity",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
//
// For each entry in `x`, calculates the number of `1` (on) bits in the binary
// representation of that entry.
//
// **NOTE**: It is more efficient to first `tf.bitcast` your tensors into
// `int32` or `int64` and perform the bitcount on the result, than to feed in
// 8- or 16-bit inputs and then aggregate the resulting counts.
func PopulationCount(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "PopulationCount",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
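
// Editorial sketch, not generated code: following the NOTE above, computing a
// total bit count for an int8 tensor of shape [n, 4] by bitcasting to an
// int32 vector (the innermost dimension must be 4 for the bitcast to be
// valid), counting bits per element, and summing the uint8 counts.
func exampleTotalBitCount(s *Scope, x8 tf.Output) tf.Output {
	x32 := Bitcast(s, x8, tf.Int32)
	counts := PopulationCount(s, x32)
	axes := Const(s, []int32{0})
	return Sum(s, Cast(s, counts, tf.Int32), axes)
}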

// Makes its input available to the next iteration.
//
// Arguments:
//	data: The tensor to be made available to the next iteration.
//
// Returns The same tensor as `data`.
func NextIteration(scope *Scope, data tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "NextIteration",
		Input: []tf.Input{
			data,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Add the quantile summaries to each quantile stream resource.
//
// An op that adds a list of quantile summaries to a quantile stream resource. Each
// summary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank)
// for a single feature.
//
// Arguments:
//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
//	summaries: List of rank 2 Tensors, each containing the summaries for a single feature.
//
// Returns the created operation.
func BoostedTreesQuantileStreamResourceAddSummaries(scope *Scope, quantile_stream_resource_handle tf.Output, summaries []tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "BoostedTreesQuantileStreamResourceAddSummaries",
		Input: []tf.Input{
			quantile_stream_resource_handle, tf.OutputList(summaries),
		},
	}
	return scope.AddOperation(opspec)
}

// Returns an immutable tensor from a memory region.
//
// The current implementation memmaps the tensor from a file.
//
// Arguments:
//	dtype: Type of the returned tensor.
//	shape: Shape of the returned tensor.
//	memory_region_name: Name of readonly memory region used by the tensor, see
// NewReadOnlyMemoryRegionFromFile in tensorflow::Env.
func ImmutableConst(scope *Scope, dtype tf.DataType, shape tf.Shape, memory_region_name string) (tensor tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype, "shape": shape, "memory_region_name": memory_region_name}
	opspec := tf.OpSpec{
		Type: "ImmutableConst",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
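
// Editorial sketch, not generated code: mapping a read-only memory region as
// a 1024-element float vector. The region name is hypothetical and must have
// been registered via NewReadOnlyMemoryRegionFromFile beforehand.
func exampleImmutableConst(s *Scope) tf.Output {
	return ImmutableConst(s, tf.Float, tf.MakeShape(1024), "example_region")
}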

// A container for an iterator resource.
//
// Returns A handle to the iterator that can be passed to a "MakeIterator"
// or "IteratorGetNext" op.
func Iterator(scope *Scope, shared_name string, container string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shared_name": shared_name, "container": container, "output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "Iterator",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

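
// Editorial sketch, not generated code: creating an iterator resource for a
// dataset of scalar int64 elements. A "MakeIterator" op would bind the handle
// to a concrete dataset, and "IteratorGetNext" would pull elements from it.
func exampleIterator(s *Scope) tf.Output {
	return Iterator(s, "example_iterator", "",
		[]tf.DataType{tf.Int64},
		[]tf.Shape{tf.ScalarShape()})
}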