# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Tests for this file live in python/kernel_tests/array_ops_test.py
"""Support for manipulating tensors."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numbers
import numpy as np

from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.ops.gen_array_ops import reverse_v2 as reverse  # pylint: disable=unused-import
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import

# Used for slicing to specify a new 1 size dimension
newaxis = None
tf_export("newaxis").export_constant(__name__, "newaxis")

# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_BaseSlice = slice


@tf_export("reshape", v1=["reshape", "manip.reshape"])
def reshape(tensor, shape, name=None):  # pylint: disable=redefined-outer-name
  r"""Reshapes a tensor.

  Given `tensor`, this operation returns a new `tf.Tensor` that has the same
  values as `tensor` in the same order, except with a new shape given by
  `shape`.

  >>> t1 = [[1, 2, 3],
  ...       [4, 5, 6]]
  >>> print(tf.shape(t1).numpy())
  [2 3]
  >>> t2 = tf.reshape(t1, [6])
  >>> t2
  <tf.Tensor: shape=(6,), dtype=int32,
    numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>
  >>> tf.reshape(t2, [3, 2])
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
    array([[1, 2],
           [3, 4],
           [5, 6]], dtype=int32)>

  `tf.reshape` does not change the order of, or the total number of, elements
  in the tensor, so it can reuse the underlying data buffer. This makes it a
  fast operation regardless of how large a tensor it operates on.

  >>> tf.reshape([1, 2, 3], [2, 2])
  Traceback (most recent call last):
  ...
  InvalidArgumentError: Input to reshape is a tensor with 3 values, but the
  requested shape has 4

  To instead reorder the data to rearrange the dimensions of a tensor, see
  `tf.transpose`.

  >>> t = [[1, 2, 3],
  ...      [4, 5, 6]]
  >>> tf.reshape(t, [3, 2]).numpy()
  array([[1, 2],
         [3, 4],
         [5, 6]], dtype=int32)
  >>> tf.transpose(t, perm=[1, 0]).numpy()
  array([[1, 4],
         [2, 5],
         [3, 6]], dtype=int32)

  If one component of `shape` is the special value -1, the size of that
  dimension is computed so that the total size remains constant.  In particular,
  a `shape` of `[-1]` flattens into 1-D.  At most one component of `shape` can
  be -1.

  >>> t = [[1, 2, 3],
  ...      [4, 5, 6]]
  >>> tf.reshape(t, [-1])
  <tf.Tensor: shape=(6,), dtype=int32,
    numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>
  >>> tf.reshape(t, [3, -1])
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
    array([[1, 2],
           [3, 4],
           [5, 6]], dtype=int32)>
  >>> tf.reshape(t, [-1, 2])
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
    array([[1, 2],
           [3, 4],
           [5, 6]], dtype=int32)>

  `tf.reshape(t, [])` reshapes a tensor `t` with one element to a scalar.

  >>> tf.reshape([7], []).numpy()
  7

  More examples:

  >>> t = [1, 2, 3, 4, 5, 6, 7, 8, 9]
  >>> print(tf.shape(t).numpy())
  [9]
  >>> tf.reshape(t, [3, 3])
  <tf.Tensor: shape=(3, 3), dtype=int32, numpy=
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]], dtype=int32)>

  >>> t = [[[1, 1], [2, 2]],
  ...      [[3, 3], [4, 4]]]
  >>> print(tf.shape(t).numpy())
  [2 2 2]
  >>> tf.reshape(t, [2, 4])
  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
    array([[1, 1, 2, 2],
           [3, 3, 4, 4]], dtype=int32)>

  >>> t = [[[1, 1, 1],
  ...       [2, 2, 2]],
  ...      [[3, 3, 3],
  ...       [4, 4, 4]],
  ...      [[5, 5, 5],
  ...       [6, 6, 6]]]
  >>> print(tf.shape(t).numpy())
  [3 2 3]
  >>> # Pass '[-1]' to flatten 't'.
  >>> tf.reshape(t, [-1])
  <tf.Tensor: shape=(18,), dtype=int32,
    numpy=array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
    dtype=int32)>
  >>> # -- Using -1 to infer the shape --
  >>> # Here -1 is inferred to be 9:
  >>> tf.reshape(t, [2, -1])
  <tf.Tensor: shape=(2, 9), dtype=int32, numpy=
    array([[1, 1, 1, 2, 2, 2, 3, 3, 3],
           [4, 4, 4, 5, 5, 5, 6, 6, 6]], dtype=int32)>
  >>> # -1 is inferred to be 2:
  >>> tf.reshape(t, [-1, 9])
  <tf.Tensor: shape=(2, 9), dtype=int32, numpy=
    array([[1, 1, 1, 2, 2, 2, 3, 3, 3],
           [4, 4, 4, 5, 5, 5, 6, 6, 6]], dtype=int32)>
  >>> # -1 is inferred to be 3:
  >>> tf.reshape(t, [2, -1, 3])
  <tf.Tensor: shape=(2, 3, 3), dtype=int32, numpy=
    array([[[1, 1, 1],
            [2, 2, 2],
            [3, 3, 3]],
           [[4, 4, 4],
            [5, 5, 5],
            [6, 6, 6]]], dtype=int32)>

  Args:
    tensor: A `Tensor`.
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Defines the shape of the output tensor.
    name: Optional string. A name for the operation.

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  result = gen_array_ops.reshape(tensor, shape, name)
  tensor_util.maybe_set_static_shape(result, shape)
  return result


@tf_export("fill")
def fill(dims, value, name=None):
  r"""Creates a tensor filled with a scalar value.

  This operation creates a tensor of shape `dims` and fills it with `value`.

  For example:

  >>> tf.fill([2, 3], 9)
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[9, 9, 9],
         [9, 9, 9]], dtype=int32)>

  `tf.fill` evaluates at graph runtime and supports dynamic shapes based on
  other runtime `tf.Tensors`, unlike `tf.constant(value, shape=dims)`, which
  embeds the value as a `Const` node.

  Args:
    dims: A 1-D sequence of non-negative numbers. Represents the shape of the
      output `tf.Tensor`. Entries should be of type: `int32`, `int64`.
    value: A value to fill the returned `tf.Tensor`.
    name: Optional string. The name of the output `tf.Tensor`.

  Returns:
    A `tf.Tensor` with shape `dims` and the same dtype as `value`.

  Raises:
    InvalidArgumentError: `dims` contains negative entries.
    NotFoundError: `dims` contains non-integer entries.

  @compatibility(numpy)
  Similar to `np.full`. In `numpy`, more parameters are supported. Passing a
  number argument as the shape (`np.full(5, value)`) is valid in `numpy` for
  specifying a 1-D shaped result, while TensorFlow does not support this syntax.
  @end_compatibility
  """
  result = gen_array_ops.fill(dims, value, name=name)
  tensor_util.maybe_set_static_shape(result, dims)
  return result


@tf_export("identity")
@dispatch.add_dispatch_support
def identity(input, name=None):  # pylint: disable=redefined-builtin
  r"""Return a Tensor with the same shape and contents as input.

  The return value is not the same Tensor as the original, but contains the same
  values.  This operation is fast when used on the same device.

  For example:

  >>> a = tf.constant([0.78])
  >>> a_identity = tf.identity(a)
  >>> a.numpy()
  array([0.78], dtype=float32)
  >>> a_identity.numpy()
  array([0.78], dtype=float32)

  Calling `tf.identity` on a variable will make a Tensor that represents the
  value of that variable at the time it is called. This is equivalent to calling
  `<variable>.read_value()`.

  >>> a = tf.Variable(5)
  >>> a_identity = tf.identity(a)
  >>> a.assign_add(1)
  <tf.Variable ... shape=() dtype=int32, numpy=6>
  >>> a.numpy()
  6
  >>> a_identity.numpy()
  5

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  if isinstance(input, composite_tensor.CompositeTensor):
    return nest.map_structure(identity, input, expand_composites=True)
  if context.executing_eagerly() and not hasattr(input, "graph"):
    # Make sure we get an input with handle data attached from resource
    # variables. Variables have correct handle data when graph building.
    input = ops.convert_to_tensor(input)
  ret = gen_array_ops.identity(input, name=name)
  # Propagate handle data for happier shape inference for resource variables.
  if hasattr(input, "_handle_data"):
    ret._handle_data = input._handle_data  # pylint: disable=protected-access
  return ret


# pylint: disable=redefined-builtin,protected-access
@tf_export(v1=["expand_dims"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead", "dim")
def expand_dims(input, axis=None, name=None, dim=None):
  """Inserts a dimension of 1 into a tensor's shape.

  Given a tensor `input`, this operation inserts a dimension of 1 at the
  dimension index `axis` of `input`'s shape. The dimension index `axis` starts
  at zero; if you specify a negative number for `axis` it is counted backward
  from the end.

  This operation is useful if you want to add a batch dimension to a single
  element. For example, if you have a single image of shape `[height, width,
  channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
  which will make the shape `[1, height, width, channels]`.

  Other examples:

  ```python
  # 't' is a tensor of shape [2]
  tf.shape(tf.expand_dims(t, 0))  # [1, 2]
  tf.shape(tf.expand_dims(t, 1))  # [2, 1]
  tf.shape(tf.expand_dims(t, -1))  # [2, 1]

  # 't2' is a tensor of shape [2, 3, 5]
  tf.shape(tf.expand_dims(t2, 0))  # [1, 2, 3, 5]
  tf.shape(tf.expand_dims(t2, 2))  # [2, 3, 1, 5]
  tf.shape(tf.expand_dims(t2, 3))  # [2, 3, 5, 1]
  ```

  This operation requires that:

  `-1-input.dims() <= dim <= input.dims()`

  This operation is related to `squeeze()`, which removes dimensions of
  size 1.

  Args:
    input: A `Tensor`.
    axis: 0-D (scalar). Specifies the dimension index at which to expand the
      shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`.
    name: The name of the output `Tensor` (optional).
    dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.

  Returns:
    A `Tensor` with the same data as `input`, but its shape has an additional
    dimension of size 1 added.

  Raises:
    ValueError: if either both or neither of `dim` and `axis` are specified.
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
  if axis is None:
    raise ValueError("Must specify an axis argument to tf.expand_dims()")
  return expand_dims_v2(input, axis, name)


@tf_export("expand_dims", v1=[])
@dispatch.add_dispatch_support
def expand_dims_v2(input, axis, name=None):
  """Returns a tensor with an additional dimension inserted at index `axis`.

  Given a tensor `input`, this operation inserts a dimension of size 1 at the
  dimension index `axis` of `input`'s shape. The dimension index `axis` starts
  at zero; if you specify a negative number for `axis` it is counted backward
  from the end.

  This operation is useful if you want to add a batch dimension to a single
  element. For example, if you have a single image of shape `[height, width,
  channels]`, you can make it a batch of one image with `expand_dims(image, 0)`,
  which will make the shape `[1, height, width, channels]`.

  Examples:

  >>> t = [[1, 2, 3],[4, 5, 6]] # shape [2, 3]

  >>> tf.expand_dims(t, 0)
  <tf.Tensor: shape=(1, 2, 3), dtype=int32, numpy=
  array([[[1, 2, 3],
          [4, 5, 6]]], dtype=int32)>

  >>> tf.expand_dims(t, 1)
  <tf.Tensor: shape=(2, 1, 3), dtype=int32, numpy=
  array([[[1, 2, 3]],
         [[4, 5, 6]]], dtype=int32)>

  >>> tf.expand_dims(t, 2)
  <tf.Tensor: shape=(2, 3, 1), dtype=int32, numpy=
  array([[[1],
          [2],
          [3]],
         [[4],
          [5],
          [6]]], dtype=int32)>

  >>> tf.expand_dims(t, -1) # Last dimension index. In this case, same as 2.
  <tf.Tensor: shape=(2, 3, 1), dtype=int32, numpy=
  array([[[1],
          [2],
          [3]],
         [[4],
          [5],
          [6]]], dtype=int32)>

  This operation is related to:

  *   `tf.squeeze`, which removes dimensions of size 1.
  *   `tf.reshape`, which provides more flexible reshaping capability

  Args:
    input: A `Tensor`.
    axis: Integer specifying the dimension index at which to expand the
      shape of `input`. Given an input of D dimensions, `axis` must be in range
      `[-(D+1), D]` (inclusive).
    name: Optional string. The name of the output `Tensor`.

  Returns:
    A tensor with the same data as `input`, with an additional dimension
    inserted at the index specified by `axis`.

  Raises:
    ValueError: If `axis` is not specified.
    InvalidArgumentError: If `axis` is out of range `[-(D+1), D]`.
  """
  return gen_array_ops.expand_dims(input, axis, name)


# pylint: enable=redefined-builtin,protected-access


# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecation.deprecated("2016-11-30",
                        "This op will be removed after the deprecation date. "
                        "Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
  return gen_array_ops.list_diff(x, y, out_idx, name)


listdiff.__doc__ = gen_array_ops.list_diff.__doc__ + "\n" + listdiff.__doc__

# pylint: enable=protected-access


# pylint: disable=undefined-variable
@deprecation.deprecated("2018-11-30",
                        "This op will be removed after the deprecation date. "
                        "Please switch to tf.sets.difference().")
@tf_export(v1=["setdiff1d"])
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
  """Computes the difference between two lists of numbers or strings.

  Given a list x and a list y, this operation returns a list out that
  represents all values that are in x but not in y. The returned list
  out is sorted in the same order that the numbers appear in x
  (duplicates are preserved). This operation also returns a list idx
  that represents the position of each out element in x.

  In other words:

  ```python
  out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]
  ```

  Example usage:

  >>> x = [1, 2, 3, 4, 5, 6]
  >>> y = [1, 3, 5]
  >>> setdiff1d(x,y)
  ListDiff(out=<tf.Tensor: id=2, shape=(3,), dtype=int32,
  numpy=array([2, 4, 6], dtype=int32)>, idx=<tf.Tensor: id=3,
  shape=(3,), dtype=int32, numpy=array([1, 3, 5], dtype=int32)>)

  Args:
    x: A Tensor. 1-D. Values to keep.
    y: A Tensor. Must have the same type as x. 1-D. Values to remove.
    index_dtype: An optional tf.DType from: tf.int32, tf.int64. Defaults to
      tf.int32.
    name: A name for the operation (optional).

  Returns:
    A tuple of Tensor objects (out, idx).
    out: A Tensor. Has the same type as x.
    idx: A Tensor of type index_dtype.
473  """
474  return gen_array_ops.list_diff(x, y, index_dtype, name)
475
476
477setdiff1d.__doc__ = gen_array_ops.list_diff.__doc__
478
479
480@tf_export("broadcast_dynamic_shape")
481def broadcast_dynamic_shape(shape_x, shape_y):
482  """Computes the shape of a broadcast given symbolic shapes.
483
  When shape_x and shape_y are Tensors representing shapes (i.e. the result of
  calling tf.shape on another Tensor), this computes a Tensor which is the
  shape of the result of a broadcasting op applied to tensors of shapes
  shape_x and shape_y.

  For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
  Tensor whose value is [5, 2, 3].

  This is useful when validating the result of a broadcasting operation when the
  tensors do not have statically known shapes.

  Args:
    shape_x: A rank 1 integer `Tensor`, representing the shape of x.
    shape_y: A rank 1 integer `Tensor`, representing the shape of y.

  Returns:
    A rank 1 integer `Tensor` representing the broadcasted shape.
  """
  return gen_array_ops.broadcast_args(shape_x, shape_y)

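# A minimal usage sketch, assuming eager execution with the public `tf`
# namespace available (as in the doctests above):
#
#   >>> x_shape = tf.shape(tf.ones([1, 2, 3]))  # runtime shape vector
#   >>> y_shape = tf.shape(tf.ones([5, 1, 3]))
#   >>> tf.broadcast_dynamic_shape(x_shape, y_shape).numpy()
#   array([5, 2, 3], dtype=int32)
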

@tf_export("broadcast_static_shape")
def broadcast_static_shape(shape_x, shape_y):
  """Computes the shape of a broadcast given known shapes.

  When shape_x and shape_y are fully known TensorShapes, this computes a
  TensorShape which is the shape of the result of a broadcasting op applied
  to tensors of shapes shape_x and shape_y.

  For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
  TensorShape whose value is [5, 2, 3].

  This is useful when validating the result of a broadcasting operation when the
  tensors have statically known shapes.

  Args:
    shape_x: A `TensorShape`.
    shape_y: A `TensorShape`.

  Returns:
    A `TensorShape` representing the broadcasted shape.

  Raises:
    ValueError: If the two shapes cannot be broadcast.
  """
  return common_shapes.broadcast_shape(shape_x, shape_y)

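# A minimal sketch, assuming fully defined `TensorShape` inputs; the repr
# shown matches TF 2.x and may differ in other versions:
#
#   >>> tf.broadcast_static_shape(tf.TensorShape([1, 2, 3]),
#   ...                           tf.TensorShape([5, 1, 3]))
#   TensorShape([5, 2, 3])
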

@tf_export("shape", v1=[])
def shape_v2(input, out_type=dtypes.int32, name=None):
  # pylint: disable=redefined-builtin
  """Returns the shape of a tensor.

  This operation returns a 1-D integer tensor representing the shape of `input`.
  This represents the minimal set of known information at definition time.

  For example:

  >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
  >>> tf.shape(t)
  <tf.Tensor: shape=(3,), dtype=int32, numpy=array([2, 2, 3], dtype=int32)>
  >>> tf.shape(t).numpy()
  array([2, 2, 3], dtype=int32)

  Note: When using symbolic tensors, such as when using the Keras functional
  API, tf.shape() will return the shape of the symbolic tensor.

  >>> a = tf.keras.layers.Input((None, 10))
  >>> tf.shape(a)
  <tf.Tensor ... shape=(3,) dtype=int32>

  In these cases, using `tf.Tensor.shape` will return more informative results.

  >>> a.shape
  TensorShape([None, None, 10])

  `tf.shape` and `Tensor.shape` should be identical in eager mode.  Within
  `tf.function` or within a `compat.v1` context, not all dimensions may be
  known until execution time.

  Args:
    input: A `Tensor` or `SparseTensor`.
    out_type: (Optional) The specified output type of the operation (`int32` or
      `int64`). Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
  """
  return shape(input, name, out_type)


@tf_export(v1=["shape"])
def shape(input, name=None, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the shape of a tensor.

  This operation returns a 1-D integer tensor representing the shape of `input`.

  For example:

  ```python
  t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
  tf.shape(t)  # [2, 2, 3]
  ```

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    out_type: (Optional) The specified output type of the operation (`int32`
      or `int64`). Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`.
  """
  return shape_internal(input, name, optimize=True, out_type=out_type)


def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the shape of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the shape as a constant when possible.
    out_type: (Optional) The specified output type of the operation (`int32` or
      `int64`). Defaults to tf.int32.

  Returns:
    A `Tensor` of type `out_type`.

  """
  with ops.name_scope(name, "Shape", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      return gen_math_ops.cast(input.dense_shape, out_type)
    else:
      if not context.executing_eagerly():
        input = ops.convert_to_tensor(input)
        input_shape = input.get_shape()
        if optimize and input_shape.is_fully_defined():
          return constant(input_shape.as_list(), out_type, name=name)
      return gen_array_ops.shape(input, name=name, out_type=out_type)


@tf_export("shape_n")
def shape_n(input, out_type=dtypes.int32, name=None):
  # pylint: disable=redefined-builtin
  """Returns shape of tensors.

  Args:
    input: A list of at least 1 `Tensor` object with the same type.
    out_type: The specified output type of the operation (`int32` or `int64`).
      Defaults to `tf.int32` (optional).
    name: A name for the operation (optional).

  Returns:
    A list with the same length as `input` of `Tensor` objects with
      type `out_type`.
  """

  return gen_array_ops.shape_n(input, out_type=out_type, name=name)

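# A minimal sketch, assuming eager execution: `shape_n` returns one shape
# vector per input in a single op, instead of one `tf.shape` call per tensor:
#
#   >>> a, b = tf.ones([2, 3]), tf.ones([4])
#   >>> [s.numpy() for s in tf.shape_n([a, b])]
#   [array([2, 3], dtype=int32), array([4], dtype=int32)]
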

@tf_export("size", v1=[])
@dispatch.add_dispatch_support
def size_v2(input, out_type=dtypes.int32, name=None):
  # pylint: disable=redefined-builtin
  """Returns the size of a tensor.

  Returns a 0-D `Tensor` representing the number of elements in `input`
  of type `out_type`. Defaults to tf.int32.

  For example:

  >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
  >>> tf.size(t)
  <tf.Tensor: shape=(), dtype=int32, numpy=12>

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    out_type: (Optional) The specified non-quantized numeric output type of the
      operation. Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`. Defaults to `tf.int32`.

  @compatibility(numpy)
  Equivalent to np.size()
  @end_compatibility
  """

  return size(input, name, out_type)


@tf_export(v1=["size"])
@dispatch.add_dispatch_support
def size(input, name=None, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the size of a tensor.

  Returns a 0-D `Tensor` representing the number of elements in `input`
  of type `out_type`. Defaults to tf.int32.

  For example:

  ```python
  t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
  tf.size(t)  # 12
  ```

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    out_type: (Optional) The specified non-quantized numeric output type of the
      operation. Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`. Defaults to `tf.int32`.

  @compatibility(numpy)
  Equivalent to np.size()
  @end_compatibility
  """
  return size_internal(input, name, optimize=True, out_type=out_type)


def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin,protected-access
  """Returns the size of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the size as a constant when possible.
    out_type: (Optional) The specified non-quantized numeric output type of the
      operation. Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`. Defaults to `tf.int32`.
  """
  if (context.executing_eagerly() and not hasattr(input, "graph") and
      not isinstance(
          input,
          (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))):
    input = ops.convert_to_tensor(input)
    np_out_type = out_type.as_numpy_dtype
    num_elements = np.prod(input._shape_tuple(), dtype=np_out_type)  # pylint: disable=protected-access
    return ops.convert_to_tensor(num_elements, dtype=out_type)
  with ops.name_scope(name, "Size", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      return gen_math_ops.prod(
          gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
    else:
      input = ops.convert_to_tensor(input)
      input_shape = input.get_shape()
      if optimize:
        if input_shape.is_fully_defined():
          return constant(input_shape.num_elements(), out_type, name=name)
        if input_shape.dims and any(dim == 0 for dim in input_shape.dims):
          return constant(0, out_type, name=name)
      return gen_array_ops.size(input, name=name, out_type=out_type)


@tf_export("rank")
@dispatch.add_dispatch_support
def rank(input, name=None):
  # pylint: disable=redefined-builtin
  """Returns the rank of a tensor.

  Returns a 0-D `int32` `Tensor` representing the rank of `input`.

  For example:

  ```python
  # shape of tensor 't' is [2, 2, 3]
  t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
  tf.rank(t)  # 3
  ```

  **Note**: The rank of a tensor is not the same as the rank of a matrix. The
  rank of a tensor is the number of indices required to uniquely select each
  element of the tensor. Rank is also known as "order", "degree", or "ndims."

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.

  @compatibility(numpy)
  Equivalent to np.ndim
  @end_compatibility
  """
  return rank_internal(input, name, optimize=True)


def rank_internal(input, name=None, optimize=True):
  # pylint: disable=redefined-builtin
  """Returns the rank of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the rank as a constant when possible.

  Returns:
    A `Tensor` of type `int32`.
  """
  with ops.name_scope(name, "Rank", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      return gen_array_ops.size(input.dense_shape, name=name)
    else:
      input = ops.convert_to_tensor(input)
      input_shape = input.get_shape()
      if optimize and input_shape.ndims is not None:
        return constant(input_shape.ndims, dtypes.int32, name=name)
      return gen_array_ops.rank(input, name=name)


_SLICE_TYPE_ERROR = (
    "Only integers, slices (`:`), ellipsis (`...`), "
    "tf.newaxis (`None`) and scalar tf.int32/tf.int64 tensors are valid "
    "indices")

_SUPPORTED_SLICE_DTYPES = (dtypes.int32, dtypes.int32_ref, dtypes.int64,
                           dtypes.int64_ref)


def _check_index(idx):
  """Check if a given value is a valid index into a tensor."""
  if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)):
    return

  # Optimistic check. Assumptions:
  # * any object with a dtype is supported
  # * any object with a dtype has a sizeable shape attribute.
  dtype = getattr(idx, "dtype", None)
  if (dtype is None or dtypes.as_dtype(dtype) not in _SUPPORTED_SLICE_DTYPES or
      idx.shape and len(idx.shape) == 1):
    # TODO(slebedev): IndexError seems more appropriate here, but it
    # will break `_slice_helper` contract.
    raise TypeError(_SLICE_TYPE_ERROR + ", got {!r}".format(idx))


def _is_undefined_dimension(d):
  return isinstance(d, tensor_shape.Dimension) and d.value is None


def _slice_helper(tensor, slice_spec, var=None):
  """Overload for Tensor.__getitem__.

  This operation extracts the specified region from the tensor.
  The notation is similar to NumPy's, with the restriction that only basic
  indexing is currently supported; that means using a non-scalar tensor as
  an index is not currently allowed.

  Some useful examples:

  ```python
  # Strip leading and trailing 2 elements
  foo = tf.constant([1,2,3,4,5,6])
  print(foo[2:-2].eval())  # => [3,4]

  # Skip every other row and reverse the order of the columns
  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
  print(foo[::2,::-1].eval())  # => [[3,2,1], [9,8,7]]

  # Use scalar tensors as indices on both dimensions
  print(foo[tf.constant(0), tf.constant(2)].eval())  # => 3

  # Insert another dimension
  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
  print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
  print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
  print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],
                                      #     [[7],[8],[9]]]

  # Ellipses (3 equivalent operations)
  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
  print(foo[tf.newaxis, :, :].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]
  print(foo[tf.newaxis, ...].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]
  print(foo[tf.newaxis].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]

  # Masks
  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
  print(foo[foo > 2].eval())  # => [3, 4, 5, 6, 7, 8, 9]
  ```

  Notes:
    - `tf.newaxis` is `None` as in NumPy.
    - An implicit ellipsis is placed at the end of the `slice_spec`.
    - NumPy advanced indexing is currently not supported.

  Args:
    tensor: An ops.Tensor object.
    slice_spec: The arguments to Tensor.__getitem__.
    var: In the case of variable slice assignment, the Variable object to slice
      (i.e. tensor is the read-only view of this variable).

  Returns:
    The appropriate slice of "tensor", based on "slice_spec".

  Raises:
    ValueError: If a slice range has a negative size.
    TypeError: If the slice indices aren't int, slice, ellipsis,
      tf.newaxis or scalar int32/int64 tensors.
  """
  if isinstance(slice_spec, bool) or \
  (isinstance(slice_spec, ops.Tensor) and slice_spec.dtype == dtypes.bool) or \
  (isinstance(slice_spec, np.ndarray) and slice_spec.dtype == bool):
    return boolean_mask(tensor=tensor, mask=slice_spec)

  if not isinstance(slice_spec, (list, tuple)):
    slice_spec = [slice_spec]

  begin, end, strides = [], [], []
  index = 0

  new_axis_mask, shrink_axis_mask = 0, 0
  begin_mask, end_mask = 0, 0
  ellipsis_mask = 0
  for s in slice_spec:
    if isinstance(s, _BaseSlice):
      if s.start is not None and not _is_undefined_dimension(s.start):
        _check_index(s.start)
        begin.append(s.start)
      else:
        begin.append(0)
        begin_mask |= (1 << index)
      if s.stop is not None and not _is_undefined_dimension(s.stop):
        _check_index(s.stop)
        end.append(s.stop)
      else:
        end.append(0)
        end_mask |= (1 << index)
      if s.step is not None and not _is_undefined_dimension(s.step):
        _check_index(s.step)
        strides.append(s.step)
      else:
        strides.append(1)
    elif s is Ellipsis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      ellipsis_mask |= (1 << index)
    elif s is newaxis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      new_axis_mask |= (1 << index)
    else:
      _check_index(s)
      begin.append(s)
      end.append(s + 1)
      strides.append(1)
      shrink_axis_mask |= (1 << index)
    index += 1

  # The `stack` calls below may involve no tensors, so we pass the inputs
  # to `ops.name_scope` so that it picks the correct graph.
  with ops.name_scope(
      None,
      "strided_slice", [tensor] + begin + end + strides,
      skip_on_eager=False) as name:
    if begin:
      packed_begin, packed_end, packed_strides = (stack(begin), stack(end),
                                                  stack(strides))
      if (packed_begin.dtype == dtypes.int64 or
          packed_end.dtype == dtypes.int64 or
          packed_strides.dtype == dtypes.int64):
        if packed_begin.dtype != dtypes.int64:
          packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)
        if packed_end.dtype != dtypes.int64:
          packed_end = gen_math_ops.cast(packed_end, dtypes.int64)
        if packed_strides.dtype != dtypes.int64:
          packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)
    else:
      var_empty = constant([], dtype=dtypes.int32)
      packed_begin = packed_end = packed_strides = var_empty
    return strided_slice(
        tensor,
        packed_begin,
        packed_end,
        packed_strides,
        begin_mask=begin_mask,
        end_mask=end_mask,
        shrink_axis_mask=shrink_axis_mask,
        new_axis_mask=new_axis_mask,
        ellipsis_mask=ellipsis_mask,
        var=var,
        name=name)


# pylint: disable=undefined-variable,protected-access,redefined-outer-name
@tf_export("slice")
def slice(input_, begin, size, name=None):
  # pylint: disable=redefined-builtin
  """Extracts a slice from a tensor.

  This operation extracts a slice of size `size` from a tensor `input_` starting
  at the location specified by `begin`. The slice `size` is represented as a
  tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
  of `input_` that you want to slice. The starting location (`begin`) for the
  slice is represented as an offset in each dimension of `input_`. In other
  words, `begin[i]` is the offset into the i'th dimension of `input_` that you
  want to slice from.

  Note that `tf.Tensor.__getitem__` is typically a more pythonic way to
  perform slices, as it allows you to write `foo[3:7, :-2]` instead of
  `tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.

  `begin` is zero-based; `size` is one-based. If `size[i]` is -1,
  all remaining elements in dimension i are included in the
  slice. In other words, this is equivalent to setting:

  `size[i] = input_.dim_size(i) - begin[i]`

  This operation requires that:

  `0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n]`

  For example:

  ```python
  t = tf.constant([[[1, 1, 1], [2, 2, 2]],
                   [[3, 3, 3], [4, 4, 4]],
                   [[5, 5, 5], [6, 6, 6]]])
  tf.slice(t, [1, 0, 0], [1, 1, 3])  # [[[3, 3, 3]]]
  tf.slice(t, [1, 0, 0], [1, 2, 3])  # [[[3, 3, 3],
                                     #   [4, 4, 4]]]
  tf.slice(t, [1, 0, 0], [2, 1, 3])  # [[[3, 3, 3]],
                                     #  [[5, 5, 5]]]
  ```

  Args:
    input_: A `Tensor`.
    begin: An `int32` or `int64` `Tensor`.
    size: An `int32` or `int64` `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` the same type as `input_`.
  """
  return gen_array_ops._slice(input_, begin, size, name=name)


# pylint: disable=invalid-name
@tf_export("strided_slice")
def strided_slice(input_,
                  begin,
                  end,
                  strides=None,
                  begin_mask=0,
                  end_mask=0,
                  ellipsis_mask=0,
                  new_axis_mask=0,
                  shrink_axis_mask=0,
                  var=None,
                  name=None):
  """Extracts a strided slice of a tensor (generalized python array indexing).

  **Instead of calling this op directly most users will want to use the
  NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which
  is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.**
  The interface of this op is a low-level encoding of the slicing syntax.

  Roughly speaking, this op extracts a slice of size `(end-begin)/stride`
  from the given `input_` tensor. Starting at the location specified by `begin`
  the slice continues by adding `stride` to the index until all dimensions are
  not less than `end`.
  Note that a stride can be negative, which causes a reverse slice.

  Given a Python slice `input[spec0, spec1, ..., specn]`,
  this function will be called as follows.

  `begin`, `end`, and `strides` will be vectors of length n.
  n in general is not equal to the rank of the `input_` tensor.

  In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,
  `new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to
  the ith spec.

  If the ith bit of `begin_mask` is set, `begin[i]` is ignored and
  the fullest possible range in that dimension is used instead.
  `end_mask` works analogously, except with the end range.

  `foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
  `foo[::-1]` reverses a tensor with shape `[8]`.

  If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions
  as needed will be inserted between other dimensions. Only one
  non-zero bit is allowed in `ellipsis_mask`.

  For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
  equivalent to `foo[3:5,:,:,4:5]` and
  `foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.

  If the ith bit of `new_axis_mask` is set, then `begin`,
  `end`, and `stride` are ignored and a new length 1 dimension is
  added at this point in the output tensor.

  For example,
  `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.

  If the ith bit of `shrink_axis_mask` is set, it implies that the ith
  specification shrinks the dimensionality by 1, taking on the value at index
  `begin[i]`. `end[i]` and `strides[i]` are ignored in this case. For example in
  Python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask`
  equal to 2.


  NOTE: `begin` and `end` are zero-indexed.
  `strides` entries must be non-zero.


  ```python
  t = tf.constant([[[1, 1, 1], [2, 2, 2]],
                   [[3, 3, 3], [4, 4, 4]],
                   [[5, 5, 5], [6, 6, 6]]])
  tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1])  # [[[3, 3, 3]]]
  tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1])  # [[[3, 3, 3],
                                                        #   [4, 4, 4]]]
  tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1])  # [[[4, 4, 4],
                                                           #   [3, 3, 3]]]
  ```

  Args:
    input_: A `Tensor`.
    begin: An `int32` or `int64` `Tensor`.
    end: An `int32` or `int64` `Tensor`.
    strides: An `int32` or `int64` `Tensor`.
    begin_mask: An `int32` mask.
    end_mask: An `int32` mask.
    ellipsis_mask: An `int32` mask.
    new_axis_mask: An `int32` mask.
    shrink_axis_mask: An `int32` mask.
    var: The variable corresponding to `input_` or None
    name: A name for the operation (optional).

  Returns:
    A `Tensor` the same type as `input`.
  """

  if strides is None:
    strides = ones_like(begin)

  op = gen_array_ops.strided_slice(
      input=input_,
      begin=begin,
      end=end,
      strides=strides,
      name=name,
      begin_mask=begin_mask,
      end_mask=end_mask,
      ellipsis_mask=ellipsis_mask,
      new_axis_mask=new_axis_mask,
      shrink_axis_mask=shrink_axis_mask)

  parent_name = name

  if not (var is None and isinstance(op, ops.EagerTensor)):

    def assign(val, name=None):
      """Closure that holds all the arguments to create an assignment."""

      if var is None:
        raise ValueError("Sliced assignment is only supported for variables")
      else:
        if name is None:
          name = parent_name + "_assign"

        return var._strided_slice_assign(
            begin=begin,
            end=end,
            strides=strides,
            value=val,
            name=name,
            begin_mask=begin_mask,
            end_mask=end_mask,
            ellipsis_mask=ellipsis_mask,
            new_axis_mask=new_axis_mask,
            shrink_axis_mask=shrink_axis_mask)

    op.assign = assign
  return op

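# A minimal sketch of how the mask encoding above corresponds to
# `__getitem__` syntax, assuming eager execution. `foo[:, 3]` is encoded as
# begin=[0, 3], end=[0, 4], strides=[1, 1], with bit 0 of `begin_mask` and
# `end_mask` set (full range on dimension 0) and bit 1 of `shrink_axis_mask`
# set (drop dimension 1):
#
#   >>> foo = tf.reshape(tf.range(12), [3, 4])
#   >>> tf.strided_slice(foo, [0, 3], [0, 4], [1, 1], begin_mask=1,
#   ...                  end_mask=1, shrink_axis_mask=2).numpy()
#   array([ 3,  7, 11], dtype=int32)
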

def _SliceHelperVar(var, slice_spec):
  """Creates a slice helper object given a variable.

  This allows creating a sub-tensor from part of the current contents
  of a variable. See `tf.Tensor.__getitem__` for detailed examples
  of slicing.

  In addition, this function allows assignment to a sliced range. This is
  similar to `__setitem__` functionality in Python. However, the syntax is
  different so that the user can capture the assignment operation for
  grouping or passing to `sess.run()`.
  For example,

  ```python
  import tensorflow as tf
  A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
  with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    print(sess.run(A[:2, :2]))  # => [[1,2], [4,5]]

    op = A[:2,:2].assign(22. * tf.ones((2, 2)))
    print(sess.run(op))  # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
  ```

  Note that assignments currently do not support NumPy broadcasting
  semantics.

  Args:
    var: An `ops.Variable` object.
    slice_spec: The arguments to `Tensor.__getitem__`.

  Returns:
    The appropriate slice of "tensor", based on "slice_spec", as an operator.
    The operator also has an `assign()` method that can be used to generate
    an assignment operator.

  Raises:
    ValueError: If a slice range has a negative size.
    TypeError: If the slice indices aren't int, slice, ellipsis,
      tf.newaxis or int32/int64 tensors.

  """

  return _slice_helper(var.value(), slice_spec, var)


ops.Tensor._override_operator("__getitem__", _slice_helper)


@tf_export("parallel_stack")
def parallel_stack(values, name="parallel_stack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.

  Requires that the shape of inputs be known at graph construction time.

  Packs the list of tensors in `values` into a tensor with rank one higher than
  each tensor in `values`, by packing them along the first dimension.
  Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
  tensor will have the shape `(N, A, B, C)`.

  For example:

  ```python
  x = tf.constant([1, 4])
  y = tf.constant([2, 5])
  z = tf.constant([3, 6])
  tf.parallel_stack([x, y, z])  # [[1, 4], [2, 5], [3, 6]]
  ```

  The difference between `stack` and `parallel_stack` is that `stack` requires
  all the inputs be computed before the operation will begin but doesn't require
  that the input shapes be known during graph construction.

  `parallel_stack` will copy pieces of the input into the output as they
  become available; in some situations this can provide a performance benefit.

  Unlike `stack`, `parallel_stack` does NOT support backpropagation.

  This is the opposite of unstack.  The numpy equivalent is

      tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    name: A name for this operation (optional).

  Returns:
    output: A stacked `Tensor` with the same type as `values`.
  """
  with ops.name_scope(name):
    value_t = ops.convert_to_tensor(values[0])
    value_shape = ops.convert_to_tensor(value_t).get_shape()

    output_shape = tensor_shape.TensorShape([len(values)])
    output_shape = output_shape.concatenate(value_shape)
    # expand_dims converts concat to stack.
    return gen_array_ops.parallel_concat(
        [expand_dims(value, 0) for value in values], shape=output_shape)


@tf_export("stack")
@dispatch.add_dispatch_support
def stack(values, axis=0, name="stack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.

  Packs the list of tensors in `values` into a tensor with rank one higher than
  each tensor in `values`, by packing them along the `axis` dimension.
  Given a list of length `N` of tensors of shape `(A, B, C)`;

  if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
  if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
  Etc.

  For example:

  >>> x = tf.constant([1, 4])
  >>> y = tf.constant([2, 5])
  >>> z = tf.constant([3, 6])
  >>> tf.stack([x, y, z])
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
  array([[1, 4],
         [2, 5],
         [3, 6]], dtype=int32)>

  >>> tf.stack([x, y, z], axis=1)
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[1, 2, 3],
         [4, 5, 6]], dtype=int32)>

  This is the opposite of unstack.  The numpy equivalent is `np.stack`

  >>> np.array_equal(np.stack([x, y, z]), tf.stack([x, y, z]))
  True

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    axis: An `int`. The axis to stack along. Defaults to the first dimension.
      Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
    name: A name for this operation (optional).

  Returns:
    output: A stacked `Tensor` with the same type as `values`.

  Raises:
    ValueError: If `axis` is out of the range [-(R+1), R+1).
  """
  if axis == 0:
    try:
      # If the input is a constant list, it can be converted to a constant op
      return ops.convert_to_tensor(values, name=name)
    except (TypeError, ValueError):
      pass  # Input list contains non-constant tensors

  value_shape = ops.convert_to_tensor(values[0], name=name)._shape_tuple()  # pylint: disable=protected-access
  if value_shape is not None:
    expanded_num_dims = len(value_shape) + 1
    if axis < -expanded_num_dims or axis >= expanded_num_dims:
      raise ValueError("axis = %d not in [%d, %d)" %
                       (axis, -expanded_num_dims, expanded_num_dims))

  return gen_array_ops.pack(values, axis=axis, name=name)


# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
  """Converts the given list or tuple to a tensor by packing.

  Args:
    list_or_tuple: A (possibly nested) list or tuple containing a tensor.
    dtype: The element type of the returned tensor.
    name: A name for the returned tensor.

  Returns:
    A `tf.Tensor` with value equivalent to `list_or_tuple`.
  """
  if context.executing_eagerly():
    # NOTE: Fast path when all the items are tensors, this doesn't do any type
    # checking.
    if all(ops.is_dense_tensor_like(elem) for elem in list_or_tuple):
      return gen_array_ops.pack(list_or_tuple, name=name)
  must_pack = False
  converted_elems = []
  with ops.name_scope(name) as scope:
    for i, elem in enumerate(list_or_tuple):
      if ops.is_dense_tensor_like(elem):
        if dtype is not None and elem.dtype.base_dtype != dtype:
          raise TypeError("Cannot convert a list containing a tensor of dtype "
                          "%s to %s (Tensor is: %r)" %
                          (elem.dtype, dtype, elem))
        converted_elems.append(elem)
        must_pack = True
      elif isinstance(elem, (list, tuple)):
        converted_elem = _autopacking_helper(elem, dtype, str(i))
        if ops.is_dense_tensor_like(converted_elem):
          must_pack = True
        converted_elems.append(converted_elem)
      else:
        converted_elems.append(elem)
    if must_pack:
      elems_as_tensors = []
      for i, elem in enumerate(converted_elems):
        if ops.is_dense_tensor_like(elem):
          elems_as_tensors.append(elem)
        else:
          # NOTE(mrry): This is inefficient, but it enables us to
          # handle the case where the list arguments are other
          # convertible-to-tensor types, such as numpy arrays.
          elems_as_tensors.append(
              constant_op.constant(elem, dtype=dtype, name=str(i)))
      return gen_array_ops.pack(elems_as_tensors, name=scope)
    else:
      return converted_elems


def _get_dtype_from_nested_lists(list_or_tuple):
  """Returns the dtype of any tensor-like object in `list_or_tuple`, if found.

  Args:
    list_or_tuple: A list or tuple representing an object that can be converted
      to a `tf.Tensor`.

  Returns:
    The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
    such object exists.
  """
  for elem in list_or_tuple:
    if ops.is_dense_tensor_like(elem):
      return elem.dtype.base_dtype
    elif isinstance(elem, (list, tuple)):
      maybe_dtype = _get_dtype_from_nested_lists(elem)
      if maybe_dtype is not None:
        return maybe_dtype
  return None


def _cast_nested_seqs_to_dtype(dtype):

  def _maybe_cast(elem):
    if ops.is_dense_tensor_like(elem):
      if dtype != elem.dtype.base_dtype:
        elem = gen_math_ops.cast(elem, dtype)
    return elem

  return _maybe_cast


_NON_AUTOPACKABLE_TYPES = set(np.core.numerictypes.ScalarType)
_NON_AUTOPACKABLE_TYPES.add(np.ndarray)


def _should_not_autopack(v):
  # The condition we really want is
  #    ops.is_dense_tensor_like(...)
  # but it is >5x slower due to abc.ABCMeta.__instancecheck__.
  # pylint: disable=unidiomatic-typecheck
  # TODO(slebedev): add nest.all?
  return all(type(elem) in _NON_AUTOPACKABLE_TYPES for elem in nest.flatten(v))
  # pylint: enable=unidiomatic-typecheck


def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
  """Tensor conversion function that automatically packs arguments."""
  if as_ref or _should_not_autopack(v):
    return NotImplemented
  inferred_dtype = _get_dtype_from_nested_lists(v)
  if inferred_dtype is None:
    # We did not find any tensor-like objects in the nested lists, so defer to
    # other conversion functions.
    return NotImplemented
  if dtype is None:
    dtype = inferred_dtype
  elif dtype != inferred_dtype:
    v = nest.map_structure(_cast_nested_seqs_to_dtype(dtype), v)
  return _autopacking_helper(v, dtype, name or "packed")


# pylint: enable=invalid-name

# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
ops.register_tensor_conversion_function((list, tuple),
                                        _autopacking_conversion_function, 99)

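# A minimal sketch of what this registration enables, assuming eager
# execution: a (possibly nested) list containing at least one `Tensor` is
# converted by packing rather than by the generic constant path:
#
#   >>> t = tf.constant(1.0)
#   >>> tf.convert_to_tensor([t, 2.0, 3.0])
#   <tf.Tensor: shape=(3,), dtype=float32,
#     numpy=array([1., 2., 3.], dtype=float32)>
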

@tf_export("unstack")
def unstack(value, num=None, axis=0, name="unstack"):
  """Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.

  Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
  If `num` is not specified (the default), it is inferred from `value`'s shape.
  If `value.shape[axis]` is not known, `ValueError` is raised.

  For example, given a tensor of shape `(A, B, C, D)`;

  If `axis == 0` then the i'th tensor in `output` is the slice
    `value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
    (Note that the dimension unpacked along is gone, unlike `split`).

  If `axis == 1` then the i'th tensor in `output` is the slice
    `value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
  Etc.

  This is the opposite of stack.

  Args:
    value: A rank `R > 0` `Tensor` to be unstacked.
    num: An `int`. The length of the dimension `axis`. Automatically inferred if
      `None` (the default).
    axis: An `int`. The axis to unstack along. Defaults to the first dimension.
      Negative values wrap around, so the valid range is `[-R, R)`.
    name: A name for the operation (optional).

  Returns:
    The list of `Tensor` objects unstacked from `value`.

  Raises:
    ValueError: If `num` is unspecified and cannot be inferred.
    ValueError: If `axis` is out of the range [-R, R).
  """
  if num is None:
    value = ops.convert_to_tensor(value)
    value_shape = value.get_shape()
    if value_shape.ndims is not None:
      if axis < -value_shape.ndims or axis >= value_shape.ndims:
        raise ValueError("axis = %d not in [%d, %d)" %
                         (axis, -value_shape.ndims, value_shape.ndims))
      num = value_shape.dims[axis].value
  if num is None:
    raise ValueError("Cannot infer num from shape %s" % value_shape)
  return gen_array_ops.unpack(value, num=num, axis=axis, name=name)

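# A minimal sketch, assuming eager execution; note that the unpacked axis
# disappears from each output's shape:
#
#   >>> x = tf.constant([[1, 2, 3], [4, 5, 6]])  # shape (2, 3)
#   >>> [t.numpy() for t in tf.unstack(x)]  # axis=0 -> two tensors of shape (3,)
#   [array([1, 2, 3], dtype=int32), array([4, 5, 6], dtype=int32)]
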

@tf_export("concat")
@dispatch.add_dispatch_support
def concat(values, axis, name="concat"):
  """Concatenates tensors along one dimension.

  Concatenates the list of tensors `values` along dimension `axis`.  If
  `values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
  result has shape

      [D0, D1, ... Raxis, ...Dn]

  where

      Raxis = sum(Daxis(i))

  That is, the data from the input tensors is joined along the `axis`
  dimension.

  The number of dimensions of the input tensors must match, and all dimensions
  except `axis` must be equal.

  For example:

  >>> t1 = [[1, 2, 3], [4, 5, 6]]
  >>> t2 = [[7, 8, 9], [10, 11, 12]]
1533  >>> concat([t1, t2], 0)
1534  <tf.Tensor: shape=(4, 3), dtype=int32, numpy=
1535  array([[ 1,  2,  3],
1536         [ 4,  5,  6],
1537         [ 7,  8,  9],
1538         [10, 11, 12]], dtype=int32)>
1539
1540  >>> concat([t1, t2], 1)
1541  <tf.Tensor: shape=(2, 6), dtype=int32, numpy=
1542  array([[ 1,  2,  3,  7,  8,  9],
1543         [ 4,  5,  6, 10, 11, 12]], dtype=int32)>
1544
1545  As in Python, the `axis` could also be negative numbers. Negative `axis`
1546  are interpreted as counting from the end of the rank, i.e.,
1547   `axis + rank(values)`-th dimension.
1548
1549  For example:
1550
1551  >>> t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]
1552  >>> t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]
1553  >>> tf.concat([t1, t2], -1)
1554  <tf.Tensor: shape=(2, 2, 4), dtype=int32, numpy=
1555    array([[[ 1,  2,  7,  4],
1556            [ 2,  3,  8,  4]],
1557           [[ 4,  4,  2, 10],
1558            [ 5,  3, 15, 11]]], dtype=int32)>
1559
1560  Note: If you are concatenating along a new axis consider using stack.
1561  E.g.
1562
1563  ```python
1564  tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
1565  ```
1566
1567  can be rewritten as
1568
1569  ```python
1570  tf.stack(tensors, axis=axis)
1571  ```
1572
1573  Args:
1574    values: A list of `Tensor` objects or a single `Tensor`.
1575    axis: 0-D `int32` `Tensor`.  Dimension along which to concatenate. Must be
1576      in the range `[-rank(values), rank(values))`. As in Python, indexing for
1577      axis is 0-based. Positive axis in the rage of `[0, rank(values))` refers
1578      to `axis`-th dimension. And negative axis refers to `axis +
1579      rank(values)`-th dimension.
1580    name: A name for the operation (optional).
1581
1582  Returns:
1583    A `Tensor` resulting from concatenation of the input tensors.
1584  """
1585  if not isinstance(values, (list, tuple)):
1586    values = [values]
1587  # TODO(mrry): Change to return values?
1588  if len(values) == 1:  # Degenerate case of one tensor.
1589    # Make a throwaway call to convert_to_tensor to make sure
1590    # that axis is of the correct type, and make sure that
1591    # the returned tensor is a scalar.
1592    # TODO(keveman): Implement a standalone type and shape checker.
1593    with ops.name_scope(name) as scope:
1594      ops.convert_to_tensor(
1595          axis, name="concat_dim",
1596          dtype=dtypes.int32).get_shape().assert_has_rank(0)
1597      return identity(values[0], name=name)
1598  return gen_array_ops.concat_v2(values=values, axis=axis, name=name)


@tf_export(v1=["boolean_mask"])
def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
  """Apply boolean mask to tensor.

  Numpy equivalent is `tensor[mask]`.

  ```python
  # 1-D example
  tensor = [0, 1, 2, 3]
  mask = np.array([True, False, True, False])
  boolean_mask(tensor, mask)  # [0, 2]
  ```

  In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
  the first K dimensions of `tensor`'s shape.  We then have:
    `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
  where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
  The `axis` argument can be used with `mask` to indicate the axis to mask
  from. In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must
  match the first `axis + dim(mask)` dimensions of `tensor`'s shape.

  See also: `tf.ragged.boolean_mask`, which can be applied to both dense and
  ragged tensors, and can be used if you need to preserve the masked dimensions
  of `tensor` (rather than flattening them, as `tf.boolean_mask` does).

  Args:
    tensor:  N-D tensor.
    mask:  K-D boolean tensor, K <= N and K must be known statically.
    name:  A name for this operation (optional).
    axis:  A 0-D int Tensor representing the axis in `tensor` to mask from. By
      default, axis is 0 which will mask from the first dimension. Otherwise K +
      axis <= N.

  Returns:
    (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
    to `True` values in `mask`.

  Raises:
    ValueError:  If shapes do not conform.

  Examples:

  ```python
  # 2-D example
  tensor = [[1, 2], [3, 4], [5, 6]]
  mask = np.array([True, False, True])
  boolean_mask(tensor, mask)  # [[1, 2], [5, 6]]
  ```
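
  A small illustrative sketch of masking along a non-zero axis (values are
  arbitrary):

  ```python
  # Mask along axis 1: keep columns 0 and 2.
  tensor = [[1, 2, 3], [4, 5, 6]]
  mask = np.array([True, False, True])
  boolean_mask(tensor, mask, axis=1)  # [[1, 3], [4, 6]]
  ```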
  """

  def _apply_mask_1d(reshaped_tensor, mask, axis=None):
    """Mask tensor along dimension 0 with a 1-D mask."""
    indices = squeeze(where_v2(mask), axis=[1])
    return gather(reshaped_tensor, indices, axis=axis)

  with ops.name_scope(name, values=[tensor, mask]):
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    mask = ops.convert_to_tensor(mask, name="mask")

    shape_mask = mask.get_shape()
    ndims_mask = shape_mask.ndims
    shape_tensor = tensor.get_shape()
    if ndims_mask == 0:
      raise ValueError("mask cannot be scalar.")
    if ndims_mask is None:
      raise ValueError(
          "Number of mask dimensions must be specified, even if some dimensions"
          " are None.  E.g. shape=[None] is ok, but shape=None is not.")
    axis = 0 if axis is None else axis
    shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)

    leading_size = gen_math_ops.prod(shape(tensor)[axis:axis + ndims_mask], [0])
    tensor = reshape(
        tensor,
        concat([
            shape(tensor)[:axis], [leading_size],
            shape(tensor)[axis + ndims_mask:]
        ], 0))
    first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()
    tensor.set_shape(
        tensor_shape.as_shape(shape_tensor[:axis]).concatenate(
            [first_dim]).concatenate(shape_tensor[axis + ndims_mask:]))

    mask = reshape(mask, [-1])
    return _apply_mask_1d(tensor, mask, axis)


@tf_export("boolean_mask", v1=[])
@dispatch.add_dispatch_support
def boolean_mask_v2(tensor, mask, axis=None, name="boolean_mask"):
  """Apply boolean mask to tensor.

  Numpy equivalent is `tensor[mask]`.

  ```python
  # 1-D example
  tensor = [0, 1, 2, 3]
  mask = np.array([True, False, True, False])
  boolean_mask(tensor, mask)  # [0, 2]
  ```

  In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
  the first K dimensions of `tensor`'s shape.  We then have:
    `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
  where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
  The `axis` argument can be used with `mask` to indicate the axis to mask
  from. In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must
  match the first `axis + dim(mask)` dimensions of `tensor`'s shape.

  See also: `tf.ragged.boolean_mask`, which can be applied to both dense and
  ragged tensors, and can be used if you need to preserve the masked dimensions
  of `tensor` (rather than flattening them, as `tf.boolean_mask` does).

  Args:
    tensor:  N-D tensor.
    mask:  K-D boolean tensor, K <= N and K must be known statically.
    axis:  A 0-D int Tensor representing the axis in `tensor` to mask from. By
      default, axis is 0 which will mask from the first dimension. Otherwise K +
      axis <= N.
    name:  A name for this operation (optional).

  Returns:
    (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
    to `True` values in `mask`.

  Raises:
    ValueError:  If shapes do not conform.

  Examples:

  ```python
  # 2-D example
  tensor = [[1, 2], [3, 4], [5, 6]]
  mask = np.array([True, False, True])
  boolean_mask(tensor, mask)  # [[1, 2], [5, 6]]
  ```
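
  A small illustrative sketch of masking along a non-zero axis (values are
  arbitrary):

  ```python
  # Mask along axis 1: keep columns 0 and 2.
  tensor = [[1, 2, 3], [4, 5, 6]]
  mask = np.array([True, False, True])
  boolean_mask(tensor, mask, axis=1)  # [[1, 3], [4, 6]]
  ```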
  """
  return boolean_mask(tensor, mask, name, axis)


@tf_export("sparse.mask", v1=["sparse.mask", "sparse_mask"])
@deprecation.deprecated_endpoints("sparse_mask")
def sparse_mask(a, mask_indices, name=None):
  """Masks elements of `IndexedSlices`.

  Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
  contains a subset of the slices of `a`. Only the slices at indices not
  specified in `mask_indices` are returned.

  This is useful when you need to extract a subset of slices in an
  `IndexedSlices` object.

  For example:

  ```python
  # `a` contains slices at indices [12, 26, 37, 45] from a large tensor
  # with shape [1000, 10]
  a.indices  # [12, 26, 37, 45]
  tf.shape(a.values)  # [4, 10]

  # `b` will be the subset of `a` slices at its second and third indices, so
  # we want to mask its first and last indices (which are at absolute
  # indices 12, 45)
  b = tf.sparse.mask(a, [12, 45])

  b.indices  # [26, 37]
  tf.shape(b.values)  # [2, 10]
  ```

  Args:
    a: An `IndexedSlices` instance.
    mask_indices: Indices of elements to mask.
    name: A name for the operation (optional).

  Returns:
    The masked `IndexedSlices` instance.
  """
  with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
    indices = a.indices
    out_indices, to_gather = gen_array_ops.list_diff(indices, mask_indices)
    out_values = gather(a.values, to_gather, name=name)
    return ops.IndexedSlices(out_values, out_indices, a.dense_shape)


@tf_export("unique")
def unique(x, out_idx=dtypes.int32, name=None):
  """Finds unique elements in a 1-D tensor.

  This operation returns a tensor `y` containing all of the unique elements
  of `x` sorted in the same order that they occur in `x`. This operation
  also returns a tensor `idx` the same size as `x` that contains the index
  of each value of `x` in the unique output `y`. In other words:

    y[idx[i]] = x[i] for i in [0, 1,...,len(x) - 1]

  Example usage:

  >>> x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
  >>> y, idx = unique(x)
  >>> y
  <tf.Tensor: id=5, shape=(5,), dtype=int32,
  numpy=array([1, 2, 4, 7, 8], dtype=int32)>
  >>> idx
  <tf.Tensor: id=6, shape=(9,), dtype=int32,
  numpy=array([0, 0, 1, 2, 2, 2, 3, 4, 4], dtype=int32)>

  Args:
    x: A Tensor. 1-D.
    out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to
      tf.int32.
    name: A name for the operation (optional).

  Returns:
    A tuple of Tensor objects (y, idx).
      y: A Tensor. Has the same type as x.
      idx: A Tensor of type out_idx.

  """
  # TODO(yongtang): switch to v2 once the API deprecation
  # period (3 weeks) passes.
  # TODO(yongtang): The documentation should also
  # be updated when switching to v2.
  return gen_array_ops.unique(x, out_idx, name)


unique.__doc__ = gen_array_ops.unique.__doc__


@tf_export("unique_with_counts")
def unique_with_counts(x, out_idx=dtypes.int32, name=None):
  """Finds unique elements in a 1-D tensor.

  This operation returns a tensor `y` containing all of the unique elements
  of `x` sorted in the same order that they occur in `x`. This operation
  also returns a tensor `idx` the same size as `x` that contains the index
  of each value of `x` in the unique output `y`. Finally, it returns a
  third tensor `count` that contains the count of each element of `y`
  in `x`. In other words:

    y[idx[i]] = x[i] for i in [0, 1,...,len(x) - 1]

  Example usage:

  >>> x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
  >>> y, idx, count = unique_with_counts(x)
  >>> y
  <tf.Tensor: id=8, shape=(5,), dtype=int32,
  numpy=array([1, 2, 4, 7, 8], dtype=int32)>
  >>> idx
  <tf.Tensor: id=9, shape=(9,), dtype=int32,
  numpy=array([0, 0, 1, 2, 2, 2, 3, 4, 4], dtype=int32)>
  >>> count
  <tf.Tensor: id=10, shape=(5,), dtype=int32,
  numpy=array([2, 1, 3, 1, 2], dtype=int32)>

  Args:
    x: A Tensor. 1-D.
    out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to
      tf.int32.
    name: A name for the operation (optional).

  Returns:
    A tuple of Tensor objects (y, idx, count).
      y: A Tensor. Has the same type as x.
      idx: A Tensor of type out_idx.
      count: A Tensor of type out_idx.

  """
  # TODO(yongtang): switch to v2 once the API deprecation
  # period (3 weeks) passes.
  # TODO(yongtang): The documentation should also
  # be updated when switching to v2.
  return gen_array_ops.unique_with_counts(x, out_idx, name)


unique_with_counts.__doc__ = gen_array_ops.unique_with_counts.__doc__


@tf_export("split")
def split(value, num_or_size_splits, axis=0, num=None, name="split"):
  """Splits a tensor `value` into a list of subtensors.

  If `num_or_size_splits` is an integer, then `value` is split along the
  dimension `axis` into `num_or_size_splits` smaller tensors. This requires
  that `value.shape[axis]` is divisible by `num_or_size_splits`.

  If `num_or_size_splits` is a 1-D Tensor (or list), we call it `size_splits`
  and `value` is split into `len(size_splits)` elements. The shape of the `i`-th
  element has the same size as the `value` except along dimension `axis` where
  the size is `size_splits[i]`.

  For example:

  >>> x = tf.Variable(tf.random.uniform([5, 30], -1, 1))

  Split `x` into 3 tensors along dimension 1:

  >>> s0, s1, s2 = tf.split(x, num_or_size_splits=3, axis=1)
  >>> tf.shape(s0).numpy()
  array([ 5, 10], dtype=int32)

  Split `x` into 3 tensors with sizes [4, 15, 11] along dimension 1:

  >>> split0, split1, split2 = tf.split(x, [4, 15, 11], 1)
  >>> tf.shape(split0).numpy()
  array([5, 4], dtype=int32)
  >>> tf.shape(split1).numpy()
  array([ 5, 15], dtype=int32)
  >>> tf.shape(split2).numpy()
  array([ 5, 11], dtype=int32)

  Args:
    value: The `Tensor` to split.
    num_or_size_splits: Either an integer indicating the number of splits along
      `axis` or a 1-D integer `Tensor` or Python list containing the sizes of
      each output tensor along `axis`. If a scalar, then it must evenly divide
      `value.shape[axis]`; otherwise the sum of sizes along the split axis
      must match that of the `value`.
    axis: An integer or scalar `int32` `Tensor`. The dimension along which to
      split. Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
    num: Optional, used to specify the number of outputs when it cannot be
      inferred from the shape of `size_splits`.
    name: A name for the operation (optional).

  Returns:
    If `num_or_size_splits` is a scalar, returns a list of `num_or_size_splits`
    `Tensor` objects; if `num_or_size_splits` is a 1-D Tensor, returns
    `num_or_size_splits.get_shape()[0]` `Tensor` objects resulting from
    splitting `value`.

  Raises:
    ValueError: If `num` is unspecified and cannot be inferred.
  """
  size_splits = ops.convert_to_tensor(num_or_size_splits)
  if isinstance(num_or_size_splits,
                (numbers.Integral, tensor_shape.Dimension)):
    return gen_array_ops.split(
        axis=axis, num_split=num_or_size_splits, value=value, name=name)

  if size_splits._rank() == 0:
    raise ValueError(
        "Rank-0 tensors are not supported as the num_or_size_splits argument "
        "to split. Argument provided: %s" % (num_or_size_splits,))

  if num is None:
    size_splits_shape = size_splits._shape_tuple()
    if size_splits_shape:
      num = size_splits_shape[0]
    if num is None:
      raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)

  return gen_array_ops.split_v(
      value=value, size_splits=size_splits, axis=axis, num_split=num, name=name)


@tf_export("transpose", v1=[])
def transpose_v2(a, perm=None, conjugate=False, name="transpose"):
  """Transposes `a`, where `a` is a Tensor.

  Permutes the dimensions according to the value of `perm`.

  The returned tensor's dimension `i` will correspond to the input dimension
  `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is the rank
  of the input tensor. Hence by default, this operation performs a regular
  matrix transpose on 2-D input Tensors.

  If conjugate is `True` and `a.dtype` is either `complex64` or `complex128`
  then the values of `a` are conjugated and transposed.

  @compatibility(numpy)
  In `numpy` transposes are memory-efficient constant time operations as they
  simply return a new view of the same data with adjusted `strides`.

  TensorFlow does not support strides, so `transpose` returns a new tensor with
  the items permuted.
  @end_compatibility

  For example:

  >>> x = tf.constant([[1, 2, 3], [4, 5, 6]])
  >>> tf.transpose(x)
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
  array([[1, 4],
         [2, 5],
         [3, 6]], dtype=int32)>

  Equivalently, you could call `tf.transpose(x, perm=[1, 0])`.

  If `x` is complex, setting conjugate=True gives the conjugate transpose:

  >>> x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
  ...                  [4 + 4j, 5 + 5j, 6 + 6j]])
  >>> tf.transpose(x, conjugate=True)
  <tf.Tensor: shape=(3, 2), dtype=complex128, numpy=
  array([[1.-1.j, 4.-4.j],
         [2.-2.j, 5.-5.j],
         [3.-3.j, 6.-6.j]])>

  `perm` is more useful for n-dimensional tensors where n > 2:

  >>> x = tf.constant([[[ 1,  2,  3],
  ...                   [ 4,  5,  6]],
  ...                  [[ 7,  8,  9],
  ...                   [10, 11, 12]]])

  As above, simply calling `tf.transpose` will default to `perm=[2, 1, 0]`.

  To take the transpose of the matrices in dimension-0 (such as when you are
  transposing matrices where 0 is the batch dimension), you would set
  `perm=[0, 2, 1]`.

  >>> tf.transpose(x, perm=[0, 2, 1])
  <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
  array([[[ 1,  4],
          [ 2,  5],
          [ 3,  6]],
         [[ 7, 10],
          [ 8, 11],
          [ 9, 12]]], dtype=int32)>

  Note: this common batched-matrix transpose has the shorthand
  `tf.linalg.matrix_transpose`.

  Args:
    a: A `Tensor`.
    perm: A permutation of the dimensions of `a`.  This should be a vector.
    conjugate: Optional bool. Setting it to `True` is mathematically equivalent
      to tf.math.conj(tf.transpose(input)).
    name: A name for the operation (optional).

  Returns:
    A transposed `Tensor`.
  """
  return transpose(a=a, perm=perm, name=name, conjugate=conjugate)


@tf_export(v1=["transpose"])
def transpose(a, perm=None, name="transpose", conjugate=False):
  """Transposes `a`.

  Permutes the dimensions according to `perm`.

  The returned tensor's dimension i will correspond to the input dimension
  `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
  the rank of the input tensor. Hence by default, this operation performs a
  regular matrix transpose on 2-D input Tensors. If conjugate is True and
  `a.dtype` is either `complex64` or `complex128` then the values of `a`
  are conjugated and transposed.

  @compatibility(numpy)
  In `numpy` transposes are memory-efficient constant time operations as they
  simply return a new view of the same data with adjusted `strides`.

  TensorFlow does not support strides, so `transpose` returns a new tensor with
  the items permuted.
  @end_compatibility

  For example:

  ```python
  x = tf.constant([[1, 2, 3], [4, 5, 6]])
  tf.transpose(x)  # [[1, 4]
                   #  [2, 5]
                   #  [3, 6]]

  # Equivalently
  tf.transpose(x, perm=[1, 0])  # [[1, 4]
                                #  [2, 5]
                                #  [3, 6]]

  # If x is complex, setting conjugate=True gives the conjugate transpose
  x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
                   [4 + 4j, 5 + 5j, 6 + 6j]])
  tf.transpose(x, conjugate=True)  # [[1 - 1j, 4 - 4j],
                                   #  [2 - 2j, 5 - 5j],
                                   #  [3 - 3j, 6 - 6j]]

  # 'perm' is more useful for n-dimensional tensors, for n > 2
  x = tf.constant([[[ 1,  2,  3],
                    [ 4,  5,  6]],
                   [[ 7,  8,  9],
                    [10, 11, 12]]])

  # Take the transpose of the matrices in dimension-0
  # (this common operation has a shorthand `linalg.matrix_transpose`)
  tf.transpose(x, perm=[0, 2, 1])  # [[[1,  4],
                                   #   [2,  5],
                                   #   [3,  6]],
                                   #  [[7, 10],
                                   #   [8, 11],
                                   #   [9, 12]]]
  ```

  Args:
    a: A `Tensor`.
    perm: A permutation of the dimensions of `a`.
    name: A name for the operation (optional).
    conjugate: Optional bool. Setting it to `True` is mathematically equivalent
      to tf.math.conj(tf.transpose(input)).

  Returns:
    A transposed `Tensor`.
  """
  with ops.name_scope(name, "transpose", [a]) as name:
    if not tensor_util.is_tensor(a):
      a = ops.convert_to_tensor(a, name="a")

    if conjugate and a.dtype.is_complex:
      transpose_fn = gen_array_ops.conjugate_transpose
    else:
      transpose_fn = gen_array_ops.transpose

    if perm is not None:
      return transpose_fn(a, perm, name=name)

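    # `perm` was not provided: reverse all dimensions. Build the permutation
    # dynamically when the rank is unknown, and statically (as a numpy array,
    # preserving shape information) when it is known.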
    rank = a.shape.rank
    if rank is None:
      perm = gen_math_ops._range(gen_array_ops.rank(a) - 1, -1, -1)
    else:
      perm = np.arange(rank - 1, -1, -1, dtype=np.int32)
    return transpose_fn(a, perm, name=name)


# pylint: disable=invalid-name
@tf_export(
    "linalg.matrix_transpose",
    v1=["linalg.transpose", "linalg.matrix_transpose", "matrix_transpose"])
@deprecation.deprecated_endpoints("matrix_transpose", "linalg.transpose")
def matrix_transpose(a, name="matrix_transpose", conjugate=False):
  """Transposes last two dimensions of tensor `a`.

  For example:

  ```python
  x = tf.constant([[1, 2, 3], [4, 5, 6]])
  tf.linalg.matrix_transpose(x)  # [[1, 4],
                                 #  [2, 5],
                                 #  [3, 6]]

  x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
                   [4 + 4j, 5 + 5j, 6 + 6j]])
  tf.linalg.matrix_transpose(x, conjugate=True)  # [[1 - 1j, 4 - 4j],
                                                 #  [2 - 2j, 5 - 5j],
                                                 #  [3 - 3j, 6 - 6j]]

  # Matrix with two batch dimensions.
  # x.shape is [1, 2, 3, 4]
  # tf.linalg.matrix_transpose(x) is shape [1, 2, 4, 3]
  ```

  Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
  This is done with minimal cost, and is preferable to using this function. E.g.

  ```python
  # Good!  Transpose is taken at minimal additional cost.
  tf.matmul(matrix, b, transpose_b=True)

  # Inefficient!
  tf.matmul(matrix, tf.linalg.matrix_transpose(b))
  ```

  @compatibility(numpy)
  In `numpy` transposes are memory-efficient constant time operations as they
  simply return a new view of the same data with adjusted `strides`.

  TensorFlow does not support strides, so `linalg.matrix_transpose` returns a
  new tensor with the items permuted.
  @end_compatibility

  Args:
    a: A `Tensor` with `rank >= 2`.
    name: A name for the operation (optional).
    conjugate: Optional bool. Setting it to `True` is mathematically equivalent
      to tf.math.conj(tf.linalg.matrix_transpose(input)).

  Returns:
    A transposed batch matrix `Tensor`.

  Raises:
    ValueError:  If `a` is determined statically to have `rank < 2`.
  """
  with ops.name_scope(name, values=[a]):
    a = ops.convert_to_tensor(a, name="a")

    # If we know the number of dimensions (statically), we can do two things:
    # 1. Check that `a` is a (batch) matrix.
    # 2. Use a python list for perm.  This preserves static shape information
    #    and avoids extra computations.
    a_shape = a.get_shape()
    ndims = a_shape.ndims
    if ndims is not None:
      if ndims < 2:
        raise ValueError(
            "Argument 'a' should be a (batch) matrix, with rank >= 2.  Found: "
            "%s" % a_shape)
      perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
    else:
      a_rank = rank(a)
      perm = concat(
          (gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]), 0)

    return transpose(a, perm=perm, conjugate=conjugate)


@tf_export("linalg.diag", v1=["linalg.diag", "matrix_diag"])
@deprecation.deprecated_endpoints("matrix_diag")
def matrix_diag(diagonal,
                name="diag",
                k=0,
                num_rows=-1,
                num_cols=-1,
                padding_value=0,
                align="RIGHT_LEFT"):
  """Returns a batched diagonal tensor with given batched diagonal values.

  Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
  diagonals of a matrix, with everything else padded with `padding_value`.
  `num_rows` and `num_cols` specify the dimension of the innermost matrix of
  the output. If neither is specified, the op assumes the innermost matrix is
  square and infers its size from `k` and the innermost dimension of
  `diagonal`. If only one of them is specified, the op assumes the unspecified
  value is the smallest possible based on other criteria.

  Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor
  has rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only
  one diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has
  rank `r` with shape `[I, J, ..., L, num_rows, num_cols]`.

  The second innermost dimension of `diagonal` has double meaning. When `k` is
  scalar or `k[0] == k[1]`, `M` is part of the batch size [I, J, ..., M], and
  the output tensor is:

  ```
  output[i, j, ..., l, m, n]
    = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
      padding_value                             ; otherwise
  ```

  Otherwise, `M` is treated as the number of diagonals for the matrix in the
  same batch (`M = k[1]-k[0]+1`), and the output tensor is:

  ```
  output[i, j, ..., l, m, n]
    = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
      padding_value                                     ; otherwise
  ```
  where `d = n - m`, `diag_index = k[1] - d`, and
  `index_in_diag = n - max(d, 0) + offset`.

  `offset` is zero except when the alignment of the diagonal is to the right.
  ```
  offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
                                             and `d >= 0`) or
                                           (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
                                             and `d <= 0`)
           0                          ; otherwise
  ```
  where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.

  For example:

  ```
  # The main diagonal.
  diagonal = np.array([[1, 2, 3, 4],            # Input shape: (2, 4)
                       [5, 6, 7, 8]])
  tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0],  # Output shape: (2, 4, 4)
                                 [0, 2, 0, 0],
                                 [0, 0, 3, 0],
                                 [0, 0, 0, 4]],
                                [[5, 0, 0, 0],
                                 [0, 6, 0, 0],
                                 [0, 0, 7, 0],
                                 [0, 0, 0, 8]]]

  # A superdiagonal (per batch).
  diagonal = np.array([[1, 2, 3],  # Input shape: (2, 3)
                       [4, 5, 6]])
  tf.matrix_diag(diagonal, k = 1)
    ==> [[[0, 1, 0, 0],  # Output shape: (2, 4, 4)
          [0, 0, 2, 0],
          [0, 0, 0, 3],
          [0, 0, 0, 0]],
         [[0, 4, 0, 0],
          [0, 0, 5, 0],
          [0, 0, 0, 6],
          [0, 0, 0, 0]]]

  # A tridiagonal band (per batch).
  diagonals = np.array([[[8, 9, 0],  # Input shape: (2, 2, 3)
                         [1, 2, 3],
                         [0, 4, 5]],
                        [[2, 3, 0],
                         [6, 7, 9],
                         [0, 9, 1]]])
  tf.matrix_diag(diagonals, k = (-1, 1))
    ==> [[[1, 8, 0],  # Output shape: (2, 3, 3)
          [4, 2, 9],
          [0, 5, 3]],
         [[6, 2, 0],
          [9, 7, 3],
          [0, 1, 9]]]

  # RIGHT_LEFT alignment.
  diagonals = np.array([[[0, 8, 9],  # Input shape: (2, 2, 3)
                         [1, 2, 3],
                         [4, 5, 0]],
                        [[0, 2, 3],
                         [6, 7, 9],
                         [9, 1, 0]]])
  tf.matrix_diag(diagonals, k = (-1, 1), align="RIGHT_LEFT")
    ==> [[[1, 8, 0],  # Output shape: (2, 3, 3)
          [4, 2, 9],
          [0, 5, 3]],
         [[6, 2, 0],
          [9, 7, 3],
          [0, 1, 9]]]

  # Rectangular matrix.
  diagonal = np.array([1, 2])  # Input shape: (2)
  tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
    ==> [[0, 0, 0, 0],  # Output shape: (3, 4)
         [1, 0, 0, 0],
         [0, 2, 0, 0]]

  # Rectangular matrix with inferred num_cols and padding_value = 9.
  tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
    ==> [[9, 9],  # Output shape: (3, 2)
         [1, 9],
         [9, 2]]
  ```

  Args:
    diagonal: A `Tensor` with `rank k >= 1`.
    name: A name for the operation (optional).
    k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
      main diagonal, and negative value means subdiagonals. `k` can be a single
      integer (for a single diagonal) or a pair of integers specifying the low
      and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
    num_rows: The number of rows of the output matrix. If it is not provided,
      the op assumes the output matrix is a square matrix and infers the matrix
      size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.
    num_cols: The number of columns of the output matrix. If it is not provided,
      the op assumes the output matrix is a square matrix and infers the matrix
      size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.
    padding_value: The value to fill the area outside the specified diagonal
      band with. Default is 0.
    align: Some diagonals are shorter than `max_diag_len` and need to be padded.
      `align` is a string specifying how superdiagonals and subdiagonals should
      be aligned, respectively. There are four possible alignments: "RIGHT_LEFT"
      (default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT"
      aligns superdiagonals to the right (left-pads the row) and subdiagonals to
      the left (right-pads the row). It is the packing format LAPACK uses.
      cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment.

  Returns:
    A Tensor. Has the same type as `diagonal`.
  """
  # Special case to sidestep the tf.constant conversion error:
  # TypeError: Expected bool, got 0 of type 'int' instead.
  if hasattr(diagonal, "dtype") and diagonal.dtype == "bool":
    padding_value = bool(padding_value)

  return gen_array_ops.matrix_diag_v3(
      diagonal=diagonal,
      k=k,
      num_rows=num_rows,
      num_cols=num_cols,
      padding_value=padding_value,
      align=align,
      name=name)


@tf_export("linalg.diag_part", v1=["linalg.diag_part", "matrix_diag_part"])
@deprecation.deprecated_endpoints("matrix_diag_part")
@dispatch.add_dispatch_support
def matrix_diag_part(
    input,  # pylint:disable=redefined-builtin
    name="diag_part",
    k=0,
    padding_value=0,
    align="RIGHT_LEFT"):
  """Returns the batched diagonal part of a batched tensor.

  Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
  `input`.

  Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
  Let `max_diag_len` be the maximum length among all diagonals to be extracted,
  `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`.
  Let `num_diags` be the number of diagonals to extract,
  `num_diags = k[1] - k[0] + 1`.

  If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
  `[I, J, ..., L, max_diag_len]` and values:

  ```
  diagonal[i, j, ..., l, n]
    = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
      padding_value                 ; otherwise.
  ```
  where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.

  Otherwise, the output tensor has rank `r` with dimensions
  `[I, J, ..., L, num_diags, max_diag_len]` with values:

  ```
  diagonal[i, j, ..., l, m, n]
    = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
      padding_value                 ; otherwise.
  ```
  where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.

  `offset` is zero except when the alignment of the diagonal is to the right.
  ```
  offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
                                             and `d >= 0`) or
                                           (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
                                             and `d <= 0`)
           0                          ; otherwise
  ```
  where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.

  The input must be at least a matrix.

  For example:

  ```
  input = np.array([[[1, 2, 3, 4],  # Input shape: (2, 3, 4)
                     [5, 6, 7, 8],
                     [9, 8, 7, 6]],
                    [[5, 4, 3, 2],
                     [1, 2, 3, 4],
                     [5, 6, 7, 8]]])

  # A main diagonal from each batch.
  tf.linalg.diag_part(input) ==> [[1, 6, 7],  # Output shape: (2, 3)
                                  [5, 2, 7]]

  # A superdiagonal from each batch.
  tf.linalg.diag_part(input, k = 1)
    ==> [[2, 7, 6],  # Output shape: (2, 3)
         [4, 3, 8]]

  # A band from each batch.
  tf.linalg.diag_part(input, k = (-1, 2))
    ==> [[[3, 8, 0],  # Output shape: (2, 4, 3)
          [2, 7, 6],
          [1, 6, 7],
          [0, 5, 8]],
         [[3, 4, 0],
          [4, 3, 8],
          [5, 2, 7],
          [0, 1, 6]]]

  # RIGHT_LEFT alignment.
  tf.linalg.diag_part(input, k = (-1, 2), align="RIGHT_LEFT")
    ==> [[[0, 3, 8],  # Output shape: (2, 4, 3)
          [2, 7, 6],
          [1, 6, 7],
          [5, 8, 0]],
         [[0, 3, 4],
          [4, 3, 8],
          [5, 2, 7],
          [1, 6, 0]]]

  # max_diag_len can be shorter than the main diagonal.
  tf.linalg.diag_part(input, k = (-2, -1))
    ==> [[[5, 8],
          [0, 9]],
         [[1, 6],
          [0, 5]]]

  # padding_value = 9
  tf.linalg.diag_part(input, k = (1, 3), padding_value = 9)
    ==> [[[4, 9, 9],  # Output shape: (2, 3, 3)
          [3, 8, 9],
          [2, 7, 6]],
         [[2, 9, 9],
          [3, 4, 9],
          [4, 3, 8]]]
  ```

  Args:
    input: A `Tensor` with `rank k >= 2`.
    name: A name for the operation (optional).
    k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
      main diagonal, and negative value means subdiagonals. `k` can be a single
      integer (for a single diagonal) or a pair of integers specifying the low
      and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
    padding_value: The value to fill the area outside the specified diagonal
      band with. Default is 0.
    align: Some diagonals are shorter than `max_diag_len` and need to be padded.
      `align` is a string specifying how superdiagonals and subdiagonals should
      be aligned, respectively. There are four possible alignments: "RIGHT_LEFT"
      (default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT"
      aligns superdiagonals to the right (left-pads the row) and subdiagonals to
      the left (right-pads the row). It is the packing format LAPACK uses.
      cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment.

  Returns:
    A Tensor containing diagonals of `input`. Has the same type as `input`.
  """
  # Special case to sidestep the tf.constant conversion error:
  # TypeError: Expected bool, got 0 of type 'int' instead.
  if hasattr(input, "dtype") and input.dtype == "bool":
    padding_value = bool(padding_value)

  return gen_array_ops.matrix_diag_part_v3(
      input=input, k=k, padding_value=padding_value, align=align, name=name)


@tf_export("linalg.set_diag", v1=["linalg.set_diag", "matrix_set_diag"])
@deprecation.deprecated_endpoints("matrix_set_diag")
def matrix_set_diag(
    input,  # pylint:disable=redefined-builtin
    diagonal,
    name="set_diag",
    k=0,
    align="RIGHT_LEFT"):
  """Returns a batched matrix tensor with new batched diagonal values.

  Given `input` and `diagonal`, this operation returns a tensor with the
  same shape and values as `input`, except for the specified diagonals of the
  innermost matrices. These will be overwritten by the values in `diagonal`.

  `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
  `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
  Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
  `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
  `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
  `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`.

  The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
  If `k` is scalar or `k[0] == k[1]`:

  ```
  output[i, j, ..., l, m, n]
    = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
      input[i, j, ..., l, m, n]              ; otherwise
  ```

  Otherwise,

  ```
  output[i, j, ..., l, m, n]
    = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
      input[i, j, ..., l, m, n]                         ; otherwise
  ```
  where `d = n - m`, `diag_index = k[1] - d`, and
  `index_in_diag = n - max(d, 0) + offset`.

  `offset` is zero except when the alignment of the diagonal is to the right.
  ```
  offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
                                             and `d >= 0`) or
                                           (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
                                             and `d <= 0`)
           0                          ; otherwise
  ```
  where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.

  For example:

  ```
  # The main diagonal.
  input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
                     [7, 7, 7, 7],
                     [7, 7, 7, 7]],
                    [[7, 7, 7, 7],
                     [7, 7, 7, 7],
                     [7, 7, 7, 7]]])
  diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
                       [4, 5, 6]])
  tf.matrix_set_diag(input, diagonal)
    ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
          [7, 2, 7, 7],
          [7, 7, 3, 7]],
         [[4, 7, 7, 7],
          [7, 5, 7, 7],
          [7, 7, 6, 7]]]

  # A superdiagonal (per batch).
  tf.matrix_set_diag(input, diagonal, k = 1)
    ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
          [7, 7, 2, 7],
          [7, 7, 7, 3]],
         [[7, 4, 7, 7],
          [7, 7, 5, 7],
          [7, 7, 7, 6]]]

  # A band of diagonals.
  diagonals = np.array([[[9, 1, 0],  # Diagonal shape: (2, 4, 3)
                         [6, 5, 8],
                         [1, 2, 3],
                         [0, 4, 5]],
                        [[1, 2, 0],
                         [5, 6, 4],
                         [6, 1, 2],
                         [0, 3, 4]]])
  tf.matrix_set_diag(input, diagonals, k = (-1, 2))
    ==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
          [4, 2, 5, 1],
          [7, 5, 3, 8]],
         [[6, 5, 1, 7],
          [3, 1, 6, 2],
          [7, 4, 2, 4]]]

  # RIGHT_LEFT alignment.
  diagonals = np.array([[[0, 9, 1],  # Diagonal shape: (2, 4, 3)
                         [6, 5, 8],
                         [1, 2, 3],
                         [4, 5, 0]],
                        [[0, 1, 2],
                         [5, 6, 4],
                         [6, 1, 2],
                         [3, 4, 0]]])
  tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="RIGHT_LEFT")
    ==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
          [4, 2, 5, 1],
          [7, 5, 3, 8]],
         [[6, 5, 1, 7],
          [3, 1, 6, 2],
          [7, 4, 2, 4]]]
  ```

  Args:
    input: A `Tensor` with rank `k + 1`, where `k >= 1`.
    diagonal:  A `Tensor` with rank `k`, when `d_lower == d_upper`, or `k + 1`,
      otherwise. `k >= 1`.
    name: A name for the operation (optional).
    k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
      main diagonal, and negative value means subdiagonals. `k` can be a single
      integer (for a single diagonal) or a pair of integers specifying the low
      and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
    align: Some diagonals are shorter than `max_diag_len` and need to be padded.
      `align` is a string specifying how superdiagonals and subdiagonals should
      be aligned, respectively. There are four possible alignments: "RIGHT_LEFT"
      (default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT"
      aligns superdiagonals to the right (left-pads the row) and subdiagonals to
      the left (right-pads the row). It is the packing format LAPACK uses.
      cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment.

  Returns:
    A `Tensor` with the same type and shape as `input`.
  """
  return gen_array_ops.matrix_set_diag_v3(
      input=input, diagonal=diagonal, k=k, align=align, name=name)


# pylint: enable=invalid-name


def _constant_if_small(value, shape, dtype, name):
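  """Returns a constant with the given value if `shape` is statically known
  and small (fewer than 1000 elements), and None otherwise (e.g. when `shape`
  is a Tensor or contains Tensor elements)."""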
  try:
    if np.prod(shape) < 1000:
      return constant(value, shape=shape, dtype=dtype, name=name)
  except TypeError:
    # Happens when shape is a Tensor, list with Tensor elements, etc.
    pass
  return None


@tf_export("zeros")
def zeros(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to zero.

  This operation returns a tensor of type `dtype` with shape `shape` and
  all elements set to zero.

  >>> tf.zeros([3, 4], tf.int32)
  <tf.Tensor: shape=(3, 4), dtype=int32, numpy=
  array([[0, 0, 0, 0],
         [0, 0, 0, 0],
         [0, 0, 0, 0]], dtype=int32)>

  Args:
    shape: A `list` of integers, a `tuple` of integers, or
      a 1-D `Tensor` of type `int32`.
    dtype: The DType of an element in the resulting `Tensor`.
    name: Optional string. A name for the operation.

  Returns:
    A `Tensor` with all elements set to zero.
  """
  dtype = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "zeros", [shape]) as name:
    if dtype == dtypes.bool:
      zero = False
    elif dtype == dtypes.string:
      zero = ""
    else:
      zero = 0

    if not isinstance(shape, ops.Tensor):
      try:
        if not context.executing_eagerly():
          # Create a constant if it won't be very big. Otherwise create a fill
          # op to prevent serialized GraphDefs from becoming too large.
          output = _constant_if_small(zero, shape, dtype, name)
          if output is not None:
            return output

        # Go through tensor shapes to get int64-if-needed semantics
        shape = constant_op._tensor_shape_tensor_conversion_function(
            tensor_shape.TensorShape(shape))
      except (TypeError, ValueError):
        # Happens when shape is a list with tensor elements
        shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
    if not shape._shape_tuple():
      shape = reshape(shape, [-1])  # Ensure it's a vector
    output = fill(shape, constant(zero, dtype=dtype), name=name)
  assert output.dtype.base_dtype == dtype
  return output


@tf_export(v1=["zeros_like"])
@dispatch.add_dispatch_support
def zeros_like(tensor, dtype=None, name=None, optimize=True):
  """Creates a tensor with all elements set to zero.

  Given a single tensor (`tensor`), this operation returns a tensor of the
  same type and shape as `tensor` with all elements set to zero. Optionally,
  you can use `dtype` to specify a new type for the returned tensor.

  Examples:

    >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
    >>> tf.zeros_like(tensor)
    <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
    array([[0, 0, 0],
           [0, 0, 0]], dtype=int32)>

    >>> tf.zeros_like(tensor, dtype=tf.float32)
    <tf.Tensor: shape=(2, 3), dtype=float32, numpy=
    array([[0., 0., 0.],
           [0., 0., 0.]], dtype=float32)>

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
      `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
      `complex64`, `complex128`, `bool` or `string`. (optional)
    name: A name for the operation (optional).
    optimize: if `True`, attempt to statically determine the shape of `tensor`
      and encode it as a constant. (optional, defaults to `True`)

  Returns:
    A `Tensor` with all elements set to zero.
  """
  return zeros_like_impl(tensor, dtype, name, optimize)


@tf_export("zeros_like", v1=[])
@dispatch.add_dispatch_support
def zeros_like_v2(
    input,  # pylint: disable=redefined-builtin
    dtype=None,
    name=None):
  """Creates a tensor with all elements set to zero.

  Given a single tensor or array-like object (`input`), this operation returns
  a tensor of the same type and shape as `input` with all elements set to zero.
  Optionally, you can use `dtype` to specify a new type for the returned tensor.

  Examples:

    >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
    >>> tf.zeros_like(tensor)
    <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
    array([[0, 0, 0],
           [0, 0, 0]], dtype=int32)>

    >>> tf.zeros_like(tensor, dtype=tf.float32)
    <tf.Tensor: shape=(2, 3), dtype=float32, numpy=
    array([[0., 0., 0.],
           [0., 0., 0.]], dtype=float32)>

    >>> tf.zeros_like([[1, 2, 3], [4, 5, 6]])
    <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
    array([[0, 0, 0],
           [0, 0, 0]], dtype=int32)>

  Args:
    input: A `Tensor` or array-like object.
    dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
      `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
      `complex64`, `complex128`, `bool` or `string` (optional).
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to zero.
  """
  return zeros_like_impl(input, dtype, name, optimize=True)


def zeros_like_impl(tensor, dtype, name, optimize=True):
  """Internal implementation for the v1/v2 zeros_like API calls."""
  with ops.name_scope(name, "zeros_like", [tensor]) as name:
    if not tensor_util.is_tensor(tensor):
      tensor = ops.convert_to_tensor(tensor, name="tensor")
    tensor_shape = tensor.shape
    tensor_dtype = tensor.dtype

    if context.executing_eagerly():
      if dtype is not None and dtype != tensor_dtype:
        return zeros(
            shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
      return gen_array_ops.zeros_like(tensor, name=name)

    # For now, variant types must be created via zeros_like, as we need to
    # pass the input variant object to the proper zeros callback.

    if (optimize and tensor_shape.is_fully_defined() and
        tensor_dtype != dtypes.variant):
      # We can produce a zeros tensor independent of the value of 'tensor',
      # since the shape is known statically.
      return zeros(tensor_shape, dtype=dtype or tensor_dtype, name=name)

    if dtype is not None and dtype != tensor_dtype and dtype != dtypes.variant:
      return zeros(
          shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
    else:
      return gen_array_ops.zeros_like(tensor, name=name)


@tf_export(v1=["ones_like"])
@dispatch.add_dispatch_support
def ones_like(tensor, dtype=None, name=None, optimize=True):
  """Creates a tensor with all elements set to 1.

  Given a single tensor (`tensor`), this operation returns a tensor of the same
  type and shape as `tensor` with all elements set to 1. Optionally, you can
  specify a new type (`dtype`) for the returned tensor.

  For example:

  ```python
  tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
  tf.ones_like(tensor)  # [[1, 1, 1], [1, 1, 1]]
  ```

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
      `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`,
      `complex128` or `bool`.
    name: A name for the operation (optional).
    optimize: if `True`, attempt to statically determine the shape of `tensor`
      and encode it as a constant.

  Returns:
    A `Tensor` with all elements set to 1.
  """
  return ones_like_impl(tensor, dtype, name, optimize)


@tf_export("ones_like", v1=[])
@dispatch.add_dispatch_support
def ones_like_v2(
    input,  # pylint: disable=redefined-builtin
    dtype=None,
    name=None):
  """Creates a tensor of all ones that has the same shape as the input.

  Given a single tensor (`input`), this operation returns a tensor of the
  same type and shape as `input` with all elements set to 1. Optionally,
  you can use `dtype` to specify a new type for the returned tensor.

  For example:

  >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
  >>> tf.ones_like(tensor)
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
    array([[1, 1, 1],
           [1, 1, 1]], dtype=int32)>

  Args:
    input: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
      `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
      `complex64`, `complex128`, `bool` or `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to one.
  """
  return ones_like_impl(input, dtype, name, optimize=True)


def ones_like_impl(tensor, dtype, name, optimize=True):
  """Internal implementation for the v1/v2 ones_like API calls."""
  with ops.name_scope(name, "ones_like", [tensor]) as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    ones_shape = shape_internal(tensor, optimize=optimize)
    if dtype is None:
      dtype = tensor.dtype
    ret = ones(ones_shape, dtype=dtype, name=name)
    if not context.executing_eagerly():
      ret.set_shape(tensor.get_shape())
    return ret


@tf_export("ones")
def ones(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to one (1).

  This operation returns a tensor of type `dtype` with shape `shape` and
  all elements set to one.

  >>> tf.ones([3, 4], tf.int32)
  <tf.Tensor: shape=(3, 4), dtype=int32, numpy=
  array([[1, 1, 1, 1],
         [1, 1, 1, 1],
         [1, 1, 1, 1]], dtype=int32)>

  Args:
    shape: A `list` of integers, a `tuple` of integers, or
      a 1-D `Tensor` of type `int32`.
    dtype: Optional DType of an element in the resulting `Tensor`. Default is
      `tf.float32`.
    name: Optional string. A name for the operation.

  Returns:
    A `Tensor` with all elements set to one (1).
  """
  dtype = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "ones", [shape]) as name:
    one = True if dtype == dtypes.bool else 1
    if not isinstance(shape, ops.Tensor):
      try:
        if not context.executing_eagerly():
          # Create a constant if it won't be very big. Otherwise create a fill
          # op to prevent serialized GraphDefs from becoming too large.
          output = _constant_if_small(one, shape, dtype, name)
          if output is not None:
            return output

        # Go through tensor shapes to get int64-if-needed semantics
        shape = constant_op._tensor_shape_tensor_conversion_function(
            tensor_shape.TensorShape(shape))
      except (TypeError, ValueError):
        # Happens when shape is a list with tensor elements
        shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
    if not shape._shape_tuple():
      shape = reshape(shape, [-1])  # Ensure it's a vector
    output = fill(shape, constant(one, dtype=dtype), name=name)
  assert output.dtype.base_dtype == dtype
  return output


@tf_export(v1=["placeholder"])
def placeholder(dtype, shape=None, name=None):
  """Inserts a placeholder for a tensor that will always be fed.

  **Important**: This tensor will produce an error if evaluated. Its value must
  be fed using the `feed_dict` optional argument to `Session.run()`,
  `Tensor.eval()`, or `Operation.run()`.

  For example:

  ```python
  x = tf.compat.v1.placeholder(tf.float32, shape=(1024, 1024))
  y = tf.matmul(x, x)

  with tf.compat.v1.Session() as sess:
    print(sess.run(y))  # ERROR: will fail because x was not fed.

    rand_array = np.random.rand(1024, 1024)
    print(sess.run(y, feed_dict={x: rand_array}))  # Will succeed.
  ```

  @compatibility(eager)
  Placeholders are not compatible with eager execution.
  @end_compatibility

  Args:
    dtype: The type of elements in the tensor to be fed.
    shape: The shape of the tensor to be fed (optional). If the shape is not
      specified, you can feed a tensor of any shape.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that may be used as a handle for feeding a value, but not
    evaluated directly.

  Raises:
    RuntimeError: if eager execution is enabled
  """
  if context.executing_eagerly():
    raise RuntimeError("tf.placeholder() is not compatible with "
                       "eager execution.")

  return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
2991
2992
2993@tf_export(v1=["placeholder_with_default"])
2994def placeholder_with_default(input, shape, name=None):  # pylint: disable=redefined-builtin
2995  """A placeholder op that passes through `input` when its output is not fed.
2996
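  For example, a minimal graph-mode sketch (variable names are made up for
  illustration):

  ```python
  default = tf.ones([2], tf.float32)
  x = tf.compat.v1.placeholder_with_default(default, shape=[2])
  y = x * 2.0

  with tf.compat.v1.Session() as sess:
    print(sess.run(y))  # [2.0, 2.0]: the default value was used.
    print(sess.run(y, feed_dict={x: [3.0, 4.0]}))  # [6.0, 8.0]: the fed value.
  ```
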
2997  Args:
2998    input: A `Tensor`. The default value to produce when output is not fed.
2999    shape: A `tf.TensorShape` or list of `int`s. The (possibly partial) shape of
3000      the tensor.
3001    name: A name for the operation (optional).
3002
3003  Returns:
3004    A `Tensor`. Has the same type as `input`.
3005  """
3006  return gen_array_ops.placeholder_with_default(input, shape, name)
3007
3008
3009# pylint: disable=redefined-outer-name
3010def _normalize_sparse_shape(shape, name):
3011  """Returns a tuple of (Tensor or None, rank or None)."""
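  # For example (a sketch): a static shape like [2, None] normalizes to
  # (None, 2), while [2, 3] normalizes to (<1-D int64 Tensor [2, 3]>, 2).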
3012  if shape is None:
3013    return (None, None)
3014  rank = shape.get_shape()[0] if isinstance(shape, ops.Tensor) else len(shape)
3015  if not isinstance(shape, ops.Tensor) and None in shape:
3016    return (None, rank)
3017  return (ops.convert_to_tensor(shape, dtype=dtypes.int64, name=name), rank)
3018
3019
3020@tf_export(v1=["sparse.placeholder", "sparse_placeholder"])
3021@deprecation.deprecated_endpoints("sparse_placeholder")
3022def sparse_placeholder(dtype, shape=None, name=None):
  """Inserts a placeholder for a sparse tensor that will always be fed.
3024
3025  **Important**: This sparse tensor will produce an error if evaluated.
3026  Its value must be fed using the `feed_dict` optional argument to
3027  `Session.run()`, `Tensor.eval()`, or `Operation.run()`.
3028
3029  For example:
3030
3031  ```python
3032  x = tf.compat.v1.sparse.placeholder(tf.float32)
3033  y = tf.sparse.reduce_sum(x)
3034
3035  with tf.compat.v1.Session() as sess:
3036    print(sess.run(y))  # ERROR: will fail because x was not fed.
3037
3038    indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
3039    values = np.array([1.0, 2.0], dtype=np.float32)
3040    shape = np.array([7, 9, 2], dtype=np.int64)
    print(sess.run(y, feed_dict={
      x: tf.compat.v1.SparseTensorValue(indices, values, shape)}))
    # Will succeed.
3044    print(sess.run(y, feed_dict={
3045      x: (indices, values, shape)}))  # Will succeed.
3046
3047    sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
3048    sp_value = sp.eval(session=sess)
3049    print(sess.run(y, feed_dict={x: sp_value}))  # Will succeed.
3050  ```
3051
  @compatibility(eager)
  Placeholders are not compatible with eager execution.
  @end_compatibility
3053
3054  Args:
3055    dtype: The type of `values` elements in the tensor to be fed.
3056    shape: The shape of the tensor to be fed (optional). If the shape is not
3057      specified, you can feed a sparse tensor of any shape.
3058    name: A name for prefixing the operations (optional).
3059
3060  Returns:
3061    A `SparseTensor` that may be used as a handle for feeding a value, but not
3062    evaluated directly.
3063
3064  Raises:
3065    RuntimeError: if eager execution is enabled
3066  """
3067  if context.executing_eagerly():
    raise RuntimeError("`sparse_placeholder` is not compatible with "
                       "eager execution.")
3070
3071  shape_name = (name + "/shape") if name is not None else None
3072  shape, rank = _normalize_sparse_shape(shape, shape_name)
3073  if shape is None:
3074    shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
3075  return sparse_tensor.SparseTensor(
3076      values=placeholder(
3077          dtype,
3078          shape=[None],
3079          name=(name + "/values") if name is not None else None),
3080      indices=placeholder(
3081          dtypes.int64,
3082          shape=[None, rank],
3083          name=(name + "/indices") if name is not None else None),
3084      dense_shape=shape)
3085
3086
3087# pylint: enable=redefined-outer-name
3088
3089
3090@tf_export("pad", v1=[])
3091def pad_v2(tensor, paddings, mode="CONSTANT", constant_values=0, name=None):
3092  """Pads a tensor.
3093
3094  This operation pads a `tensor` according to the `paddings` you specify.
  `paddings` is an integer tensor with shape `[n, 2]`, where `n` is the rank
  of `tensor`. For each dimension D of `tensor`, `paddings[D, 0]` indicates
  how
3097  many values to add before the contents of `tensor` in that dimension, and
3098  `paddings[D, 1]` indicates how many values to add after the contents of
3099  `tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
3100  and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
3101  `mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
3102  no greater than `tensor.dim_size(D)`.
3103
3104  The padded size of each dimension D of the output is:
3105
3106  `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
3107
3108  For example:
3109
3110  ```python
3111  t = tf.constant([[1, 2, 3], [4, 5, 6]])
3112  paddings = tf.constant([[1, 1,], [2, 2]])
3113  # 'constant_values' is 0.
3114  # rank of 't' is 2.
3115  tf.pad(t, paddings, "CONSTANT")  # [[0, 0, 0, 0, 0, 0, 0],
3116                                   #  [0, 0, 1, 2, 3, 0, 0],
3117                                   #  [0, 0, 4, 5, 6, 0, 0],
3118                                   #  [0, 0, 0, 0, 0, 0, 0]]
3119
3120  tf.pad(t, paddings, "REFLECT")  # [[6, 5, 4, 5, 6, 5, 4],
3121                                  #  [3, 2, 1, 2, 3, 2, 1],
3122                                  #  [6, 5, 4, 5, 6, 5, 4],
3123                                  #  [3, 2, 1, 2, 3, 2, 1]]
3124
3125  tf.pad(t, paddings, "SYMMETRIC")  # [[2, 1, 1, 2, 3, 3, 2],
3126                                    #  [2, 1, 1, 2, 3, 3, 2],
3127                                    #  [5, 4, 4, 5, 6, 6, 5],
3128                                    #  [5, 4, 4, 5, 6, 6, 5]]
3129  ```
3130
3131  Args:
3132    tensor: A `Tensor`.
3133    paddings: A `Tensor` of type `int32`.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive).
3135    constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
3136      same type as `tensor`.
3137    name: A name for the operation (optional).
3138
3139  Returns:
3140    A `Tensor`. Has the same type as `tensor`.
3141
3142  Raises:
3143    ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
3144  """
3145  return pad(tensor, paddings, mode, name, constant_values)
3146
3147
3148@tf_export(v1=["pad"])
3149def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0):  # pylint: disable=invalid-name
3150  """Pads a tensor.
3151
3152  This operation pads a `tensor` according to the `paddings` you specify.
  `paddings` is an integer tensor with shape `[n, 2]`, where `n` is the rank
  of `tensor`. For each dimension D of `tensor`, `paddings[D, 0]` indicates
  how
3155  many values to add before the contents of `tensor` in that dimension, and
3156  `paddings[D, 1]` indicates how many values to add after the contents of
3157  `tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
3158  and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
3159  `mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
3160  no greater than `tensor.dim_size(D)`.
3161
3162  The padded size of each dimension D of the output is:
3163
3164  `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
3165
3166  For example:
3167
3168  ```python
3169  t = tf.constant([[1, 2, 3], [4, 5, 6]])
3170  paddings = tf.constant([[1, 1,], [2, 2]])
3171  # 'constant_values' is 0.
3172  # rank of 't' is 2.
3173  tf.pad(t, paddings, "CONSTANT")  # [[0, 0, 0, 0, 0, 0, 0],
3174                                   #  [0, 0, 1, 2, 3, 0, 0],
3175                                   #  [0, 0, 4, 5, 6, 0, 0],
3176                                   #  [0, 0, 0, 0, 0, 0, 0]]
3177
3178  tf.pad(t, paddings, "REFLECT")  # [[6, 5, 4, 5, 6, 5, 4],
3179                                  #  [3, 2, 1, 2, 3, 2, 1],
3180                                  #  [6, 5, 4, 5, 6, 5, 4],
3181                                  #  [3, 2, 1, 2, 3, 2, 1]]
3182
3183  tf.pad(t, paddings, "SYMMETRIC")  # [[2, 1, 1, 2, 3, 3, 2],
3184                                    #  [2, 1, 1, 2, 3, 3, 2],
3185                                    #  [5, 4, 4, 5, 6, 6, 5],
3186                                    #  [5, 4, 4, 5, 6, 6, 5]]
3187  ```
3188
3189  Args:
3190    tensor: A `Tensor`.
3191    paddings: A `Tensor` of type `int32`.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive).
3193    name: A name for the operation (optional).
3194    constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
3195      same type as `tensor`.
3196
3197  Returns:
3198    A `Tensor`. Has the same type as `tensor`.
3199
3200  Raises:
3201    ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
3202  """
3203
3204  # Convert lower/mixed case to upper for NumPy compatibility
3205  # NumPy uses all lower-case modes.
3206  mode = mode.upper()
3207  if mode == "CONSTANT":
    # TODO(rjryan): Once the forward compatibility period (3 weeks) has
    # passed, remove the "Pad" fallback here.
3210    if not tensor_util.is_tensor(constant_values) and constant_values == 0:
3211      result = gen_array_ops.pad(tensor, paddings, name=name)
3212    else:
3213      result = gen_array_ops.pad_v2(
3214          tensor, paddings, constant_values, name=name)
3215  elif mode == "REFLECT":
3216    result = gen_array_ops.mirror_pad(
3217        tensor, paddings, mode="REFLECT", name=name)
3218  elif mode == "SYMMETRIC":
3219    result = gen_array_ops.mirror_pad(
3220        tensor, paddings, mode="SYMMETRIC", name=name)
3221  else:
3222    raise ValueError("Unknown padding mode: %s" % mode)
3223
3224  # Restore shape information where possible.
3225  if not context.executing_eagerly():
3226    paddings_constant = _get_paddings_constant(paddings)
3227    input_shape = (
3228        tensor_shape.TensorShape(tensor.shape)
3229        if isinstance(tensor, ops.Tensor) else result.op.inputs[0].shape)
3230    if (input_shape.ndims is not None and
3231        not result.shape.is_fully_defined() and paddings_constant is not None):
3232      new_shape = []
3233      for padding, dim in zip(paddings_constant, input_shape.as_list()):
3234        if padding is None or dim is None or any((x is None for x in padding)):
3235          new_shape.append(None)
3236        else:
3237          new_shape.append(sum(padding) + dim)
3238      result.set_shape(new_shape)
3239
3240  return result
3241
3242
3243def _get_paddings_constant(paddings):
3244  """Helper to get the constant values of the paddings arg to pad().
3245
3246  Used under V1 graph mode to facilitate computation of the shape of the output
3247  tensor of `pad()`.
3248
3249  Args:
3250    paddings: The same paddings arg as passed to pad(). Can be a Tensor, or
3251      a nested list or tuple of Tensor and/or numbers.
3252
3253  Returns:
    A nested list of numbers or `None`, where `None` indicates an unknown
    padding size.
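
  For example (a sketch): `[[1, 2], [3, some_unknown_scalar_tensor]]` returns
  `[[1, 2], [3, None]]`, with `None` marking the unknown entry.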
3256  """
3257  if isinstance(paddings, ops.Tensor):
3258    return tensor_util.constant_value(paddings, partial=True)
3259  elif isinstance(paddings, (list, tuple)):
3260    return [_get_paddings_constant(x) for x in paddings]
3261  else:
3262    return paddings
3263
3264
3265@tf_export("meshgrid")
3266def meshgrid(*args, **kwargs):
3267  """Broadcasts parameters for evaluation on an N-D grid.
3268
3269  Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
3270  of N-D coordinate arrays for evaluating expressions on an N-D grid.
3271
3272  Notes:
3273
  `meshgrid` supports Cartesian ('xy') and matrix ('ij') indexing conventions.
3275  When the `indexing` argument is set to 'xy' (the default), the broadcasting
3276  instructions for the first two dimensions are swapped.
3277
3278  Examples:
3279
3280  Calling `X, Y = meshgrid(x, y)` with the tensors
3281
3282  ```python
3283  x = [1, 2, 3]
3284  y = [4, 5, 6]
3285  X, Y = tf.meshgrid(x, y)
3286  # X = [[1, 2, 3],
3287  #      [1, 2, 3],
3288  #      [1, 2, 3]]
3289  # Y = [[4, 4, 4],
3290  #      [5, 5, 5],
3291  #      [6, 6, 6]]
3292  ```
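
  With matrix ('ij') indexing, the first two outputs are transposed relative
  to the default; a sketch:

  ```python
  X, Y = tf.meshgrid([1, 2, 3], [4, 5, 6], indexing='ij')
  # X = [[1, 1, 1],
  #      [2, 2, 2],
  #      [3, 3, 3]]
  # Y = [[4, 5, 6],
  #      [4, 5, 6],
  #      [4, 5, 6]]
  ```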
3293
3294  Args:
3295    *args: `Tensor`s with rank 1.
3296    **kwargs:
3297      - indexing: Either 'xy' or 'ij' (optional, default: 'xy').
3298      - name: A name for the operation (optional).
3299
3300  Returns:
3301    outputs: A list of N `Tensor`s with rank N.
3302
3303  Raises:
    TypeError: When an unsupported keyword argument is passed.
3305    ValueError: When indexing keyword argument is not one of `xy` or `ij`.
3306  """
3307
3308  indexing = kwargs.pop("indexing", "xy")
3309  name = kwargs.pop("name", "meshgrid")
3310  if kwargs:
3311    key = list(kwargs.keys())[0]
3312    raise TypeError("'{}' is an invalid keyword argument "
3313                    "for this function".format(key))
3314
3315  if indexing not in ("xy", "ij"):
3316    raise ValueError("indexing parameter must be either 'xy' or 'ij'")
3317
3318  with ops.name_scope(name, "meshgrid", args) as name:
3319    ndim = len(args)
3320    s0 = (1,) * ndim
3321
3322    # Prepare reshape by inserting dimensions with size 1 where needed
3323    output = []
3324    for i, x in enumerate(args):
3325      output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))
3326    # Create parameters for broadcasting each tensor to the full size
3327    shapes = [size(x) for x in args]
3328
3329    output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
3330
3331    if indexing == "xy" and ndim > 1:
3332      output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
3333      output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
3334      shapes[0], shapes[1] = shapes[1], shapes[0]
3335
3336    # TODO(nolivia): improve performance with a broadcast
3337    mult_fact = ones(shapes, output_dtype)
3338    return [x * mult_fact for x in output]
3339
3340
3341NEW_AXIS = -1
3342SHRINK_AXIS = -2
3343
3344
3345# PEP-8 naming
3346# pylint: disable=invalid-name,redefined-outer-name
3347def _compute_size_of_strided_dim(shrink, spec, size):
3348  """Computes the size of a single strided slice dimension."""
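  # For example, with spec = slice(1, -1, 2) and a fully-known size of 10,
  # begin canonicalizes to 1 and end to 9, so the dimension has
  # (9 - 1) // 2 = 4 elements, matching len(range(10)[1:-1:2]).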
3349
  unknown = None  # None denotes a statically unknown dimension size.
  use_full_range = None  # A None bound in `spec` means "use the full range".
3352  # if this is a shrink axis (i.e. a non-range index)
3353  # it either will produce an error or return 1
3354  if shrink:
3355    return 1
3356  if size is unknown or size.value is unknown:
3357    return unknown
3358  size = size.value
3359  stride = spec.step
3360  if stride is not unknown:
3361    if stride == 0:
3362      return unknown
3364    valid_range = [0, size] if stride > 0 else [-1, size - 1]
3365
3366    # PEP-8 naming
3367    # pylint: disable=invalid-name
3368    def canonical(x, c):
3369      if x is use_full_range:
3370        return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
3371      else:
3372        x_fwd = size + x if x < 0 else x  # make negative indices positive
3373        return max(valid_range[0], min(valid_range[1], x_fwd))
3374
3375    begin = canonical(spec.start, 0)
3376    end = canonical(spec.stop, 1)
3377    interval_length = end - begin
3378    if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
3379      return 0
3380    else:
3381      remainder = 1 if interval_length % stride != 0 else 0
3382      return interval_length // stride + remainder
3383  else:
3384    return unknown  # unknown because stride is unknown
3385
3386
3387def _TileGradShape(op):
3388  """Shape function for the TileGrad op."""
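  # For example (a sketch): if the forward Tile produced shape [6, 8] from
  # multiples [2, 4], the gradient has shape [6 // 2, 8 // 4] = [3, 2].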
3389  multiples_shape = op.inputs[1].get_shape().with_rank(1)
3390  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
3391  # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
3392  # it is a vector of non-negative integers, and (ii) doing so allows
3393  # us to handle partially-known multiples.
3394  multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
3395      input_shape.ndims)
3396  if multiples.ndims is None:
3397    return [tensor_shape.unknown_shape()]
3398  else:
3399    output_dims = []
3400    for dim, multiple in zip(input_shape.dims, multiples.dims):
3401      output_dims.append(dim // multiple)
3402    return [tensor_shape.TensorShape(output_dims)]
3403
3404
3405@tf_export("edit_distance")
3406def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
3407  """Computes the Levenshtein distance between sequences.
3408
3409  This operation takes variable-length sequences (`hypothesis` and `truth`),
3410  each provided as a `SparseTensor`, and computes the Levenshtein distance.
  You can normalize the edit distance by the length of `truth` by setting
3412  `normalize` to true.
3413
3414  For example, given the following input:
3415
3416  ```python
3417  # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
3418  #   (0,0) = ["a"]
3419  #   (1,0) = ["b"]
3420  hypothesis = tf.SparseTensor(
3421      [[0, 0, 0],
3422       [1, 0, 0]],
3423      ["a", "b"],
3424      (2, 1, 1))
3425
3426  # 'truth' is a tensor of shape `[2, 2]` with variable-length values:
3427  #   (0,0) = []
3428  #   (0,1) = ["a"]
3429  #   (1,0) = ["b", "c"]
3430  #   (1,1) = ["a"]
3431  truth = tf.SparseTensor(
3432      [[0, 1, 0],
3433       [1, 0, 0],
3434       [1, 0, 1],
3435       [1, 1, 0]],
3436      ["a", "b", "c", "a"],
3437      (2, 2, 2))
3438
3439  normalize = True
3440  ```
3441
3442  This operation would return the following:
3443
3444  ```python
3445  # 'output' is a tensor of shape `[2, 2]` with edit distances normalized
3446  # by 'truth' lengths.
  output ==> [[inf, 1.0],  # (0,0): no truth, (0,1): no hypothesis
              [0.5, 1.0]]  # (1,0): addition, (1,1): no hypothesis
3449  ```
3450
3451  Args:
3452    hypothesis: A `SparseTensor` containing hypothesis sequences.
3453    truth: A `SparseTensor` containing truth sequences.
    normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
      the length of `truth`.
3456    name: A name for the operation (optional).
3457
3458  Returns:
3459    A dense `Tensor` with rank `R - 1`, where R is the rank of the
3460    `SparseTensor` inputs `hypothesis` and `truth`.
3461
3462  Raises:
3463    TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
3464  """
3465  if not isinstance(
3466      hypothesis,
3467      (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
3468    raise TypeError("Hypothesis must be a SparseTensor.")
3469  if not isinstance(
3470      truth, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
3471    raise TypeError("Truth must be a SparseTensor.")
3472
3473  return gen_array_ops.edit_distance(
3474      hypothesis.indices,
3475      hypothesis.values,
3476      hypothesis.dense_shape,
3477      truth.indices,
3478      truth.values,
3479      truth.dense_shape,
3480      normalize=normalize,
3481      name=name)
3482
3483
3484@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
3485def _FakeQuantWithMinMaxArgsGradient(op, grad):
3486  """Gradient for FakeQuantWithMinMaxArgs op."""
3487  return fake_quant_with_min_max_args_gradient(
3488      grad,
3489      op.inputs[0],
3490      min=op.get_attr("min"),
3491      max=op.get_attr("max"),
3492      num_bits=op.get_attr("num_bits"),
3493      narrow_range=op.get_attr("narrow_range"))
3494
3495
3496@ops.RegisterGradient("FakeQuantWithMinMaxVars")
3497def _FakeQuantWithMinMaxVarsGradient(op, grad):
3498  """Gradient for FakeQuantWithMinMaxVars op."""
3499  return fake_quant_with_min_max_vars_gradient(
3500      grad,
3501      op.inputs[0],
3502      op.inputs[1],
3503      op.inputs[2],
3504      num_bits=op.get_attr("num_bits"),
3505      narrow_range=op.get_attr("narrow_range"))
3506
3507
3508@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
3509def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
3510  """Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
3511  return fake_quant_with_min_max_vars_per_channel_gradient(
3512      grad,
3513      op.inputs[0],
3514      op.inputs[1],
3515      op.inputs[2],
3516      num_bits=op.get_attr("num_bits"),
3517      narrow_range=op.get_attr("narrow_range"))
3518
3519
3520@tf_export("required_space_to_batch_paddings")
3521def required_space_to_batch_paddings(input_shape,
3522                                     block_shape,
3523                                     base_paddings=None,
3524                                     name=None):
3525  """Calculate padding required to make block_shape divide input_shape.
3526
3527  This function can be used to calculate a suitable paddings argument for use
3528  with space_to_batch_nd and batch_to_space_nd.
3529
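
  For example, a sketch with `input_shape = [3, 5]`, `block_shape = [2, 2]`,
  and the default `base_paddings` of zero:

  ```python
  paddings, crops = tf.required_space_to_batch_paddings([3, 5], [2, 2])
  # paddings = [[0, 1], [0, 1]]  # Pad each dimension up to a multiple of 2.
  # crops    = [[0, 1], [0, 1]]  # Crop the same amount back afterwards.
  ```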
3530  Args:
3531    input_shape: int32 Tensor of shape [N].
3532    block_shape: int32 Tensor of shape [N].
3533    base_paddings: Optional int32 Tensor of shape [N, 2].  Specifies the minimum
3534      amount of padding to use.  All elements must be >= 0.  If not specified,
3535      defaults to 0.
3536    name: string.  Optional name prefix.
3537
3538  Returns:
3539    (paddings, crops), where:
3540
3541    `paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
3542    satisfying:
3543
3544        paddings[i, 0] = base_paddings[i, 0].
3545        0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
3546        (input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0
3547
3548        crops[i, 0] = 0
3549        crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]
3550
  Raises:
    ValueError: if called with incompatible shapes.
3552  """
3553  with ops.name_scope(name, "required_space_to_batch_paddings",
3554                      [input_shape, block_shape]):
3555    input_shape = ops.convert_to_tensor(
3556        input_shape, dtype=dtypes.int32, name="input_shape")
3557    block_shape = ops.convert_to_tensor(
3558        block_shape, dtype=dtypes.int32, name="block_shape")
3559
3560    block_shape.get_shape().assert_is_fully_defined()
3561    block_shape.get_shape().assert_has_rank(1)
3562    num_block_dims = block_shape.get_shape().dims[0].value
3563    if num_block_dims == 0:
3564      return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
3565
3566    input_shape.get_shape().assert_is_compatible_with([num_block_dims])
3567
3568    if base_paddings is not None:
3569      base_paddings = ops.convert_to_tensor(
3570          base_paddings, dtype=dtypes.int32, name="base_paddings")
3571      base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
3572    else:
3573      base_paddings = zeros([num_block_dims, 2], dtypes.int32)
3574
3575    const_block_shape = tensor_util.constant_value(block_shape)
3576    const_input_shape = tensor_util.constant_value(input_shape)
3577    const_base_paddings = tensor_util.constant_value(base_paddings)
3578    if (const_block_shape is not None and const_input_shape is not None and
3579        const_base_paddings is not None):
3580      block_shape = const_block_shape
3581      input_shape = const_input_shape
3582      base_paddings = const_base_paddings
3583
3584    # Use same expression for both constant and non-constant case.
3585    pad_start = base_paddings[:, 0]
3586    orig_pad_end = base_paddings[:, 1]
3587    full_input_shape = input_shape + pad_start + orig_pad_end
3588    pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
3589    pad_end = orig_pad_end + pad_end_extra
3590
3591    result_paddings = stack(
3592        [[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
3593        name="paddings")
3594    result_crops = stack([[0, pad_end_extra[i]] for i in range(num_block_dims)],
3595                         name="crops")
3596    return result_paddings, result_crops
3597
3598
3599@tf_export(v1=["nn.space_to_batch", "space_to_batch"])
3600@deprecation.deprecated_endpoints("space_to_batch")
3601def space_to_batch(  # pylint: disable=missing-docstring
3602    input,  # pylint: disable=redefined-builtin
3603    paddings,
3604    block_size=None,
3605    name=None,
3606    block_shape=None):  # pylint: disable=redefined-builtin
3607  block_size = deprecation.deprecated_argument_lookup("block_shape",
3608                                                      block_shape, "block_size",
3609                                                      block_size)
3610  result = space_to_batch_nd(
3611      input,
3612      paddings=paddings,
3613      block_shape=np.array([block_size, block_size], dtype=np.int64),
3614      name=name)
3615  result.set_shape(result.get_shape().with_rank(4))
3616  return result
3617
3618
3619space_to_batch.__doc__ = gen_array_ops.space_to_batch.__doc__
3620
3621
3622@tf_export("space_to_batch", "nn.space_to_batch", v1=[])
3623def space_to_batch_v2(input, block_shape, paddings, name=None):  # pylint: disable=redefined-builtin
3624  return space_to_batch_nd(input, block_shape, paddings, name)
3625
3626
3627space_to_batch_v2.__doc__ = gen_array_ops.space_to_batch_nd.__doc__
3628
3629
3630@tf_export(v1=["nn.space_to_depth", "space_to_depth"])
3631@deprecation.deprecated_endpoints("space_to_depth")
3632def space_to_depth(input, block_size, name=None, data_format="NHWC"):  # pylint: disable=redefined-builtin
3633  return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
3634
3635
3636space_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__
3637
3638
3639@tf_export("nn.space_to_depth", v1=[])
3640def space_to_depth_v2(input, block_size, data_format="NHWC", name=None):  # pylint: disable=redefined-builtin
3641  return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
3642
3643
3644space_to_depth_v2.__doc__ = gen_array_ops.space_to_depth.__doc__
3645
3646
3647@tf_export(v1=["nn.depth_to_space", "depth_to_space"])
3648@deprecation.deprecated_endpoints("depth_to_space")
3649def depth_to_space(input, block_size, name=None, data_format="NHWC"):  # pylint: disable=redefined-builtin
3650  return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
3651
3652
3653depth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__
3654
3655
3656@tf_export("nn.depth_to_space", v1=[])
3657def depth_to_space_v2(input, block_size, data_format="NHWC", name=None):  # pylint: disable=redefined-builtin
3658  return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
3659
3660
3661depth_to_space_v2.__doc__ = gen_array_ops.depth_to_space.__doc__
3662
3663
3664@tf_export(v1=["batch_to_space"])
3665def batch_to_space(input, crops, block_size, name=None, block_shape=None):  # pylint: disable=redefined-builtin,missing-docstring
3666  block_size = deprecation.deprecated_argument_lookup("block_shape",
3667                                                      block_shape, "block_size",
3668                                                      block_size)
3669  result = batch_to_space_nd(
3670      input,
3671      crops=crops,
3672      block_shape=np.array([block_size, block_size], dtype=np.int64),
3673      name=name)
3674  result.set_shape(result.get_shape().with_rank(4))
3675  return result
3676
3677
3678batch_to_space.__doc__ = gen_array_ops.batch_to_space.__doc__
3679
3680
3681@tf_export("batch_to_space", v1=[])
3682def batch_to_space_v2(input, block_shape, crops, name=None):  # pylint: disable=redefined-builtin
3683  """BatchToSpace for N-D tensors of type T.
3684
3685  This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of
3686  shape `block_shape + [batch]`, interleaves these blocks back into the grid
3687  defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the
3688  same rank as the input.  The spatial dimensions of this intermediate result
3689  are then optionally cropped according to `crops` to produce the output.  This
3690  is the reverse of SpaceToBatch (see `tf.space_to_batch`).
3691
3692  Args:
3693    input: A N-D `Tensor` with shape `input_shape = [batch] + spatial_shape +
3694      remaining_shape`, where `spatial_shape` has M dimensions.
3695    block_shape: A 1-D `Tensor` with shape [M]. Must be one of the following
3696      types: `int32`, `int64`. All values must be >= 1. For backwards
3697      compatibility with TF 1.0, this parameter may be an int, in which case it
      is converted to `numpy.array([block_shape, block_shape],
      dtype=numpy.int64)`.
3701    crops: A  2-D `Tensor` with shape `[M, 2]`. Must be one of the
3702      following types: `int32`, `int64`. All values must be >= 0.
3703      `crops[i] = [crop_start, crop_end]` specifies the amount to crop from
3704      input dimension `i + 1`, which corresponds to spatial dimension `i`.
3705      It is required that
3706      `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
3707      This operation is equivalent to the following steps:
3708      1. Reshape `input` to `reshaped` of shape: [block_shape[0], ...,
3709        block_shape[M-1], batch / prod(block_shape), input_shape[1], ...,
3710        input_shape[N-1]]
3711      2. Permute dimensions of `reshaped` to produce `permuted` of shape
3712         [batch / prod(block_shape),  input_shape[1], block_shape[0], ...,
3713         input_shape[M], block_shape[M-1], input_shape[M+1],
3714        ..., input_shape[N-1]]
3715      3. Reshape `permuted` to produce `reshaped_permuted` of shape
3716         [batch / prod(block_shape), input_shape[1] * block_shape[0], ...,
3717         input_shape[M] * block_shape[M-1], input_shape[M+1], ...,
3718         input_shape[N-1]]
3719      4. Crop the start and end of dimensions `[1, ..., M]` of
3720         `reshaped_permuted` according to `crops` to produce the output
3721         of shape:
3722         [batch / prod(block_shape),  input_shape[1] *
3723           block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] *
3724           block_shape[M-1] - crops[M-1,0] - crops[M-1,1],  input_shape[M+1],
3725           ..., input_shape[N-1]]
3726      Some Examples:
3727      (1) For the following input of shape `[4, 1, 1, 1]`,
3728         `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
3729         ```python
3730         [[[[1]]],
3731          [[[2]]],
3732          [[[3]]],
3733          [[[4]]]]
3734         ```
3735         The output tensor has shape `[1, 2, 2, 1]` and value:
         ```python
         x = [[[[1], [2]],
               [[3], [4]]]]
         ```
3738      (2) For the following input of shape `[4, 1, 1, 3]`,
3739         `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
3740         ```python
3741         [[[1,  2,   3]],
3742          [[4,  5,   6]],
3743          [[7,  8,   9]],
3744          [[10, 11, 12]]]
3745         ```
3746         The output tensor has shape `[1, 2, 2, 3]` and value:
3747         ```python
3748         x = [[[[1, 2, 3], [4,  5,  6 ]],
3749               [[7, 8, 9], [10, 11, 12]]]]
3750         ```
3751      (3) For the following
3752         input of shape `[4, 2, 2, 1]`,
3753         `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
3754         ```python
3755         x = [[[[1], [3]], [[ 9], [11]]],
3756              [[[2], [4]], [[10], [12]]],
3757              [[[5], [7]], [[13], [15]]],
3758              [[[6], [8]], [[14], [16]]]]
3759         ```
3760         The output tensor has shape `[1, 4, 4, 1]` and value:
3761         ```python
3762         x = [[[1],  [2],  [ 3], [ 4]],
3763              [[5],  [6],  [ 7], [ 8]],
3764              [[9],  [10], [11], [12]],
3765              [[13], [14], [15], [16]]]
3766         ```
3767       (4) For the following input of shape
3768          `[8, 1, 3, 1]`,
3769          `block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`:
3770          ```python
3771          x = [[[[0], [ 1], [ 3]]],
3772               [[[0], [ 9], [11]]],
3773               [[[0], [ 2], [ 4]]],
3774               [[[0], [10], [12]]],
3775               [[[0], [ 5], [ 7]]],
3776               [[[0], [13], [15]]],
3777               [[[0], [ 6], [ 8]]],
3778               [[[0], [14], [16]]]]
3779          ```
3780          The output tensor has shape `[2, 2, 4, 1]` and value:
3781          ```python
3782          x = [[[[ 1], [ 2], [ 3], [ 4]],
3783                [[ 5], [ 6], [ 7], [ 8]]],
3784               [[[ 9], [10], [11], [12]],
                [[13], [14], [15], [16]]]]
          ```
3786    name: A name for the operation (optional).
3787
3788  Returns:
3789    A `Tensor`. Has the same type as `input`.
3790  """
3791  if isinstance(block_shape, int):
3792    block_shape = np.array([block_shape, block_shape], dtype=np.int64)
3793
3794  return batch_to_space_nd(
3795      input=input, block_shape=block_shape, crops=crops, name=name)
3796
3797
3798@tf_export("one_hot")
3799@dispatch.add_dispatch_support
3800def one_hot(indices,
3801            depth,
3802            on_value=None,
3803            off_value=None,
3804            axis=None,
3805            dtype=None,
3806            name=None):
3807  """Returns a one-hot tensor.
3808
3809  The locations represented by indices in `indices` take value `on_value`,
3810  while all other locations take value `off_value`.
3811
3812  `on_value` and `off_value` must have matching data types. If `dtype` is also
3813  provided, they must be the same data type as specified by `dtype`.
3814
  If `on_value` is not provided, it will default to the value `1` with type
  `dtype`.

  If `off_value` is not provided, it will default to the value `0` with type
  `dtype`.
3820
3821  If the input `indices` is rank `N`, the output will have rank `N+1`. The
3822  new axis is created at dimension `axis` (default: the new axis is appended
3823  at the end).
3824
  If `indices` is a scalar, the output shape will be a vector of length
  `depth`.
3826
3827  If `indices` is a vector of length `features`, the output shape will be:
3828
3829  ```
3830    features x depth if axis == -1
3831    depth x features if axis == 0
3832  ```
3833
3834  If `indices` is a matrix (batch) with shape `[batch, features]`, the output
3835  shape will be:
3836
3837  ```
3838    batch x features x depth if axis == -1
3839    batch x depth x features if axis == 1
3840    depth x batch x features if axis == 0
3841  ```
3842
3843  If `indices` is a RaggedTensor, the 'axis' argument must be positive and refer
3844  to a non-ragged axis. The output will be equivalent to applying 'one_hot' on
3845  the values of the RaggedTensor, and creating a new RaggedTensor from the
3846  result.
3847
  If `dtype` is not provided, it will be inferred from the data type of
  `on_value` or `off_value`, if one or both are passed in. If none of
  `on_value`, `off_value`, or `dtype` are provided, `dtype` will default to
  `tf.float32`.
3852
3853  Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
3854  etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.
3855
3856  For example:
3857
3858  ```python
3859  indices = [0, 1, 2]
3860  depth = 3
3861  tf.one_hot(indices, depth)  # output: [3 x 3]
3862  # [[1., 0., 0.],
3863  #  [0., 1., 0.],
3864  #  [0., 0., 1.]]
3865
3866  indices = [0, 2, -1, 1]
3867  depth = 3
3868  tf.one_hot(indices, depth,
3869             on_value=5.0, off_value=0.0,
3870             axis=-1)  # output: [4 x 3]
3871  # [[5.0, 0.0, 0.0],  # one_hot(0)
3872  #  [0.0, 0.0, 5.0],  # one_hot(2)
3873  #  [0.0, 0.0, 0.0],  # one_hot(-1)
3874  #  [0.0, 5.0, 0.0]]  # one_hot(1)
3875
3876  indices = [[0, 2], [1, -1]]
3877  depth = 3
3878  tf.one_hot(indices, depth,
3879             on_value=1.0, off_value=0.0,
3880             axis=-1)  # output: [2 x 2 x 3]
3881  # [[[1.0, 0.0, 0.0],   # one_hot(0)
3882  #   [0.0, 0.0, 1.0]],  # one_hot(2)
3883  #  [[0.0, 1.0, 0.0],   # one_hot(1)
3884  #   [0.0, 0.0, 0.0]]]  # one_hot(-1)
3885
3886  indices = tf.ragged.constant([[0, 1], [2]])
3887  depth = 3
3888  tf.one_hot(indices, depth)  # output: [2 x None x 3]
3889  # [[[1., 0., 0.],
3890  #   [0., 1., 0.]],
3891  #  [[0., 0., 1.]]]
3892  ```
3893
3894  Args:
3895    indices: A `Tensor` of indices.
3896    depth: A scalar defining the depth of the one hot dimension.
3897    on_value: A scalar defining the value to fill in output when `indices[j]
3898      = i`. (default: 1)
3899    off_value: A scalar defining the value to fill in output when `indices[j]
3900      != i`. (default: 0)
3901    axis: The axis to fill (default: -1, a new inner-most axis).
3902    dtype: The data type of the output tensor.
3903    name: A name for the operation (optional).
3904
3905  Returns:
3906    output: The one-hot tensor.
3907
3908  Raises:
    TypeError: If the dtype of either `on_value` or `off_value` doesn't match
      `dtype`.
    TypeError: If the dtypes of `on_value` and `off_value` don't match each
      other.
3911  """
3912  with ops.name_scope(
3913      name, "one_hot",
3914      [indices, depth, on_value, off_value, axis, dtype]) as name:
3915    on_exists = on_value is not None
3916    off_exists = off_value is not None
3917
3918    on_dtype = (
3919        ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists else None)
3920    off_dtype = (
3921        ops.convert_to_tensor(off_value).dtype.base_dtype
3922        if off_exists else None)
3923
3924    if on_exists or off_exists:
3925      if dtype is not None:
3926        # Ensure provided on_value and/or off_value match dtype
3927        if on_exists and on_dtype != dtype:
3928          raise TypeError("dtype {0} of on_value does not match "
3929                          "dtype parameter {1}".format(on_dtype, dtype))
3930        if off_exists and off_dtype != dtype:
3931          raise TypeError("dtype {0} of off_value does not match "
3932                          "dtype parameter {1}".format(off_dtype, dtype))
3933      else:
3934        # dtype not provided: automatically assign it
3935        dtype = on_dtype if on_exists else off_dtype
3936    elif dtype is None:
3937      # None of on_value, off_value, or dtype provided. Default dtype to float32
3938      dtype = dtypes.float32
3939
3940    if not on_exists:
3941      # on_value not provided: assign to value 1 of type dtype
3942      on_value = ops.convert_to_tensor(1, dtype, name="on_value")
3943      on_dtype = dtype
3944    if not off_exists:
3945      # off_value not provided: assign to value 0 of type dtype
3946      off_value = ops.convert_to_tensor(0, dtype, name="off_value")
3947      off_dtype = dtype
3948
3949    if on_dtype != off_dtype:
3950      raise TypeError("dtype {0} of on_value does not match "
3951                      "dtype {1} of off_value".format(on_dtype, off_dtype))
3952
3953    return gen_array_ops.one_hot(indices, depth, on_value, off_value, axis,
3954                                 name)
3955
3956
3957def _all_dimensions(x):
3958  """Returns a 1D-tensor listing all dimensions in x."""
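  # For example (a sketch): for a dense tensor of known rank 3 this returns
  # the constant [0, 1, 2].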
3959  # Fast path: avoid creating Rank and Range ops if ndims is known.
3960  if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
3961    return constant_op.constant(
3962        np.arange(x.get_shape().ndims), dtype=dtypes.int32)
3963  if (isinstance(x, sparse_tensor.SparseTensor) and
3964      x.dense_shape.get_shape().is_fully_defined()):
3965    r = x.dense_shape.get_shape().dims[0].value  # sparse.dense_shape is 1-D.
3966    return constant_op.constant(np.arange(r), dtype=dtypes.int32)
3967
3968  # Otherwise, we rely on `range` and `rank` to do the right thing at runtime.
3969  return gen_math_ops._range(0, rank(x), 1)
3970
3971
3972@tf_export("sequence_mask")
3973def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
3974  """Returns a mask tensor representing the first N positions of each cell.
3975
3976  If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has
3977  dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with
3978
3979  ```
3980  mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])
3981  ```
3982
3983  Examples:
3984
3985  ```python
3986  tf.sequence_mask([1, 3, 2], 5)  # [[True, False, False, False, False],
3987                                  #  [True, True, True, False, False],
3988                                  #  [True, True, False, False, False]]
3989
3990  tf.sequence_mask([[1, 3],[2,0]])  # [[[True, False, False],
3991                                    #   [True, True, True]],
3992                                    #  [[True, True, False],
3993                                    #   [False, False, False]]]
3994  ```
3995
3996  Args:
3997    lengths: integer tensor, all its values <= maxlen.
3998    maxlen: scalar integer tensor, size of last dimension of returned tensor.
3999      Default is the maximum value in `lengths`.
4000    dtype: output type of the resulting tensor.
4001    name: name of the op.
4002
4003  Returns:
    A mask tensor of shape `lengths.shape + (maxlen,)`, cast to the specified
    dtype.

  Raises:
4006    ValueError: if `maxlen` is not a scalar.
4007  """
4008  with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
4009    lengths = ops.convert_to_tensor(lengths)
4010
4011    if maxlen is None:
4012      maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))
4013      maxlen = gen_math_ops.maximum(constant(0, maxlen.dtype), maxlen)
4014    else:
4015      maxlen = ops.convert_to_tensor(maxlen)
4016    if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:
4017      raise ValueError("maxlen must be scalar for sequence_mask")
4018
4019    # The basic idea is to compare a range row vector of size maxlen:
4020    # [0, 1, 2, 3, 4]
4021    # to length as a matrix with 1 column: [[1], [3], [2]].
4022    # Because of broadcasting on both arguments this comparison results
4023    # in a matrix of size (len(lengths), maxlen)
4024    row_vector = gen_math_ops._range(
4025        constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))
    # Since maxlen >= max(lengths), it is safe to use maxlen's dtype as the
    # authoritative type for the cast: whenever maxlen fits into tf.int32, so
    # do the lengths.
4028    matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)
4029    result = row_vector < matrix
4030
4031    if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
4032      return result
4033    else:
4034      return gen_math_ops.cast(result, dtype)
4035
4036
4037@tf_export(v1=["squeeze"])
4038@dispatch.add_dispatch_support
4039@deprecation.deprecated_args(None, "Use the `axis` argument instead",
4040                             "squeeze_dims")
4041def squeeze(input, axis=None, name=None, squeeze_dims=None):
4042  # pylint: disable=redefined-builtin
4043  """Removes dimensions of size 1 from the shape of a tensor.
4044
4045  Given a tensor `input`, this operation returns a tensor of the same type with
4046  all dimensions of size 1 removed. If you don't want to remove all size 1
4047  dimensions, you can remove specific size 1 dimensions by specifying
4048  `axis`.
4049
4050  For example:
4051
4052  >>> # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
4053  >>> t = tf.ones([1, 2, 1, 3, 1, 1])
4054  >>> print(tf.shape(tf.squeeze(t)).numpy())
4055  [2 3]
4056
4057  Or, to remove specific size 1 dimensions:
4058
4059  >>> # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
4060  >>> t = tf.ones([1, 2, 1, 3, 1, 1])
4061  >>> print(tf.shape(tf.squeeze(t, [2, 4])).numpy())
4062  [1 2 3 1]
4063
4064  Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`
4065  time, where `N` is the number of elements in the squeezed dimensions.
4066
4067  Args:
4068    input: A `Tensor`. The `input` to squeeze.
4069    axis: An optional list of `ints`. Defaults to `[]`. If specified, only
4070      squeezes the dimensions listed. The dimension index starts at 0. It is an
4071      error to squeeze a dimension that is not 1. Must be in the range
4072      `[-rank(input), rank(input))`. Must be specified if `input` is a
4073      `RaggedTensor`.
4074    name: A name for the operation (optional).
4075    squeeze_dims: Deprecated keyword argument that is now axis.
4076
4077  Returns:
4078    A `Tensor`. Has the same type as `input`.
4079    Contains the same data as `input`, but has one or more dimensions of
4080    size 1 removed.
4081
4082  Raises:
4083    ValueError: When both `squeeze_dims` and `axis` are specified.
4084  """
4085  axis = deprecation.deprecated_argument_lookup("axis", axis, "squeeze_dims",
4086                                                squeeze_dims)
4087  if np.isscalar(axis):
4088    axis = [axis]
4089  return gen_array_ops.squeeze(input, axis, name)
4090
4091
4092@tf_export("squeeze", v1=[])
4093@dispatch.add_dispatch_support
4094def squeeze_v2(input, axis=None, name=None):
4095  """Removes dimensions of size 1 from the shape of a tensor.
4096
4097  Given a tensor `input`, this operation returns a tensor of the same type with
4098  all dimensions of size 1 removed. If you don't want to remove all size 1
4099  dimensions, you can remove specific size 1 dimensions by specifying
4100  `axis`.
4101
4102  For example:
4103
4104  ```python
4105  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
4106  tf.shape(tf.squeeze(t))  # [2, 3]
4107  ```
4108
4109  Or, to remove specific size 1 dimensions:
4110
4111  ```python
4112  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
4113  tf.shape(tf.squeeze(t, [2, 4]))  # [1, 2, 3, 1]
4114  ```
4115
4116  Unlike the older op `tf.compat.v1.squeeze`, this op does not accept a
4117  deprecated `squeeze_dims` argument.
4118
4119  Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`
4120  time, where `N` is the number of elements in the squeezed dimensions.
4121
4122  Args:
4123    input: A `Tensor`. The `input` to squeeze.
4124    axis: An optional list of `ints`. Defaults to `[]`. If specified, only
4125      squeezes the dimensions listed. The dimension index starts at 0. It is an
4126      error to squeeze a dimension that is not 1. Must be in the range
4127      `[-rank(input), rank(input))`. Must be specified if `input` is a
4128      `RaggedTensor`.
4129    name: A name for the operation (optional).
4130
4131  Returns:
4132    A `Tensor`. Has the same type as `input`.
4133    Contains the same data as `input`, but has one or more dimensions of
4134    size 1 removed.
4135
4136  Raises:
4137    ValueError: The input cannot be converted to a tensor, or the specified
4138      axis cannot be squeezed.
4139  """
4140  # pylint: disable=redefined-builtin
4141  return squeeze(input, axis, name)
4142
4143
4144@tf_export(v1=["where"])
4145@dispatch.add_dispatch_support
4146def where(condition, x=None, y=None, name=None):
4147  """Return the elements, either from `x` or `y`, depending on the `condition`.
4148
4149  If both `x` and `y` are None, then this operation returns the coordinates of
4150  true elements of `condition`.  The coordinates are returned in a 2-D tensor
4151  where the first dimension (rows) represents the number of true elements, and
4152  the second dimension (columns) represents the coordinates of the true
  elements. Keep in mind that the shape of the output tensor can vary
  depending on how many true values there are in the input. Indices are
  output in row-major order.
4156
4157  If both non-None, `x` and `y` must have the same shape.
4158  The `condition` tensor must be a scalar if `x` and `y` are scalar.
4159  If `x` and `y` are tensors of higher rank, then `condition` must be either a
4160  vector with size matching the first dimension of `x`, or must have the same
4161  shape as `x`.
4162
4163  The `condition` tensor acts as a mask that chooses, based on the value at each
4164  element, whether the corresponding element / row in the output should be taken
4165  from `x` (if true) or `y` (if false).
4166
4167  If `condition` is a vector and `x` and `y` are higher rank matrices, then it
4168  chooses which row (outer dimension) to copy from `x` and `y`. If `condition`
4169  has the same shape as `x` and `y`, then it chooses which element to copy from
4170  `x` and `y`.
4171
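  For example, a short sketch of both modes:

  ```python
  condition = tf.constant([True, False, True])

  tf.compat.v1.where(condition)  # [[0], [2]]: coordinates of true elements.

  x = tf.constant([1, 2, 3])
  y = tf.constant([10, 20, 30])
  tf.compat.v1.where(condition, x, y)  # [1, 20, 3]: element-wise choice.
  ```
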
4172  Args:
4173    condition: A `Tensor` of type `bool`
4174    x: A Tensor which may have the same shape as `condition`. If `condition` is
4175      rank 1, `x` may have higher rank, but its first dimension must match the
4176      size of `condition`.
4177    y: A `tensor` with the same shape and type as `x`.
4178    name: A name of the operation (optional)
4179
4180  Returns:
    A `Tensor` with the same type and shape as `x` and `y` if they are
    non-None.
4182    Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.
4183
4184  Raises:
4185    ValueError: When exactly one of `x` or `y` is non-None.
4186  """
4187  if x is None and y is None:
4188    with ops.name_scope(name, "Where", [condition]) as name:
4189      condition = ops.convert_to_tensor(
4190          condition, preferred_dtype=dtypes.bool, name="condition")
4191      return gen_array_ops.where(condition=condition, name=name)
4192  elif x is not None and y is not None:
4193    return gen_math_ops.select(condition=condition, x=x, y=y, name=name)
4194  else:
4195    raise ValueError("x and y must both be non-None or both be None.")
4196
4197
4198@tf_export("where", v1=["where_v2"])
4199def where_v2(condition, x=None, y=None, name=None):
4200  """Return the elements where `condition` is `True` (multiplexing `x` and `y`).
4201
4202  This operator has two modes: in one mode both `x` and `y` are provided, in
4203  another mode neither are provided. `condition` is always expected to be a
4204  `tf.Tensor` of type `bool`.
4205
4206  #### Retrieving indices of `True` elements
4207
4208  If `x` and `y` are not provided (both are None):
4209
  `tf.where` will return the indices of `condition` that are `True`, in
  the form of a 2-D tensor with shape `(n, d)`, where `n` is the number of
  `True` elements in `condition` and `d` is the number of dimensions in
  `condition`.
4214
4215  Indices are output in row-major order.
4216
4217  >>> tf.where([True, False, False, True])
4218  <tf.Tensor: shape=(2, 1), dtype=int64, numpy=
4219  array([[0],
4220         [3]])>
4221
4222  >>> tf.where([[True, False], [False, True]])
4223  <tf.Tensor: shape=(2, 2), dtype=int64, numpy=
4224  array([[0, 0],
4225         [1, 1]])>
4226
4227  >>> tf.where([[[True, False], [False, True], [True, True]]])
4228  <tf.Tensor: shape=(4, 3), dtype=int64, numpy=
4229  array([[0, 0, 0],
4230         [0, 1, 1],
4231         [0, 2, 0],
4232         [0, 2, 1]])>
4233
4234  #### Multiplexing between `x` and `y`
4235
4236  If `x` and `y` are provided (both have non-None values):
4237
  `tf.where` will choose an output shape that the shapes of `condition`, `x`,
  and `y` are all
  [broadcastable](https://docs.scipy.org/doc/numpy/reference/ufuncs.html) to.
4241
  The `condition` tensor acts as a mask that chooses whether the corresponding
  element / row in the output should be taken from `x`
  (if the element in `condition` is `True`) or `y` (if it is `False`).
4245
4246  >>> tf.where([True, False, False, True], [1,2,3,4], [100,200,300,400])
4247  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([  1, 200, 300,   4],
4248  dtype=int32)>
4249  >>> tf.where([True, False, False, True], [1,2,3,4], [100])
4250  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([  1, 100, 100,   4],
4251  dtype=int32)>
4252  >>> tf.where([True, False, False, True], [1,2,3,4], 100)
4253  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([  1, 100, 100,   4],
4254  dtype=int32)>
4255  >>> tf.where([True, False, False, True], 1, 100)
4256  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([  1, 100, 100,   1],
4257  dtype=int32)>
4258
4259  >>> tf.where(True, [1,2,3,4], 100)
4260  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([1, 2, 3, 4],
4261  dtype=int32)>
4262  >>> tf.where(False, [1,2,3,4], 100)
4263  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([100, 100, 100, 100],
4264  dtype=int32)>
4265
4266  Args:
4267    condition: A `tf.Tensor` of type `bool`
4268    x: If provided, a Tensor which is of the same type as `y`, and has a shape
4269      broadcastable with `condition` and `y`.
    y: If provided, a Tensor which is of the same type as `x`, and has a shape
      broadcastable with `condition` and `x`.
4272    name: A name of the operation (optional).
4273
4274  Returns:
4275    If `x` and `y` are provided:
4276      A `Tensor` with the same type as `x` and `y`, and shape that
4277      is broadcast from `condition`, `x`, and `y`.
    Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.
4279
4280  Raises:
4281    ValueError: When exactly one of `x` or `y` is non-None, or the shapes
4282      are not all broadcastable.
4283  """
4284  if x is None and y is None:
4285    with ops.name_scope(name, "Where", [condition]) as name:
4286      condition = ops.convert_to_tensor(
4287          condition, preferred_dtype=dtypes.bool, name="condition")
4288      return gen_array_ops.where(condition=condition, name=name)
4289  elif x is not None and y is not None:
4290    return gen_math_ops.select_v2(condition=condition, t=x, e=y, name=name)
4291  else:
4292    raise ValueError("x and y must both be non-None or both be None.")
4293
4294
4295# pylint: disable=redefined-builtin
4296@tf_export(v1=["reverse_sequence"])
4297@deprecation.deprecated_args(None,
4298                             "seq_dim is deprecated, use seq_axis instead",
4299                             "seq_dim")
4300@deprecation.deprecated_args(None,
4301                             "batch_dim is deprecated, use batch_axis instead",
4302                             "batch_dim")
4303def reverse_sequence(input,
4304                     seq_lengths,
4305                     seq_axis=None,
4306                     batch_axis=None,
4307                     name=None,
4308                     seq_dim=None,
4309                     batch_dim=None):
4310  """Reverses variable length slices.
4311
4312  This op first slices `input` along the dimension `batch_axis`, and for
4313  each slice `i`, reverses the first `seq_lengths[i]` elements along the
4314  dimension `seq_axis`.
4315
  The elements of `seq_lengths` must obey `seq_lengths[i] <=
  input.dims[seq_axis]`, and `seq_lengths` must be a vector of length
  `input.dims[batch_axis]`.
4319
4320  The output slice `i` along dimension `batch_axis` is then given by
4321  input slice `i`, with the first `seq_lengths[i]` slices along
4322  dimension `seq_axis` reversed.
4323
4324  Example usage:
4325
4326  >>> seq_lengths = [7, 2, 3, 5]
4327  >>> input = [[1, 2, 3, 4, 5, 0, 0, 0], [1, 2, 0, 0, 0, 0, 0, 0],
4328  ...          [1, 2, 3, 4, 0, 0, 0, 0], [1, 2, 3, 4, 5, 6, 7, 8]]
4329  >>> output = tf.reverse_sequence(input, seq_lengths, seq_axis=1, batch_axis=0)
4330  >>> output
4331  <tf.Tensor: shape=(4, 8), dtype=int32, numpy=
4332  array([[0, 0, 5, 4, 3, 2, 1, 0],
4333         [2, 1, 0, 0, 0, 0, 0, 0],
4334         [3, 2, 1, 4, 0, 0, 0, 0],
4335         [5, 4, 3, 2, 1, 6, 7, 8]], dtype=int32)>
4336
4337  Args:
    input: A `Tensor`. The input to reverse.
    seq_lengths: A `Tensor`. Must be one of the following types: `int32`,
      `int64`. 1-D with length `input.dims(batch_axis)` and
      `max(seq_lengths) <= input.dims(seq_axis)`.
    seq_axis: An `int`. The dimension which is partially reversed.
    batch_axis: An optional `int`. Defaults to `0`. The dimension along which
      reversal is performed.
    name: A name for the operation (optional).
4346
4347  Returns:
4348    A Tensor. Has the same type as input.
4349  """
4350  seq_axis = deprecation.deprecated_argument_lookup("seq_axis", seq_axis,
4351                                                    "seq_dim", seq_dim)
4352  batch_axis = deprecation.deprecated_argument_lookup("batch_axis", batch_axis,
4353                                                      "batch_dim", batch_dim)
4354  return gen_array_ops.reverse_sequence(
4355      input=input,
4356      seq_lengths=seq_lengths,
4357      seq_dim=seq_axis,
4358      batch_dim=batch_axis,
4359      name=name)
4360
4361
4362@tf_export("reverse_sequence", v1=[])
4363def reverse_sequence_v2(input,
4364                        seq_lengths,
4365                        seq_axis=None,
4366                        batch_axis=None,
4367                        name=None):
4368  return gen_array_ops.reverse_sequence(
4369      input=input,
4370      seq_lengths=seq_lengths,
4371      seq_dim=seq_axis,
4372      batch_dim=batch_axis,
4373      name=name)
4374
4375reverse_sequence_v2.__doc__ = reverse_sequence.__doc__
4376# pylint: enable=redefined-builtin
4377
4378
4379@tf_export(v1=["gather"])
4380@dispatch.add_dispatch_support
4381def gather(params,
4382           indices,
4383           validate_indices=None,
4384           name=None,
4385           axis=None,
4386           batch_dims=0):  # pylint: disable=g-doc-args
4387  r"""Gather slices from params axis `axis` according to indices.
4388
4389  Gather slices from params axis `axis` according to `indices`.  `indices` must
4390  be an integer tensor of any dimension (usually 0-D or 1-D).
4391
4392  For 0-D (scalar) `indices`:
4393
4394  $$\begin{align*}
4395  output[p_0, ..., p_{axis-1}, &&          &&& p_{axis + 1}, ..., p_{N-1}] = \\
4396  params[p_0, ..., p_{axis-1}, && indices, &&& p_{axis + 1}, ..., p_{N-1}]
4397  \end{align*}$$
4398
4399  Where *N* = `ndims(params)`.
4400
4401  For 1-D (vector) `indices` with `batch_dims=0`:
4402
4403  $$\begin{align*}
4404  output[p_0, ..., p_{axis-1}, &&         &i,  &&p_{axis + 1}, ..., p_{N-1}] =\\
4405  params[p_0, ..., p_{axis-1}, && indices[&i], &&p_{axis + 1}, ..., p_{N-1}]
4406  \end{align*}$$
4407
4408  In the general case, produces an output tensor where:
4409
4410  $$\begin{align*}
4411  output[p_0,             &..., p_{axis-1},                       &
4412         &i_{B},           ..., i_{M-1},                          &
4413         p_{axis + 1},    &..., p_{N-1}]                          = \\
4414  params[p_0,             &..., p_{axis-1},                       &
4415         indices[p_0, ..., p_{B-1}, &i_{B}, ..., i_{M-1}],        &
4416         p_{axis + 1},    &..., p_{N-1}]
4417  \end{align*}$$
4418
4419  Where *N* = `ndims(params)`, *M* = `ndims(indices)`, and *B* = `batch_dims`.
4420  Note that `params.shape[:batch_dims]` must be identical to
4421  `indices.shape[:batch_dims]`.
4422
4423  The shape of the output tensor is:
4424
4425  > `output.shape = params.shape[:axis] + indices.shape[batch_dims:] +
4426  > params.shape[axis + 1:]`.
4427
4428  Note that on CPU, if an out of bound index is found, an error is returned.
4429  On GPU, if an out of bound index is found, a 0 is stored in the corresponding
4430  output value.
4431
4432  See also `tf.gather_nd`.
4433
4434  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
4435  <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png"
4436  alt>
4437  </div>
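
  For example (an illustrative sketch; the values here are chosen only to
  show the indexing):

  ```python
  params = tf.constant([[0, 1, 2],
                        [10, 11, 12],
                        [20, 21, 22]])
  tf.gather(params, indices=[2, 0])          # rows 2 and 0
  # ==> [[20, 21, 22],
  #      [0, 1, 2]]
  tf.gather(params, indices=[2, 0], axis=1)  # columns 2 and 0
  # ==> [[2, 0],
  #      [12, 10],
  #      [22, 20]]
  ```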
4438
4439  Args:
4440    params: The `Tensor` from which to gather values. Must be at least rank
4441      `axis + 1`.
4442    indices: The index `Tensor`.  Must be one of the following types: `int32`,
4443      `int64`. Must be in range `[0, params.shape[axis])`.
4444    validate_indices: Deprecated, does nothing.
4445    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
4446      `axis` in `params` to gather `indices` from. Must be greater than or equal
4447      to `batch_dims`.  Defaults to the first non-batch dimension. Supports
4448      negative indexes.
4449    batch_dims: An `integer`.  The number of batch dimensions.  Must be less
4450      than or equal to `rank(indices)`.
4451    name: A name for the operation (optional).
4452
4453  Returns:
4454    A `Tensor`. Has the same type as `params`.
4455  """
4456  del validate_indices
4457
4458  if axis is None:
4459    axis = batch_dims
4460  if tensor_util.constant_value(axis) != 0:
4461    return gen_array_ops.gather_v2(
4462        params, indices, axis, batch_dims=batch_dims, name=name)
4463  try:
4464    # TODO(apassos) find a less bad way of detecting resource variables
4465    # without introducing a circular dependency.
4466    return params.sparse_read(indices, name=name)
4467  except AttributeError:
4468    return gen_array_ops.gather_v2(params, indices, axis, name=name)
4469
4470
4471@tf_export("gather", v1=[])
4472@dispatch.add_dispatch_support
4473def gather_v2(params,
4474              indices,
4475              validate_indices=None,
4476              axis=None,
4477              batch_dims=0,
4478              name=None):
4479  return gather(
4480      params,
4481      indices,
4482      validate_indices=validate_indices,
4483      name=name,
4484      axis=axis,
4485      batch_dims=batch_dims)
4486
4487
4488gather_v2.__doc__ = gather.__doc__
4489
4490
4491@tf_export(v1=["batch_gather"])
4492@dispatch.add_dispatch_support
4493@deprecation.deprecated(
4494    "2017-10-25", "`tf.batch_gather` is deprecated, please use `tf.gather` "
4495    "with `batch_dims=-1` instead.")  # pylint: disable=missing-docstring
4496def batch_gather(params, indices, name=None):
4497  """Gather slices from params according to indices with leading batch dims."""
4498  with ops.name_scope(name, "BatchGather", [params, indices]):
4499    indices = ops.convert_to_tensor(indices, name="indices")
4500    params = ops.convert_to_tensor(params, name="params")
4501    if indices.shape.ndims is None:
4502      raise ValueError(
4503          "batch_gather does not allow indices with unknown shape.")
4504    return _batch_gather(params, indices, batch_dims=indices.shape.ndims - 1)
4505
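# Illustrative sketch of `batch_gather` semantics (values chosen only for
# exposition): with one leading batch dimension, gathering happens within each
# batch element:
#   batch_gather([[1, 2, 3], [4, 5, 6]], [[2, 0], [1, 1]])
#   # ==> [[3, 1], [5, 5]]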
4506
4507def _batch_gather(params, indices, batch_dims, axis=None):
4508  r"""Gather slices from params according to indices with leading batch dims.
4509
4510  This operation assumes that the leading `batch_dims` dimensions of `indices`
4511  and `params` are batch dimensions; and performs a `tf.gather` operation within
4512  each batch. (If `batch_dims` is not specified, then it defaults to
4513  `rank(indices)-1`.)  In the case in which `batch_dims==0`, this operation
4514  is equivalent to `tf.gather`.
4515
4516  Args:
4517    params: A Tensor. The tensor from which to gather values.
4518    indices: A Tensor. Must be one of the following types: int32, int64. Index
      tensor. Must be in range `[0, params.shape[batch_dims])`.
4520    batch_dims: An integer or none.  The number of batch dimensions.  Must be
4521      less than `rank(indices)`.  Defaults to `rank(indices) - 1` if None.
4522    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
4523      `axis` in `params` to gather `indices` from. Must be greater than or equal
4524      to `batch_dims`.  Defaults to the first non-batch dimension. Supports
4525      negative indexes.
4526
4527  Returns:
4528    A Tensor. Has the same type as `params`.
4529
4530  Raises:
4531    ValueError: if `indices` has an unknown shape.
4532  """
4533  if batch_dims is not None and not isinstance(batch_dims, int):
4534    raise TypeError("batch_dims must be an int; got %r" % (batch_dims,))
4535  indices = ops.convert_to_tensor(indices, name="indices")
4536  params = ops.convert_to_tensor(params, name="params")
4537
4538  indices_ndims = indices.shape.ndims
4539  if indices_ndims is None:
4540    raise ValueError("tf.gather does not allow indices with unknown "
4541                     "rank when batch_dims is specified.")
4542  if batch_dims is None:
4543    batch_dims = indices_ndims - 1
4544  if batch_dims < 0:
4545    batch_dims += indices_ndims
4546  if batch_dims < 0 or batch_dims >= indices_ndims:
4547    raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
4548                     (batch_dims, indices_ndims))
4549  if params.shape.ndims is not None and batch_dims >= params.shape.ndims:
4550    raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
4551                     (batch_dims, params.shape.ndims))
4552
4553  # Handle axis by transposing the axis dimension to be the first non-batch
4554  # dimension, recursively calling batch_gather with axis=0, and then
4555  # transposing the result to put the pre-axis dimensions before the indices
4556  # dimensions.
4557  if axis is not None and axis != batch_dims:
4558    # Adjust axis to be positive.
4559    if not isinstance(axis, int):
      axis = where(axis < 0, axis + rank(params), axis)
    elif axis < 0 and params.shape.ndims is None:
      axis = axis + rank(params)
4563    else:
4564      if (axis < -params.shape.ndims) or (axis >= params.shape.ndims):
4565        raise ValueError("axis (%d) out of range [%d, %d)" %
4566                         (axis, -params.shape.ndims, params.shape.ndims))
4567      if axis < 0:
4568        axis += params.shape.ndims
4569      if axis < batch_dims:
4570        raise ValueError("batch_dims = %d must be less than or equal to "
4571                         "axis = %d" % (batch_dims, axis))
4572
4573    # Move params[axis] up to params[batch_dims].
4574    perm = [
4575        list(range(batch_dims)), [axis],
4576        gen_math_ops._range(batch_dims, axis, 1),
4577        gen_math_ops._range(axis + 1, rank(params), 1)
4578    ]
4579    params = transpose(params, concat(perm, axis=0))
4580
4581    result = _batch_gather(params, indices, batch_dims=batch_dims)
4582
4583    # Move the result dimensions corresponding to params[batch_dims:axis]
4584    # to just before the dimensions corresponding to indices[batch_dims:].
4585    params_start = indices_ndims + axis - batch_dims
4586    perm = [
4587        list(range(batch_dims)),
4588        gen_math_ops._range(indices_ndims, params_start, 1),
4589        list(range(batch_dims, indices_ndims)),
4590        gen_math_ops._range(params_start, rank(result), 1)
4591    ]
4592    return transpose(result, perm=concat(perm, axis=0))
4593
4594  indices_shape = shape(indices)
4595  params_shape = shape(params)
4596  batch_indices = indices
4597  indices_dtype = indices.dtype.base_dtype
4598  accum_dim_value = ones((), dtype=indices_dtype)
4599  # Use correct type for offset index computation
4600  casted_params_shape = gen_math_ops.cast(params_shape, indices_dtype)
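  # The loop below linearizes the batch coordinates: each batch dimension
  # `dim - 1` contributes its index times the number of elements spanned by
  # the dimensions to its right (accumulated in `accum_dim_value`), so that
  # `batch_indices` indexes directly into the flattened view of `params`
  # constructed further down.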
4601  for dim in range(batch_dims, 0, -1):
4602    dim_value = casted_params_shape[dim - 1]
4603    accum_dim_value *= casted_params_shape[dim]
4604    start = zeros((), dtype=indices_dtype)
4605    step = ones((), dtype=indices_dtype)
4606    dim_indices = gen_math_ops._range(start, dim_value, step)
4607    dim_indices *= accum_dim_value
4608    dim_shape = stack(
4609        [1] * (dim - 1) + [dim_value] + [1] * (indices_ndims - dim), axis=0)
4610    batch_indices += reshape(dim_indices, dim_shape)
4611
4612  flat_indices = reshape(batch_indices, [-1])
4613  outer_shape = params_shape[batch_dims + 1:]
4614  flat_inner_shape = gen_math_ops.prod(params_shape[:batch_dims + 1], [0],
4615                                       False)
4616
4617  flat_params = reshape(params, concat([[flat_inner_shape], outer_shape],
4618                                       axis=0))
4619  flat_result = gather(flat_params, flat_indices)
4620  result = reshape(flat_result, concat([indices_shape, outer_shape], axis=0))
4621  final_shape = indices.get_shape()[:batch_dims].merge_with(
4622      params.get_shape()[:batch_dims])
4623  final_shape = final_shape.concatenate(indices.get_shape().dims[batch_dims:])
4624  final_shape = final_shape.concatenate(params.get_shape()[batch_dims + 1:])
4625  result.set_shape(final_shape)
4626  return result
4627
4628
4629@tf_export(v1=["gather_nd", "manip.gather_nd"])
4630@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("manip.gather_nd")
4632def gather_nd(params, indices, name=None, batch_dims=0):
4633  r"""Gather slices from `params` into a Tensor with shape specified by `indices`.
4634
  `indices` is a K-dimensional integer tensor, best thought of as a
4636  (K-1)-dimensional tensor of indices into `params`, where each element defines
4637  a slice of `params`:
4638
4639      output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
4640
4641  Whereas in `tf.gather` `indices` defines slices into the first
4642  dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
4643  first `N` dimensions of `params`, where `N = indices.shape[-1]`.
4644
4645  The last dimension of `indices` can be at most the rank of
4646  `params`:
4647
4648      indices.shape[-1] <= params.rank
4649
4650  The last dimension of `indices` corresponds to elements
4651  (if `indices.shape[-1] == params.rank`) or slices
4652  (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
4653  of `params`.  The output tensor has shape
4654
4655      indices.shape[:-1] + params.shape[indices.shape[-1]:]
4656
4657  Additionally both 'params' and 'indices' can have M leading batch
4658  dimensions that exactly match. In this case 'batch_dims' must be M.
4659
4660  Note that on CPU, if an out of bound index is found, an error is returned.
4661  On GPU, if an out of bound index is found, a 0 is stored in the
4662  corresponding output value.
4663
4664  Some examples below.
4665
4666  Simple indexing into a matrix:
4667
4668  ```python
4669      indices = [[0, 0], [1, 1]]
4670      params = [['a', 'b'], ['c', 'd']]
4671      output = ['a', 'd']
4672  ```
4673
4674  Slice indexing into a matrix:
4675
4676  ```python
4677      indices = [[1], [0]]
4678      params = [['a', 'b'], ['c', 'd']]
4679      output = [['c', 'd'], ['a', 'b']]
4680  ```
4681
4682  Indexing into a 3-tensor:
4683
4684  ```python
4685      indices = [[1]]
4686      params = [[['a0', 'b0'], ['c0', 'd0']],
4687                [['a1', 'b1'], ['c1', 'd1']]]
4688      output = [[['a1', 'b1'], ['c1', 'd1']]]
4689
4690
4691      indices = [[0, 1], [1, 0]]
4692      params = [[['a0', 'b0'], ['c0', 'd0']],
4693                [['a1', 'b1'], ['c1', 'd1']]]
4694      output = [['c0', 'd0'], ['a1', 'b1']]
4695
4696
4697      indices = [[0, 0, 1], [1, 0, 1]]
4698      params = [[['a0', 'b0'], ['c0', 'd0']],
4699                [['a1', 'b1'], ['c1', 'd1']]]
4700      output = ['b0', 'b1']
4701  ```
4702
4703  The examples below are for the case when only indices have leading extra
4704  dimensions. If both 'params' and 'indices' have leading batch dimensions, use
4705  the 'batch_dims' parameter to run gather_nd in batch mode.
4706
4707  Batched indexing into a matrix:
4708
4709  ```python
4710      indices = [[[0, 0]], [[0, 1]]]
4711      params = [['a', 'b'], ['c', 'd']]
4712      output = [['a'], ['b']]
4713  ```
4714
4715  Batched slice indexing into a matrix:
4716
4717  ```python
4718      indices = [[[1]], [[0]]]
4719      params = [['a', 'b'], ['c', 'd']]
4720      output = [[['c', 'd']], [['a', 'b']]]
4721  ```
4722
4723  Batched indexing into a 3-tensor:
4724
4725  ```python
4726      indices = [[[1]], [[0]]]
4727      params = [[['a0', 'b0'], ['c0', 'd0']],
4728                [['a1', 'b1'], ['c1', 'd1']]]
4729      output = [[[['a1', 'b1'], ['c1', 'd1']]],
4730                [[['a0', 'b0'], ['c0', 'd0']]]]
4731
4732      indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
4733      params = [[['a0', 'b0'], ['c0', 'd0']],
4734                [['a1', 'b1'], ['c1', 'd1']]]
4735      output = [[['c0', 'd0'], ['a1', 'b1']],
4736                [['a0', 'b0'], ['c1', 'd1']]]
4737
4738
4739      indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
4740      params = [[['a0', 'b0'], ['c0', 'd0']],
4741                [['a1', 'b1'], ['c1', 'd1']]]
4742      output = [['b0', 'b1'], ['d0', 'c1']]
4743  ```
4744
4745  Examples with batched 'params' and 'indices':
4746
4747  ```python
4748      batch_dims = 1
4749      indices = [[1], [0]]
4750      params = [[['a0', 'b0'], ['c0', 'd0']],
4751                [['a1', 'b1'], ['c1', 'd1']]]
4752      output = [['c0', 'd0'], ['a1', 'b1']]
4753
4754      batch_dims = 1
4755      indices = [[[1]], [[0]]]
4756      params = [[['a0', 'b0'], ['c0', 'd0']],
4757                [['a1', 'b1'], ['c1', 'd1']]]
4758      output = [[['c0', 'd0']], [['a1', 'b1']]]
4759
4760      batch_dims = 1
4761      indices = [[[1, 0]], [[0, 1]]]
4762      params = [[['a0', 'b0'], ['c0', 'd0']],
4763                [['a1', 'b1'], ['c1', 'd1']]]
4764      output = [['c0'], ['b1']]
4765  ```
4766
4767  See also `tf.gather`.
4768
4769  Args:
4770    params: A `Tensor`. The tensor from which to gather values.
4771    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
4772      Index tensor.
4773    name: A name for the operation (optional).
4774    batch_dims: An integer or a scalar 'Tensor'. The number of batch dimensions.
4775
4776  Returns:
4777    A `Tensor`. Has the same type as `params`.
4778  """
4779  batch_dims_ = tensor_util.constant_value(batch_dims)
4780  if batch_dims_ is not None:
4781    batch_dims = int(batch_dims_)
4782  if batch_dims == 0:
4783    try:
4784      # TODO(apassos) find a less bad way of detecting resource variables
4785      # without introducing a circular dependency.
4786      return params.gather_nd(indices, name=name)
4787    except AttributeError:
4788      return gen_array_ops.gather_nd(params, indices, name=name)
4789  else:
4790    return batch_gather_nd(params, indices, batch_dims=batch_dims, name=name)
4791
4792
4793@tf_export("gather_nd", v1=[])
4794@dispatch.add_dispatch_support
4795def gather_nd_v2(params, indices, batch_dims=0, name=None):
4796  return gather_nd(params, indices, name=name, batch_dims=batch_dims)
4797
4798
4799gather_nd_v2.__doc__ = gather_nd.__doc__
4800
4801
4802def batch_gather_nd(params, indices, batch_dims, name=None):
4803  """gather_nd implementation with batch support."""
4804  with ops.name_scope(name, "BatchGatherND", [params, indices]):
4805    indices = ops.convert_to_tensor(indices, name="indices")
4806    params = ops.convert_to_tensor(params, name="params")
4807
4808    if not isinstance(batch_dims, int):
4809      raise TypeError("batch_dims must be an int; got %r" % (batch_dims,))
4810    if batch_dims < 0:
4811      raise ValueError("tf.gather_nd does not allow negative batch_dims.")
4812    params_ndims = params.shape.ndims
4813    indices_ndims = indices.shape.ndims
4814    if indices_ndims is not None and batch_dims >= indices_ndims:
4815      raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
4816                       (batch_dims, indices_ndims))
4817    if params_ndims is not None and batch_dims >= params_ndims:
4818      raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
4819                       (batch_dims, params_ndims))
4820
4821    expand = batch_dims == 0
4822    if expand:
4823      # Normally gather_nd will be called when batch_dims == 0.
4824      # But if this function is called with batch_dims = 0, e.g. for testing
4825      # purposes, this adds a dummy batch dimension to make batch_dims = 1.
4826      params = expand_dims(params, axis=0)
4827      indices = expand_dims(indices, axis=0)
4828      batch_dims = 1
4829
4830    params_shape = shape(params)
4831    indices_shape = shape(indices)
4832    batch_shape = params_shape[:batch_dims]
4833    batch_size = gen_math_ops.prod(batch_shape, [0])
4834    index_internal_ndims = rank(indices) - batch_dims - 1
4835    indices_internal_shape = indices_shape[batch_dims:-1]
4836
4837    # Assuming a 'params' with shape [b1, ..., bM, g1, ..., gN] and an 'indices'
4838    # with shape [b1, ..., bM, i1, ..., iK, C], where C <= N, we need to modify
4839    # 'indices' s.t. it has shape [i1, ..., iK, D], where D <= M + N and slices
4840    # to the entire 'params' tensor.
4841    # Assuming we have a batch of shape [B1, B2], we use meshgrid to create a
4842    # grid of size B1 x B2.
4843    batch_dim_list = unstack(batch_shape, axis=0)
4844    dim_ranges = [
4845        gen_math_ops.cast(gen_math_ops._range(0, x, 1), indices.dtype)
4846        for x in batch_dim_list
4847    ]
4848    mesh_list = meshgrid(*dim_ranges, indexing="ij") if dim_ranges else []
4849    # Then we flatten and stack the tensors to form a (B1.B2) by 2 matrix.
4850    flat_list = [reshape(x, shape=(-1,)) for x in mesh_list]
4851    index_grid = transpose(stack(flat_list, axis=0))
4852    # We need to concatenate these batch coordinates with the internal indices.
4853    # concat -> index_grid [B1.B2, 2] with indices [i1, ..., iK, C]
4854    # So we reshape them both to [(B1.B2), i1, ..., iK, *]
4855    index_grid_shape = shape(index_grid)
4856    index_grid = reshape(
4857        index_grid,
4858        concat([
4859            index_grid_shape[:1],
4860            ones(index_internal_ndims, dtype=dtypes.int32), index_grid_shape[1:]
4861        ],
4862               axis=0))
4863    tile_shape = concat(((1,), indices_internal_shape, (1,)), axis=0)
4864    index_grid = tile(index_grid, multiples=tile_shape)
4865    # index_grid now has shape [(B1.B2), i1, ..., iK, 2]
4866    flat_shape = concat(([batch_size], indices_shape[batch_dims:]), axis=0)
4867    flat_indices = reshape(indices, shape=flat_shape)
4868    # flat_indices now has shape [(B1.B2), i1, ..., iK, C]
4869    indices = concat((index_grid, flat_indices), axis=-1)
4870    # indices has shape [(B1.B2), i1, ..., iK, 2+C]
4871    out = gen_array_ops.gather_nd(params, indices)
4872    # out has shape [(B1.B2), i1, ..., iK, N-C]. Now we reshape batch to
4873    # its original form.
4874    out_shape = shape(out)
4875    out = reshape(out, shape=concat((batch_shape, out_shape[1:]), axis=0))
4876    if expand:
4877      out = squeeze(out, axis=0)
4878  return out
4879
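# Sketch of what `batch_gather_nd` computes (illustrative; mirrors the
# batched example in the `gather_nd` docstring): with batch_dims=1,
#   params  = [[['a0', 'b0'], ['c0', 'd0']],
#              [['a1', 'b1'], ['c1', 'd1']]]
#   indices = [[1], [0]]
#   batch_gather_nd(params, indices, batch_dims=1)
#   # ==> [['c0', 'd0'], ['a1', 'b1']]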
4880
4881# Define quantize_v2 here in order to make name the second-to-last attribute,
4882# because round_mode was added later.
4883# (And also now because of 'axis' processing).
4884@tf_export(v1=["quantize_v2"])
4885@deprecation.deprecated(
4886    "2017-10-25",
4887    "`tf.quantize_v2` is deprecated, please use `tf.quantization.quantize` "
4888    "instead.")  # pylint: disable=missing-docstring
4889def quantize_v2(
4890    input,  # pylint: disable=redefined-builtin
4891    min_range,
4892    max_range,
4893    T,
4894    mode="MIN_COMBINED",
4895    name=None,
4896    round_mode="HALF_AWAY_FROM_ZERO",
4897    narrow_range=False,
4898    axis=None,
4899    ensure_minimum_range=0.01):
4900  if axis is None:
4901    axis = -1
4902  elif axis < 0:
4903    if input.shape.ndims is None:
4904      raise ValueError("input should have known rank to use negative axis.")
4905    axis %= input.shape.ndims
4906
4907  if ensure_minimum_range != 0.01:
4908    return gen_array_ops.quantize_v2(
4909        input,
4910        min_range,
4911        max_range,
4912        T=T,
4913        mode=mode,
4914        name=name,
4915        round_mode=round_mode,
4916        narrow_range=narrow_range,
4917        axis=axis,
4918        ensure_minimum_range=ensure_minimum_range)
4919  return gen_array_ops.quantize_v2(
4920      input,
4921      min_range,
4922      max_range,
4923      T=T,
4924      mode=mode,
4925      name=name,
4926      round_mode=round_mode,
4927      narrow_range=narrow_range,
4928      axis=axis)
4929
4930
4931quantize_v2.__doc__ = """Please use `tf.quantization.quantize` instead."""
4932
4933
# We want to expose tf.quantization.quantize instead of tf.quantize; we can
# deprecate tf.quantize in the next version of TensorFlow.
4937@tf_export("quantization.quantize", v1=["quantization.quantize", "quantize"])
4938@deprecation.deprecated_endpoints("quantize")
4939def quantize(
4940    input,  # pylint: disable=redefined-builtin
4941    min_range,
4942    max_range,
4943    T,
4944    mode="MIN_COMBINED",
4945    round_mode="HALF_AWAY_FROM_ZERO",
4946    name=None,
4947    narrow_range=False,
4948    axis=None,
4949    ensure_minimum_range=0.01):
4950  """Quantize the input tensor."""
4951  if ensure_minimum_range != 0.01:
4952    return quantize_v2(
4953        input,
4954        min_range,
4955        max_range,
4956        T,
4957        mode=mode,
4958        round_mode=round_mode,
4959        name=name,
4960        narrow_range=narrow_range,
4961        axis=axis,
4962        ensure_minimum_range=ensure_minimum_range)
4963  return quantize_v2(
4964      input,
4965      min_range,
4966      max_range,
4967      T,
4968      mode=mode,
4969      round_mode=round_mode,
4970      name=name,
4971      narrow_range=narrow_range,
4972      axis=axis)
4973
4974
4975@tf_export("quantization.dequantize", v1=["quantization.dequantize",
4976                                          "dequantize"])
4977@deprecation.deprecated_endpoints("dequantize")
4978def dequantize(  # pylint: disable=missing-docstring
4979    input,  # pylint: disable=redefined-builtin
4980    min_range,
4981    max_range,
4982    mode="MIN_COMBINED",
4983    name=None,
4984    axis=None,
4985    narrow_range=False,
4986    dtype=dtypes.float32):
4987  if axis is None:
4988    axis = -1
4989  elif axis < 0:
4990    if input.shape.ndims is None:
4991      raise ValueError("input should have known rank to use negative axis.")
4992    axis %= input.shape.ndims
4993
4994  if axis >= 0 or narrow_range:
4995    return gen_array_ops.dequantize(
4996        input,
4997        min_range,
4998        max_range,
4999        mode=mode,
5000        name=name,
5001        narrow_range=narrow_range,
5002        axis=axis,
5003        dtype=dtype)
5004  return gen_array_ops.dequantize(
5005      input, min_range, max_range, mode=mode, name=name, dtype=dtype)
5006
5007
5008dequantize.__doc__ = gen_array_ops.dequantize.__doc__
5009
5010
5011@tf_export("quantization.quantize_and_dequantize")
5012def quantize_and_dequantize(
5013    input,  # pylint: disable=redefined-builtin
5014    input_min,
5015    input_max,
5016    signed_input=True,
5017    num_bits=8,
5018    range_given=False,
5019    round_mode="HALF_TO_EVEN",
5020    name=None,
5021    narrow_range=False,
5022    axis=None):
5023  """Quantizes then dequantizes a tensor.
5024
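  A minimal round-trip sketch (illustrative; exact outputs depend on the
  rounding and range handling of the underlying op):

  ```python
  x = tf.constant([-0.8, 0.0, 0.5])
  y = tf.quantization.quantize_and_dequantize(
      x, input_min=-1.0, input_max=1.0, range_given=True)
  # y approximates x, with error bounded by the 8-bit quantization step.
  ```
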
5025  Args:
5026    input: A `Tensor` to quantize and dequantize.
    input_min: If range_given=True, the minimum input value that needs to be
      represented in the quantized representation. If axis is specified, this
      should be a vector of minimum values for each slice along axis.
    input_max: If range_given=True, the maximum input value that needs to be
      represented in the quantized representation. If axis is specified, this
      should be a vector of maximum values for each slice along axis.
    signed_input: Whether the quantization is signed (True) or unsigned
      (False).
    num_bits: The bitwidth of the quantization.
    range_given: If true, use `input_min` and `input_max` for the range of the
      input; otherwise, determine min and max from the input `Tensor`.
    round_mode: Rounding mode when rounding from float values to quantized
      ones; one of ['HALF_TO_EVEN', 'HALF_UP'].
    name: Optional name for the operation.
    narrow_range: If true, then the absolute value of the quantized minimum
      value is the same as the quantized maximum value, instead of 1 greater;
      i.e., for 8-bit quantization, the minimum value is -127 instead of -128.
5043    axis: Integer. If specified, refers to a dimension of the input tensor, such
5044      that quantization will be per slice along that dimension.
5045
5046  Returns:
5047    A `Tensor`. Each element is the result of quantizing and dequantizing the
5048    corresponding element of `input`.
5049  """
5050  if axis is None:
5051    axis = -1
5052  elif axis < 0:
5053    if input.shape.ndims is None:
5054      raise ValueError("input should have known rank to use negative axis.")
5055    axis %= input.shape.ndims
5056
5057  return gen_array_ops.quantize_and_dequantize_v2(
5058      input,
5059      input_min=input_min,
5060      input_max=input_max,
5061      signed_input=signed_input,
5062      num_bits=num_bits,
5063      range_given=range_given,
5064      round_mode=round_mode,
5065      narrow_range=narrow_range,
5066      axis=axis,
5067      name=name)
5068
5069
5070@tf_export("searchsorted")
5071def searchsorted(sorted_sequence,
5072                 values,
5073                 side="left",
5074                 out_type=dtypes.int32,
5075                 name=None):
5076  """Searches input tensor for values on the innermost dimension.
5077
5078  A 2-D example:
5079
5080  ```
5081    sorted_sequence = [[0, 3, 9, 9, 10],
5082                       [1, 2, 3, 4, 5]]
5083    values = [[2, 4, 9],
5084              [0, 2, 6]]
5085
5086    result = searchsorted(sorted_sequence, values, side="left")
5087
5088    result == [[1, 2, 2],
5089               [0, 1, 5]]
5090
5091    result = searchsorted(sorted_sequence, values, side="right")
5092
5093    result == [[1, 2, 4],
5094               [0, 2, 5]]
5095  ```
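
  For a 1-D `sorted_sequence` this matches the usual `np.searchsorted`
  semantics (an illustrative comparison, assuming NumPy's handling of ties):

  ```python
  tf.searchsorted([1, 3, 5, 7], [4])                # ==> [2]
  tf.searchsorted([1, 3, 5, 7], [5], side="right")  # ==> [3]
  ```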
5096
5097  Args:
5098    sorted_sequence: N-D `Tensor` containing a sorted sequence.
5099    values: N-D `Tensor` containing the search values.
5100    side: 'left' or 'right'; 'left' corresponds to lower_bound and 'right' to
5101      upper_bound.
5102    out_type: The output type (`int32` or `int64`).  Default is `tf.int32`.
5103    name: Optional name for the operation.
5104
5105  Returns:
    An N-D `Tensor` the size of `values`, containing the result of applying
    either lower_bound or upper_bound (depending on `side`) to each value. The
    result is not a global index into the entire `Tensor`, but the index in
    the last dimension.

  Raises:
    ValueError: If the last dimension of `sorted_sequence` has more than
      `2^31 - 1` elements; if the total size of `values` exceeds `2^31 - 1`
      elements; or if the first `N-1` dimensions of the two tensors don't
      match.
5115  """
5116  sequence_size = shape_internal(sorted_sequence)[-1]
5117  values_size = shape_internal(values)[-1]
5118  sorted_sequence_2d = reshape(sorted_sequence, [-1, sequence_size])
5119  values_2d = reshape(values, [-1, values_size])
5120  if side == "right":
5121    output = gen_array_ops.upper_bound(sorted_sequence_2d, values_2d, out_type,
5122                                       name)
5123  elif side == "left":
5124    output = gen_array_ops.lower_bound(sorted_sequence_2d, values_2d, out_type,
5125                                       name)
5126  else:
5127    raise ValueError("side must be either 'right' or 'left'.  Saw: %s." % side)
5128  return reshape(output, shape_internal(values))
5129
5130
5131quantize.__doc__ = gen_array_ops.quantize_v2.__doc__
5132
5133
5134@tf_export("image.extract_patches")
5135def extract_image_patches_v2(images, sizes, strides, rates, padding, name=None):
5136  r"""Extract `patches` from `images`.
5137
5138  This op collects patches from the input image, as if applying a
5139  convolution. All extracted patches are stacked in the depth (last) dimension
5140  of the output.
5141
5142  Specifically, the op extracts patches of shape `sizes` which are `strides`
5143  apart in the input image. The output is subsampled using the `rates` argument,
5144  in the same manner as "atrous" or "dilated" convolutions.
5145
5146  The result is a 4D tensor which is indexed by batch, row, and column.
5147  `output[i, x, y]` contains a flattened patch of size `sizes[1], sizes[2]`
5148  which is taken from the input starting at
5149  `images[i, x*strides[1], y*strides[2]]`.
5150
5151  Each output patch can be reshaped to `sizes[1], sizes[2], depth`, where
5152  `depth` is `images.shape[3]`.
5153
5154  The output elements are taken from the input at intervals given by the `rate`
5155  argument, as in dilated convolutions.
5156
  The `padding` argument has no effect on the size of each patch; it determines
5158  how many patches are extracted. If `VALID`, only patches which are fully
5159  contained in the input image are included. If `SAME`, all patches whose
5160  starting point is inside the input are included, and areas outside the input
5161  default to zero.
5162
5163  Example:
5164
5165  ```
5166    n = 10
5167    # images is a 1 x 10 x 10 x 1 array that contains the numbers 1 through 100
5168    images = [[[[x * n + y + 1] for y in range(n)] for x in range(n)]]
5169
5170    # We generate two outputs as follows:
5171    # 1. 3x3 patches with stride length 5
5172    # 2. Same as above, but the rate is increased to 2
    tf.image.extract_patches(images=images,
                             sizes=[1, 3, 3, 1],
5175                             strides=[1, 5, 5, 1],
5176                             rates=[1, 1, 1, 1],
5177                             padding='VALID')
5178
5179    # Yields:
5180    [[[[ 1  2  3 11 12 13 21 22 23]
5181       [ 6  7  8 16 17 18 26 27 28]]
5182      [[51 52 53 61 62 63 71 72 73]
5183       [56 57 58 66 67 68 76 77 78]]]]
5184  ```
5185
5186  If we mark the pixels in the input image which are taken for the output with
5187  `*`, we see the pattern:
5188
5189  ```
5190     *  *  *  4  5  *  *  *  9 10
5191     *  *  * 14 15  *  *  * 19 20
5192     *  *  * 24 25  *  *  * 29 30
5193    31 32 33 34 35 36 37 38 39 40
5194    41 42 43 44 45 46 47 48 49 50
5195     *  *  * 54 55  *  *  * 59 60
5196     *  *  * 64 65  *  *  * 69 70
5197     *  *  * 74 75  *  *  * 79 80
5198    81 82 83 84 85 86 87 88 89 90
5199    91 92 93 94 95 96 97 98 99 100
5200  ```
5201
5202  ```
    tf.image.extract_patches(images=images,
5204                             sizes=[1, 3, 3, 1],
5205                             strides=[1, 5, 5, 1],
5206                             rates=[1, 2, 2, 1],
5207                             padding='VALID')
5208
5209    # Yields:
5210    [[[[  1   3   5  21  23  25  41  43  45]
5211       [  6   8  10  26  28  30  46  48  50]]
5212
5213      [[ 51  53  55  71  73  75  91  93  95]
5214       [ 56  58  60  76  78  80  96  98 100]]]]
5215  ```
5216
5217  We can again draw the effect, this time using the symbols `*`, `x`, `+` and
5218  `o` to distinguish the patches:
5219
5220  ```
5221     *  2  *  4  *  x  7  x  9  x
5222    11 12 13 14 15 16 17 18 19 20
5223     * 22  * 24  *  x 27  x 29  x
5224    31 32 33 34 35 36 37 38 39 40
5225     * 42  * 44  *  x 47  x 49  x
5226     + 52  + 54  +  o 57  o 59  o
5227    61 62 63 64 65 66 67 68 69 70
5228     + 72  + 74  +  o 77  o 79  o
5229    81 82 83 84 85 86 87 88 89 90
5230     + 92  + 94  +  o 97  o 99  o
5231  ```
5232
5233  Args:
    images: A 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
5235    sizes: The size of the extracted patches. Must be [1, size_rows, size_cols,
5236      1].
5237    strides: A 1-D Tensor of length 4. How far the centers of two consecutive
5238      patches are in the images. Must be: `[1, stride_rows, stride_cols, 1]`.
5239    rates: A 1-D Tensor of length 4. Must be: `[1, rate_rows, rate_cols, 1]`.
5240      This is the input stride, specifying how far two consecutive patch samples
5241      are in the input. Equivalent to extracting patches with `patch_sizes_eff =
5242      patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling
5243      them spatially by a factor of `rates`. This is equivalent to `rate` in
5244      dilated (a.k.a. Atrous) convolutions.
5245    padding: The type of padding algorithm to use.
5246    name: A name for the operation (optional).
5247
5248  Returns:
5249    A 4-D Tensor of the same type as the input.
5250  """
5251  return gen_array_ops.extract_image_patches(images, sizes, strides, rates,
5252                                             padding, name)
5253
5254
5255@tf_export(v1=["image.extract_image_patches", "extract_image_patches"])
5256@deprecation.deprecated_args(None, "ksizes is deprecated, use sizes instead",
5257                             "ksizes")
5258def extract_image_patches(  # pylint: disable=missing-docstring
5259    images,
5260    ksizes=None,
5261    strides=None,
5262    rates=None,
5263    padding=None,
5264    name=None,
5265    sizes=None):
5266  """Extract patches from images and put them in the "depth" output dimension.
5267
5268  Args:
    images: A `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`,
      `uint16`, `half`, `uint32`, `uint64`. 4-D Tensor with shape
      `[batch, in_rows, in_cols, depth]`.
    ksizes: A list of `ints` that has length `>= 4`. The size of the sliding
      window for each dimension of `images`.
    strides: A list of `ints` that has length `>= 4`. 1-D of length 4. How far
      the centers of two consecutive patches are in the images. Must be:
      `[1, stride_rows, stride_cols, 1]`.
    rates: A list of `ints` that has length `>= 4`. 1-D of length 4. Must be:
      `[1, rate_rows, rate_cols, 1]`. This is the input stride, specifying how
      far two consecutive patch samples are in the input. Equivalent to
      extracting patches with `patch_sizes_eff = patch_sizes + (patch_sizes -
      1) * (rates - 1)`, followed by subsampling them spatially by a factor of
      `rates`. This is equivalent to `rate` in dilated (a.k.a. Atrous)
      convolutions.
    padding: A `string` from: `"SAME"`, `"VALID"`. The type of padding
      algorithm to use. We specify the size-related attributes as:
      ```
      ksizes = [1, ksize_rows, ksize_cols, 1]
      strides = [1, strides_rows, strides_cols, 1]
      rates = [1, rates_rows, rates_cols, 1]
      ```
    name: A name for the operation (optional).
5290
5291  Returns:
5292    A Tensor. Has the same type as images.
5293  """
5294  ksizes = deprecation.deprecated_argument_lookup("sizes", sizes, "ksizes",
5295                                                  ksizes)
5296  return gen_array_ops.extract_image_patches(images, ksizes, strides, rates,
5297                                             padding, name)
5298
5299
5300extract_image_patches.__doc__ = gen_array_ops.extract_image_patches.__doc__
5301
5302
5303@tf_export("fingerprint")
5304def fingerprint(data, method="farmhash64", name=None):
5305  r"""Generates fingerprint values.
5306
5307  Generates fingerprint values of `data`.
5308
5309  Fingerprint op considers the first dimension of `data` as the batch dimension,
5310  and `output[i]` contains the fingerprint value generated from contents in
5311  `data[i, ...]` for all `i`.
5312
5313  Fingerprint op writes fingerprint values as byte arrays. For example, the
5314  default method `farmhash64` generates a 64-bit fingerprint value at a time.
  This 8-byte value is written out as a `tf.uint8` array of size 8, in
5316  little-endian order.
5317
5318  For example, suppose that `data` has data type `tf.int32` and shape (2, 3, 4),
5319  and that the fingerprint method is `farmhash64`. In this case, the output
5320  shape is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the
5321  size of each fingerprint value in bytes. `output[0, :]` is generated from
5322  12 integers in `data[0, :, :]` and similarly `output[1, :]` is generated from
5323  other 12 integers in `data[1, :, :]`.
5324
5325  Note that this op fingerprints the raw underlying buffer, and it does not
5326  fingerprint Tensor's metadata such as data type and/or shape. For example, the
5327  fingerprint values are invariant under reshapes and bitcasts as long as the
  batch dimension remains the same:
5329
5330  ```python
5331  tf.fingerprint(data) == tf.fingerprint(tf.reshape(data, ...))
5332  tf.fingerprint(data) == tf.fingerprint(tf.bitcast(data, ...))
5333  ```
5334
5335  For string data, one should expect `tf.fingerprint(data) !=
  tf.fingerprint(tf.strings.reduce_join(data))` in general.
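
  For example (shapes only; the actual bytes depend on the `farmhash64`
  implementation):

  ```python
  data = tf.zeros([2, 3, 4], dtype=tf.int32)
  tf.fingerprint(data).shape  # ==> TensorShape([2, 8])
  ```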
5337
5338  Args:
5339    data: A `Tensor`. Must have rank 1 or higher.
5340    method: A `Tensor` of type `tf.string`. Fingerprint method used by this op.
5341      Currently available method is `farmhash64`.
5342    name: A name for the operation (optional).
5343
5344  Returns:
5345    A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals to
5346    `data`'s first dimension, and the second dimension size depends on the
5347    fingerprint algorithm.
5348  """
5349  return gen_array_ops.fingerprint(data, method, name)
5350
5351
5352def convert_to_int_tensor(tensor, name, dtype=dtypes.int32):
5353  """Converts the given value to an integer Tensor."""
5354  tensor = ops.convert_to_tensor(tensor, name=name, preferred_dtype=dtype)
5355  if tensor.dtype.is_integer:
5356    tensor = gen_math_ops.cast(tensor, dtype)
5357  else:
5358    raise TypeError("%s must be an integer tensor; dtype=%s" %
5359                    (name, tensor.dtype))
5360  return tensor
5361
5362
5363def get_positive_axis(axis, ndims):
5364  """Validate an `axis` parameter, and normalize it to be positive.
5365
5366  If `ndims` is known (i.e., not `None`), then check that `axis` is in the
5367  range `-ndims <= axis < ndims`, and return `axis` (if `axis >= 0`) or
5368  `axis + ndims` (otherwise).
5369  If `ndims` is not known, and `axis` is positive, then return it as-is.
5370  If `ndims` is not known, and `axis` is negative, then report an error.
5371
5372  Args:
5373    axis: An integer constant
5374    ndims: An integer constant, or `None`
5375
5376  Returns:
5377    The normalized `axis` value.
5378
5379  Raises:
5380    ValueError: If `axis` is out-of-bounds, or if `axis` is negative and
5381      `ndims is None`.
5382  """
5383  if not isinstance(axis, int):
5384    raise TypeError("axis must be an int; got %s" % type(axis).__name__)
5385  if ndims is not None:
5386    if 0 <= axis < ndims:
5387      return axis
5388    elif -ndims <= axis < 0:
5389      return axis + ndims
5390    else:
5391      raise ValueError("axis=%s out of bounds: expected %s<=axis<%s" %
5392                       (axis, -ndims, ndims))
5393  elif axis < 0:
5394    raise ValueError("axis may only be negative if ndims is statically known.")
5395  return axis
5396
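# Illustrative behavior of `get_positive_axis` (a sketch of the contract
# documented above): get_positive_axis(-1, 3) == 2; get_positive_axis(1, 3)
# == 1; get_positive_axis(-1, None) raises ValueError.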
5397
5398# This op is intended to exactly match the semantics of numpy.repeat, with
5399# one exception: numpy.repeat has special (and somewhat non-intuitive) behavior
5400# when axis is not specified.  Rather than implement that special behavior, we
5401# simply make `axis` be a required argument.
5402#
5403# External (OSS) `tf.repeat` feature request:
5404# https://github.com/tensorflow/tensorflow/issues/8246
5405def repeat_with_axis(data, repeats, axis, name=None):
5406  """Repeats elements of `data`.
5407
5408  Args:
5409    data: An `N`-dimensional tensor.
5410    repeats: A 1-D integer tensor specifying how many times each element in
5411      `axis` should be repeated.  `len(repeats)` must equal `data.shape[axis]`.
5412      Supports broadcasting from a scalar value.
5413    axis: `int`.  The axis along which to repeat values.  Must be less than
5414      `max(N, 1)`.
5415    name: A name for the operation.
5416
5417  Returns:
5418    A tensor with `max(N, 1)` dimensions.  Has the same shape as `data`,
5419    except that dimension `axis` has size `sum(repeats)`.
5420
5421  Example usage:
5422
5423  >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)
5424  <tf.Tensor: shape=(5,), dtype=string,
5425  numpy=array([b'a', b'a', b'a', b'c', b'c'], dtype=object)>
5426  >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)
5427  <tf.Tensor: shape=(5, 2), dtype=int32, numpy=
5428  array([[1, 2],
5429         [1, 2],
5430         [3, 4],
5431         [3, 4],
5432         [3, 4]], dtype=int32)>
5433  >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)
5434  <tf.Tensor: shape=(2, 5), dtype=int32, numpy=
5435  array([[1, 1, 2, 2, 2],
5436         [3, 3, 4, 4, 4]], dtype=int32)>
5437
5438  """
5439  if not isinstance(axis, int):
5440    raise TypeError("axis must be an int; got %s" % type(axis).__name__)
5441
5442  with ops.name_scope(name, "Repeat", [data, repeats]):
5443    data = ops.convert_to_tensor(data, name="data")
5444    repeats = convert_to_int_tensor(repeats, name="repeats")
5445    repeats.shape.with_rank_at_most(1)
5446
5447    # If `data` is a scalar, then upgrade it to a vector.
5448    data = _with_nonzero_rank(data)
5449    data_shape = shape(data)
5450
5451    # If `axis` is negative, then convert it to a positive value.
5452    axis = get_positive_axis(axis, data.shape.ndims)
5453
5454    # Check data Tensor shapes.
5455    if repeats.shape.ndims == 1:
5456      data.shape.dims[axis].assert_is_compatible_with(repeats.shape[0])
5457
5458    # If we know that `repeats` is a scalar, then we can just tile & reshape.
5459    if repeats.shape.ndims == 0:
5460      expanded = expand_dims(data, axis + 1)
5461      tiled = tile_one_dimension(expanded, axis + 1, repeats)
5462      result_shape = concat([data_shape[:axis], [-1], data_shape[axis + 1:]],
5463                            axis=0)
5464      return reshape(tiled, result_shape)
5465
5466    # Broadcast the `repeats` tensor so rank(repeats) == axis + 1.
5467    if repeats.shape.ndims != axis + 1:
5468      repeats_shape = shape(repeats)
5469      repeats_ndims = rank(repeats)
5470      broadcast_shape = concat(
5471          [data_shape[:axis + 1 - repeats_ndims], repeats_shape], axis=0)
5472      repeats = broadcast_to(repeats, broadcast_shape)
5473      repeats.set_shape([None] * (axis + 1))
5474
5475    # Create a "sequence mask" based on `repeats`, where slices across `axis`
5476    # contain one `True` value for each repetition.  E.g., if
5477    # `repeats = [3, 1, 2]`, then `mask = [[1, 1, 1], [1, 0, 0], [1, 1, 0]]`.
5478    max_repeat = gen_math_ops.maximum(
5479        0, gen_math_ops._max(repeats, _all_dimensions(repeats)))
5480    mask = sequence_mask(repeats, max_repeat)
5481
5482    # Add a new dimension around each value that needs to be repeated, and
5483    # then tile that new dimension to match the maximum number of repetitions.
5484    expanded = expand_dims(data, axis + 1)
5485    tiled = tile_one_dimension(expanded, axis + 1, max_repeat)
5486
5487    # Use `boolean_mask` to discard the extra repeated values.  This also
5488    # flattens all dimensions up through `axis`.
5489    masked = boolean_mask(tiled, mask)
5490
5491    # Reshape the output tensor to add the outer dimensions back.
5492    if axis == 0:
5493      result = masked
5494    else:
5495      result_shape = concat([data_shape[:axis], [-1], data_shape[axis + 1:]],
5496                            axis=0)
5497      result = reshape(masked, result_shape)
5498
5499    # Preserve shape information.
5500    if data.shape.ndims is not None:
5501      new_axis_size = 0 if repeats.shape[0] == 0 else None
5502      result.set_shape(data.shape[:axis].concatenate(
5503          [new_axis_size]).concatenate(data.shape[axis + 1:]))
5504
5505    return result
5506
5507
5508def tile_one_dimension(data, axis, multiple):
5509  """Tiles a single dimension of a tensor."""
5510  # Assumes axis is a nonnegative int.
5511  if data.shape.ndims is not None:
5512    multiples = [1] * data.shape.ndims
5513    multiples[axis] = multiple
5514  else:
5515    ones_value = ones(rank(data), dtypes.int32)
5516    multiples = concat([ones_value[:axis], [multiple], ones_value[axis + 1:]],
5517                       axis=0)
5518  return tile(data, multiples)
5519
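# For example (illustrative): tile_one_dimension(tf.ones([2, 3]), axis=1,
# multiple=2) returns a tensor of shape [2, 6].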
5520
5521def _with_nonzero_rank(data):
5522  """If `data` is scalar, then add a dimension; otherwise return as-is."""
5523  if data.shape.ndims is not None:
5524    if data.shape.ndims == 0:
5525      return stack([data])
5526    else:
5527      return data
5528  else:
5529    data_shape = shape(data)
5530    data_ndims = rank(data)
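    # Prepending a 1 and keeping only the last `data_ndims` entries of the
    # shape is a no-op for rank >= 1; for a scalar, `data_ndims` is 0 and the
    # slice keeps the whole vector [1], so the reshape adds one dimension.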
5531    return reshape(data, concat([[1], data_shape], axis=0)[-data_ndims:])
5532
5533
5534@tf_export("repeat")
5535def repeat(input, repeats, axis=None, name=None):  # pylint: disable=redefined-builtin
5536  """Repeat elements of `input`.
5537
5538  Args:
5539    input: An `N`-dimensional Tensor.
    repeats: A 1-D `int` Tensor. The number of repetitions for each element.
      `repeats` is broadcast to fit the shape of the given axis. `len(repeats)`
      must equal `input.shape[axis]` if axis is not None.
5543    axis: An int. The axis along which to repeat values. By default (axis=None),
5544      use the flattened input array, and return a flat output array.
5545    name: A name for the operation.
5546
5547  Returns:
5548    A Tensor which has the same shape as `input`, except along the given axis.
5549      If axis is None then the output array is flattened to match the flattened
5550      input array.
5551
5552  Example usage:
5553
5554  >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)
5555  <tf.Tensor: shape=(5,), dtype=string,
5556  numpy=array([b'a', b'a', b'a', b'c', b'c'], dtype=object)>
5557
5558  >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)
5559  <tf.Tensor: shape=(5, 2), dtype=int32, numpy=
5560  array([[1, 2],
5561         [1, 2],
5562         [3, 4],
5563         [3, 4],
5564         [3, 4]], dtype=int32)>
5565
5566  >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)
5567  <tf.Tensor: shape=(2, 5), dtype=int32, numpy=
5568  array([[1, 1, 2, 2, 2],
5569         [3, 3, 4, 4, 4]], dtype=int32)>
5570
5571  >>> repeat(3, repeats=4)
5572  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([3, 3, 3, 3], dtype=int32)>
5573
5574  >>> repeat([[1,2], [3,4]], repeats=2)
5575  <tf.Tensor: shape=(8,), dtype=int32,
5576  numpy=array([1, 1, 2, 2, 3, 3, 4, 4], dtype=int32)>
5577
5578  """
5579  if axis is None:
5580    input = reshape(input, [-1])
5581    axis = 0
5582  return repeat_with_axis(input, repeats, axis, name)
5583