# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Tests for this file live in python/kernel_tests/array_ops_test.py
"""Support for manipulating tensors."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys

import numpy as np
import six

from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.ops.gen_array_ops import reverse_v2 as reverse  # pylint: disable=unused-import
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import

# Used for slicing to specify a new 1 size dimension
newaxis = None
tf_export("newaxis").export_constant(__name__, "newaxis")

# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_BaseSlice = slice


@tf_export("identity")
@dispatch.add_dispatch_support
def identity(input, name=None):  # pylint: disable=redefined-builtin
  r"""Return a tensor with the same shape and contents as input.

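  For example, a minimal sketch:

  ```python
  a = tf.constant([0, 1, 2])
  b = tf.identity(a)  # `b` has the same shape and contents as `a`
  ```
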
  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  if context.executing_eagerly() and not hasattr(input, "graph"):
    input = ops.convert_to_tensor(input)
    in_device = input.backing_device
    # TODO(ashankar): Does 'identity' need to invoke execution callbacks?
    context_device = context.context().device_name
    if not context_device:
      context_device = "/job:localhost/replica:0/task:0/device:CPU:0"
    if context_device == in_device:
      return input
    else:
      copied = input._copy()  # pylint: disable=protected-access
      if hasattr(copied, "_handle_data"):
        copied._handle_data = input._handle_data  # pylint: disable=protected-access
      return copied
  else:
    ret = gen_array_ops.identity(input, name=name)
    # Propagate handle data for happier shape inference for resource variables.
    if hasattr(input, "_handle_data"):
      ret._handle_data = input._handle_data  # pylint: disable=protected-access
    return ret


# pylint: disable=redefined-builtin,protected-access
@tf_export(v1=["expand_dims"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead", "dim")
def expand_dims(input, axis=None, name=None, dim=None):
  """Inserts a dimension of 1 into a tensor's shape.

  Given a tensor `input`, this operation inserts a dimension of 1 at the
  dimension index `axis` of `input`'s shape. The dimension index `axis` starts
  at zero; if you specify a negative number for `axis` it is counted backward
  from the end.

  This operation is useful if you want to add a batch dimension to a single
  element. For example, if you have a single image of shape `[height, width,
  channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
  which will make the shape `[1, height, width, channels]`.

  Other examples:

  ```python
  # 't' is a tensor of shape [2]
  tf.shape(tf.expand_dims(t, 0))  # [1, 2]
  tf.shape(tf.expand_dims(t, 1))  # [2, 1]
  tf.shape(tf.expand_dims(t, -1))  # [2, 1]

  # 't2' is a tensor of shape [2, 3, 5]
  tf.shape(tf.expand_dims(t2, 0))  # [1, 2, 3, 5]
  tf.shape(tf.expand_dims(t2, 2))  # [2, 3, 1, 5]
  tf.shape(tf.expand_dims(t2, 3))  # [2, 3, 5, 1]
  ```

  This operation requires that:

  `-1 - input.dims() <= axis <= input.dims()`

  This operation is related to `squeeze()`, which removes dimensions of
  size 1.

  Args:
    input: A `Tensor`.
    axis: 0-D (scalar). Specifies the dimension index at which to
      expand the shape of `input`. Must be in the range
      `[-rank(input) - 1, rank(input)]`.
    name: The name of the output `Tensor` (optional).
    dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.

  Returns:
    A `Tensor` with the same data as `input`, but its shape has an additional
    dimension of size 1 added.

  Raises:
    ValueError: if either both or neither of `dim` and `axis` are specified.
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
  if axis is None:
    raise ValueError("Must specify an axis argument to tf.expand_dims()")
  return expand_dims_v2(input, axis, name)


@tf_export("expand_dims", v1=[])
@dispatch.add_dispatch_support
def expand_dims_v2(input, axis, name=None):
  """Inserts a dimension of 1 into a tensor's shape.

  Given a tensor `input`, this operation inserts a dimension of 1 at the
  dimension index `axis` of `input`'s shape. The dimension index `axis` starts
  at zero; if you specify a negative number for `axis` it is counted backward
  from the end.

  This operation is useful if you want to add a batch dimension to a single
  element. For example, if you have a single image of shape `[height, width,
  channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
  which will make the shape `[1, height, width, channels]`.

  Other examples:

  ```python
  # 't' is a tensor of shape [2]
  tf.shape(tf.expand_dims(t, 0))  # [1, 2]
  tf.shape(tf.expand_dims(t, 1))  # [2, 1]
  tf.shape(tf.expand_dims(t, -1))  # [2, 1]

  # 't2' is a tensor of shape [2, 3, 5]
  tf.shape(tf.expand_dims(t2, 0))  # [1, 2, 3, 5]
  tf.shape(tf.expand_dims(t2, 2))  # [2, 3, 1, 5]
  tf.shape(tf.expand_dims(t2, 3))  # [2, 3, 5, 1]
  ```

  This operation requires that:

  `-1 - input.dims() <= axis <= input.dims()`

  This operation is related to `squeeze()`, which removes dimensions of
  size 1.

  Args:
    input: A `Tensor`.
    axis: 0-D (scalar). Specifies the dimension index at which to
      expand the shape of `input`. Must be in the range
      `[-rank(input) - 1, rank(input)]`.
    name: The name of the output `Tensor` (optional).

  Returns:
    A `Tensor` with the same data as `input`, but its shape has an additional
    dimension of size 1 added.
  """
  return gen_array_ops.expand_dims(input, axis, name)


# pylint: enable=redefined-builtin,protected-access


# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecation.deprecated(
    "2016-11-30",
    "This op will be removed after the deprecation date. "
    "Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
  return gen_array_ops.list_diff(x, y, out_idx, name)


listdiff.__doc__ = gen_array_ops.list_diff.__doc__ + "\n" + listdiff.__doc__

# pylint: enable=protected-access


# pylint: disable=undefined-variable
@deprecation.deprecated(
    "2018-11-30",
    "This op will be removed after the deprecation date. "
    "Please switch to tf.sets.difference().")
@tf_export(v1=["setdiff1d"])
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
  return gen_array_ops.list_diff(x, y, index_dtype, name)


setdiff1d.__doc__ = gen_array_ops.list_diff.__doc__
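
# For example (a minimal sketch of the underlying ListDiff semantics):
#   tf.setdiff1d([1, 2, 3, 4, 5, 6], [1, 3, 5]) returns
#   out = [2, 4, 6] and idx = [1, 3, 5]  (positions of `out` within `x`).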


@tf_export("broadcast_dynamic_shape")
def broadcast_dynamic_shape(shape_x, shape_y):
  """Computes the shape of a broadcast given symbolic shapes.

  When shape_x and shape_y are Tensors representing shapes (i.e. the result of
  calling tf.shape on another Tensor) this computes a Tensor which is the shape
  of the result of a broadcasting op applied to tensors of shapes shape_x and
  shape_y.

  For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
  Tensor whose value is [5, 2, 3].
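
  A minimal sketch of that example:

  ```python
  shape_x = tf.constant([1, 2, 3])
  shape_y = tf.constant([5, 1, 3])
  tf.broadcast_dynamic_shape(shape_x, shape_y)  # [5, 2, 3]
  ```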

  This is useful when validating the result of a broadcasting operation when the
  tensors do not have statically known shapes.

  Args:
    shape_x: A rank 1 integer `Tensor`, representing the shape of x.
    shape_y: A rank 1 integer `Tensor`, representing the shape of y.

  Returns:
    A rank 1 integer `Tensor` representing the broadcasted shape.
  """
  return gen_array_ops.broadcast_args(shape_x, shape_y)


@tf_export("broadcast_static_shape")
def broadcast_static_shape(shape_x, shape_y):
  """Computes the shape of a broadcast given known shapes.

  When shape_x and shape_y are fully known TensorShapes this computes a
  TensorShape which is the shape of the result of a broadcasting op applied to
  tensors of shapes shape_x and shape_y.

  For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
  TensorShape whose value is [5, 2, 3].
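
  A minimal sketch of that example:

  ```python
  shape_x = tf.TensorShape([1, 2, 3])
  shape_y = tf.TensorShape([5, 1, 3])
  tf.broadcast_static_shape(shape_x, shape_y)  # TensorShape([5, 2, 3])
  ```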

  This is useful when validating the result of a broadcasting operation when the
  tensors have statically known shapes.

  Args:
    shape_x: A `TensorShape`
    shape_y: A `TensorShape`

  Returns:
    A `TensorShape` representing the broadcasted shape.

  Raises:
    ValueError: If the two shapes can not be broadcasted.
  """
  return common_shapes.broadcast_shape(shape_x, shape_y)


@tf_export("shape", v1=[])
def shape_v2(input, out_type=dtypes.int32, name=None):
  # pylint: disable=redefined-builtin
  return shape(input, name, out_type)


@tf_export(v1=["shape"])
def shape(input, name=None, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the shape of a tensor.

  This operation returns a 1-D integer tensor representing the shape of `input`.

  For example:

  ```python
  t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
  tf.shape(t)  # [2, 2, 3]
  ```

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`.
  """
  return shape_internal(input, name, optimize=True, out_type=out_type)


def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the shape of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the shape as a constant when possible.
    out_type: (Optional) The specified output type of the operation
      (`int32` or `int64`). Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`.

  """
  with ops.name_scope(name, "Shape", [input]) as name:
    if isinstance(input, (sparse_tensor.SparseTensor,
                          sparse_tensor.SparseTensorValue)):
      return gen_math_ops.cast(input.dense_shape, out_type)
    else:
      if not context.executing_eagerly():
        input_tensor = ops.convert_to_tensor(input)
        input_shape = input_tensor.get_shape()
        if optimize and input_shape.is_fully_defined():
          return constant(input_shape.as_list(), out_type, name=name)
      return gen_array_ops.shape(input, name=name, out_type=out_type)


@tf_export("shape_n")
def shape_n(input, out_type=dtypes.int32, name=None):
  # pylint: disable=redefined-builtin
  """Returns shape of tensors.

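  For example, a minimal sketch:

  ```python
  a = tf.ones([2, 3])
  b = tf.ones([4, 5, 6])
  tf.shape_n([a, b])  # [[2, 3], [4, 5, 6]]
  ```
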
  Args:
    input: A list of at least 1 `Tensor` object with the same type.
    out_type: The specified output type of the operation
      (`int32` or `int64`). Defaults to `tf.int32` (optional).
    name: A name for the operation (optional).

  Returns:
    A list with the same length as `input` of `Tensor` objects with
      type `out_type`.
  """

  return gen_array_ops.shape_n(input, out_type=out_type, name=name)


@tf_export("size", v1=[])
@dispatch.add_dispatch_support
def size_v2(input, out_type=dtypes.int32, name=None):
  # pylint: disable=redefined-builtin
  return size(input, name, out_type)


@tf_export(v1=["size"])
@dispatch.add_dispatch_support
def size(input, name=None, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the size of a tensor.

  Returns a 0-D `Tensor` representing the number of elements in `input`
  of type `out_type`. Defaults to `tf.int32`.

  For example:

  ```python
  t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
  tf.size(t)  # 12
  ```

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    out_type: (Optional) The specified non-quantized numeric output type
      of the operation. Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`. Defaults to `tf.int32`.

  @compatibility(numpy)
  Equivalent to np.size()
  @end_compatibility
  """
  return size_internal(input, name, optimize=True, out_type=out_type)


def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin,protected-access
  """Returns the size of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the size as a constant when possible.
    out_type: (Optional) The specified non-quantized numeric output type
      of the operation. Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`. Defaults to `tf.int32`.
  """
  if context.executing_eagerly() and not isinstance(
      input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
    input = ops.convert_to_tensor(input)
    np_out_type = out_type.as_numpy_dtype
    num_elements = np.prod(input._shape_tuple(), dtype=np_out_type)  # pylint: disable=protected-access
    return ops.convert_to_tensor(num_elements, dtype=out_type)
  with ops.name_scope(name, "Size", [input]) as name:
    if isinstance(input, (sparse_tensor.SparseTensor,
                          sparse_tensor.SparseTensorValue)):
      return gen_math_ops.prod(
          gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
    else:
      input_tensor = ops.convert_to_tensor(input)
      input_shape = input_tensor.get_shape()
      if optimize:
        if input_shape.is_fully_defined():
          return constant(input_shape.num_elements(), out_type, name=name)
        if input_shape.dims and any(dim == 0 for dim in input_shape.dims):
          return constant(0, out_type, name=name)
      return gen_array_ops.size(input, name=name, out_type=out_type)


@tf_export("rank")
@dispatch.add_dispatch_support
def rank(input, name=None):
  # pylint: disable=redefined-builtin
  """Returns the rank of a tensor.

  Returns a 0-D `int32` `Tensor` representing the rank of `input`.

  For example:

  ```python
  # shape of tensor 't' is [2, 2, 3]
  t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
  tf.rank(t)  # 3
  ```

  **Note**: The rank of a tensor is not the same as the rank of a matrix. The
  rank of a tensor is the number of indices required to uniquely select each
  element of the tensor. Rank is also known as "order", "degree", or "ndims."

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.

  @compatibility(numpy)
  Equivalent to np.ndim
  @end_compatibility
  """
  return rank_internal(input, name, optimize=True)


def rank_internal(input, name=None, optimize=True):
  # pylint: disable=redefined-builtin
  """Returns the rank of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the rank as a constant when possible.

  Returns:
    A `Tensor` of type `int32`.
  """
  with ops.name_scope(name, "Rank", [input]) as name:
    if isinstance(input, (sparse_tensor.SparseTensor,
                          sparse_tensor.SparseTensorValue)):
      return gen_array_ops.size(input.dense_shape, name=name)
    else:
      input_tensor = ops.convert_to_tensor(input)
      input_shape = input_tensor.get_shape()
      if optimize and input_shape.ndims is not None:
        return constant(input_shape.ndims, dtypes.int32, name=name)
      return gen_array_ops.rank(input, name=name)


_SLICE_TYPE_ERROR = (
    "Only integers, slices (`:`), ellipsis (`...`), "
    "tf.newaxis (`None`) and scalar tf.int32/tf.int64 tensors are valid "
    "indices")

_SUPPORTED_SLICE_DTYPES = (
    dtypes.int32,
    dtypes.int32_ref,
    dtypes.int64,
    dtypes.int64_ref
)


def _check_index(idx):
  """Check if a given value is a valid index into a tensor."""
  if isinstance(idx, (six.integer_types, tensor_shape.Dimension)):
    return

  # Optimistic check. Assumptions:
  # * any object with a dtype is supported
  # * any object with a dtype has a sizeable shape attribute.
  dtype = getattr(idx, "dtype", None)
  if (dtype is None or
      dtypes.as_dtype(dtype) not in _SUPPORTED_SLICE_DTYPES or
      idx.shape and len(idx.shape) == 1):
    # TODO(slebedev): IndexError seems more appropriate here, but it
    # will break `_slice_helper` contract.
    raise TypeError(_SLICE_TYPE_ERROR + ", got {!r}".format(idx))


def _slice_helper(tensor, slice_spec, var=None):
  """Overload for Tensor.__getitem__.

  This operation extracts the specified region from the tensor.
  The notation is similar to NumPy with the restriction that
  currently only basic indexing is supported. That means that
  using a non-scalar tensor as input is not currently allowed.

  Some useful examples:

  ```python
  # strip leading and trailing 2 elements
  foo = tf.constant([1,2,3,4,5,6])
  print(foo[2:-2].eval())  # => [3,4]

  # take every other row and reverse the order of the columns
  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
  print(foo[::2,::-1].eval())  # => [[3,2,1], [9,8,7]]

  # Use scalar tensors as indices on both dimensions
  print(foo[tf.constant(0), tf.constant(2)].eval())  # => 3

  # Insert another dimension
  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
  print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
  print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
  print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],
                                      #     [[7],[8],[9]]]

  # Ellipses (3 equivalent operations)
  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
  print(foo[tf.newaxis, :, :].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]
  print(foo[tf.newaxis, ...].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]
  print(foo[tf.newaxis].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]

  # masks
  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
  print(foo[foo > 2].eval())  # => [3, 4, 5, 6, 7, 8, 9]
  ```

  Notes:
    - `tf.newaxis` is `None` as in NumPy.
    - An implicit ellipsis is placed at the end of the `slice_spec`.
    - NumPy advanced indexing is currently not supported.

  Args:
    tensor: An ops.Tensor object.
    slice_spec: The arguments to Tensor.__getitem__.
    var: In the case of variable slice assignment, the Variable
      object to slice (i.e. tensor is the read-only view of this
      variable).

  Returns:
    The appropriate slice of "tensor", based on "slice_spec".

  Raises:
    ValueError: If a slice range is negative size.
    TypeError: If the slice indices aren't int, slice, ellipsis,
      tf.newaxis or scalar int32/int64 tensors.
  """
  if isinstance(slice_spec, bool) or \
  (isinstance(slice_spec, ops.Tensor) and slice_spec.dtype == dtypes.bool) or \
  (isinstance(slice_spec, np.ndarray) and slice_spec.dtype == bool):
    return boolean_mask(tensor=tensor, mask=slice_spec)

  if not isinstance(slice_spec, (list, tuple)):
    slice_spec = [slice_spec]

  begin, end, strides = [], [], []
  index = 0

  new_axis_mask, shrink_axis_mask = 0, 0
  begin_mask, end_mask = 0, 0
  ellipsis_mask = 0
  for s in slice_spec:
    if isinstance(s, _BaseSlice):
      # python doesn't always use None when constructing ranges
      # for example a[:] gives slice(None,sys.maxsize,None)
      # whereas a[::1] gives slice(None,None,None)
      if s.start is not None and s.start is not sys.maxsize:
        _check_index(s.start)
        begin.append(s.start)
      else:
        begin.append(0)
        begin_mask |= (1 << index)
      if s.stop is not None and s.stop != sys.maxsize:
        _check_index(s.stop)
        end.append(s.stop)
      else:
        end.append(0)
        end_mask |= (1 << index)
      if s.step is not None:
        _check_index(s.step)
        strides.append(s.step)
      else:
        strides.append(1)
    elif s is Ellipsis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      ellipsis_mask |= (1 << index)
    elif s is newaxis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      new_axis_mask |= (1 << index)
    else:
      _check_index(s)
      begin.append(s)
      end.append(s + 1)
      strides.append(1)
      shrink_axis_mask |= (1 << index)
    index += 1

  # stack possibly involves no tensors, so we must use op_scope to pick the
  # correct graph.
  with ops.name_scope(None, "strided_slice",
                      [tensor] + begin + end + strides) as name:
    if begin:
      packed_begin, packed_end, packed_strides = (stack(begin), stack(end),
                                                  stack(strides))
      if (packed_begin.dtype == dtypes.int64 or
          packed_end.dtype == dtypes.int64 or
          packed_strides.dtype == dtypes.int64):
        if packed_begin.dtype != dtypes.int64:
          packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)
        if packed_end.dtype != dtypes.int64:
          packed_end = gen_math_ops.cast(packed_end, dtypes.int64)
        if packed_strides.dtype != dtypes.int64:
          packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)
    else:
      var_empty = constant([], dtype=dtypes.int32)
      packed_begin = packed_end = packed_strides = var_empty
    return strided_slice(
        tensor,
        packed_begin,
        packed_end,
        packed_strides,
        begin_mask=begin_mask,
        end_mask=end_mask,
        shrink_axis_mask=shrink_axis_mask,
        new_axis_mask=new_axis_mask,
        ellipsis_mask=ellipsis_mask,
        var=var,
        name=name)


# pylint: disable=undefined-variable,protected-access,redefined-outer-name
@tf_export("slice")
def slice(input_, begin, size, name=None):
  # pylint: disable=redefined-builtin
  """Extracts a slice from a tensor.

  This operation extracts a slice of size `size` from a tensor `input` starting
  at the location specified by `begin`. The slice `size` is represented as a
  tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
  of `input` that you want to slice. The starting location (`begin`) for the
  slice is represented as an offset in each dimension of `input`. In other
  words, `begin[i]` is the offset into the 'i'th dimension of `input` that you
  want to slice from.

  Note that `tf.Tensor.__getitem__` is typically a more pythonic way to
  perform slices, as it allows you to write `foo[3:7, :-2]` instead of
  `tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.

  `begin` is zero-based; `size` is one-based. If `size[i]` is -1,
  all remaining elements in dimension i are included in the
  slice. In other words, this is equivalent to setting:

  `size[i] = input.dim_size(i) - begin[i]`

  This operation requires that:

  `0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n]`

  For example:

  ```python
  t = tf.constant([[[1, 1, 1], [2, 2, 2]],
                   [[3, 3, 3], [4, 4, 4]],
                   [[5, 5, 5], [6, 6, 6]]])
  tf.slice(t, [1, 0, 0], [1, 1, 3])  # [[[3, 3, 3]]]
  tf.slice(t, [1, 0, 0], [1, 2, 3])  # [[[3, 3, 3],
                                     #   [4, 4, 4]]]
  tf.slice(t, [1, 0, 0], [2, 1, 3])  # [[[3, 3, 3]],
                                     #  [[5, 5, 5]]]
  ```

  Args:
    input_: A `Tensor`.
    begin: An `int32` or `int64` `Tensor`.
    size: An `int32` or `int64` `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` the same type as `input`.
  """
  return gen_array_ops._slice(input_, begin, size, name=name)


# pylint: disable=invalid-name
@tf_export("strided_slice")
def strided_slice(input_,
                  begin,
                  end,
                  strides=None,
                  begin_mask=0,
                  end_mask=0,
                  ellipsis_mask=0,
                  new_axis_mask=0,
                  shrink_axis_mask=0,
                  var=None,
                  name=None):
  """Extracts a strided slice of a tensor (generalized python array indexing).

  **Instead of calling this op directly most users will want to use the
  NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which
  is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.**
  The interface of this op is a low-level encoding of the slicing syntax.

  Roughly speaking, this op extracts a slice of size `(end-begin)/stride`
  from the given `input_` tensor. Starting at the location specified by `begin`
  the slice continues by adding `stride` to the index until all dimensions are
  not less than `end`.
  Note that a stride can be negative, which causes a reverse slice.

  Given a Python slice `input[spec0, spec1, ..., specn]`,
  this function will be called as follows.

  `begin`, `end`, and `strides` will be vectors of length n.
  n in general is not equal to the rank of the `input_` tensor.

  In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,
  `new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to
  the ith spec.

  If the ith bit of `begin_mask` is set, `begin[i]` is ignored and
  the fullest possible range in that dimension is used instead.
  `end_mask` works analogously, except with the end range.

  `foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
  `foo[::-1]` reverses a tensor with shape 8.

  If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions
  as needed will be inserted between other dimensions. Only one
  non-zero bit is allowed in `ellipsis_mask`.

  For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
  equivalent to `foo[3:5,:,:,4:5]` and
  `foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.

  If the ith bit of `new_axis_mask` is set, then `begin`,
  `end`, and `stride` are ignored and a new length 1 dimension is
  added at this point in the output tensor.

  For example,
  `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.

  If the ith bit of `shrink_axis_mask` is set, it implies that the ith
  specification shrinks the dimensionality by 1, taking on the value at index
  `begin[i]`. `end[i]` and `strides[i]` are ignored in this case. For example in
  Python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask`
  equal to 2.
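
  For that `foo[:, 3, :]` example, the equivalent low-level call is (a sketch,
  assuming `foo` has rank 3):

  ```python
  tf.strided_slice(foo, [0, 3, 0], [0, 4, 0], [1, 1, 1],
                   begin_mask=5, end_mask=5, shrink_axis_mask=2)
  ```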


  NOTE: `begin` and `end` are zero-indexed.
  `strides` entries must be non-zero.


  ```python
  t = tf.constant([[[1, 1, 1], [2, 2, 2]],
                   [[3, 3, 3], [4, 4, 4]],
                   [[5, 5, 5], [6, 6, 6]]])
  tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1])  # [[[3, 3, 3]]]
  tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1])  # [[[3, 3, 3],
                                                        #   [4, 4, 4]]]
  tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1])  # [[[4, 4, 4],
                                                           #   [3, 3, 3]]]
  ```

  Args:
    input_: A `Tensor`.
    begin: An `int32` or `int64` `Tensor`.
    end: An `int32` or `int64` `Tensor`.
    strides: An `int32` or `int64` `Tensor`.
    begin_mask: An `int32` mask.
    end_mask: An `int32` mask.
    ellipsis_mask: An `int32` mask.
    new_axis_mask: An `int32` mask.
    shrink_axis_mask: An `int32` mask.
    var: The variable corresponding to `input_` or None
    name: A name for the operation (optional).

  Returns:
    A `Tensor` the same type as `input`.
  """

  if strides is None:
    strides = ones_like(begin)

  op = gen_array_ops.strided_slice(
      input=input_,
      begin=begin,
      end=end,
      strides=strides,
      name=name,
      begin_mask=begin_mask,
      end_mask=end_mask,
      ellipsis_mask=ellipsis_mask,
      new_axis_mask=new_axis_mask,
      shrink_axis_mask=shrink_axis_mask)

  parent_name = name

  if not (var is None and isinstance(op, ops.EagerTensor)):
    def assign(val, name=None):
      """Closure that holds all the arguments to create an assignment."""

      if var is None:
        raise ValueError("Sliced assignment is only supported for variables")

      if name is None:
        name = parent_name + "_assign"

      return var._strided_slice_assign(
          begin=begin,
          end=end,
          strides=strides,
          value=val,
          name=name,
          begin_mask=begin_mask,
          end_mask=end_mask,
          ellipsis_mask=ellipsis_mask,
          new_axis_mask=new_axis_mask,
          shrink_axis_mask=shrink_axis_mask)

    op.assign = assign
  return op


def _SliceHelperVar(var, slice_spec):
  """Creates a slice helper object given a variable.

  This allows creating a sub-tensor from part of the current contents
  of a variable. See `tf.Tensor.__getitem__` for detailed examples
  of slicing.

  This function additionally allows assignment to a sliced range.
  This is similar to `__setitem__` functionality in Python. However,
  the syntax is different so that the user can capture the assignment
  operation for grouping or passing to `sess.run()`.
  For example,

  ```python
  import tensorflow as tf
  A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(A[:2, :2]))  # => [[1,2], [4,5]]

    op = A[:2,:2].assign(22. * tf.ones((2, 2)))
    print(sess.run(op))  # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
  ```

  Note that assignments currently do not support NumPy broadcasting
  semantics.

  Args:
    var: An `ops.Variable` object.
    slice_spec: The arguments to `Tensor.__getitem__`.

  Returns:
    The appropriate slice of "tensor", based on "slice_spec", as an operator.
    The operator also has an `assign()` method that can be used to generate
    an assignment operator.

  Raises:
    ValueError: If a slice range is negative size.
    TypeError: If the slice indices aren't int, slice,
      ellipsis, tf.newaxis or int32/int64 tensors.

  """

  return _slice_helper(var.value(), slice_spec, var)


ops.Tensor._override_operator("__getitem__", _slice_helper)


@tf_export("parallel_stack")
def parallel_stack(values, name="parallel_stack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.

  Requires that the shape of inputs be known at graph construction time.

  Packs the list of tensors in `values` into a tensor with rank one higher than
  each tensor in `values`, by packing them along the first dimension.
  Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
  tensor will have the shape `(N, A, B, C)`.

  For example:

  ```python
  x = tf.constant([1, 4])
  y = tf.constant([2, 5])
  z = tf.constant([3, 6])
  tf.parallel_stack([x, y, z])  # [[1, 4], [2, 5], [3, 6]]
  ```

  The difference between `stack` and `parallel_stack` is that `stack` requires
  all the inputs be computed before the operation will begin but doesn't require
  that the input shapes be known during graph construction.

  `parallel_stack` will copy pieces of the input into the output as they become
  available; in some situations this can provide a performance benefit.

  Unlike `stack`, `parallel_stack` does NOT support backpropagation.

  This is the opposite of unstack.  The numpy equivalent is

      tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    name: A name for this operation (optional).

  Returns:
    output: A stacked `Tensor` with the same type as `values`.
  """
  with ops.name_scope(name):
    value_t = ops.convert_to_tensor(values[0])
    value_shape = ops.convert_to_tensor(value_t).get_shape()

    output_shape = tensor_shape.TensorShape([len(values)])
    output_shape = output_shape.concatenate(value_shape)
    # expand_dims converts concat to stack.
    return gen_array_ops.parallel_concat(
        [expand_dims(value, 0) for value in values], shape=output_shape)


@tf_export("stack")
@dispatch.add_dispatch_support
def stack(values, axis=0, name="stack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.

  Packs the list of tensors in `values` into a tensor with rank one higher than
  each tensor in `values`, by packing them along the `axis` dimension.
  Given a list of length `N` of tensors of shape `(A, B, C)`;

  if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
  if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
  Etc.

  For example:

  ```python
  x = tf.constant([1, 4])
  y = tf.constant([2, 5])
  z = tf.constant([3, 6])
  tf.stack([x, y, z])  # [[1, 4], [2, 5], [3, 6]] (Pack along first dim.)
  tf.stack([x, y, z], axis=1)  # [[1, 2, 3], [4, 5, 6]]
  ```

  This is the opposite of unstack.  The numpy equivalent is

  ```python
  tf.stack([x, y, z]) = np.stack([x, y, z])
  ```

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    axis: An `int`. The axis to stack along. Defaults to the first dimension.
      Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
    name: A name for this operation (optional).

  Returns:
    output: A stacked `Tensor` with the same type as `values`.

  Raises:
    ValueError: If `axis` is out of the range [-(R+1), R+1).
  """
  if axis == 0:
    try:
      # If the input is a constant list, it can be converted to a constant op
      return ops.convert_to_tensor(values, name=name)
    except (TypeError, ValueError):
      pass  # Input list contains non-constant tensors

  value_shape = ops.convert_to_tensor(values[0], name=name)._shape_tuple()  # pylint: disable=protected-access
  if value_shape is not None:
    expanded_num_dims = len(value_shape) + 1
    if axis < -expanded_num_dims or axis >= expanded_num_dims:
      raise ValueError("axis = %d not in [%d, %d)" % (axis, -expanded_num_dims,
                                                      expanded_num_dims))

  return gen_array_ops.pack(values, axis=axis, name=name)


# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
  """Converts the given list or tuple to a tensor by packing.

  Args:
    list_or_tuple: A (possibly nested) list or tuple containing a tensor.
    dtype: The element type of the returned tensor.
    name: A name for the returned tensor.

  Returns:
    A `tf.Tensor` with value equivalent to `list_or_tuple`.
  """
  if context.executing_eagerly():
    # NOTE: Fast path when all the items are tensors, this doesn't do any type
    # checking.
    if all(ops.is_dense_tensor_like(elem) for elem in list_or_tuple):
      return gen_array_ops.pack(list_or_tuple, name=name)
  must_pack = False
  converted_elems = []
  with ops.name_scope(name) as scope:
    for i, elem in enumerate(list_or_tuple):
      if ops.is_dense_tensor_like(elem):
        if dtype is not None and elem.dtype.base_dtype != dtype:
          raise TypeError("Cannot convert a list containing a tensor of dtype "
                          "%s to %s (Tensor is: %r)" % (elem.dtype, dtype,
                                                        elem))
        converted_elems.append(elem)
        must_pack = True
      elif isinstance(elem, (list, tuple)):
        converted_elem = _autopacking_helper(elem, dtype, str(i))
        if ops.is_dense_tensor_like(converted_elem):
          must_pack = True
        converted_elems.append(converted_elem)
      else:
        converted_elems.append(elem)
    if must_pack:
      elems_as_tensors = []
      for i, elem in enumerate(converted_elems):
        if ops.is_dense_tensor_like(elem):
          elems_as_tensors.append(elem)
        else:
          # NOTE(mrry): This is inefficient, but it enables us to
          # handle the case where the list arguments are other
          # convertible-to-tensor types, such as numpy arrays.
          elems_as_tensors.append(
              constant_op.constant(elem, dtype=dtype, name=str(i)))
      return gen_array_ops.pack(elems_as_tensors, name=scope)
    else:
      return converted_elems


def _get_dtype_from_nested_lists(list_or_tuple):
  """Returns the dtype of any tensor-like object in `list_or_tuple`, if found.

  Args:
    list_or_tuple: A list or tuple representing an object that can be
      converted to a `tf.Tensor`.

  Returns:
    The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
    such object exists.
  """
  for elem in list_or_tuple:
    if ops.is_dense_tensor_like(elem):
      return elem.dtype.base_dtype
    elif isinstance(elem, (list, tuple)):
      maybe_dtype = _get_dtype_from_nested_lists(elem)
      if maybe_dtype is not None:
        return maybe_dtype
  return None


def _cast_nested_seqs_to_dtype(dtype):
  def _maybe_cast(elem):
    if ops.is_dense_tensor_like(elem):
      if dtype != elem.dtype.base_dtype:
        elem = gen_math_ops.cast(elem, dtype)
    return elem
  return _maybe_cast


def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
  """Tensor conversion function that automatically packs arguments."""
  if as_ref:
    return NotImplemented
  inferred_dtype = _get_dtype_from_nested_lists(v)
  if inferred_dtype is None:
    # We did not find any tensor-like objects in the nested lists, so defer to
    # other conversion functions.
    return NotImplemented
  if dtype is None:
    dtype = inferred_dtype
  elif dtype != inferred_dtype:
    v = nest.map_structure(_cast_nested_seqs_to_dtype(dtype), v)
  return _autopacking_helper(v, dtype, name or "packed")


# pylint: enable=invalid-name

# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
ops.register_tensor_conversion_function((list, tuple),
                                        _autopacking_conversion_function, 99)
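
# For example (a minimal sketch): converting a nested list that mixes tensors
# and Python numbers goes through this conversion function, e.g.
#   ops.convert_to_tensor([constant_op.constant(1.0), 2.0])
# packs the elements into a float32 tensor of shape (2,).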


@tf_export("unstack")
def unstack(value, num=None, axis=0, name="unstack"):
  """Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.

  Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
  If `num` is not specified (the default), it is inferred from `value`'s shape.
  If `value.shape[axis]` is not known, `ValueError` is raised.

  For example, given a tensor of shape `(A, B, C, D)`;

  If `axis == 0` then the i'th tensor in `output` is the slice
    `value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
    (Note that the dimension unpacked along is gone, unlike `split`).

  If `axis == 1` then the i'th tensor in `output` is the slice
    `value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
  Etc.

  This is the opposite of stack.

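  For example, a minimal sketch:

  ```python
  x = tf.constant([[1, 2, 3], [4, 5, 6]])  # shape (2, 3)
  a, b = tf.unstack(x)          # a is [1, 2, 3]; b is [4, 5, 6]
  cols = tf.unstack(x, axis=1)  # three tensors, each of shape (2,)
  ```
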
  Args:
    value: A rank `R > 0` `Tensor` to be unstacked.
    num: An `int`. The length of the dimension `axis`. Automatically inferred
      if `None` (the default).
    axis: An `int`. The axis to unstack along. Defaults to the first
      dimension. Negative values wrap around, so the valid range is `[-R, R)`.
    name: A name for the operation (optional).

  Returns:
    The list of `Tensor` objects unstacked from `value`.

  Raises:
    ValueError: If `num` is unspecified and cannot be inferred.
    ValueError: If `axis` is out of the range [-R, R).
  """
  if num is None:
    value = ops.convert_to_tensor(value)
    value_shape = value.get_shape()
    if value_shape.ndims is not None:
      if axis < -value_shape.ndims or axis >= value_shape.ndims:
        raise ValueError("axis = %d not in [%d, %d)" %
                         (axis, -value_shape.ndims, value_shape.ndims))
      num = value_shape.dims[axis].value
  if num is None:
    raise ValueError("Cannot infer num from shape %s" % value_shape)
  return gen_array_ops.unpack(value, num=num, axis=axis, name=name)


@tf_export("concat")
@dispatch.add_dispatch_support
def concat(values, axis, name="concat"):
  """Concatenates tensors along one dimension.

  Concatenates the list of tensors `values` along dimension `axis`.  If
  `values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
  result has shape

      [D0, D1, ... Raxis, ...Dn]

  where

      Raxis = sum(Daxis(i))

  That is, the data from the input tensors is joined along the `axis`
  dimension.

  The number of dimensions of the input tensors must match, and all dimensions
  except `axis` must be equal.

  For example:

  ```python
  t1 = [[1, 2, 3], [4, 5, 6]]
  t2 = [[7, 8, 9], [10, 11, 12]]
  tf.concat([t1, t2], 0)  # [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
  tf.concat([t1, t2], 1)  # [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]

  # tensor t3 with shape [2, 3]
  # tensor t4 with shape [2, 3]
  tf.shape(tf.concat([t3, t4], 0))  # [4, 3]
  tf.shape(tf.concat([t3, t4], 1))  # [2, 6]
  ```
  As in Python, the `axis` could also be negative. A negative `axis` is
  interpreted as counting from the end of the rank, i.e., as the
  `axis + rank(values)`-th dimension.

  For example:

  ```python
  t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]
  t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]
  tf.concat([t1, t2], -1)
  ```

  would produce:

  ```python
  [[[ 1,  2,  7,  4],
    [ 2,  3,  8,  4]],

   [[ 4,  4,  2, 10],
    [ 5,  3, 15, 11]]]
  ```

  Note: If you are concatenating along a new axis consider using stack.
  E.g.

  ```python
  tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
  ```

  can be rewritten as

  ```python
  tf.stack(tensors, axis=axis)
  ```

  Args:
    values: A list of `Tensor` objects or a single `Tensor`.
    axis: 0-D `int32` `Tensor`.  Dimension along which to concatenate. Must be
      in the range `[-rank(values), rank(values))`. As in Python, indexing
      for axis is 0-based. Positive axis in the range of
      `[0, rank(values))` refers to `axis`-th dimension. And negative axis
      refers to `axis + rank(values)`-th dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` resulting from concatenation of the input tensors.
  """
  if not isinstance(values, (list, tuple)):
    values = [values]
  # TODO(mrry): Change to return values?
  if len(values) == 1:  # Degenerate case of one tensor.
    # Make a throwaway call to convert_to_tensor to make sure
    # that axis is of the correct type, and make sure that
    # the returned tensor is a scalar.
    # TODO(keveman): Implement a standalone type and shape checker.
    with ops.name_scope(name) as scope:
      ops.convert_to_tensor(
          axis, name="concat_dim",
          dtype=dtypes.int32).get_shape().assert_is_compatible_with(
              tensor_shape.scalar())
      return identity(values[0], name=scope)
  return gen_array_ops.concat_v2(values=values, axis=axis, name=name)


@tf_export(v1=["boolean_mask"])
def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
  """Apply boolean mask to tensor.  Numpy equivalent is `tensor[mask]`.

  ```python
  # 1-D example
  tensor = [0, 1, 2, 3]
  mask = np.array([True, False, True, False])
  boolean_mask(tensor, mask)  # [0, 2]
  ```

  In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
  the first K dimensions of `tensor`'s shape.  We then have:
    `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
  where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
  The `axis` could be used with `mask` to indicate the axis to mask from.
  In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
  the first `axis + dim(mask)` dimensions of `tensor`'s shape.

  Args:
    tensor:  N-D tensor.
    mask:  K-D boolean tensor, K <= N and K must be known statically.
    name:  A name for this operation (optional).
    axis:  A 0-D int Tensor representing the axis in `tensor` to mask from.
      By default, axis is 0 which will mask from the first dimension. Otherwise
      K + axis <= N.

  Returns:
    (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
    to `True` values in `mask`.

  Raises:
    ValueError:  If shapes do not conform.

  Examples:

  ```python
  # 2-D example
  tensor = [[1, 2], [3, 4], [5, 6]]
  mask = np.array([True, False, True])
  boolean_mask(tensor, mask)  # [[1, 2], [5, 6]]
  ```
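
  A sketch of masking along a non-default `axis`:

  ```python
  # 2-D example with axis=1: mask the second dimension (the columns)
  tensor = [[1, 2], [3, 4]]
  mask = np.array([True, False])
  boolean_mask(tensor, mask, axis=1)  # [[1], [3]]
  ```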
1316  """
1317
1318  def _apply_mask_1d(reshaped_tensor, mask, axis=None):
1319    """Mask tensor along dimension 0 with a 1-D mask."""
1320    indices = squeeze(where(mask), axis=[1])
1321    return gather(reshaped_tensor, indices, axis=axis)
1322
1323  with ops.name_scope(name, values=[tensor, mask]):
1324    tensor = ops.convert_to_tensor(tensor, name="tensor")
1325    mask = ops.convert_to_tensor(mask, name="mask")
1326
1327    shape_mask = mask.get_shape()
1328    ndims_mask = shape_mask.ndims
1329    shape_tensor = tensor.get_shape()
1330    if ndims_mask == 0:
1331      raise ValueError("mask cannot be scalar.")
1332    if ndims_mask is None:
1333      raise ValueError(
1334          "Number of mask dimensions must be specified, even if some dimensions"
1335          " are None.  E.g. shape=[None] is ok, but shape=None is not.")
1336    axis = 0 if axis is None else axis
1337    shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)
1338
1339    leading_size = gen_math_ops.prod(shape(tensor)[axis:axis + ndims_mask], [0])
1340    tensor = reshape(tensor,
1341                     concat([
1342                         shape(tensor)[:axis], [leading_size],
1343                         shape(tensor)[axis + ndims_mask:]
1344                     ], 0))
1345    first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()
1346    tensor.set_shape(
1347        tensor_shape.as_shape(shape_tensor[:axis]).concatenate([first_dim])
1348        .concatenate(shape_tensor[axis + ndims_mask:]))
1349
1350    mask = reshape(mask, [-1])
1351    return _apply_mask_1d(tensor, mask, axis)
1352
1353
1354@tf_export("boolean_mask", v1=[])
1355@dispatch.add_dispatch_support
1356def boolean_mask_v2(tensor, mask, axis=None, name="boolean_mask"):
1357  """Apply boolean mask to tensor.
1358
1359  Numpy equivalent is `tensor[mask]`.
1360
1361  ```python
1362  # 1-D example
1363  tensor = [0, 1, 2, 3]
1364  mask = np.array([True, False, True, False])
1365  boolean_mask(tensor, mask)  # [0, 2]
1366  ```
1367
1368  In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
1369  the first K dimensions of `tensor`'s shape.  We then have:
1370    `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
1371  where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
1372  The `axis` could be used with `mask` to indicate the axis to mask from.
1373  In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
1374  the first `axis + dim(mask)` dimensions of `tensor`'s shape.
1375
1376  Args:
1377    tensor:  N-D tensor.
1378    mask:  K-D boolean tensor, K <= N and K must be known statically.
1379    axis:  A 0-D int Tensor representing the axis in `tensor` to mask from. By
1380      default, axis is 0 which will mask from the first dimension. Otherwise K +
1381      axis <= N.
1382    name:  A name for this operation (optional).
1383
1384  Returns:
1385    (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
1386    to `True` values in `mask`.
1387
1388  Raises:
1389    ValueError:  If shapes do not conform.
1390
1391  Examples:
1392
1393  ```python
1394  # 2-D example
1395  tensor = [[1, 2], [3, 4], [5, 6]]
1396  mask = np.array([True, False, True])
1397  boolean_mask(tensor, mask)  # [[1, 2], [5, 6]]
1398  ```
1399  """
1400  return boolean_mask(tensor, mask, name, axis)
1401
1402
1403@tf_export("sparse.mask", v1=["sparse.mask", "sparse_mask"])
1404@deprecation.deprecated_endpoints("sparse_mask")
1405def sparse_mask(a, mask_indices, name=None):
1406  """Masks elements of `IndexedSlices`.
1407
1408  Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
1409  contains a subset of the slices of `a`. Only the slices at indices not
1410  specified in `mask_indices` are returned.
1411
1412  This is useful when you need to extract a subset of slices in an
1413  `IndexedSlices` object.
1414
1415  For example:
1416
1417  ```python
1418  # `a` contains slices at indices [12, 26, 37, 45] from a large tensor
1419  # with shape [1000, 10]
1420  a.indices  # [12, 26, 37, 45]
1421  tf.shape(a.values)  # [4, 10]
1422
1423  # `b` will be the subset of `a` slices at its second and third indices, so
1424  # we want to mask its first and last indices (which are at absolute
1425  # indices 12, 45)
1426  b = tf.sparse.mask(a, [12, 45])
1427
1428  b.indices  # [26, 37]
1429  tf.shape(b.values)  # [2, 10]
1430  ```
1431
1432  Args:
1433    a: An `IndexedSlices` instance.
1434    mask_indices: Indices of elements to mask.
1435    name: A name for the operation (optional).
1436
1437  Returns:
1438    The masked `IndexedSlices` instance.
1439  """
1440  with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
1441    indices = a.indices
1442    out_indices, to_gather = setdiff1d(indices, mask_indices)
1443    out_values = gather(a.values, to_gather, name=name)
1444    return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
1445
1446
1447@tf_export("unique")
1448def unique(x, out_idx=dtypes.int32, name=None):
  # TODO(yongtang): switch to v2 once the API deprecation
  # period (3 weeks) passes.
  # TODO(yongtang): The documentation should also
  # be updated when switching to v2.
  return gen_array_ops.unique(x, out_idx, name)


unique.__doc__ = gen_array_ops.unique.__doc__
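
# For example: tf.unique([1, 1, 2, 4, 4, 4, 7, 8, 8]) returns
# y = [1, 2, 4, 7, 8] and idx = [0, 0, 1, 2, 2, 2, 3, 4, 4].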


@tf_export("unique_with_counts")
def unique_with_counts(x, out_idx=dtypes.int32, name=None):
  # TODO(yongtang): switch to v2 once the API deprecation
  # period (3 weeks) passes.
  # TODO(yongtang): The documentation should also
  # be updated when switching to v2.
  return gen_array_ops.unique_with_counts(x, out_idx, name)


unique_with_counts.__doc__ = gen_array_ops.unique_with_counts.__doc__
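
# For example: tf.unique_with_counts([1, 1, 2, 4, 4, 4, 7, 8, 8]) returns
# y = [1, 2, 4, 7, 8], idx = [0, 0, 1, 2, 2, 2, 3, 4, 4], and
# count = [2, 1, 3, 1, 2].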
1469
1470
1471@tf_export("split")
1472def split(value, num_or_size_splits, axis=0, num=None, name="split"):
1473  """Splits a tensor into sub tensors.
1474
1475  If `num_or_size_splits` is an integer, then `value` is split along dimension
1476  `axis` into `num_split` smaller tensors. This requires that `num_split` evenly
1477  divides `value.shape[axis]`.
1478
1479  If `num_or_size_splits` is a 1-D Tensor (or list), we call it `size_splits`
1480  and `value` is split into `len(size_splits)` elements. The shape of the `i`-th
1481  element has the same size as the `value` except along dimension `axis` where
1482  the size is `size_splits[i]`.
1483
1484  For example:
1485
1486  ```python
1487  # 'value' is a tensor with shape [5, 30]
1488  # Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
1489  split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
1490  tf.shape(split0)  # [5, 4]
1491  tf.shape(split1)  # [5, 15]
1492  tf.shape(split2)  # [5, 11]
1493  # Split 'value' into 3 tensors along dimension 1
1494  split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
1495  tf.shape(split0)  # [5, 10]
1496  ```
1497
1498  Args:
1499    value: The `Tensor` to split.
1500    num_or_size_splits: Either an integer indicating the number of
1501      splits along split_dim or a 1-D integer `Tensor` or Python list containing
1502      the sizes of each output tensor along split_dim. If a scalar then it must
1503      evenly divide `value.shape[axis]`; otherwise the sum of sizes along the
1504      split dimension must match that of the `value`.
1505    axis: An integer or scalar `int32` `Tensor`. The dimension along which to
      split. Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
1507    num: Optional, used to specify the number of outputs when it cannot be
1508      inferred from the shape of `size_splits`.
1509    name: A name for the operation (optional).
1510
1511  Returns:
    If `num_or_size_splits` is a scalar, returns `num_or_size_splits` `Tensor`
    objects; if `num_or_size_splits` is a 1-D Tensor, returns
    `num_or_size_splits.get_shape()[0]` `Tensor` objects resulting from
    splitting `value`.
1516
1517  Raises:
1518    ValueError: If `num` is unspecified and cannot be inferred.
1519  """
1520  size_splits = ops.convert_to_tensor(num_or_size_splits)
1521  if isinstance(num_or_size_splits,
1522                six.integer_types + (tensor_shape.Dimension,)):
1523    return gen_array_ops.split(
1524        axis=axis, num_split=num_or_size_splits, value=value, name=name)
1525
1526  if size_splits._rank() == 0:
1527    raise ValueError(
1528        "Rank-0 tensors are not supported as the num_or_size_splits argument "
1529        "to split. Argument provided: %s" % (num_or_size_splits,))
1530
1531  if num is None:
1532    size_splits_shape = size_splits._shape_tuple()
1533    if size_splits_shape:
1534      num = size_splits_shape[0]
1535    if num is None:
1536      raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
1537
1538  return gen_array_ops.split_v(
1539      value=value, size_splits=size_splits, axis=axis, num_split=num, name=name)
1540
1541
1542@tf_export("transpose", v1=[])
1543def transpose_v2(a, perm=None, conjugate=False, name="transpose"):
1544  """Transposes `a`. Permutes the dimensions according to `perm`.
1545
1546  The returned tensor's dimension i will correspond to the input dimension
1547  `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
1548  the rank of the input tensor. Hence by default, this operation performs a
1549  regular matrix transpose on 2-D input Tensors. If conjugate is True and
1550  `a.dtype` is either `complex64` or `complex128` then the values of `a`
1551  are conjugated and transposed.
1552
1553  @compatibility(numpy)
1554  In `numpy` transposes are memory-efficient constant time operations as they
1555  simply return a new view of the same data with adjusted `strides`.
1556
1557  TensorFlow does not support strides, so `transpose` returns a new tensor with
1558  the items permuted.
1559  @end_compatibility
1560
1561  For example:
1562
1563  ```python
1564  x = tf.constant([[1, 2, 3], [4, 5, 6]])
1565  tf.transpose(x)  # [[1, 4]
1566                   #  [2, 5]
1567                   #  [3, 6]]
1568
1569  # Equivalently
1570  tf.transpose(x, perm=[1, 0])  # [[1, 4]
1571                                #  [2, 5]
1572                                #  [3, 6]]
1573
1574  # If x is complex, setting conjugate=True gives the conjugate transpose
1575  x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
1576                   [4 + 4j, 5 + 5j, 6 + 6j]])
1577  tf.transpose(x, conjugate=True)  # [[1 - 1j, 4 - 4j],
1578                                   #  [2 - 2j, 5 - 5j],
1579                                   #  [3 - 3j, 6 - 6j]]
1580
1581  # 'perm' is more useful for n-dimensional tensors, for n > 2
1582  x = tf.constant([[[ 1,  2,  3],
1583                    [ 4,  5,  6]],
1584                   [[ 7,  8,  9],
1585                    [10, 11, 12]]])
1586
1587  # Take the transpose of the matrices in dimension-0
1588  # (this common operation has a shorthand `linalg.transpose`)
1589  tf.transpose(x, perm=[0, 2, 1])  # [[[1,  4],
1590                                   #   [2,  5],
1591                                   #   [3,  6]],
1592                                   #  [[7, 10],
1593                                   #   [8, 11],
1594                                   #   [9, 12]]]
1595  ```
1596
1597  Args:
1598    a: A `Tensor`.
1599    perm: A permutation of the dimensions of `a`.
1600    conjugate: Optional bool. Setting it to `True` is mathematically equivalent
      to `tf.conj(tf.transpose(input))`.
1602    name: A name for the operation (optional).
1603
1604  Returns:
1605    A transposed `Tensor`.
1606  """
1607  return transpose(a=a, perm=perm, name=name, conjugate=conjugate)
1608
1609
1610@tf_export(v1=["transpose"])
1611def transpose(a, perm=None, name="transpose", conjugate=False):
1612  """Transposes `a`. Permutes the dimensions according to `perm`.
1613
1614  The returned tensor's dimension i will correspond to the input dimension
1615  `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
1616  the rank of the input tensor. Hence by default, this operation performs a
1617  regular matrix transpose on 2-D input Tensors. If conjugate is True and
1618  `a.dtype` is either `complex64` or `complex128` then the values of `a`
1619  are conjugated and transposed.
1620
1621  @compatibility(numpy)
1622  In `numpy` transposes are memory-efficient constant time operations as they
1623  simply return a new view of the same data with adjusted `strides`.
1624
1625  TensorFlow does not support strides, so `transpose` returns a new tensor with
1626  the items permuted.
1627  @end_compatibility
1628
1629  For example:
1630
1631  ```python
1632  x = tf.constant([[1, 2, 3], [4, 5, 6]])
1633  tf.transpose(x)  # [[1, 4]
1634                   #  [2, 5]
1635                   #  [3, 6]]
1636
1637  # Equivalently
1638  tf.transpose(x, perm=[1, 0])  # [[1, 4]
1639                                #  [2, 5]
1640                                #  [3, 6]]
1641
1642  # If x is complex, setting conjugate=True gives the conjugate transpose
1643  x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
1644                   [4 + 4j, 5 + 5j, 6 + 6j]])
1645  tf.transpose(x, conjugate=True)  # [[1 - 1j, 4 - 4j],
1646                                   #  [2 - 2j, 5 - 5j],
1647                                   #  [3 - 3j, 6 - 6j]]
1648
1649  # 'perm' is more useful for n-dimensional tensors, for n > 2
1650  x = tf.constant([[[ 1,  2,  3],
1651                    [ 4,  5,  6]],
1652                   [[ 7,  8,  9],
1653                    [10, 11, 12]]])
1654
1655  # Take the transpose of the matrices in dimension-0
1656  # (this common operation has a shorthand `linalg.transpose`)
1657  tf.transpose(x, perm=[0, 2, 1])  # [[[1,  4],
1658                                   #   [2,  5],
1659                                   #   [3,  6]],
1660                                   #  [[7, 10],
1661                                   #   [8, 11],
1662                                   #   [9, 12]]]
1663  ```
1664
1665  Args:
1666    a: A `Tensor`.
1667    perm: A permutation of the dimensions of `a`.
1668    name: A name for the operation (optional).
1669    conjugate: Optional bool. Setting it to `True` is mathematically equivalent
      to `tf.conj(tf.transpose(input))`.
1671
1672  Returns:
1673    A transposed `Tensor`.
1674  """
1675  with ops.name_scope(name, "transpose", [a]) as name:
1676    transpose_fn = (
1677        gen_array_ops.conjugate_transpose
1678        if (conjugate and a.dtype.is_complex) else gen_array_ops.transpose)
1679    if perm is None:
1680      a = ops.convert_to_tensor(a, name="a")
1681      if not a.get_shape().ndims:
1682        rank = gen_array_ops.rank(a)
1683        perm = (rank - 1) - gen_math_ops._range(0, rank, 1)
1684      else:
1685        rank = a.get_shape().ndims
1686        perm = (rank - 1) - np.arange(rank)
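        # For example, for rank 3 this yields perm == [2, 1, 0], i.e. all
        # dimensions reversed.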
1687      ret = transpose_fn(a, perm, name=name)
1688      # NOTE(mrry): Setting the shape explicitly because
1689      #   reverse is not handled by the shape function.
1690      if not context.executing_eagerly():
1691        input_shape = ret.op.inputs[0].get_shape().dims
1692        if input_shape is not None:
1693          ret.set_shape(input_shape[::-1])
1694    else:
1695      ret = transpose_fn(a, perm, name=name)
1696    return ret
1697
1698
1699# pylint: disable=invalid-name
1700@tf_export("linalg.transpose", v1=["linalg.transpose", "matrix_transpose"])
1701@deprecation.deprecated_endpoints("matrix_transpose")
1702def matrix_transpose(a, name="matrix_transpose", conjugate=False):
1703  """Transposes last two dimensions of tensor `a`.
1704
1705  For example:
1706
1707  ```python
1708  x = tf.constant([[1, 2, 3], [4, 5, 6]])
1709  tf.linalg.transpose(x)  # [[1, 4],
1710                          #  [2, 5],
1711                          #  [3, 6]]
1712
1713  x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
1714                   [4 + 4j, 5 + 5j, 6 + 6j]])
1715  tf.linalg.transpose(x, conjugate=True)  # [[1 - 1j, 4 - 4j],
1716                                          #  [2 - 2j, 5 - 5j],
1717                                          #  [3 - 3j, 6 - 6j]]
1718
1719  # Matrix with two batch dimensions.
1720  # x.shape is [1, 2, 3, 4]
1721  # tf.linalg.transpose(x) is shape [1, 2, 4, 3]
1722  ```
1723
1724  Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
1725  This is done with minimal cost, and is preferable to using this function. E.g.
1726
1727  ```python
1728  # Good!  Transpose is taken at minimal additional cost.
1729  tf.matmul(matrix, b, transpose_b=True)
1730
1731  # Inefficient!
1732  tf.matmul(matrix, tf.linalg.transpose(b))
1733  ```
1734
1735  @compatibility(numpy)
1736  In `numpy` transposes are memory-efficient constant time operations as they
1737  simply return a new view of the same data with adjusted `strides`.
1738
  TensorFlow does not support strides, so `linalg.transpose` returns a new
  tensor with the items permuted.
1741  @end_compatibility
1742
1743  Args:
1744    a: A `Tensor` with `rank >= 2`.
1745    name: A name for the operation (optional).
1746    conjugate: Optional bool. Setting it to `True` is mathematically equivalent
      to `tf.conj(tf.linalg.transpose(input))`.
1748
1749  Returns:
1750    A transposed batch matrix `Tensor`.
1751
1752  Raises:
1753    ValueError:  If `a` is determined statically to have `rank < 2`.
1754  """
1755  with ops.name_scope(name, values=[a]):
1756    a = ops.convert_to_tensor(a, name="a")
1757
1758    # If we know the number of dimensions (statically), we can do two things:
1759    # 1. Check that `a` is a (batch) matrix.
1760    # 2. Use a python list for perm.  This preserves static shape information
1761    #    and avoids extra computations.
1762    a_shape = a.get_shape()
1763    ndims = a_shape.ndims
1764    if ndims is not None:
1765      if ndims < 2:
1766        raise ValueError(
1767            "Argument 'a' should be a (batch) matrix, with rank >= 2.  Found: "
1768            "%s" % a_shape)
1769      perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
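      # For example, for ndims == 4 this yields perm == [0, 1, 3, 2]: the
      # batch dimensions stay in place and the last two are swapped.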
1770    else:
1771      a_rank = rank(a)
1772      perm = concat((gen_math_ops._range(0, a_rank - 2, 1),
1773                     [a_rank - 1, a_rank - 2]), 0)
1774
1775    return transpose(a, perm=perm, conjugate=conjugate)
1776
1777
1778# pylint: enable=invalid-name
1779
1780
1781def _constant_if_small(value, shape, dtype, name):
1782  try:
1783    if np.prod(shape) < 1000:
1784      return constant(value, shape=shape, dtype=dtype, name=name)
1785  except TypeError:
1786    # Happens when shape is a Tensor, list with Tensor elements, etc.
1787    pass
1788  return None
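
# For example, with the threshold above, tf.zeros([2, 3]) (6 elements) is
# emitted as a `Const`, while tf.zeros([4096, 4096]) falls through to a `Fill`
# op so that serialized GraphDefs stay small.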
1789
1790
1791@tf_export("zeros")
1792def zeros(shape, dtype=dtypes.float32, name=None):
1793  """Creates a tensor with all elements set to zero.
1794
1795  This operation returns a tensor of type `dtype` with shape `shape` and
1796  all elements set to zero.
1797
1798  For example:
1799
1800  ```python
1801  tf.zeros([3, 4], tf.int32)  # [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
1802  ```
1803
1804  Args:
1805    shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
1806      `int32`.
1807    dtype: The type of an element in the resulting `Tensor`.
1808    name: A name for the operation (optional).
1809
1810  Returns:
1811    A `Tensor` with all elements set to zero.
1812  """
1813  dtype = dtypes.as_dtype(dtype).base_dtype
1814  with ops.name_scope(name, "zeros", [shape]) as name:
1815    if dtype == dtypes.bool:
1816      zero = False
1817    elif dtype == dtypes.string:
1818      zero = ""
1819    else:
1820      zero = 0
1821
1822    if not isinstance(shape, ops.Tensor):
1823      try:
1824        # Create a constant if it won't be very big. Otherwise create a fill op
1825        # to prevent serialized GraphDefs from becoming too large.
1826        output = _constant_if_small(zero, shape, dtype, name)
1827        if output is not None:
1828          return output
1829
1830        # Go through tensor shapes to get int64-if-needed semantics
1831        shape = constant_op._tensor_shape_tensor_conversion_function(
1832            tensor_shape.TensorShape(shape))
1833      except (TypeError, ValueError):
1834        # Happens when shape is a list with tensor elements
1835        shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
1836    if not shape._shape_tuple():
1837      shape = reshape(shape, [-1])  # Ensure it's a vector
1838    output = fill(shape, constant(zero, dtype=dtype), name=name)
1839  assert output.dtype.base_dtype == dtype
1840  return output
1841
1842
1843@tf_export(v1=["zeros_like"])
1844@dispatch.add_dispatch_support
1845def zeros_like(tensor, dtype=None, name=None, optimize=True):
1846  """Creates a tensor with all elements set to zero.
1847
1848  Given a single tensor (`tensor`), this operation returns a tensor of the
1849  same type and shape as `tensor` with all elements set to zero. Optionally,
1850  you can use `dtype` to specify a new type for the returned tensor.
1851
1852  For example:
1853
1854  ```python
1855  tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
1856  tf.zeros_like(tensor)  # [[0, 0, 0], [0, 0, 0]]
1857  ```
1858
1859  Args:
1860    tensor: A `Tensor`.
1861    dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
1862      `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
1863      `complex64`, `complex128`, `bool` or `string`.
1864    name: A name for the operation (optional).
    optimize: If `True`, attempt to statically determine the shape of `tensor`
      and encode it as a constant.
1867
1868  Returns:
1869    A `Tensor` with all elements set to zero.
1870  """
1871  return zeros_like_impl(tensor, dtype, name, optimize)
1872
1873
1874@tf_export("zeros_like", v1=[])
1875@dispatch.add_dispatch_support
1876def zeros_like_v2(
1877    input,  # pylint: disable=redefined-builtin
1878    dtype=None,
1879    name=None):
1880  """Creates a tensor with all elements set to zero.
1881
1882  Given a single tensor (`tensor`), this operation returns a tensor of the
1883  same type and shape as `tensor` with all elements set to zero. Optionally,
1884  you can use `dtype` to specify a new type for the returned tensor.
1885
1886  For example:
1887
1888  ```python
1889  tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
1890  tf.zeros_like(tensor)  # [[0, 0, 0], [0, 0, 0]]
1891  ```
1892
1893  Args:
1894    input: A `Tensor`.
1895    dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
1896      `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
1897      `complex64`, `complex128`, `bool` or `string`.
1898    name: A name for the operation (optional).
1899
1900  Returns:
1901    A `Tensor` with all elements set to zero.
1902  """
1903  return zeros_like_impl(input, dtype, name, optimize=True)
1904
1905
1906def zeros_like_impl(tensor, dtype, name, optimize=True):
1907  """Internal implementation for the v1/v2 zeros_like API calls."""
1908  with ops.name_scope(name, "zeros_like", [tensor]) as name:
1909    tensor = ops.convert_to_tensor(tensor, name="tensor")
1910
1911    if context.executing_eagerly():
1912      if dtype is not None and dtype != tensor.dtype:
1913        return zeros(
1914            shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
1915      with ops.device(tensor.device):
1916        return gen_array_ops.zeros_like(tensor, name=name)
1917
1918    # For now, variant types must be created via zeros_like; as we need to
1919    # pass the input variant object to the proper zeros callback.
1920
1921    if (optimize and tensor.shape.is_fully_defined() and
1922        tensor.dtype != dtypes.variant):
1923      # We can produce a zeros tensor independent of the value of 'tensor',
1924      # since the shape is known statically.
1925      return zeros(tensor.shape, dtype=dtype or tensor.dtype, name=name)
1926
1927    if dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:
1928      return zeros(
1929          shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
1930    else:
1931      return gen_array_ops.zeros_like(tensor, name=name)
1932
1933
1934@tf_export(v1=["ones_like"])
1935@dispatch.add_dispatch_support
1936def ones_like(tensor, dtype=None, name=None, optimize=True):
1937  """Creates a tensor with all elements set to 1.
1938
1939  Given a single tensor (`tensor`), this operation returns a tensor of the same
1940  type and shape as `tensor` with all elements set to 1. Optionally, you can
1941  specify a new type (`dtype`) for the returned tensor.
1942
1943  For example:
1944
1945  ```python
1946  tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
1947  tf.ones_like(tensor)  # [[1, 1, 1], [1, 1, 1]]
1948  ```
1949
1950  Args:
1951    tensor: A `Tensor`.
1952    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
1953      `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
1954      `complex64`, `complex128` or `bool`.
1955    name: A name for the operation (optional).
    optimize: If `True`, attempt to statically determine the shape of `tensor`
      and encode it as a constant.
1958
1959  Returns:
1960    A `Tensor` with all elements set to 1.
1961  """
1962  return ones_like_impl(tensor, dtype, name, optimize)
1963
1964
1965@tf_export("ones_like", v1=[])
1966@dispatch.add_dispatch_support
1967def ones_like_v2(
1968    input,  # pylint: disable=redefined-builtin
1969    dtype=None,
1970    name=None):
1971  """Creates a tensor with all elements set to zero.
1972
1973  Given a single tensor (`tensor`), this operation returns a tensor of the
1974  same type and shape as `tensor` with all elements set to zero. Optionally,
1975  you can use `dtype` to specify a new type for the returned tensor.
1976
1977  For example:
1978
1979  ```python
1980  tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
1981  tf.ones_like(tensor)  # [[1, 1, 1], [1, 1, 1]]
1982  ```
1983
1984  Args:
1985    input: A `Tensor`.
1986    dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
1987      `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
1988      `complex64`, `complex128`, `bool` or `string`.
1989    name: A name for the operation (optional).
1990
1991  Returns:
    A `Tensor` with all elements set to one.
1993  """
1994  return ones_like_impl(input, dtype, name, optimize=True)
1995
1996
1997def ones_like_impl(tensor, dtype, name, optimize=True):
1998  """Internal implementation for the v1/v2 ones_like API calls."""
1999  with ops.name_scope(name, "ones_like", [tensor]) as name:
2000    tensor = ops.convert_to_tensor(tensor, name="tensor")
2001    ones_shape = shape_internal(tensor, optimize=optimize)
2002    if dtype is None:
2003      dtype = tensor.dtype
2004    ret = ones(ones_shape, dtype=dtype, name=name)
2005    if not context.executing_eagerly():
2006      ret.set_shape(tensor.get_shape())
2007    return ret
2008
2009
2010@tf_export("ones")
2011def ones(shape, dtype=dtypes.float32, name=None):
2012  """Creates a tensor with all elements set to 1.
2013
2014  This operation returns a tensor of type `dtype` with shape `shape` and all
2015  elements set to 1.
2016
2017  For example:
2018
2019  ```python
2020  tf.ones([2, 3], tf.int32)  # [[1, 1, 1], [1, 1, 1]]
2021  ```
2022
2023  Args:
2024    shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
2025      `int32`.
2026    dtype: The type of an element in the resulting `Tensor`.
2027    name: A name for the operation (optional).
2028
2029  Returns:
2030    A `Tensor` with all elements set to 1.
2031  """
2032  dtype = dtypes.as_dtype(dtype).base_dtype
2033  with ops.name_scope(name, "ones", [shape]) as name:
2034    one = True if dtype == dtypes.bool else 1
2035    if not isinstance(shape, ops.Tensor):
2036      try:
2037        # Create a constant if it won't be very big. Otherwise create a fill op
2038        # to prevent serialized GraphDefs from becoming too large.
2039        output = _constant_if_small(one, shape, dtype, name)
2040        if output is not None:
2041          return output
2042
2043        # Go through tensor shapes to get int64-if-needed semantics
2044        shape = constant_op._tensor_shape_tensor_conversion_function(
2045            tensor_shape.TensorShape(shape))
2046      except (TypeError, ValueError):
2047        # Happens when shape is a list with tensor elements
2048        shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
2049    if not shape._shape_tuple():
2050      shape = reshape(shape, [-1])  # Ensure it's a vector
2051    output = fill(shape, constant(one, dtype=dtype), name=name)
2052  assert output.dtype.base_dtype == dtype
2053  return output
2054
2055
2056@tf_export(v1=["placeholder"])
2057def placeholder(dtype, shape=None, name=None):
2058  """Inserts a placeholder for a tensor that will be always fed.
2059
2060  **Important**: This tensor will produce an error if evaluated. Its value must
2061  be fed using the `feed_dict` optional argument to `Session.run()`,
2062  `Tensor.eval()`, or `Operation.run()`.
2063
2064  For example:
2065
2066  ```python
2067  x = tf.placeholder(tf.float32, shape=(1024, 1024))
2068  y = tf.matmul(x, x)
2069
2070  with tf.Session() as sess:
2071    print(sess.run(y))  # ERROR: will fail because x was not fed.
2072
2073    rand_array = np.random.rand(1024, 1024)
2074    print(sess.run(y, feed_dict={x: rand_array}))  # Will succeed.
2075  ```
2076
2077  @compatibility(eager)
2078  Placeholders are not compatible with eager execution.
2079  @end_compatibility
2080
2081  Args:
2082    dtype: The type of elements in the tensor to be fed.
2083    shape: The shape of the tensor to be fed (optional). If the shape is not
2084      specified, you can feed a tensor of any shape.
2085    name: A name for the operation (optional).
2086
2087  Returns:
2088    A `Tensor` that may be used as a handle for feeding a value, but not
2089    evaluated directly.
2090
2091  Raises:
2092    RuntimeError: if eager execution is enabled
2093  """
2094  if context.executing_eagerly():
2095    raise RuntimeError("tf.placeholder() is not compatible with "
2096                       "eager execution.")
2097
2098  return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
2099
2100
2101@tf_export(v1=["placeholder_with_default"])
2102def placeholder_with_default(input, shape, name=None):  # pylint: disable=redefined-builtin
2103  """A placeholder op that passes through `input` when its output is not fed.
2104
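  For example (a sketch, assuming TF1-style `Session` execution):

  ```python
  # 'x' evaluates to its default value unless another value is fed for it.
  x = tf.placeholder_with_default(tf.constant([[1., 2.]]), shape=[None, 2])
  y = x * 2.

  with tf.Session() as sess:
    print(sess.run(y))  # Uses the default: [[2., 4.]]
    print(sess.run(y, feed_dict={x: [[3., 4.]]}))  # Uses the fed value.
  ```
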
2105  Args:
2106    input: A `Tensor`. The default value to produce when output is not fed.
2107    shape: A `tf.TensorShape` or list of `int`s. The (possibly partial) shape
2108      of the tensor.
2109    name: A name for the operation (optional).
2110
2111  Returns:
2112    A `Tensor`. Has the same type as `input`.
2113  """
2114  return gen_array_ops.placeholder_with_default(input, shape, name)
2115
2116
2117# pylint: disable=redefined-outer-name
2118def _normalize_sparse_shape(shape, name):
2119  """Returns a tuple of (Tensor or None, rank or None)."""
2120  if shape is None:
2121    return (None, None)
2122  rank = shape.get_shape()[0] if isinstance(shape, ops.Tensor) else len(shape)
2123  if not isinstance(shape, ops.Tensor) and None in shape:
2124    return (None, rank)
2125  return (ops.convert_to_tensor(shape, dtype=dtypes.int64, name=name), rank)
2126
2127
2128@tf_export(v1=["sparse.placeholder", "sparse_placeholder"])
2129@deprecation.deprecated_endpoints("sparse_placeholder")
2130def sparse_placeholder(dtype, shape=None, name=None):
2131  """Inserts a placeholder for a sparse tensor that will be always fed.
2132
2133  **Important**: This sparse tensor will produce an error if evaluated.
2134  Its value must be fed using the `feed_dict` optional argument to
2135  `Session.run()`, `Tensor.eval()`, or `Operation.run()`.
2136
2137  For example:
2138
2139  ```python
2140  x = tf.sparse.placeholder(tf.float32)
2141  y = tf.sparse.reduce_sum(x)
2142
2143  with tf.Session() as sess:
2144    print(sess.run(y))  # ERROR: will fail because x was not fed.
2145
2146    indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
2147    values = np.array([1.0, 2.0], dtype=np.float32)
2148    shape = np.array([7, 9, 2], dtype=np.int64)
2149    print(sess.run(y, feed_dict={
2150      x: tf.SparseTensorValue(indices, values, shape)}))  # Will succeed.
2151    print(sess.run(y, feed_dict={
2152      x: (indices, values, shape)}))  # Will succeed.
2153
2154    sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
2155    sp_value = sp.eval(session=sess)
2156    print(sess.run(y, feed_dict={x: sp_value}))  # Will succeed.
2157  ```
2158
  @compatibility(eager)
  Placeholders are not compatible with eager execution.
  @end_compatibility
2160
2161  Args:
2162    dtype: The type of `values` elements in the tensor to be fed.
2163    shape: The shape of the tensor to be fed (optional). If the shape is not
2164      specified, you can feed a sparse tensor of any shape.
2165    name: A name for prefixing the operations (optional).
2166
2167  Returns:
2168    A `SparseTensor` that may be used as a handle for feeding a value, but not
2169    evaluated directly.
2170
2171  Raises:
2172    RuntimeError: if eager execution is enabled
2173  """
2174  if context.executing_eagerly():
    raise RuntimeError("tf.sparse.placeholder() is not compatible with "
                       "eager execution.")
2177
2178  shape_name = (name + "/shape") if name is not None else None
2179  shape, rank = _normalize_sparse_shape(shape, shape_name)
2180  if shape is None:
2181    shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
2182  return sparse_tensor.SparseTensor(
2183      values=placeholder(
2184          dtype,
2185          shape=[None],
2186          name=(name + "/values") if name is not None else None),
2187      indices=placeholder(
2188          dtypes.int64, shape=[None, rank],
2189          name=(name + "/indices") if name is not None else None),
2190      dense_shape=shape)
2191
2192
2193# pylint: enable=redefined-outer-name
2194
2195
2196@tf_export("pad", v1=[])
2197def pad_v2(tensor, paddings, mode="CONSTANT", constant_values=0, name=None):
2198  """Pads a tensor.
2199
2200  This operation pads a `tensor` according to the `paddings` you specify.
2201  `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
2202  `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
2203  many values to add before the contents of `tensor` in that dimension, and
2204  `paddings[D, 1]` indicates how many values to add after the contents of
2205  `tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
2206  and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
2207  `mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
2208  no greater than `tensor.dim_size(D)`.
2209
2210  The padded size of each dimension D of the output is:
2211
2212  `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
2213
2214  For example:
2215
2216  ```python
2217  t = tf.constant([[1, 2, 3], [4, 5, 6]])
2218  paddings = tf.constant([[1, 1,], [2, 2]])
2219  # 'constant_values' is 0.
2220  # rank of 't' is 2.
2221  tf.pad(t, paddings, "CONSTANT")  # [[0, 0, 0, 0, 0, 0, 0],
2222                                   #  [0, 0, 1, 2, 3, 0, 0],
2223                                   #  [0, 0, 4, 5, 6, 0, 0],
2224                                   #  [0, 0, 0, 0, 0, 0, 0]]
2225
2226  tf.pad(t, paddings, "REFLECT")  # [[6, 5, 4, 5, 6, 5, 4],
2227                                  #  [3, 2, 1, 2, 3, 2, 1],
2228                                  #  [6, 5, 4, 5, 6, 5, 4],
2229                                  #  [3, 2, 1, 2, 3, 2, 1]]
2230
2231  tf.pad(t, paddings, "SYMMETRIC")  # [[2, 1, 1, 2, 3, 3, 2],
2232                                    #  [2, 1, 1, 2, 3, 3, 2],
2233                                    #  [5, 4, 4, 5, 6, 6, 5],
2234                                    #  [5, 4, 4, 5, 6, 6, 5]]
2235  ```
2236
2237  Args:
2238    tensor: A `Tensor`.
2239    paddings: A `Tensor` of type `int32`.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive).
2241    constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
2242      same type as `tensor`.
2243    name: A name for the operation (optional).
2244
2245  Returns:
2246    A `Tensor`. Has the same type as `tensor`.
2247
2248  Raises:
2249    ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
2250  """
2251  return pad(tensor, paddings, mode, name, constant_values)
2252
2253
2254@tf_export(v1=["pad"])
2255def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0):  # pylint: disable=invalid-name
2256  """Pads a tensor.
2257
2258  This operation pads a `tensor` according to the `paddings` you specify.
2259  `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
2260  `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
2261  many values to add before the contents of `tensor` in that dimension, and
2262  `paddings[D, 1]` indicates how many values to add after the contents of
2263  `tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
2264  and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
2265  `mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
2266  no greater than `tensor.dim_size(D)`.
2267
2268  The padded size of each dimension D of the output is:
2269
2270  `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
2271
2272  For example:
2273
2274  ```python
2275  t = tf.constant([[1, 2, 3], [4, 5, 6]])
2276  paddings = tf.constant([[1, 1,], [2, 2]])
2277  # 'constant_values' is 0.
2278  # rank of 't' is 2.
2279  tf.pad(t, paddings, "CONSTANT")  # [[0, 0, 0, 0, 0, 0, 0],
2280                                   #  [0, 0, 1, 2, 3, 0, 0],
2281                                   #  [0, 0, 4, 5, 6, 0, 0],
2282                                   #  [0, 0, 0, 0, 0, 0, 0]]
2283
2284  tf.pad(t, paddings, "REFLECT")  # [[6, 5, 4, 5, 6, 5, 4],
2285                                  #  [3, 2, 1, 2, 3, 2, 1],
2286                                  #  [6, 5, 4, 5, 6, 5, 4],
2287                                  #  [3, 2, 1, 2, 3, 2, 1]]
2288
2289  tf.pad(t, paddings, "SYMMETRIC")  # [[2, 1, 1, 2, 3, 3, 2],
2290                                    #  [2, 1, 1, 2, 3, 3, 2],
2291                                    #  [5, 4, 4, 5, 6, 6, 5],
2292                                    #  [5, 4, 4, 5, 6, 6, 5]]
2293  ```
2294
2295  Args:
2296    tensor: A `Tensor`.
2297    paddings: A `Tensor` of type `int32`.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive).
2299    name: A name for the operation (optional).
2300    constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
2301      same type as `tensor`.
2302
2303  Returns:
2304    A `Tensor`. Has the same type as `tensor`.
2305
2306  Raises:
2307    ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
2308  """
2309
2310  # Convert lower/mixed case to upper for NumPy compatibility
2311  # NumPy uses all lower-case modes.
2312  mode = mode.upper()
2313  if mode == "CONSTANT":
    # TODO(rjryan): Once the forward compatibility period (3 weeks) has
    # passed, remove the "Pad" fallback here.
2316    if constant_values != 0:
2317      result = gen_array_ops.pad_v2(
2318          tensor, paddings, constant_values, name=name)
2319    else:
2320      result = gen_array_ops.pad(tensor, paddings, name=name)
2321  elif mode == "REFLECT":
2322    result = gen_array_ops.mirror_pad(
2323        tensor, paddings, mode="REFLECT", name=name)
2324  elif mode == "SYMMETRIC":
2325    result = gen_array_ops.mirror_pad(
2326        tensor, paddings, mode="SYMMETRIC", name=name)
2327  else:
2328    raise ValueError("Unknown padding mode: %s" % mode)
2329
2330  # Restore shape information where possible.
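  # For example, padding a [2, 3] input with paddings [[1, 1], [2, 2]] gives a
  # statically known result shape of [1 + 2 + 1, 2 + 3 + 2] = [4, 7].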
2331  if not context.executing_eagerly():
2332    paddings_constant = tensor_util.constant_value(
2333        result.op.inputs[1], partial=True)
2334    input_shape = result.op.inputs[0].shape
2335    if (input_shape.ndims is not None and not result.shape.is_fully_defined()
2336        and paddings_constant is not None):
2337      new_shape = []
2338      for padding, dim in zip(paddings_constant, input_shape.as_list()):
2339        if padding is None or dim is None or any((x is None for x in padding)):
2340          new_shape.append(None)
2341        else:
2342          new_shape.append(sum(padding) + dim)
2343      result.set_shape(new_shape)
2344
2345  return result
2346
2347
2348@tf_export("meshgrid")
2349def meshgrid(*args, **kwargs):
2350  """Broadcasts parameters for evaluation on an N-D grid.
2351
2352  Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
2353  of N-D coordinate arrays for evaluating expressions on an N-D grid.
2354
2355  Notes:
2356
2357  `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
2358  When the `indexing` argument is set to 'xy' (the default), the broadcasting
2359  instructions for the first two dimensions are swapped.
2360
2361  Examples:
2362
2363  Calling `X, Y = meshgrid(x, y)` with the tensors
2364
2365  ```python
2366  x = [1, 2, 3]
2367  y = [4, 5, 6]
2368  X, Y = tf.meshgrid(x, y)
2369  # X = [[1, 2, 3],
2370  #      [1, 2, 3],
2371  #      [1, 2, 3]]
2372  # Y = [[4, 4, 4],
2373  #      [5, 5, 5],
2374  #      [6, 6, 6]]
2375  ```
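
  With matrix ('ij') indexing, the first two outputs are not swapped (a sketch
  of the expected values):

  ```python
  X, Y = tf.meshgrid(x, y, indexing='ij')
  # X = [[1, 1, 1],
  #      [2, 2, 2],
  #      [3, 3, 3]]
  # Y = [[4, 5, 6],
  #      [4, 5, 6],
  #      [4, 5, 6]]
  ```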
2376
2377  Args:
2378    *args: `Tensor`s with rank 1.
2379    **kwargs:
2380      - indexing: Either 'xy' or 'ij' (optional, default: 'xy').
2381      - name: A name for the operation (optional).
2382
2383  Returns:
2384    outputs: A list of N `Tensor`s with rank N.
2385
2386  Raises:
    TypeError: When an unsupported keyword argument is passed.
2388    ValueError: When indexing keyword argument is not one of `xy` or `ij`.
2389  """
2390
2391  indexing = kwargs.pop("indexing", "xy")
2392  name = kwargs.pop("name", "meshgrid")
2393  if kwargs:
2394    key = list(kwargs.keys())[0]
2395    raise TypeError("'{}' is an invalid keyword argument "
2396                    "for this function".format(key))
2397
2398  if indexing not in ("xy", "ij"):
2399    raise ValueError("indexing parameter must be either 'xy' or 'ij'")
2400
2401  with ops.name_scope(name, "meshgrid", args) as name:
2402    ndim = len(args)
2403    s0 = (1,) * ndim
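    # For ndim == 3, s0 == (1, 1, 1); input i is reshaped below so that its
    # values lie along axis i, e.g. to shapes (-1, 1, 1), (1, -1, 1) and
    # (1, 1, -1).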
2404
2405    # Prepare reshape by inserting dimensions with size 1 where needed
2406    output = []
2407    for i, x in enumerate(args):
      output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1:])))
2409    # Create parameters for broadcasting each tensor to the full size
2410    shapes = [size(x) for x in args]
2411
2412    output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
2413
2414    if indexing == "xy" and ndim > 1:
2415      output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
2416      output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
2417      shapes[0], shapes[1] = shapes[1], shapes[0]
2418
2419    # TODO(nolivia): improve performance with a broadcast
2420    mult_fact = ones(shapes, output_dtype)
2421    return [x * mult_fact for x in output]
2422
2423
2424NEW_AXIS = -1
2425SHRINK_AXIS = -2
2426
2427
2428# PEP-8 naming
2429# pylint: disable=invalid-name,redefined-outer-name
2430def _compute_size_of_strided_dim(shrink, spec, size):
2431  """Computes the size of a single strided slice dimension."""
2432
  unknown = None  # A None result means the size is statically unknown.
  use_full_range = None  # A None slice bound means "use the full range".
  # If this is a shrink axis (i.e. a non-range index),
  # it either produces an error or returns 1.
2437  if shrink:
2438    return 1
2439  if size is unknown or size.value is unknown:
2440    return unknown
2441  size = size.value
2442  stride = spec.step
2443  if stride is not unknown:
    if stride == 0:
      return unknown
    valid_range = [0, size] if stride > 0 else [-1, size - 1]
2448
2449    # PEP-8 naming
2450    # pylint: disable=invalid-name
2451    def canonical(x, c):
2452      if x is use_full_range:
2453        return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
2454      else:
2455        x_fwd = size + x if x < 0 else x  # make negative indices positive
2456        return max(valid_range[0], min(valid_range[1], x_fwd))
2457
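    # Worked example (a sketch): for x[2:9:3] over a dimension of size 10,
    # begin == 2, end == 9 and stride == 3, so the slice has
    # (9 - 2) // 3 + 1 == 3 elements (indices 2, 5 and 8).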
2458    begin = canonical(spec.start, 0)
2459    end = canonical(spec.stop, 1)
2460    interval_length = end - begin
2461    if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
2462      return 0
2463    else:
2464      remainder = 1 if interval_length % stride != 0 else 0
2465      return interval_length // stride + remainder
2466  else:
2467    return unknown  # unknown because stride is unknown
2468
2469
2470def _TileGradShape(op):
2471  """Shape function for the TileGrad op."""
2472  multiples_shape = op.inputs[1].get_shape().with_rank(1)
2473  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
2474  # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
2475  # it is a vector of non-negative integers, and (ii) doing so allows
2476  # us to handle partially-known multiples.
2477  multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
2478      input_shape.ndims)
2479  if multiples.ndims is None:
2480    return [tensor_shape.unknown_shape()]
2481  else:
2482    output_dims = []
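    # For example, a Tile of an input with shape [3, 2] and multiples [2, 4]
    # produced shape [6, 8]; its gradient recovers [6 // 2, 8 // 4] == [3, 2].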
2483    for dim, multiple in zip(input_shape.dims, multiples.dims):
2484      output_dims.append(dim // multiple)
2485    return [tensor_shape.TensorShape(output_dims)]
2486
2487
2488@tf_export("edit_distance")
2489def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
2490  """Computes the Levenshtein distance between sequences.
2491
2492  This operation takes variable-length sequences (`hypothesis` and `truth`),
2493  each provided as a `SparseTensor`, and computes the Levenshtein distance.
  You can normalize the edit distance by the length of `truth` by setting
  `normalize` to `True`.
2496
2497  For example, given the following input:
2498
2499  ```python
2500  # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
2501  #   (0,0) = ["a"]
2502  #   (1,0) = ["b"]
2503  hypothesis = tf.SparseTensor(
2504      [[0, 0, 0],
2505       [1, 0, 0]],
2506      ["a", "b"],
2507      (2, 1, 1))
2508
2509  # 'truth' is a tensor of shape `[2, 2]` with variable-length values:
2510  #   (0,0) = []
2511  #   (0,1) = ["a"]
2512  #   (1,0) = ["b", "c"]
2513  #   (1,1) = ["a"]
2514  truth = tf.SparseTensor(
2515      [[0, 1, 0],
2516       [1, 0, 0],
2517       [1, 0, 1],
2518       [1, 1, 0]],
2519      ["a", "b", "c", "a"],
2520      (2, 2, 2))
2521
2522  normalize = True
2523  ```
2524
2525  This operation would return the following:
2526
2527  ```python
2528  # 'output' is a tensor of shape `[2, 2]` with edit distances normalized
2529  # by 'truth' lengths.
  output ==> [[inf, 1.0],  # (0,0): no truth, (0,1): no hypothesis
              [0.5, 1.0]]  # (1,0): addition, (1,1): no hypothesis
2532  ```
2533
2534  Args:
2535    hypothesis: A `SparseTensor` containing hypothesis sequences.
2536    truth: A `SparseTensor` containing truth sequences.
    normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
      the length of `truth`.
2539    name: A name for the operation (optional).
2540
2541  Returns:
2542    A dense `Tensor` with rank `R - 1`, where R is the rank of the
2543    `SparseTensor` inputs `hypothesis` and `truth`.
2544
2545  Raises:
2546    TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
2547  """
2548  if not isinstance(hypothesis, (sparse_tensor.SparseTensor,
2549                                 sparse_tensor.SparseTensorValue)):
2550    raise TypeError("Hypothesis must be a SparseTensor.")
2551  if not isinstance(truth, (sparse_tensor.SparseTensor,
2552                            sparse_tensor.SparseTensorValue)):
2553    raise TypeError("Truth must be a SparseTensor.")
2554
2555  return gen_array_ops.edit_distance(
2556      hypothesis.indices,
2557      hypothesis.values,
2558      hypothesis.dense_shape,
2559      truth.indices,
2560      truth.values,
2561      truth.dense_shape,
2562      normalize=normalize,
2563      name=name)
2564
2565
2566@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
2567def _FakeQuantWithMinMaxArgsGradient(op, grad):
2568  """Gradient for FakeQuantWithMinMaxArgs op."""
2569  return fake_quant_with_min_max_args_gradient(
2570      grad,
2571      op.inputs[0],
2572      min=op.get_attr("min"),
2573      max=op.get_attr("max"),
2574      num_bits=op.get_attr("num_bits"),
2575      narrow_range=op.get_attr("narrow_range"))
2576
2577
2578@ops.RegisterGradient("FakeQuantWithMinMaxVars")
2579def _FakeQuantWithMinMaxVarsGradient(op, grad):
2580  """Gradient for FakeQuantWithMinMaxVars op."""
2581  return fake_quant_with_min_max_vars_gradient(
2582      grad,
2583      op.inputs[0],
2584      op.inputs[1],
2585      op.inputs[2],
2586      num_bits=op.get_attr("num_bits"),
2587      narrow_range=op.get_attr("narrow_range"))
2588
2589
2590@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
2591def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
2592  """Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
2593  return fake_quant_with_min_max_vars_per_channel_gradient(
2594      grad,
2595      op.inputs[0],
2596      op.inputs[1],
2597      op.inputs[2],
2598      num_bits=op.get_attr("num_bits"),
2599      narrow_range=op.get_attr("narrow_range"))
2600
2601
2602@tf_export("required_space_to_batch_paddings")
2603def required_space_to_batch_paddings(input_shape,
2604                                     block_shape,
2605                                     base_paddings=None,
2606                                     name=None):
2607  """Calculate padding required to make block_shape divide input_shape.
2608
2609  This function can be used to calculate a suitable paddings argument for use
2610  with space_to_batch_nd and batch_to_space_nd.
2611
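  For example (a sketch of the expected values):

  ```python
  # Each dimension is padded at the end up to the next multiple of its block
  # size: 5 -> 6 for block size 2, and 7 -> 9 for block size 3.
  paddings, crops = tf.required_space_to_batch_paddings([5, 7], [2, 3])
  # paddings == [[0, 1], [0, 2]]
  # crops    == [[0, 1], [0, 2]]
  ```
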
2612  Args:
2613    input_shape: int32 Tensor of shape [N].
2614    block_shape: int32 Tensor of shape [N].
2615    base_paddings: Optional int32 Tensor of shape [N, 2].  Specifies the minimum
2616      amount of padding to use.  All elements must be >= 0.  If not specified,
2617      defaults to 0.
2618    name: string.  Optional name prefix.
2619
2620  Returns:
2621    (paddings, crops), where:
2622
2623    `paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
2624    satisfying:
2625
2626        paddings[i, 0] = base_paddings[i, 0].
2627        0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
2628        (input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0
2629
2630        crops[i, 0] = 0
2631        crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]
2632
  Raises:
    ValueError: If called with incompatible shapes.
2634  """
2635  with ops.name_scope(name, "required_space_to_batch_paddings",
2636                      [input_shape, block_shape]):
2637    input_shape = ops.convert_to_tensor(
2638        input_shape, dtype=dtypes.int32, name="input_shape")
2639    block_shape = ops.convert_to_tensor(
2640        block_shape, dtype=dtypes.int32, name="block_shape")
2641
2642    block_shape.get_shape().assert_is_fully_defined()
2643    block_shape.get_shape().assert_has_rank(1)
2644    num_block_dims = block_shape.get_shape().dims[0].value
2645    if num_block_dims == 0:
2646      return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
2647
2648    input_shape.get_shape().assert_is_compatible_with([num_block_dims])
2649
2650    if base_paddings is not None:
2651      base_paddings = ops.convert_to_tensor(
2652          base_paddings, dtype=dtypes.int32, name="base_paddings")
2653      base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
2654    else:
2655      base_paddings = zeros([num_block_dims, 2], dtypes.int32)
2656
2657    const_block_shape = tensor_util.constant_value(block_shape)
2658    const_input_shape = tensor_util.constant_value(input_shape)
2659    const_base_paddings = tensor_util.constant_value(base_paddings)
2660    if (const_block_shape is not None and const_input_shape is not None and
2661        const_base_paddings is not None):
2662      block_shape = const_block_shape
2663      input_shape = const_input_shape
2664      base_paddings = const_base_paddings
2665
2666    # Use same expression for both constant and non-constant case.
2667    pad_start = base_paddings[:, 0]
2668    orig_pad_end = base_paddings[:, 1]
2669    full_input_shape = input_shape + pad_start + orig_pad_end
2670    pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
2671    pad_end = orig_pad_end + pad_end_extra
2672
2673    result_paddings = stack(
2674        [[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
2675        name="paddings")
2676    result_crops = stack(
2677        [[0, pad_end_extra[i]] for i in range(num_block_dims)], name="crops")
2678    return result_paddings, result_crops
2679
2680
2681@tf_export(v1=["nn.space_to_batch", "space_to_batch"])
2682@deprecation.deprecated_endpoints("space_to_batch")
2683def space_to_batch(  # pylint: disable=missing-docstring
2684    input, paddings, block_size=None, name=None, block_shape=None):  # pylint: disable=redefined-builtin
2685  block_size = deprecation.deprecated_argument_lookup(
2686      "block_shape", block_shape, "block_size", block_size)
2687  result = space_to_batch_nd(
2688      input,
2689      paddings=paddings,
2690      block_shape=np.array([block_size, block_size], dtype=np.int64),
2691      name=name)
2692  result.set_shape(result.get_shape().with_rank(4))
2693  return result
2694
2695
2696space_to_batch.__doc__ = gen_array_ops.space_to_batch.__doc__
2697
2698
2699@tf_export("space_to_batch", "nn.space_to_batch", v1=[])
2700def space_to_batch_v2(input, block_shape, paddings, name=None):  # pylint: disable=redefined-builtin
2701  return space_to_batch_nd(input, block_shape, paddings, name)
2702
2703
2704space_to_batch_v2.__doc__ = gen_array_ops.space_to_batch_nd.__doc__
2705
2706
2707@tf_export(v1=["nn.space_to_depth", "space_to_depth"])
2708@deprecation.deprecated_endpoints("space_to_depth")
2709def space_to_depth(input, block_size, name=None, data_format="NHWC"):  # pylint: disable=redefined-builtin
2710  return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
2711
2712
2713space_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__
2714
2715
2716@tf_export("nn.space_to_depth", v1=[])
2717def space_to_depth_v2(input, block_size, data_format="NHWC", name=None):  # pylint: disable=redefined-builtin
2718  return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
2719
2720
2721space_to_depth_v2.__doc__ = gen_array_ops.space_to_depth.__doc__
2722
2723
2724@tf_export(v1=["nn.depth_to_space", "depth_to_space"])
2725@deprecation.deprecated_endpoints("depth_to_space")
2726def depth_to_space(input, block_size, name=None, data_format="NHWC"):  # pylint: disable=redefined-builtin
2727  return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
2728
2729
2730depth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__
2731
2732
2733@tf_export("nn.depth_to_space", v1=[])
2734def depth_to_space_v2(input, block_size, data_format="NHWC", name=None):  # pylint: disable=redefined-builtin
2735  return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
2736
2737
2738depth_to_space_v2.__doc__ = gen_array_ops.depth_to_space.__doc__
2739
2740
2741@tf_export(v1=["batch_to_space"])
2742def batch_to_space(input, crops, block_size, name=None, block_shape=None):  # pylint: disable=redefined-builtin,missing-docstring
2743  block_size = deprecation.deprecated_argument_lookup(
2744      "block_shape", block_shape, "block_size", block_size)
2745  result = batch_to_space_nd(
2746      input,
2747      crops=crops,
2748      block_shape=np.array([block_size, block_size], dtype=np.int64),
2749      name=name)
2750  result.set_shape(result.get_shape().with_rank(4))
2751  return result
2752
2753
2754batch_to_space.__doc__ = gen_array_ops.batch_to_space.__doc__
2755
2756
2757@tf_export("batch_to_space", v1=[])
2758def batch_to_space_v2(input, block_shape, crops, name=None):  # pylint: disable=redefined-builtin
2759  """BatchToSpace for N-D tensors of type T.
2760
2761  This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of
2762  shape `block_shape + [batch]`, interleaves these blocks back into the grid
2763  defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the
2764  same rank as the input.  The spatial dimensions of this intermediate result
2765  are then optionally cropped according to `crops` to produce the output.  This
2766  is the reverse of SpaceToBatch.  See below for a precise description.
2767
2768  Args:
2769    input: A `Tensor`.
2770      N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
2771      where spatial_shape has M dimensions.
2772    block_shape: A `Tensor`. Must be one of the following types:
2773      `int32`, `int64`. 1-D with shape `[M]`, all values must be >= 1.
2774      For backwards compatibility with TF 1.0, this parameter may be an int, in
2775      which case it is converted to
2776      `numpy.array([block_shape, block_shape], dtype=numpy.int64)`.
2777    crops: A `Tensor`. Must be one of the following types: `int32`, `int64`.
2778      2-D with shape `[M, 2]`, all values must be >= 0.
2779        `crops[i] = [crop_start, crop_end]` specifies the amount to crop from
2780        input dimension `i + 1`, which corresponds to spatial dimension `i`.  It
2781        is required that
2782        `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
2783
2784      This operation is equivalent to the following steps:
2785
2786      1. Reshape `input` to `reshaped` of shape:
2787           [block_shape[0], ..., block_shape[M-1],
2788            batch / prod(block_shape),
2789            input_shape[1], ..., input_shape[N-1]]
2790
2791      2. Permute dimensions of `reshaped` to produce `permuted` of shape
2792           [batch / prod(block_shape),
2793
2794            input_shape[1], block_shape[0],
2795            ...,
2796            input_shape[M], block_shape[M-1],
2797
2798            input_shape[M+1], ..., input_shape[N-1]]
2799
2800      3. Reshape `permuted` to produce `reshaped_permuted` of shape
2801           [batch / prod(block_shape),
2802
2803            input_shape[1] * block_shape[0],
2804            ...,
2805            input_shape[M] * block_shape[M-1],
2806
2807            input_shape[M+1],
2808            ...,
2809            input_shape[N-1]]
2810
2811      4. Crop the start and end of dimensions `[1, ..., M]` of
2812         `reshaped_permuted` according to `crops` to produce the
2813         output of shape:
2814           [batch / prod(block_shape),
2815
2816            input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
2817            ...,
2818            input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
2819
2820            input_shape[M+1], ..., input_shape[N-1]]
2821
2822      Some examples:
2823
2824      (1) For the following input of shape `[4, 1, 1, 1]`,
2825          `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
2826
2827      ```
2828      [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
2829      ```
2830
2831      The output tensor has shape `[1, 2, 2, 1]` and value:
2832
2833      ```
2834      x = [[[[1], [2]], [[3], [4]]]]
2835      ```
2836
2837      (2) For the following input of shape `[4, 1, 1, 3]`,
2838          `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
2839
2840      ```
2841      [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
2842      ```
2843
2844      The output tensor has shape `[1, 2, 2, 3]` and value:
2845
2846      ```
2847      x = [[[[1, 2, 3], [4, 5, 6]],
2848            [[7, 8, 9], [10, 11, 12]]]]
2849      ```
2850
2851      (3) For the following input of shape `[4, 2, 2, 1]`,
2852          `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
2853
2854      ```
2855      x = [[[[1], [3]], [[9], [11]]],
2856           [[[2], [4]], [[10], [12]]],
2857           [[[5], [7]], [[13], [15]]],
2858           [[[6], [8]], [[14], [16]]]]
2859      ```
2860
2861      The output tensor has shape `[1, 4, 4, 1]` and value:
2862
2863      ```
2864      x = [[[1],   [2],  [3],  [4]],
2865           [[5],   [6],  [7],  [8]],
2866           [[9],  [10], [11],  [12]],
2867           [[13], [14], [15],  [16]]]
2868      ```
2869
2870      (4) For the following input of shape `[8, 1, 3, 1]`,
2871          `block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`:
2872
2873      ```
2874      x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
2875           [[[0], [2], [4]]], [[[0], [10], [12]]],
2876           [[[0], [5], [7]]], [[[0], [13], [15]]],
2877           [[[0], [6], [8]]], [[[0], [14], [16]]]]
2878      ```
2879
2880      The output tensor has shape `[2, 2, 4, 1]` and value:
2881
2882      ```
2883      x = [[[[1],   [2],  [3],  [4]],
2884            [[5],   [6],  [7],  [8]]],
2885           [[[9],  [10], [11],  [12]],
2886            [[13], [14], [15],  [16]]]]
2887      ```
2888    name: A name for the operation (optional).
2889
2890  Returns:
2891    A `Tensor`. Has the same type as `input`.
2892  """
2893  if isinstance(block_shape, int):
2894    block_shape = np.array([block_shape, block_shape], dtype=np.int64)
2895
2896  return batch_to_space_nd(input=input,
2897                           block_shape=block_shape,
2898                           crops=crops,
2899                           name=name)
2900
2901
2902@tf_export("one_hot")
2903def one_hot(indices,
2904            depth,
2905            on_value=None,
2906            off_value=None,
2907            axis=None,
2908            dtype=None,
2909            name=None):
2910  """Returns a one-hot tensor.
2911
2912  The locations represented by indices in `indices` take value `on_value`,
2913  while all other locations take value `off_value`.
2914
2915  `on_value` and `off_value` must have matching data types. If `dtype` is also
2916  provided, they must be the same data type as specified by `dtype`.
2917
  If `on_value` is not provided, it will default to the value `1` with type
  `dtype`.

  If `off_value` is not provided, it will default to the value `0` with type
  `dtype`.
2923
2924  If the input `indices` is rank `N`, the output will have rank `N+1`. The
2925  new axis is created at dimension `axis` (default: the new axis is appended
2926  at the end).
2927
  If `indices` is a scalar, the output shape will be a vector of length
  `depth`.
2929
2930  If `indices` is a vector of length `features`, the output shape will be:
2931
2932  ```
2933    features x depth if axis == -1
2934    depth x features if axis == 0
2935  ```
2936
2937  If `indices` is a matrix (batch) with shape `[batch, features]`, the output
2938  shape will be:
2939
2940  ```
2941    batch x features x depth if axis == -1
2942    batch x depth x features if axis == 1
2943    depth x batch x features if axis == 0
2944  ```
2945
  If `dtype` is not provided, it is inferred from the data type of `on_value`
  or `off_value`, if one or both are passed in. If none of `on_value`,
  `off_value`, or `dtype` are provided, `dtype` defaults to `tf.float32`.
2950
2951  Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
2952  etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.
2953
2954  For example:
2955
2956  ```python
2957  indices = [0, 1, 2]
2958  depth = 3
2959  tf.one_hot(indices, depth)  # output: [3 x 3]
2960  # [[1., 0., 0.],
2961  #  [0., 1., 0.],
2962  #  [0., 0., 1.]]
2963
2964  indices = [0, 2, -1, 1]
2965  depth = 3
2966  tf.one_hot(indices, depth,
2967             on_value=5.0, off_value=0.0,
2968             axis=-1)  # output: [4 x 3]
2969  # [[5.0, 0.0, 0.0],  # one_hot(0)
2970  #  [0.0, 0.0, 5.0],  # one_hot(2)
2971  #  [0.0, 0.0, 0.0],  # one_hot(-1)
2972  #  [0.0, 5.0, 0.0]]  # one_hot(1)
2973
2974  indices = [[0, 2], [1, -1]]
2975  depth = 3
2976  tf.one_hot(indices, depth,
2977             on_value=1.0, off_value=0.0,
2978             axis=-1)  # output: [2 x 2 x 3]
2979  # [[[1.0, 0.0, 0.0],   # one_hot(0)
2980  #   [0.0, 0.0, 1.0]],  # one_hot(2)
2981  #  [[0.0, 1.0, 0.0],   # one_hot(1)
2982  #   [0.0, 0.0, 0.0]]]  # one_hot(-1)
2983  ```
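
  For a non-numeric output, both fill values must be passed explicitly; a
  small illustration:

  ```python
  tf.one_hot([0, 2], depth=3,
             on_value="yes", off_value="no")  # output: [2 x 3], dtype string
  # [["yes", "no", "no"],
  #  ["no", "no", "yes"]]
  ```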

  Args:
    indices: A `Tensor` of indices.
    depth: A scalar defining the depth of the one hot dimension.
    on_value: A scalar defining the value to fill in output when `indices[j]
      = i`. (default: 1)
    off_value: A scalar defining the value to fill in output when `indices[j]
      != i`. (default: 0)
    axis: The axis to fill (default: -1, a new inner-most axis).
    dtype: The data type of the output tensor.
    name: A name for the operation (optional).

  Returns:
    output: The one-hot tensor.

  Raises:
    TypeError: If the dtype of either `on_value` or `off_value` doesn't match
      `dtype`.
    TypeError: If the dtypes of `on_value` and `off_value` don't match one
      another.
  """
  with ops.name_scope(name, "one_hot",
                      [indices, depth, on_value, off_value, axis,
                       dtype]) as name:
    on_exists = on_value is not None
    off_exists = off_value is not None

    on_dtype = (ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists
                else None)
    off_dtype = (ops.convert_to_tensor(off_value).dtype.base_dtype if off_exists
                 else None)

    if on_exists or off_exists:
      if dtype is not None:
        # Ensure provided on_value and/or off_value match dtype
        if on_exists and on_dtype != dtype:
          raise TypeError("dtype {0} of on_value does not match "
                          "dtype parameter {1}".format(on_dtype, dtype))
        if off_exists and off_dtype != dtype:
          raise TypeError("dtype {0} of off_value does not match "
                          "dtype parameter {1}".format(off_dtype, dtype))
      else:
        # dtype not provided: automatically assign it
        dtype = on_dtype if on_exists else off_dtype
    elif dtype is None:
      # None of on_value, off_value, or dtype provided. Default dtype to float32
      dtype = dtypes.float32

    if not on_exists:
      # on_value not provided: assign to value 1 of type dtype
      on_value = ops.convert_to_tensor(1, dtype, name="on_value")
      on_dtype = dtype
    if not off_exists:
      # off_value not provided: assign to value 0 of type dtype
      off_value = ops.convert_to_tensor(0, dtype, name="off_value")
      off_dtype = dtype

    if on_dtype != off_dtype:
      raise TypeError("dtype {0} of on_value does not match "
                      "dtype {1} of off_value".format(on_dtype, off_dtype))

    return gen_array_ops.one_hot(indices, depth, on_value, off_value, axis,
                                 name)


def _all_dimensions(x):
  """Returns a 1D-tensor listing all dimensions in x."""
  # Fast path: avoid creating Rank and Range ops if ndims is known.
  if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
    return constant_op.constant(
        np.arange(x.get_shape().ndims), dtype=dtypes.int32)
  if (isinstance(x, sparse_tensor.SparseTensor) and
      x.dense_shape.get_shape().is_fully_defined()):
    r = x.dense_shape.get_shape().dims[0].value  # sparse.dense_shape is 1-D.
    return constant_op.constant(np.arange(r), dtype=dtypes.int32)

  # Otherwise, we rely on `range` and `rank` to do the right thing at runtime.
  return gen_math_ops._range(0, rank(x), 1)


@tf_export("sequence_mask")
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
  """Returns a mask tensor representing the first N positions of each cell.

  If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has
  dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with

  ```
  mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])
  ```

  Examples:

  ```python
  tf.sequence_mask([1, 3, 2], 5)  # [[True, False, False, False, False],
                                  #  [True, True, True, False, False],
                                  #  [True, True, False, False, False]]

  tf.sequence_mask([[1, 3], [2, 0]])  # [[[True, False, False],
                                      #   [True, True, True]],
                                      #  [[True, True, False],
                                      #   [False, False, False]]]
  ```

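  The result can be cast to another type via `dtype` (a small illustration):

  ```python
  tf.sequence_mask([1, 2], 3, dtype=tf.float32)  # [[1., 0., 0.],
                                                 #  [1., 1., 0.]]
  ```
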
  Args:
    lengths: integer tensor, all its values <= maxlen.
    maxlen: scalar integer tensor, size of last dimension of returned tensor.
      Default is the maximum value in `lengths`.
    dtype: output type of the resulting tensor.
    name: name of the op.
  Returns:
    A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.
  Raises:
    ValueError: if `maxlen` is not a scalar.
  """
  with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
    lengths = ops.convert_to_tensor(lengths)

    if maxlen is None:
      maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))
      maxlen = gen_math_ops.maximum(constant(0, maxlen.dtype), maxlen)
    else:
      maxlen = ops.convert_to_tensor(maxlen)
    if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:
      raise ValueError("maxlen must be scalar for sequence_mask")

    # The basic idea is to compare a range row vector of size maxlen:
    # [0, 1, 2, 3, 4]
    # to length as a matrix with 1 column: [[1], [3], [2]].
    # Because of broadcasting on both arguments this comparison results
    # in a matrix of size (len(lengths), maxlen).
    row_vector = gen_math_ops._range(
        constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))
    # Since maxlen >= max(lengths), it is safe to use maxlen's dtype as the
    # authoritative type for the comparison: whenever maxlen fits into
    # tf.int32, so do the lengths.
    matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)
    result = row_vector < matrix

    if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
      return result
    else:
      return gen_math_ops.cast(result, dtype)


@tf_export(v1=["squeeze"])
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "squeeze_dims")
def squeeze(input, axis=None, name=None, squeeze_dims=None):
  # pylint: disable=redefined-builtin
  """Removes dimensions of size 1 from the shape of a tensor.

  Given a tensor `input`, this operation returns a tensor of the same type with
  all dimensions of size 1 removed. If you don't want to remove all size 1
  dimensions, you can remove specific size 1 dimensions by specifying
  `axis`.

  For example:

  ```python
  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
  tf.shape(tf.squeeze(t))  # [2, 3]
  ```

  Or, to remove specific size 1 dimensions:

  ```python
  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
  tf.shape(tf.squeeze(t, [2, 4]))  # [1, 2, 3, 1]
  ```

  Args:
    input: A `Tensor`. The `input` to squeeze.
    axis: An optional list of `ints`. Defaults to `[]`.
      If specified, only squeezes the dimensions listed. The dimension
      index starts at 0. It is an error to squeeze a dimension that is not 1.
      Must be in the range `[-rank(input), rank(input))`.
    name: A name for the operation (optional).
    squeeze_dims: Deprecated keyword argument that is now `axis`.

  Returns:
    A `Tensor`. Has the same type as `input`.
    Contains the same data as `input`, but has one or more dimensions of
    size 1 removed.

  Raises:
    ValueError: When both `squeeze_dims` and `axis` are specified.
  """
  axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "squeeze_dims", squeeze_dims)
  if np.isscalar(axis):
    axis = [axis]
  return gen_array_ops.squeeze(input, axis, name)


@tf_export("squeeze", v1=[])
def squeeze_v2(input, axis=None, name=None):
  # pylint: disable=redefined-builtin
  return squeeze(input, axis, name)


@tf_export("where")
@dispatch.add_dispatch_support
def where(condition, x=None, y=None, name=None):
  """Return the elements, either from `x` or `y`, depending on the `condition`.

  If both `x` and `y` are None, then this operation returns the coordinates of
  true elements of `condition`.  The coordinates are returned in a 2-D tensor
  where the first dimension (rows) represents the number of true elements, and
  the second dimension (columns) represents the coordinates of the true
  elements. Keep in mind, the shape of the output tensor can vary depending on
  how many true values there are in input. Indices are output in row-major
  order.

  If both `x` and `y` are non-None, they must have the same shape.
  The `condition` tensor must be a scalar if `x` and `y` are scalar.
  If `x` and `y` are vectors or of higher rank, then `condition` must be either
  a vector with size matching the first dimension of `x`, or must have the same
  shape as `x`.

  The `condition` tensor acts as a mask that chooses, based on the value at each
  element, whether the corresponding element / row in the output should be taken
  from `x` (if true) or `y` (if false).

  If `condition` is a vector and `x` and `y` are higher rank matrices, then it
  chooses which row (outer dimension) to copy from `x` and `y`. If `condition`
  has the same shape as `x` and `y`, then it chooses which element to copy from
  `x` and `y`.

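  For example, a minimal illustration (output values shown assume eager
  execution):

  ```python
  # With only `condition`: coordinates of the True elements.
  tf.where([[True, False], [False, True]])  # [[0, 0], [1, 1]]

  # With `x` and `y`: element-wise selection.
  tf.where([True, False, True], x=[1, 2, 3], y=[10, 20, 30])  # [1, 20, 3]
  ```
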
  Args:
    condition: A `Tensor` of type `bool`.
    x: A `Tensor` which may have the same shape as `condition`. If `condition`
      is rank 1, `x` may have higher rank, but its first dimension must match
      the size of `condition`.
    y: A `Tensor` with the same shape and type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type and shape as `x`, `y` if they are non-None.
    Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.

  Raises:
    ValueError: When exactly one of `x` or `y` is non-None.
  """
  if x is None and y is None:
    with ops.name_scope(name, "Where", [condition]) as name:
      condition = ops.convert_to_tensor(
          condition, preferred_dtype=dtypes.bool, name="condition")
      return gen_array_ops.where(condition=condition, name=name)
  elif x is not None and y is not None:
    return gen_math_ops.select(condition=condition, x=x, y=y, name=name)
  else:
    raise ValueError("x and y must both be non-None or both be None.")


# pylint: disable=redefined-builtin
@tf_export(v1=["reverse_sequence"])
@deprecation.deprecated_args(
    None, "seq_dim is deprecated, use seq_axis instead", "seq_dim")
@deprecation.deprecated_args(
    None, "batch_dim is deprecated, use batch_axis instead", "batch_dim")
def reverse_sequence(input,
                     seq_lengths,
                     seq_axis=None,
                     batch_axis=None,
                     name=None,
                     seq_dim=None,
                     batch_dim=None):
  seq_axis = deprecation.deprecated_argument_lookup("seq_axis", seq_axis,
                                                    "seq_dim", seq_dim)
  batch_axis = deprecation.deprecated_argument_lookup("batch_axis", batch_axis,
                                                      "batch_dim", batch_dim)
  return gen_array_ops.reverse_sequence(
      input=input,
      seq_lengths=seq_lengths,
      seq_dim=seq_axis,
      batch_dim=batch_axis,
      name=name)


reverse_sequence.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
    "seq_dim", "seq_axis")


@tf_export("reverse_sequence", v1=[])
def reverse_sequence_v2(
    input, seq_lengths, seq_axis=None, batch_axis=None, name=None):
  return gen_array_ops.reverse_sequence(
      input=input,
      seq_lengths=seq_lengths,
      seq_dim=seq_axis,
      batch_dim=batch_axis,
      name=name)


reverse_sequence_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
    "seq_dim", "seq_axis")

# pylint: enable=redefined-builtin


@tf_export(v1=["gather"])
@dispatch.add_dispatch_support
def gather(params,
           indices,
           validate_indices=None,
           name=None,
           axis=None,
           batch_dims=0):
  r"""Gather slices from `params` along axis `axis` according to `indices`.

  Gather slices from `params` axis `axis` according to `indices`.  `indices`
  must be an integer tensor of any dimension (usually 0-D or 1-D).

  For 0-D (scalar) `indices`:

  > `output`$$[p_0,          ..., p_{axis-1},        \hspace{5.1em}
  >            p_{axis + 1}, ..., p_{N-1}]$$ =\
  > `params`$$[p_0,          ..., p_{axis-1},        \hspace{1em}
  >            indices,                              \hspace{1em}
  >            p_{axis + 1}, ..., p_{N-1}]$$.

  For 1-D (vector) `indices` with `batch_dims=0`:

  > `output`$$[p_0,          ..., p_{axis-1},        \hspace{2.6em}
  >            i,                                    \hspace{2.6em}
  >            p_{axis + 1}, ..., p_{N-1}]$$ =\
  > `params`$$[p_0,          ..., p_{axis-1},        \hspace{1em}
  >            indices[i],                           \hspace{1em}
  >            p_{axis + 1}, ..., p_{N-1}]$$.

  In the general case, produces an output tensor where:

  > `output`$$[p_0,             ..., p_{axis-1},     \hspace{1.2em}
  >            i_{batch\_dims}, ..., i_{M-1},        \hspace{1.3em}
  >            p_{axis + 1},    ..., p_{N-1}]$$ =\
  > `params`$$[p_0,             ..., p_{axis-1},     \hspace{1em}
  >            indices[i_0,     ..., i_{M-1}],       \hspace{1em}
  >            p_{axis + 1},    ..., p_{N-1}]$$.

  Where $$N$$=`ndims(params)` and $$M$$=`ndims(indices)`.
  The shape of the output tensor is:

  > `output.shape = params.shape[:axis] + indices.shape[batch_dims:] +
  > params.shape[axis + 1:]`.

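  For example, a minimal illustration (values shown assume eager execution):

  ```python
  params = tf.constant([[0., 1., 2.],
                        [10., 11., 12.],
                        [20., 21., 22.]])
  tf.gather(params, [2, 0])          # [[20., 21., 22.], [0., 1., 2.]]
  tf.gather(params, [2, 0], axis=1)  # [[2., 0.], [12., 10.], [22., 20.]]
  ```
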
  Note that on CPU, if an out of bound index is found, an error is returned.
  On GPU, if an out of bound index is found, a 0 is stored in the corresponding
  output value.

  See also `tf.gather_nd`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png"
  alt>
  </div>

  Args:
    params: The `Tensor` from which to gather values. Must be at least rank
      `axis + 1`.
    indices: The index `Tensor`.  Must be one of the following types: `int32`,
      `int64`. Must be in range `[0, params.shape[axis])`.
    validate_indices: Deprecated, does nothing.
    name: A name for the operation (optional).
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
      `axis` in `params` to gather `indices` from. Must be greater than or equal
      to `batch_dims`.  Defaults to the first non-batch dimension. Supports
      negative indexes.
    batch_dims: An `integer`.  The number of batch dimensions.  Must be less
      than `rank(indices)`.

  Returns:
    A `Tensor`. Has the same type as `params`.
  """
  del validate_indices
  if batch_dims != 0:
    with ops.name_scope(name, "Gather", [params, indices, axis]):
      return _batch_gather(params, indices, batch_dims, axis)
  if axis is None:
    axis = batch_dims
  if axis != 0:
    # Note that we do a sparse_read here to avoid snapshotting the entire
    # resource variable and doing a gather, which can be inefficient and lead to
    # subtle race conditions. TODO(apassos) implement axis != 0 on sparse_read
    return gen_array_ops.gather_v2(params, indices, axis, name=name)
  try:
    # TODO(apassos) find a less bad way of detecting resource variables without
    # introducing a circular dependency.
    return params.sparse_read(indices, name=name)
  except AttributeError:
    return gen_array_ops.gather_v2(params, indices, axis, name=name)


@tf_export("gather", v1=[])
@dispatch.add_dispatch_support
def gather_v2(params, indices, validate_indices=None, axis=None,
              batch_dims=0, name=None):
  return gather(params, indices, validate_indices=validate_indices, name=name,
                axis=axis, batch_dims=batch_dims)


gather.__doc__ = gather_v2.__doc__ = gen_array_ops.gather_v2.__doc__


@tf_export(v1=["batch_gather"])
@dispatch.add_dispatch_support
@deprecation.deprecated(
    "2017-10-25", "`tf.batch_gather` is deprecated, please use `tf.gather` "
    "with `batch_dims` instead.")  # pylint: disable=missing-docstring
def batch_gather(params, indices, name=None):
  """Gather slices from params according to indices with leading batch dims."""
  with ops.name_scope(name, "BatchGather", [params, indices]):
    indices = ops.convert_to_tensor(indices, name="indices")
    params = ops.convert_to_tensor(params, name="params")
    if indices.shape.ndims is None:
      raise ValueError(
          "batch_gather does not allow indices with unknown shape.")
    return _batch_gather(params, indices, batch_dims=indices.shape.ndims - 1)


def _batch_gather(params, indices, batch_dims, axis=None):
  r"""Gather slices from params according to indices with leading batch dims.

  This operation assumes that the leading `batch_dims` dimensions of `indices`
  and `params` are batch dimensions; and performs a `tf.gather` operation within
  each batch. (If `batch_dims` is not specified, then it defaults to
  `rank(indices)-1`.)  In the case in which `batch_dims==0`, this operation
  is equivalent to `tf.gather`.

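  For example, a small illustration (hypothetical values; the result assumes
  eager execution):

  ```python
  params = [[1, 2, 3], [4, 5, 6]]
  indices = [[1], [0]]
  # With batch_dims=1, gathers within each batch row: [[2], [4]]
  ```
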
  Args:
    params: A Tensor. The tensor from which to gather values.
    indices: A Tensor. Must be one of the following types: int32, int64. Index
      tensor. Must be in range `[0, params.shape[batch_dims])`.
    batch_dims: An integer or None.  The number of batch dimensions.  Must be
      less than `rank(indices)`.  Defaults to `rank(indices) - 1` if None.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
      `axis` in `params` to gather `indices` from. Must be greater than or equal
      to `batch_dims`.  Defaults to the first non-batch dimension. Supports
      negative indexes.

  Returns:
    A Tensor. Has the same type as `params`.

  Raises:
    ValueError: if `indices` has an unknown shape.
  """
  if batch_dims is not None and not isinstance(batch_dims, int):
    raise TypeError("batch_dims must be an int; got %r" % batch_dims)
  indices = ops.convert_to_tensor(indices, name="indices")
  params = ops.convert_to_tensor(params, name="params")

  indices_ndims = indices.shape.ndims
  if indices_ndims is None:
    raise ValueError("tf.gather does not allow indices with unknown "
                     "rank when batch_dims is specified.")
  if batch_dims is None:
    batch_dims = indices_ndims - 1
  if batch_dims < 0:
    batch_dims += indices_ndims
  if batch_dims < 0 or batch_dims >= indices_ndims:
    raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
                     (batch_dims, indices_ndims))
  if params.shape.ndims is not None and batch_dims >= params.shape.ndims:
    raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
                     (batch_dims, params.shape.ndims))

  # Handle axis by transposing the axis dimension to be the first non-batch
  # dimension, recursively calling `_batch_gather` with the default axis, and
  # then transposing the result to put the pre-axis dimensions before the
  # indices dimensions.
  if axis is not None and axis != batch_dims:
    # Adjust axis to be positive.
    if not isinstance(axis, int):
      # `axis` is a tensor whose sign is unknown statically; use the
      # module-level `where` and `rank` defined in this file (`tf.where` and
      # `array_ops.rank` are not in scope here).
      axis = where(axis < 0, axis + rank(params), axis)
    elif axis < 0 and params.shape.ndims is None:
      axis = axis + rank(params)
    else:
      if (axis < -params.shape.ndims) or (axis >= params.shape.ndims):
        raise ValueError("axis (%d) out of range [%d, %d)" %
                         (axis, -params.shape.ndims, params.shape.ndims))
      if axis < 0:
        axis += params.shape.ndims
      if axis < batch_dims:
        raise ValueError("batch_dims = %d must be less than or equal to "
                         "axis = %d" % (batch_dims, axis))

    # Move params[axis] up to params[batch_dims].
    perm = [
        list(range(batch_dims)), [axis],
        gen_math_ops._range(batch_dims, axis, 1),
        gen_math_ops._range(axis + 1, rank(params), 1)
    ]
    params = transpose(params, concat(perm, axis=0))

    result = _batch_gather(params, indices, batch_dims=batch_dims)

    # Move the result dimensions corresponding to params[batch_dims:axis]
    # to just before the dimensions corresponding to indices[batch_dims:].
    params_start = indices_ndims + axis - batch_dims
    perm = [
        list(range(batch_dims)),
        gen_math_ops._range(indices_ndims, params_start, 1),
        list(range(batch_dims, indices_ndims)),
        gen_math_ops._range(params_start, rank(result), 1)
    ]
    return transpose(result, perm=concat(perm, axis=0))

  indices_shape = shape(indices)
  params_shape = shape(params)
  batch_indices = indices
  indices_dtype = indices.dtype.base_dtype
  accum_dim_value = ones((), dtype=indices_dtype)
  # Use correct type for offset index computation
  casted_params_shape = gen_math_ops.cast(params_shape, indices_dtype)
  for dim in range(batch_dims, 0, -1):
    dim_value = casted_params_shape[dim - 1]
    accum_dim_value *= casted_params_shape[dim]
    start = zeros((), dtype=indices_dtype)
    step = ones((), dtype=indices_dtype)
    dim_indices = gen_math_ops._range(start, dim_value, step)
    dim_indices *= accum_dim_value
    dim_shape = stack(
        [1] * (dim - 1) + [dim_value] + [1] * (indices_ndims - dim), axis=0)
    batch_indices += reshape(dim_indices, dim_shape)

  flat_indices = reshape(batch_indices, [-1])
  outer_shape = params_shape[batch_dims + 1:]
  flat_inner_shape = gen_math_ops.prod(params_shape[:batch_dims + 1], [0],
                                       False)

  flat_params = reshape(params, concat([[flat_inner_shape], outer_shape],
                                       axis=0))
  flat_result = gather(flat_params, flat_indices)
  result = reshape(flat_result, concat([indices_shape, outer_shape], axis=0))
  final_shape = indices.get_shape()[:batch_dims].merge_with(
      params.get_shape()[:batch_dims])
  final_shape = final_shape.concatenate(indices.get_shape().dims[batch_dims:])
  final_shape = final_shape.concatenate(params.get_shape()[batch_dims + 1:])
  result.set_shape(final_shape)
  return result


# Define quantize_v2 here in order to make name the second-to-last attribute,
# because round_mode was added later.
@tf_export(v1=["quantize_v2"])
@deprecation.deprecated(
    "2017-10-25",
    "`tf.quantize_v2` is deprecated, please use `tf.quantization.quantize` "
    "instead.")  # pylint: disable=missing-docstring
def quantize_v2(input,  # pylint: disable=redefined-builtin
                min_range,
                max_range,
                T,
                mode="MIN_COMBINED",
                name=None,
                round_mode="HALF_AWAY_FROM_ZERO"):
  return gen_array_ops.quantize_v2(input,
                                   min_range,
                                   max_range,
                                   T=T,
                                   mode=mode,
                                   name=name,
                                   round_mode=round_mode)


quantize_v2.__doc__ = """Please use `tf.quantization.quantize` instead."""


# We want to expose tf.quantize instead of tf.quantize_v2; we can deprecate
# tf.quantize_v2 in the next version of TensorFlow.
@tf_export("quantization.quantize", v1=["quantization.quantize", "quantize"])
@deprecation.deprecated_endpoints("quantize")
def quantize(input,  # pylint: disable=redefined-builtin
             min_range,
             max_range,
             T,
             mode="MIN_COMBINED",
             round_mode="HALF_AWAY_FROM_ZERO",
             name=None):
  return gen_array_ops.quantize_v2(
      input,
      min_range,
      max_range,
      T,
      mode=mode,
      round_mode=round_mode,
      name=name)


quantize.__doc__ = gen_array_ops.quantize_v2.__doc__


@tf_export("searchsorted")
def searchsorted(sorted_sequence,
                 values,
                 side="left",
                 out_type=dtypes.int32,
                 name=None):
  """Searches input tensor for values on the innermost dimension.

  A 2-D example:

  ```
    sorted_sequence = [[0, 3, 9, 9, 10],
                       [1, 2, 3, 4, 5]]
    values = [[2, 4, 9],
              [0, 2, 6]]

    result = searchsorted(sorted_sequence, values, side="left")

    result == [[1, 2, 2],
               [0, 1, 5]]

    result = searchsorted(sorted_sequence, values, side="right")

    result == [[1, 2, 4],
               [0, 2, 5]]
  ```

  Args:
    sorted_sequence: N-D `Tensor` containing a sorted sequence.
    values: N-D `Tensor` containing the search values.
    side: 'left' or 'right'; 'left' corresponds to lower_bound and 'right' to
      upper_bound.
    out_type: The output type (`int32` or `int64`).  Default is `tf.int32`.
    name: Optional name for the operation.

  Returns:
    An N-D `Tensor` the size of `values` containing the result of applying
    either lower_bound or upper_bound (depending on `side`) to each value. The
    result is not a global index into the entire `Tensor`, but an index into
    the last dimension.

  Raises:
    ValueError: If the last dimension of `sorted_sequence` has `>= 2^31 - 1`
      elements, if the total size of `values` exceeds `2^31 - 1` elements, or
      if the first `N-1` dimensions of the two tensors don't match.
  """
  sequence_size = shape_internal(sorted_sequence)[-1]
  values_size = shape_internal(values)[-1]
  sorted_sequence_2d = reshape(sorted_sequence, [-1, sequence_size])
  values_2d = reshape(values, [-1, values_size])
  if side == "right":
    output = gen_array_ops.upper_bound(sorted_sequence_2d, values_2d, out_type,
                                       name)
  elif side == "left":
    output = gen_array_ops.lower_bound(sorted_sequence_2d, values_2d, out_type,
                                       name)
  else:
    raise ValueError("side must be either 'right' or 'left'.  Saw: %s." % side)
  return reshape(output, shape_internal(values))


@tf_export("image.extract_image_patches", v1=[])
def extract_image_patches_v2(
    images,
    sizes,
    strides,
    rates,
    padding,
    name=None):
  # pylint: disable=line-too-long
  r"""Extract `patches` from `images` and put them in the "depth" output dimension.

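  For example, a minimal sketch (hypothetical values; the shapes assume eager
  execution):

  ```python
  # A single 10x10 one-channel image.
  images = tf.reshape(tf.range(100, dtype=tf.float32), [1, 10, 10, 1])
  patches = tf.image.extract_image_patches(
      images, sizes=[1, 3, 3, 1], strides=[1, 5, 5, 1],
      rates=[1, 1, 1, 1], padding="VALID")
  # patches.shape == [1, 2, 2, 9]: four 3x3 patches flattened into depth.
  ```
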
  Args:
    images: A 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
    sizes: The size of the sliding window for each dimension of `images`.
    strides: A 1-D Tensor of length 4. How far the centers of two consecutive
      patches are in the images. Must be: `[1, stride_rows, stride_cols, 1]`.
    rates: A 1-D Tensor of length 4. Must be: `[1, rate_rows, rate_cols, 1]`.
      This is the input stride, specifying how far two consecutive patch samples
      are in the input. Equivalent to extracting patches with `patch_sizes_eff =
      patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling
      them spatially by a factor of `rates`. This is equivalent to `rate` in
      dilated (a.k.a. Atrous) convolutions.
    padding: The type of padding algorithm to use.
      We specify the size-related attributes as:

      ```python
      ksizes = [1, ksize_rows, ksize_cols, 1]
      strides = [1, strides_rows, strides_cols, 1]
      rates = [1, rates_rows, rates_cols, 1]
      ```
    name: A name for the operation (optional).

  Returns:
    A 4-D Tensor. Has the same type as `images`, and with shape `[batch,
    out_rows, out_cols, ksize_rows * ksize_cols * depth]` containing image
    patches with size `ksize_rows x ksize_cols x depth` vectorized in the
    "depth" dimension. Note `out_rows` and `out_cols` are the dimensions of
    the output patches.
  """
  # pylint: enable=line-too-long
  return gen_array_ops.extract_image_patches(
      images, sizes, strides, rates, padding, name)


@tf_export(v1=["image.extract_image_patches", "extract_image_patches"])
@deprecation.deprecated_args(
    None, "ksizes is deprecated, use sizes instead", "ksizes")
def extract_image_patches(  # pylint: disable=missing-docstring
    images,
    ksizes=None,
    strides=None,
    rates=None,
    padding=None,
    name=None,
    sizes=None):
  ksizes = deprecation.deprecated_argument_lookup(
      "sizes", sizes, "ksizes", ksizes)
  return gen_array_ops.extract_image_patches(
      images, ksizes, strides, rates, padding, name)


extract_image_patches.__doc__ = gen_array_ops.extract_image_patches.__doc__
