# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Math Operations.

Note: Functions taking `Tensor` arguments can also take anything accepted by
`tf.convert_to_tensor`.

Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).

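For example, a scalar is broadcast against a vector elementwise (a minimal
illustration of the numpy rules linked above):

```python
tf.constant([1, 2, 3]) + tf.constant(1)  # ==> [2, 3, 4]
```
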
TensorFlow provides a variety of math functions including:

* Basic arithmetic operators and trigonometric functions.
* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)
* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)
* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)
* Segment functions (like: `tf.math.segment_sum`)

See: `tf.linalg` for matrix and tensor functions.

<a id=Segmentation></a>

## About Segmentation

TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here, a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.

For example:

```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
#  ==>  [[0 0 0 0]
#        [5 6 7 8]]
```

The standard `segment_*` functions assert that the segment indices are sorted.
If you have unsorted indices, use the equivalent `unsorted_segment_*` function.
These functions take an additional argument `num_segments` so that the output
tensor can be efficiently allocated.

``` python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 6,  8, 10, 12],
#       [-1, -2, -3, -4]]
```

"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numbers
import numpy as np
import six
from six.moves import builtins
from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import traceback_utils
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export


np_dtypes = LazyLoader(
    "np_dtypes", globals(),
    "tensorflow.python.ops.numpy_ops.np_dtypes")


# Aliases for some automatically-generated names.
nextafter = gen_math_ops.next_after


@tf_export("linspace", v1=["lin_space", "linspace"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("lin_space")
def linspace_nd(start, stop, num, name=None, axis=0):
  r"""Generates evenly-spaced values in an interval along a given axis.

  A sequence of `num` evenly-spaced values is generated beginning at `start`
  along a given `axis`.
  If `num > 1`, the values in the sequence increase by
  `(stop - start) / (num - 1)`, so that the last one is exactly `stop`.
  If `num <= 0`, `ValueError` is raised.

  Matches
  [np.linspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)'s
  behaviour, except when `num == 0`.

  For example:

  ```
  tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
  ```

  `start` and `stop` can be tensors of arbitrary size:

  >>> tf.linspace([0., 5.], [10., 40.], 5, axis=0)
  <tf.Tensor: shape=(5, 2), dtype=float32, numpy=
  array([[ 0.  ,  5.  ],
         [ 2.5 , 13.75],
         [ 5.  , 22.5 ],
         [ 7.5 , 31.25],
         [10.  , 40.  ]], dtype=float32)>

  `axis` is where the values will be generated (the dimension in the returned
  tensor which corresponds to the axis will be equal to `num`):

  >>> tf.linspace([0., 5.], [10., 40.], 5, axis=-1)
  <tf.Tensor: shape=(2, 5), dtype=float32, numpy=
  array([[ 0.  ,  2.5 ,  5.  ,  7.5 , 10.  ],
         [ 5.  , 13.75, 22.5 , 31.25, 40.  ]], dtype=float32)>

  Args:
    start: A `Tensor`. Must be one of the following types: `bfloat16`,
      `float32`, `float64`. N-D tensor. First entry in the range.
    stop: A `Tensor`. Must have the same type and shape as `start`. N-D tensor.
      Last entry in the range.
    num: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D
      tensor. Number of values to generate.
    name: A name for the operation (optional).
    axis: Axis along which the operation is performed (used only when N-D
      tensors are provided).

  Returns:
    A `Tensor`. Has the same type as `start`.
  """

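  # Implementation outline: broadcast `start` and `stop` against each other,
  # insert a length-1 dimension at `axis`, compute the `num - 2` interior
  # points as `start + i * delta`, then concatenate the exact endpoints back
  # on and slice, so that `num == 0` and `num == 1` also produce correctly
  # shaped results.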
  with ops.name_scope(name, "linspace", [start, stop]):
    start = ops.convert_to_tensor(start, name="start")
    # stop must be convertible to the same dtype as start
    stop = ops.convert_to_tensor(stop, name="stop", dtype=start.dtype)
    num_int = array_ops.convert_to_int_tensor(num, name="num")
    num = cast(num_int, dtype=start.dtype)

    broadcast_shape = array_ops.broadcast_dynamic_shape(
        array_ops.shape(start), array_ops.shape(stop))
    start = array_ops.broadcast_to(start, broadcast_shape)
    stop = array_ops.broadcast_to(stop, broadcast_shape)

    expanded_start = array_ops.expand_dims(start, axis=axis)
    expanded_stop = array_ops.expand_dims(stop, axis=axis)

    shape = array_ops.shape(expanded_start)
    ndims = array_ops.shape(shape)[0]

    axis = array_ops.where_v2(axis >= 0, axis, ndims + axis)

    # The purpose is to avoid having negative values when repeating.
    num_fill = gen_math_ops.maximum(num_int - 2, 0)
    # To avoid negative values in the range or division by zero, the result
    # is sliced at the end, so a correct result is returned for num == 1
    # and num == 0.
    n_steps = gen_math_ops.maximum(num_int - 1, 1)
    delta = (expanded_stop - expanded_start) / cast(n_steps,
                                                    expanded_stop.dtype)
    # Re-cast tensors as delta.
    expanded_start = cast(expanded_start, delta.dtype)
    expanded_stop = cast(expanded_stop, delta.dtype)
    # If num < 0, the range op below raises an exception; otherwise the same
    # divisor used for delta is reused.
    range_end = array_ops.where_v2(num_int >= 0, n_steps, -1)
    # Even though range supports an output dtype, it's limited
    # (e.g. it doesn't support half at the moment).
    desired_range = cast(range(1, range_end, dtype=dtypes.int64), delta.dtype)
    mask = gen_math_ops.equal(axis, range(ndims))
    # desired_range_shape is [1, 1, ..., 1, num_fill, 1, ..., 1], where the
    # index of num_fill is equal to axis.
    desired_range_shape = array_ops.where_v2(mask, num_fill, 1)
    desired_range = array_ops.reshape(desired_range, desired_range_shape)

    res = expanded_start + delta * desired_range

    # Add the start and endpoints to the result, and slice out the desired
    # portion.
    all_tensors = (expanded_start, res, expanded_stop)
    concatenated = array_ops.concat(all_tensors, axis=axis)
    begin = array_ops.zeros_like(shape)
    size = array_ops.where_v2(mask, num_int, shape)

    return array_ops.slice(concatenated, begin, size)


linspace = linspace_nd

arg_max = deprecation.deprecated(None, "Use `tf.math.argmax` instead")(arg_max)  # pylint: disable=used-before-assignment
arg_min = deprecation.deprecated(None, "Use `tf.math.argmin` instead")(arg_min)  # pylint: disable=used-before-assignment
tf_export(v1=["arg_max"])(dispatch.add_dispatch_support(arg_max))
tf_export(v1=["arg_min"])(dispatch.add_dispatch_support(arg_min))


# This is set by resource_variable_ops.py. It is included in this way since
# there is a circular dependency between math_ops and resource_variable_ops.
_resource_variable_type = None


def _set_doc(doc):
  """Returns a decorator that sets a function's docstring to `doc`."""

  def _decorator(func):
    func.__doc__ = doc
    return func

  return _decorator


# pylint: disable=redefined-builtin
@tf_export(v1=["math.argmax", "argmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_max.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmax(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmax_v2(input, axis, output_type, name)


@tf_export("math.argmax", "argmax", v1=[])
@dispatch.add_dispatch_support
def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None):
  """Returns the index with the largest value across axes of a tensor.

  In case of ties, returns the smallest index.

  For example:

  >>> A = tf.constant([2, 20, 30, 3, 6])
  >>> tf.math.argmax(A)  # A[2] is maximum in tensor A
  <tf.Tensor: shape=(), dtype=int64, numpy=2>
  >>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8],
  ...                  [14, 45, 23, 5, 27]])
  >>> tf.math.argmax(B, 0)
  <tf.Tensor: shape=(5,), dtype=int64, numpy=array([2, 2, 0, 2, 2])>
  >>> tf.math.argmax(B, 1)
  <tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 2, 1])>
  >>> C = tf.constant([0, 0, 0, 0])
  >>> tf.math.argmax(C)  # Returns smallest index in case of ties
  <tf.Tensor: shape=(), dtype=int64, numpy=0>

  Args:
    input: A `Tensor`.
    axis: An integer, the axis to reduce across. Defaults to 0.
    output_type: An optional output dtype (`tf.int32` or `tf.int64`). Defaults
      to `tf.int64`.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of type `output_type`.
  """
  if axis is None:
    axis = 0
  return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)


@tf_export(v1=["math.argmin", "argmin"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_min.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmin(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmin_v2(input, axis, output_type, name)


@tf_export("math.argmin", "argmin", v1=[])
@dispatch.add_dispatch_support
def argmin_v2(input, axis=None, output_type=dtypes.int64, name=None):
  """Returns the index with the smallest value across axes of a tensor.

  Returns the smallest index in case of ties.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
      `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`,
      `uint32`, `uint64`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      int32 or int64, must be in the range `[-rank(input), rank(input))`.
      Describes which axis of the input Tensor to reduce across. For vectors,
      use axis = 0.
    output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to
      `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `output_type`.

  Usage:
  ```python
  import tensorflow as tf
  a = [1, 10, 26.9, 2.8, 166.32, 62.3]
  b = tf.math.argmin(input = a)
  c = tf.keras.backend.eval(b)
  # c = 0
  # here a[0] = 1 which is the smallest element of a across axis 0
  ```
  """
  if axis is None:
    axis = 0
  return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)


# pylint: enable=redefined-builtin


# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
@tf_export("math.abs", "abs")
@dispatch.add_dispatch_support
def abs(x, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the absolute value of a tensor.

  Given a tensor of integer or floating-point values, this operation returns a
  tensor of the same type, where each element contains the absolute value of
  the corresponding element in the input.

  Given a tensor `x` of complex numbers, this operation returns a tensor of
  type `float32` or `float64` that is the absolute value of each element in
  `x`. For a complex number \\(a + bj\\), its absolute value is computed as
  \\(\sqrt{a^2 + b^2}\\).

  For example:

  >>> # real number
  >>> x = tf.constant([-2.25, 3.25])
  >>> tf.abs(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([2.25, 3.25], dtype=float32)>

  >>> # complex number
  >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
  >>> tf.abs(x)
  <tf.Tensor: shape=(2, 1), dtype=float64, numpy=
  array([[5.25594901],
         [6.60492241]])>

  Args:
    x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,
      `int32`, `int64`, `complex64` or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`,
      with absolute values. Note, for `complex64` or `complex128` input, the
      returned `Tensor` will be of type `float32` or `float64`, respectively.
  """
  with ops.name_scope(name, "Abs", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_complex:
      return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
    return gen_math_ops._abs(x, name=name)


# pylint: enable=g-docstring-has-escape


# pylint: disable=redefined-builtin
def _bucketize(input, boundaries, name=None):
  return gen_math_ops.bucketize(input=input, boundaries=boundaries, name=name)


# pylint: enable=redefined-builtin


class DivideDelegateWithName(object):
  """Use Python2/Python3 division delegation to implement divide for tensors."""

  def __init__(self, x, name):
    """Construct DivideDelegateWithName.

    Args:
      x: Tensor to use as the left operand in operator overloads.
      name: The name that is preferred for the op created.
    """
    self.x = x
    self.name = name

  def __truediv__(self, y):
    return _truediv_python3(self.x, y, self.name)

  def __floordiv__(self, y):
    return floordiv(self.x, y, self.name)

  def __div__(self, y):
    return _div_python2(self.x, y, self.name)


@tf_export("math.divide", "divide")
@dispatch.add_dispatch_support
def divide(x, y, name=None):
  """Computes Python-style division of `x` by `y`.

  For example:

  >>> x = tf.constant([16, 12, 11])
  >>> y = tf.constant([4, 6, 2])
  >>> tf.divide(x,y)
  <tf.Tensor: shape=(3,), dtype=float64,
  numpy=array([4. , 2. , 5.5])>

  Args:
    x: A `Tensor`.
    y: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same shape as the input.
  """

  if name is not None:
    # Cannot use the tensor operator overloads, because they have no way to
    # track an override name. Use a delegate class to track the runtime
    # division behavior.
    return DivideDelegateWithName(x, name) / y
  else:
    # We do conversion here to make sure at least x is a tensor.
    if not tensor_util.is_tf_type(x):
      dtype = y.dtype.base_dtype if tensor_util.is_tf_type(y) else None
      x = ops.convert_to_tensor(x, dtype=dtype)
    return x / y


@tf_export("math.multiply", "multiply")
@dispatch.add_dispatch_support
def multiply(x, y, name=None):
  """Returns an element-wise x * y.

  For example:

  >>> x = tf.constant(([1, 2, 3, 4]))
  >>> tf.math.multiply(x, x)
  <tf.Tensor: shape=(4,), dtype=..., numpy=array([ 1,  4,  9, 16], dtype=int32)>

  Since `tf.math.multiply` will convert its arguments to `Tensor`s, you can
  also pass in non-`Tensor` arguments:

  >>> tf.math.multiply(7, 6)
  <tf.Tensor: shape=(), dtype=int32, numpy=42>

  If `x.shape` is not the same as `y.shape`, they will be broadcast to a
  compatible shape. (More about broadcasting
  [here](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).)

  For example:

  >>> x = tf.ones([1, 2])
  >>> y = tf.ones([2, 1])
  >>> x * y  # Taking advantage of operator overriding
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
  array([[1., 1.],
         [1., 1.]], dtype=float32)>

  The reduction version of this elementwise operation is `tf.math.reduce_prod`.

  Args:
    x: A Tensor. Must be one of the following types: `bfloat16`,
      `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`,
      `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.

  Raises:
    InvalidArgumentError: When `x` and `y` have incompatible shapes or types.
  """

  return gen_math_ops.mul(x, y, name)


# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
    "2016-12-30",
    "`tf.mul(x, y)` is deprecated; use `tf.math.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
  return gen_math_ops.mul(x, y, name)


_mul.__doc__ = (
    gen_math_ops.mul.__doc__ + ("" if _mul.__doc__ is None else _mul.__doc__))


@tf_export("math.subtract", "subtract")
@dispatch.add_dispatch_support
def subtract(x, y, name=None):
  return gen_math_ops.sub(x, y, name)


subtract.__doc__ = gen_math_ops.sub.__doc__


# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
    "2016-12-30",
    "`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
  return gen_math_ops.sub(x, y, name)


_sub.__doc__ = (
    gen_math_ops.sub.__doc__ + ("" if _sub.__doc__ is None else _sub.__doc__))

negative = gen_math_ops.neg


# pylint: disable=g-docstring-has-escape
@deprecation.deprecated(
    "2016-12-30",
    "`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
  """Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  return negative(x, name)


# pylint: enable=g-docstring-has-escape


@tf_export(v1=["math.scalar_mul", "scalar_mul"])
@dispatch.add_dispatch_support
def scalar_mul(scalar, x, name=None):
  """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.

  This is a special case of `tf.math.multiply`, where the first value must be a
  `scalar`. Unlike the general form of `tf.math.multiply`, this operation is
  guaranteed to be efficient for `tf.IndexedSlices`.

  >>> x = tf.reshape(tf.range(30, dtype=tf.float32), [10, 3])
  >>> with tf.GradientTape() as g:
  ...   g.watch(x)
  ...   y = tf.gather(x, [1, 2])  # IndexedSlices
  ...   z = tf.math.scalar_mul(10.0, y)

  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.
    name: A name for the operation (optional).

  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.

  Raises:
    ValueError: If `scalar` is not a 0-D `Tensor`.
  """
  scalar = ops.convert_to_tensor(
      scalar, dtype=x.dtype.base_dtype, name="scalar")
  shape = scalar.get_shape()
  if shape.ndims == 0:
    if isinstance(x, ops.IndexedSlices):
      return ops.IndexedSlices(
          gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)
    else:
      return gen_math_ops.mul(scalar, x, name)
  else:
    raise ValueError(
        f"The input scalar must be a 0-D value. Received shape {shape}.")


@tf_export("math.softplus", "nn.softplus", v1=["math.softplus", "nn.softplus"])
@dispatch.add_dispatch_support
def softplus(features, name=None):
  """Computes elementwise softplus: `softplus(x) = log(exp(x) + 1)`.

  `softplus` is a smooth approximation of `relu`. Like `relu`, `softplus` always
  takes on positive values.

  <img style="width:100%" src="https://www.tensorflow.org/images/softplus.png">

  Example:

  >>> import tensorflow as tf
  >>> tf.math.softplus(tf.range(0, 2, dtype=tf.float32)).numpy()
  array([0.6931472, 1.3132616], dtype=float32)

  Args:
    features: `Tensor`
    name: Optional name to associate with this operation.

  Returns:
    `Tensor`
  """
  return gen_nn_ops.softplus(features, name)


@tf_export("math.scalar_mul", "scalar_mul", v1=[])
@dispatch.add_dispatch_support
@_set_doc(scalar_mul.__doc__)
def scalar_mul_v2(scalar, x, name=None):
  with ops.name_scope(name, "scalar_mul", [x]) as name:
    return scalar_mul(scalar, x, name)


@tf_export("math.pow", "pow")
@dispatch.add_dispatch_support
def pow(x, y, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
  corresponding elements in `x` and `y`. For example:

  ```python
  x = tf.constant([[2, 2], [3, 3]])
  y = tf.constant([[8, 16], [2, 3]])
  tf.pow(x, y)  # [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  with ops.name_scope(name, "Pow", [x]) as name:
    return gen_math_ops._pow(x, y, name=name)


# pylint: disable=redefined-builtin,redefined-outer-name
@tf_export("dtypes.complex", "complex")
@dispatch.add_dispatch_support
def complex(real, imag, name=None):
  r"""Converts two real numbers to a complex number.

  Given a tensor `real` representing the real part of a complex number, and a
  tensor `imag` representing the imaginary part of a complex number, this
  operation returns complex numbers elementwise of the form \\(a + bj\\), where
  *a* represents the `real` part and *b* represents the `imag` part.

  The input tensors `real` and `imag` must have the same shape.

  For example:

  ```python
  real = tf.constant([2.25, 3.25])
  imag = tf.constant([4.75, 5.75])
  tf.complex(real, imag)  # [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64` or `complex128`.

  Raises:
    TypeError: If `real` and `imag` have incorrect or inconsistent types.
  """
  real = ops.convert_to_tensor(real, name="real")
  imag = ops.convert_to_tensor(imag, name="imag")
  with ops.name_scope(name, "Complex", [real, imag]) as name:
    input_types = (real.dtype, imag.dtype)
    if input_types == (dtypes.float64, dtypes.float64):
      Tout = dtypes.complex128
    elif input_types == (dtypes.float32, dtypes.float32):
      Tout = dtypes.complex64
    else:
      raise TypeError(
          f"The `real` and `imag` components have incorrect types: "
          f"{real.dtype.name} {imag.dtype.name}. They must be consistent, and "
          f"one of {[dtypes.float32, dtypes.float64]}")
    return gen_math_ops._complex(real, imag, Tout=Tout, name=name)


@tf_export("math.sign", "sign")
@dispatch.add_dispatch_support
def sign(x, name=None):
  r"""Returns an element-wise indication of the sign of a number.

  `y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0`.

  For complex numbers, `y = sign(x) = x / |x| if x != 0, otherwise y = 0`.

  Example usage:

  >>> # real number
  >>> tf.math.sign([0., 2., -3.])
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([ 0.,  1., -1.], dtype=float32)>

  >>> # complex number
  >>> tf.math.sign([1 + 1j, 0 + 0j])
  <tf.Tensor: shape=(2,), dtype=complex128,
  numpy=array([0.70710678+0.70710678j, 0.        +0.j        ])>

  Args:
   x: A Tensor. Must be one of the following types: bfloat16, half, float32,
     float64, int32, int64, complex64, complex128.
   name: A name for the operation (optional).

  Returns:
   A Tensor. Has the same type as x.

   If x is a SparseTensor, returns SparseTensor(x.indices,
     tf.math.sign(x.values, ...), x.dense_shape).
  """
  x = ops.convert_to_tensor(x)
  if x.dtype.is_complex:
    return gen_math_ops.div_no_nan(
        x,
        cast(
            gen_math_ops.complex_abs(
                x,
                Tout=dtypes.float32
                if x.dtype == dtypes.complex64 else dtypes.float64),
            dtype=x.dtype),
        name=name)
  return gen_math_ops.sign(x, name=name)


@tf_export("math.real", v1=["math.real", "real"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("real")
def real(input, name=None):
  r"""Returns the real part of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the real part of each element in `input` considered as a complex number.

  For example:

  ```python
  x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  tf.math.real(x)  # [-2.25, 3.25]
  ```

  If `input` is already real, it is returned unchanged.

  Args:
    input: A `Tensor`. Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Real", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      real_dtype = input.dtype.real_dtype
      return gen_math_ops.real(input, Tout=real_dtype, name=name)
    else:
      return input


@tf_export("math.imag", v1=["math.imag", "imag"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("imag")
def imag(input, name=None):
  r"""Returns the imaginary part of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the imaginary part of each element in `input` considered as a complex
  number. If `input` is real, a tensor of all zeros is returned.

  For example:

  ```python
  x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  tf.math.imag(x)  # [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float`, `double`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Imag", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
    else:
      return array_ops.zeros_like(input)


@tf_export("math.angle", v1=["math.angle", "angle"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("angle")
def angle(input, name=None):
  r"""Returns the element-wise argument of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the argument of each element in `input` considered as a complex number.

  The elements in `input` are considered to be complex numbers of the form
  \\(a + bj\\), where *a* is the real part and *b* is the imaginary part.
  If `input` is real then *b* is zero by definition.

  The argument returned by this function is of the form \\(atan2(b, a)\\).
  Accordingly, if `input` is real, the result is \\(\pi\\) for negative
  elements and 0 for non-negative elements.

  For example:

  ```
  input = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex64)
  tf.math.angle(input).numpy()
  # ==> array([2.0131705, 1.056345 ], dtype=float32)
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float`, `double`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Angle", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)
    else:
      return array_ops.where(input < 0, np.pi * array_ops.ones_like(input),
                             array_ops.zeros_like(input))


# pylint: enable=redefined-outer-name,redefined-builtin


@tf_export("math.round", "round")
@dispatch.add_dispatch_support
def round(x, name=None):  # pylint: disable=redefined-builtin
  """Rounds the values of a tensor to the nearest integer, element-wise.

  Rounds half to even, also known as banker's rounding. If you want to round
  according to the current system rounding mode use tf::cint.

  For example:

  ```python
  x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
  tf.round(x)  # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.is_integer:
    return x
  else:
    return gen_math_ops.round(x, name=name)


@tf_export("cast", "dtypes.cast")
@dispatch.add_dispatch_support
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor` or `IndexedSlices`) to `dtype`.

  For example:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  Notice `tf.cast` has an alias `tf.dtypes.cast`:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.dtypes.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  The operation supports data types (for `x` and `dtype`) of
  `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
  `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.
  In case of casting from complex types (`complex64`, `complex128`) to real
  types, only the real part of `x` is returned. In case of casting from real
  types to complex types (`complex64`, `complex128`), the imaginary part of the
  returned value is set to `0`. The handling of complex types here matches the
  behavior of numpy.

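  For example, casting a complex tensor to a real type keeps only the real
  part:

  >>> x = tf.constant([1 + 2j], dtype=tf.complex64)
  >>> tf.cast(x, tf.float32)
  <tf.Tensor: shape=(1,), dtype=float32, numpy=array([1.], dtype=float32)>
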
  Note: casting nan and inf values to integral types has undefined behavior.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could
      be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,
      `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,
      `bfloat16`.
    dtype: The destination type. The list of supported dtypes is the same as
      `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and
      same type as `dtype`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  base_type = dtypes.as_dtype(dtype).base_dtype
  if isinstance(x,
                (ops.Tensor, _resource_variable_type)) and base_type == x.dtype:
    return x
  with ops.name_scope(name, "Cast", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      values_cast = cast(x.values, base_type, name=name)
      x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
    elif isinstance(x, ops.IndexedSlices):
      values_cast = cast(x.values, base_type, name=name)
      x = ops.IndexedSlices(values_cast, x.indices, x.dense_shape)
    else:
      # TODO(josh11b): If x is not already a Tensor, we could return
      # ops.convert_to_tensor(x, dtype=dtype, ...)  here, but that
      # allows some conversions that cast() can't do, e.g. casting numbers to
      # strings.
      x = ops.convert_to_tensor(x, name="x")
      if x.dtype.base_dtype != base_type:
        x = gen_math_ops.cast(x, base_type, name=name)
    if x.dtype.is_complex and base_type.is_floating:
      logging.warn("Casting complex to real discards imaginary part.")
    return x


@tf_export("dtypes.saturate_cast", "saturate_cast")
@dispatch.add_dispatch_support
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without applying any scaling. If
  there is a danger that values would over- or underflow in the cast, this op
  applies the appropriate clamping before the cast.

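  For example, values outside the representable range of the target type are
  clamped before the cast (here to the `int8` range `[-128, 127]`):

  ```python
  x = tf.constant([-1500.0, 200.0, 3000.0])
  tf.saturate_cast(x, tf.int8)  # ==> [-128, 127, 127]
  ```
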
  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  # When casting to a type with smaller representable range, clamp.
  # Note that this covers casting to unsigned types as well.
  with ops.name_scope(name, "saturate_cast", [value]) as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    if value.dtype.min < dtype.min:
      value = gen_math_ops.maximum(
          value,
          ops.convert_to_tensor(dtype.min, dtype=value.dtype, name="min"))
    if value.dtype.max > dtype.max:
      value = gen_math_ops.minimum(
          value,
          ops.convert_to_tensor(dtype.max, dtype=value.dtype, name="max"))
    return cast(value, dtype, name=name)


@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_float"])
@dispatch.add_dispatch_support
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to the `float32`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.float32)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_float(tf.constant(3.14, dtype=tf.double))
  <tf.Tensor: shape=(), dtype=float32, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.double), tf.float32)
  <tf.Tensor: shape=(), dtype=float32, numpy=3.14>

  @end_compatibility

  """
  return cast(x, dtypes.float32, name=name)


@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_double"])
@dispatch.add_dispatch_support
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to the `float64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.double)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_double(tf.constant(3.14, dtype=tf.float32))
  <tf.Tensor: shape=(), dtype=float64, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.float32), tf.double)
  <tf.Tensor: shape=(), dtype=float64, numpy=3.14>

  @end_compatibility

  """
  return cast(x, dtypes.float64, name=name)


@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_int32"])
@dispatch.add_dispatch_support
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to the `int32`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.int32)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_int32(tf.constant(1, dtype=tf.int64))
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  After:

  >>> tf.cast(tf.constant(1, dtype=tf.int64), tf.int32)
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  @end_compatibility

  """
  return cast(x, dtypes.int32, name=name)


@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_int64"])
@dispatch.add_dispatch_support
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to the `int64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.int64)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_int64(tf.constant(1, dtype=tf.int32))
  <tf.Tensor: shape=(), dtype=int64, numpy=1>

  After:

  >>> tf.cast(tf.constant(1, dtype=tf.int32), tf.int64)
  <tf.Tensor: shape=(), dtype=int64, numpy=1>

  @end_compatibility

  """
  return cast(x, dtypes.int64, name=name)


@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_bfloat16"])
@dispatch.add_dispatch_support
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to the `bfloat16`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.bfloat16)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_bfloat16(tf.constant(3.14, dtype=tf.float32))
  <tf.Tensor: shape=(), dtype=bfloat16, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.float32), tf.bfloat16)
  <tf.Tensor: shape=(), dtype=bfloat16, numpy=3.14>

  @end_compatibility

  """
  return cast(x, dtypes.bfloat16, name=name)


@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_complex64"])
@dispatch.add_dispatch_support
def to_complex64(x, name="ToComplex64"):
  """Casts a tensor to type `complex64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex64`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.complex64)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_complex64(tf.constant(1. + 2.j, dtype=tf.complex128))
  <tf.Tensor: shape=(), dtype=complex64, numpy=(1+2j)>

  After:

  >>> tf.cast(tf.constant(1. + 2.j, dtype=tf.complex128), tf.complex64)
  <tf.Tensor: shape=(), dtype=complex64, numpy=(1+2j)>

  @end_compatibility

  """
  return cast(x, dtypes.complex64, name=name)


@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_complex128"])
@dispatch.add_dispatch_support
def to_complex128(x, name="ToComplex128"):
  """Casts a tensor to type `complex128`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex128`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex128`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.complex128)`. There are no further issues with eager
  execution or tf.function.

  Before:

  >>> tf.compat.v1.to_complex128(tf.constant(1. + 2.j, dtype=tf.complex64))
  <tf.Tensor: shape=(), dtype=complex128, numpy=(1+2j)>

  After:

  >>> tf.cast(tf.constant(1. + 2.j, dtype=tf.complex64), tf.complex128)
  <tf.Tensor: shape=(), dtype=complex128, numpy=(1+2j)>

  @end_compatibility

  """
  return cast(x, dtypes.complex128, name=name)


ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)


def _maybe_get_dtype(x):
  """Returns a numpy type if available from x. Skips if x is numpy.ndarray."""
  # Don't put np.ndarray in this list, because np.result_type looks at the
  # value (not just dtype) of np.ndarray to decide the result type.
  if isinstance(x, numbers.Real):
    return x
  if isinstance(x, ops.Tensor):
    return x.dtype.as_numpy_dtype
  if isinstance(x, dtypes.DType):
    return x.as_numpy_dtype
  if isinstance(x, tensor_shape.TensorShape):
    return np.int32
  if isinstance(x, (list, tuple)):
    raise ValueError(f"Cannot determine dtype. Got sequence {x}.")
  return x


def maybe_promote_tensors(*tensors, force_same_dtype=True):
  """Promote tensors if numpy-style type promotion is enabled."""
  if not tensors:
    return tensors
  if not ops._numpy_style_type_promotion:
    if not force_same_dtype:
      return tensors
    promoted_tensors = []
    promoted_tensors.append(tensors[0])
    dtype = tensors[0].dtype.base_dtype
    for tensor in tensors[1:]:
      promoted_tensors.append(
          ops.convert_to_tensor(tensor, dtype, name="x"))
    return promoted_tensors
  result_type = np_dtypes._result_type(
      *[_maybe_get_dtype(x) for x in nest.flatten(tensors)])
  def _promote_or_cast(x):
    if isinstance(x, ops.Tensor):
      x = cast(x, result_type)
    else:
      x = ops.convert_to_tensor(x, result_type)
    return x
  return [_promote_or_cast(x) for x in tensors]


def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
  """Register operators with different tensor and scalar versions.

  If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
  sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.

  Args:
    func: the operator
    op_name: name of the operator being overridden
    clazz_object: class to override for.  Either `Tensor` or `SparseTensor`.
  """

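  # A sketch of how this helper is typically invoked (the actual registrations
  # appear further down in this file, beyond this excerpt): each call installs
  # `__<op>__` and `__r<op>__` overloads on the target class, e.g.
  #   _OverrideBinaryOperatorHelper(multiply, "mul")
  #   _OverrideBinaryOperatorHelper(subtract, "sub")
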
  @traceback_utils.filter_traceback
  def binary_op_wrapper(x, y):
    with ops.name_scope(None, op_name, [x, y]) as name:
      try:
        # force_same_dtype=False to preserve existing TF behavior
        # TODO(b/178860388): Figure out why binary_op_wrapper and
        #   r_binary_op_wrapper use different force_same_dtype values.
        x, y = maybe_promote_tensors(x, y, force_same_dtype=False)
        return func(x, y, name=name)
      except (TypeError, ValueError) as e:
        # Even if dispatching the op failed, the RHS may be a tensor aware
        # object that can implement the operator with knowledge of itself
        # and the tensor.
        # If the RHS is not tensor aware we still want to raise the
        # original error from the LHS, because it may be more
        # informative.
        if hasattr(type(y), "__r%s__" % op_name):
          try:
            r_op = getattr(y, "__r%s__" % op_name)
            out = r_op(x)
            if out is NotImplemented:
              raise
            return out
          except (TypeError, ValueError):
            raise e
        else:
          raise

  @traceback_utils.filter_traceback
  def binary_op_wrapper_sparse(sp_x, y):
    with ops.name_scope(None, op_name, [sp_x, y]) as name:
      y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
      return sparse_tensor.SparseTensor(
          sp_x.indices,
          func(sp_x.indices, sp_x.values, sp_x.dense_shape, y, name=name),
          sp_x.dense_shape)

  @traceback_utils.filter_traceback
  def r_binary_op_wrapper(y, x):
    with ops.name_scope(None, op_name, [x, y]) as name:
      # TODO(b/178860388): Figure out why binary_op_wrapper and
      #   r_binary_op_wrapper use different force_same_dtype values.
      y, x = maybe_promote_tensors(y, x)
      return func(x, y, name=name)

  # Propagate func.__doc__ to the wrappers.
  try:
    doc = func.__doc__
  except AttributeError:
    doc = None
  binary_op_wrapper.__doc__ = doc
  r_binary_op_wrapper.__doc__ = doc
  binary_op_wrapper_sparse.__doc__ = doc

  if clazz_object is ops.Tensor:
    clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper
    clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper
  else:
    clazz_object._override_operator("__%s__" % op_name,
                                    binary_op_wrapper_sparse)
    del binary_op_wrapper_sparse


# Conversion table for __truediv__.  None entries mean no conversion required.
_TRUEDIV_TABLE = {
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.uint32: dtypes.float64,
    dtypes.int32: dtypes.float64,
    dtypes.uint64: dtypes.float64,
    dtypes.int64: dtypes.float64,
    dtypes.bfloat16: None,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}


# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()".  Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
  """Internal helper function for 'sp_t / dense_t'."""
  with ops.name_scope(name, "truediv",
                      [sp_indices, sp_values, sp_shape, y]) as name:
    sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = sp_values.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError(
          f"Invalid dtype {x_dtype!r} in __truediv__. Expected one "
          f"of {{{', '.join([repr(x) for x in _TRUEDIV_TABLE.keys()])}}}.")
    if dtype is not None:
      sp_values = cast(sp_values, dtype)
      y = cast(y, dtype)
    return gen_sparse_ops.sparse_dense_cwise_div(
        sp_indices, sp_values, sp_shape, y, name=name)


def _truediv_python3(x, y, name=None):
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError(
          f"Invalid dtype {x_dtype!r} in __truediv__. Expected one "
          f"of {{{', '.join([repr(x) for x in _TRUEDIV_TABLE.keys()])}}}.")
    if dtype is not None:
      x = cast(x, dtype)
      y = cast(y, dtype)
    return gen_math_ops.real_div(x, y, name=name)


def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics.

  Used for Tensor.__div__.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """

  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    if x_dtype.is_floating or x_dtype.is_complex:
      return gen_math_ops.real_div(x, y, name=name)
    else:
      return gen_math_ops.floor_div(x, y, name=name)


@tf_export("math.truediv", "truediv")
@dispatch.add_dispatch_support
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide, which obey Python
  division operator semantics.

  This function forces Python 3 division operator semantics, where all integer
  arguments are cast to floating types first. This op is generated by normal
  `x / y` division in Python 3 and in Python 2.7 with
  `from __future__ import division`. If you want integer division that rounds
  down, use `x // y` or `tf.math.floordiv`.

  `x` and `y` must have the same numeric type. If the inputs are floating
  point, the output will have the same type. If the inputs are integral, the
  inputs are cast to `float32` for `int8`, `uint8`, `int16`, and `uint16`, and
  to `float64` for `int32`, `uint32`, `int64`, and `uint64` (matching the
  behavior of Numpy).

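  For example, `int32` inputs are cast to `float64` before dividing (this
  mirrors the `tf.divide` example above):

  >>> tf.math.truediv(tf.constant([16, 12, 11]), tf.constant([4, 6, 2]))
  <tf.Tensor: shape=(3,), dtype=float64,
  numpy=array([4. , 2. , 5.5])>
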
  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  return _truediv_python3(x, y, name)


1557@deprecation.deprecated(
1558    date=None,
1559    instructions="Deprecated in favor of operator or tf.math.divide.")
1560@tf_export(v1=["div"])
1561@dispatch.add_dispatch_support
1562def div(x, y, name=None):
1563  """Divides x / y elementwise (using Python 2 division operator semantics).
1564
1565  @compatibility(TF2)
1566  This function is deprecated in TF2. Prefer using the Tensor division operator,
1567  `tf.divide`, or `tf.math.divide`, which obey the Python 3 division operator
1568  semantics.
1569  @end_compatibility
1570
1571
1572  This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x`
1573  and `y` are both integers then the result will be an integer. This is in
1574  contrast to Python 3, where division with `/` is always a float while division
1575  with `//` is always an integer.
1576
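  For example (an illustrative doctest; with integer inputs, `tf.compat.v1.div`
  floor-divides):

  >>> tf.compat.v1.div(tf.constant(7), tf.constant(2))
  <tf.Tensor: shape=(), dtype=int32, numpy=3>
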
1577  Args:
1578    x: `Tensor` numerator of real numeric type.
1579    y: `Tensor` denominator of real numeric type.
1580    name: A name for the operation (optional).
1581
1582  Returns:
1583    `x / y` returns the quotient of x and y.
1584  """
1585  return _div_python2(x, y, name)
1586
1587
1588@tf_export("math.divide_no_nan", v1=["math.divide_no_nan", "div_no_nan"])
1589@dispatch.add_dispatch_support
1590@deprecation.deprecated_endpoints("div_no_nan")
1591@dispatch.add_dispatch_support
1592def div_no_nan(x, y, name=None):
1593  """Computes a safe divide which returns 0 if `y` (denominator) is zero.
1594
1595  For example:
1596
1597  >>> tf.constant(3.0) / 0.0
1598  <tf.Tensor: shape=(), dtype=float32, numpy=inf>
1599  >>> tf.math.divide_no_nan(3.0, 0.0)
1600  <tf.Tensor: shape=(), dtype=float32, numpy=0.0>
1601
1602  Args:
1603    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
1604    y: A `Tensor` whose dtype is compatible with `x`.
1605    name: A name for the operation (optional).
1606
1607  Returns:
1608    The element-wise quotient of `x` and `y`, with `0` wherever `y` is `0`.
1609  """
1610
1611  with ops.name_scope(name, "div_no_nan", [x, y]) as name:
1612    x = ops.convert_to_tensor(x, name="x")
1613    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
1614    return gen_math_ops.div_no_nan(x, y, name=name)
1615
1616
1617@tf_export("math.multiply_no_nan")
1618@dispatch.add_dispatch_support
1619def multiply_no_nan(x, y, name=None):
1620  """Computes the product of x and y and returns 0 if the y is zero, even if x is NaN or infinite.
1621
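  For example (an illustrative doctest; the zeros in `y` suppress the `inf` and
  NaN in `x`):

  >>> x = tf.constant([2., float('inf'), float('nan')])
  >>> y = tf.constant([3., 0., 0.])
  >>> tf.math.multiply_no_nan(x, y)
  <tf.Tensor: shape=(3,), dtype=float32, numpy=array([6., 0., 0.], dtype=float32)>
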
1622  Args:
1623    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
1624    y: A `Tensor` whose dtype is compatible with `x`.
1625    name: A name for the operation (optional).
1626
1627  Returns:
1628    The element-wise product of `x` and `y`, with `0` wherever `y` is `0`.
1629  """
1630
1631  with ops.name_scope(name, "multiply_no_nan", [x, y]) as name:
1632    x = ops.convert_to_tensor(x, name="x")
1633    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
1634    x_dtype = x.dtype.base_dtype
1635    y_dtype = y.dtype.base_dtype
1636    if x_dtype != y_dtype:
1637      raise TypeError(f"`x` and `y` must have the same dtype, "
1638                      f"got {x_dtype!r} != {y_dtype!r}")
1639    return gen_math_ops.mul_no_nan(x, y, name=name)
1640
1641
1642# TODO(aselle): This should be removed
1643mod = gen_math_ops.floor_mod
1644
1645
1646# TODO(aselle): Deprecate this once all internal functionality uses
1647# tf.truncatediv
1648@tf_export("math.floordiv", v1=["math.floordiv", "floordiv"])
1649@dispatch.add_dispatch_support
1650@deprecation.deprecated_endpoints("floordiv")
1651def floordiv(x, y, name=None):
1652  """Divides `x / y` elementwise, rounding toward the most negative integer.
1653
1654  The same as `tf.compat.v1.div(x,y)` for integers, but uses
1655  `tf.floor(tf.compat.v1.div(x,y))` for
1656  floating point arguments so that the result is always an integer (though
1657  possibly an integer represented as floating point).  This op is generated by
1658  `x // y` floor division in Python 3 and in Python 2.7 with
1659  `from __future__ import division`.
1660
1661  `x` and `y` must have the same type, and the result will have the same type
1662  as well.
1663
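  For example (an illustrative doctest; note the rounding toward negative
  infinity when the quotient is negative):

  >>> tf.math.floordiv(tf.constant(7), tf.constant(2))
  <tf.Tensor: shape=(), dtype=int32, numpy=3>
  >>> tf.math.floordiv(tf.constant(-7), tf.constant(2))
  <tf.Tensor: shape=(), dtype=int32, numpy=-4>
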
1664  Args:
1665    x: `Tensor` numerator of real numeric type.
1666    y: `Tensor` denominator of real numeric type.
1667    name: A name for the operation (optional).
1668
1669  Returns:
1670    `x / y` rounded down.
1671
1672  Raises:
1673    TypeError: If the inputs are complex.
1674  """
1675  with ops.name_scope(name, "floordiv", [x, y]) as name:
1676    return gen_math_ops.floor_div(x, y, name=name)
1677
1678
1679realdiv = gen_math_ops.real_div
1680truncatediv = gen_math_ops.truncate_div
1681# TODO(aselle): Rename this to floordiv when we can.
1682floor_div = gen_math_ops.floor_div
1683truncatemod = gen_math_ops.truncate_mod
1684floormod = gen_math_ops.floor_mod
1685
1686
1687@tf_export("__operators__.add", v1=[])
1688@dispatch.add_dispatch_support
1689def _add_dispatch(x, y, name=None):
1690  """The operation invoked by the `Tensor.__add__` operator.
1691
1692    Purpose in the API:
1693
1694      This method is exposed in TensorFlow's API so that library developers
1695      can register dispatching for `Tensor.__add__` to allow it to handle
1696      custom composite tensors & other custom objects.
1697
1698      The API symbol is not intended to be called by users directly and does
1699      not appear in TensorFlow's generated documentation.
1700
1701  Args:
1702    x: The left-hand side of the `+` operator.
1703    y: The right-hand side of the `+` operator.
1704    name: an optional name for the operation.
1705
1706  Returns:
1707    The result of the elementwise `+` operation.
1708  """
1709  if not isinstance(y, ops.Tensor) and not isinstance(
1710      y, sparse_tensor.SparseTensor):
1711    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
1712  if x.dtype == dtypes.string:
1713    return gen_math_ops.add(x, y, name=name)
1714  else:
1715    return gen_math_ops.add_v2(x, y, name=name)
1716
1717
1718def _mul_dispatch(x, y, name=None):
1719  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
1720  if isinstance(y, sparse_tensor.SparseTensor):  # Case: Dense * Sparse.
1721    new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
1722                                                     y.dense_shape, x, name)
1723    return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
1724  else:
1725    return multiply(x, y, name=name)
1726
1727
1728# NOTE(aselle): When integer division is added for sparse_dense_cwise,
1729# div, truediv, and floordiv should be delegated appropriately for
1730# Python semantics, analogous to dense cwise tensor operations.
1731_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
1732                              sparse_tensor.SparseTensor)
1733_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
1734                              sparse_tensor.SparseTensor)
1735_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
1736                              sparse_tensor.SparseTensor)
1737
1738_OverrideBinaryOperatorHelper(_add_dispatch, "add")
1739_OverrideBinaryOperatorHelper(subtract, "sub")
1740_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
1741_OverrideBinaryOperatorHelper(div, "div")
1742_OverrideBinaryOperatorHelper(truediv, "truediv")
1743_OverrideBinaryOperatorHelper(floordiv, "floordiv")
1744_OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
1745_OverrideBinaryOperatorHelper(pow, "pow")
1746
1747
1748@tf_export("math.logical_xor", v1=["math.logical_xor", "logical_xor"])
1749@dispatch.add_dispatch_support
1750@deprecation.deprecated_endpoints("logical_xor")
1751def logical_xor(x, y, name="LogicalXor"):
1752  """Logical XOR function.
1753
1754  x ^ y = (x | y) & ~(x & y)
1755
1756  Requires that `x` and `y` have the same shape or have
1757  [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
1758  shapes. For example, `x` and `y` can be:
1759
1760  - Two single elements of type `bool`
1761  - One `tf.Tensor` of type `bool` and one single `bool`, where the result will
1762    be calculated by applying logical XOR with the single element to each
1763    element in the larger Tensor.
1764  - Two `tf.Tensor` objects of type `bool` of the same shape. In this case,
1765    the result will be the element-wise logical XOR of the two input tensors.
1766
1767  Usage:
1768
1769  >>> a = tf.constant([True])
1770  >>> b = tf.constant([False])
1771  >>> tf.math.logical_xor(a, b)
1772  <tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>
1773
1774  >>> c = tf.constant([True])
1775  >>> x = tf.constant([False, True, True, False])
1776  >>> tf.math.logical_xor(c, x)
1777  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([ True, False, False,  True])>
1778
1779  >>> y = tf.constant([False, False, True, True])
1780  >>> z = tf.constant([False, True, False, True])
1781  >>> tf.math.logical_xor(y, z)
1782  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False,  True,  True, False])>
1783
1784  Args:
1785      x: A `tf.Tensor` of type bool.
1786      y: A `tf.Tensor` of type bool.
1787      name: A name for the operation (optional).
1788
1789  Returns:
1790    A `tf.Tensor` of type bool with the shape that `x` and `y` broadcast to.
1791  """
1792  # TODO(alemi) Make this a cwise op if people end up relying on it.
1793  return gen_math_ops.logical_and(
1794      gen_math_ops.logical_or(x, y),
1795      gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
1796      name=name)
1797
1798
1799def and_(x, y, name=None):
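  """Implements `__and__`: `logical_and` for bool tensors, else `bitwise_and`."""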
1800  if x.dtype == dtypes.bool:
1801    return gen_math_ops.logical_and(x, y, name)
1802  return gen_bitwise_ops.bitwise_and(x, y)
1803
1804
1805def or_(x, y, name=None):
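  """Implements `__or__`: `logical_or` for bool tensors, else `bitwise_or`."""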
1806  if x.dtype == dtypes.bool:
1807    return gen_math_ops.logical_or(x, y, name)
1808  return gen_bitwise_ops.bitwise_or(x, y)
1809
1810
1811def xor_(x, y, name=None):
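  """Implements `__xor__`: `logical_xor` for bool tensors, else `bitwise_xor`."""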
1812  if x.dtype == dtypes.bool:
1813    return logical_xor(x, y, name)
1814  return gen_bitwise_ops.bitwise_xor(x, y)
1815
1816
1817def invert_(x, name=None):
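  """Implements `__invert__`: `logical_not` for bool tensors, else `invert`."""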
1818  if x.dtype == dtypes.bool:
1819    return gen_math_ops.logical_not(x, name=name)
1820  return gen_bitwise_ops.invert(x, name=name)
1821
1822
1823_OverrideBinaryOperatorHelper(and_, "and")
1824_OverrideBinaryOperatorHelper(or_, "or")
1825_OverrideBinaryOperatorHelper(xor_, "xor")
1826ops.Tensor._override_operator("__invert__", invert_)
1827
1828
1829def _promote_dtypes_decorator(fn):
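  """Wraps a binary op `fn` so both of its arguments are dtype-promoted."""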
1830  def wrapper(x, y, *args, **kwargs):
1831    x, y = maybe_promote_tensors(x, y, force_same_dtype=False)
1832    return fn(x, y, *args, **kwargs)
1833  return tf_decorator.make_decorator(fn, wrapper)
1834
1835
1836ops.Tensor._override_operator("__lt__", _promote_dtypes_decorator(
1837    gen_math_ops.less))
1838ops.Tensor._override_operator("__le__", _promote_dtypes_decorator(
1839    gen_math_ops.less_equal))
1840ops.Tensor._override_operator("__gt__", _promote_dtypes_decorator(
1841    gen_math_ops.greater))
1842ops.Tensor._override_operator("__ge__", _promote_dtypes_decorator(
1843    gen_math_ops.greater_equal))
1844
1845
1846@tf_export("math.equal", "equal")
1847@dispatch.add_dispatch_support
1848def equal(x, y, name=None):
1849  """Returns the truth value of (x == y) element-wise.
1850
1851  Performs a [broadcast](
1852  https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
1853  arguments and then an element-wise equality comparison, returning a Tensor of
1854  boolean values.
1855
1856  For example:
1857
1858  >>> x = tf.constant([2, 4])
1859  >>> y = tf.constant(2)
1860  >>> tf.math.equal(x, y)
1861  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
1862
1863  >>> x = tf.constant([2, 4])
1864  >>> y = tf.constant([2, 4])
1865  >>> tf.math.equal(x, y)
1866  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True,  True])>
1867
1868  Args:
1869    x: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
1870    y: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
1871    name: A name for the operation (optional).
1872
1873  Returns:
1874    A `tf.Tensor` of type bool with the shape that `x` and `y` broadcast to.
1875
1876  Raises:
1877    `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible.
1878  """
1879  return gen_math_ops.equal(x, y, name=name)
1880
1881
1882@tf_export("math.not_equal", "not_equal")
1883@dispatch.add_dispatch_support
1884def not_equal(x, y, name=None):
1885  """Returns the truth value of (x != y) element-wise.
1886
1887  Performs a [broadcast](
1888  https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
1889  arguments and then an element-wise inequality comparison, returning a Tensor
1890  of boolean values.
1891
1892  For example:
1893
1894  >>> x = tf.constant([2, 4])
1895  >>> y = tf.constant(2)
1896  >>> tf.math.not_equal(x, y)
1897  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  True])>
1898
1899  >>> x = tf.constant([2, 4])
1900  >>> y = tf.constant([2, 4])
1901  >>> tf.math.not_equal(x, y)
1902  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
1903
1904  Args:
1905    x: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
1906    y: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
1907    name: A name for the operation (optional).
1908
1909  Returns:
1910    A `tf.Tensor` of type bool with the shape that `x` and `y` broadcast to.
1911
1912  Raises:
1913    `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible.
1914  """
1915  return gen_math_ops.not_equal(x, y, name=name)
1916
1917
1918@tf_export("__operators__.eq", v1=[])
1919@dispatch.add_dispatch_support
1920def tensor_equals(self, other):
1921  """The operation invoked by the `Tensor.__eq__` operator.
1922
1923  Compares two tensors element-wise for equality if they are
1924  broadcast-compatible; or returns False if they are not broadcast-compatible.
1925  (Note that this behavior differs from `tf.math.equal`, which raises an
1926  exception if the two tensors are not broadcast-compatible.)
1927
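  For example (an illustrative doctest, assuming eager execution):

  >>> tf.constant(2) == tf.constant(2)
  <tf.Tensor: shape=(), dtype=bool, numpy=True>
  >>> tf.constant(2) == tf.constant([2, 3])
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
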
1928  Purpose in the API:
1929
1930    This method is exposed in TensorFlow's API so that library developers
1931    can register dispatching for `Tensor.__eq__` to allow it to handle
1932    custom composite tensors & other custom objects.
1933
1934    The API symbol is not intended to be called by users directly and does
1935    not appear in TensorFlow's generated documentation.
1936
1937  Args:
1938    self: The left-hand side of the `==` operator.
1939    other: The right-hand side of the `==` operator.
1940
1941  Returns:
1942    The result of the elementwise `==` operation, or `False` if the arguments
1943    are not broadcast-compatible.
1944  """
1945  if other is None:
1946    return False
1947  g = getattr(self, "graph", None)
1948  if (ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions() and
1949      (g is None or g.building_function)):
1950    self, other = maybe_promote_tensors(self, other)
1951    return gen_math_ops.equal(self, other, incompatible_shape_error=False)
1952  else:
1953    # In legacy graph mode, tensor equality is object equality
1954    return self is other
1955
1956
1957@tf_export("__operators__.ne", v1=[])
1958@dispatch.add_dispatch_support
1959def tensor_not_equals(self, other):
1960  """The operation invoked by the `Tensor.__ne__` operator.
1961
1962  Compares two tensors element-wise for inequality if they are
1963  broadcast-compatible; or returns True if they are not broadcast-compatible.
1964  (Note that this behavior differs from `tf.math.not_equal`, which raises an
1965  exception if the two tensors are not broadcast-compatible.)
1966
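  For example (an illustrative doctest, assuming eager execution):

  >>> tf.constant(2) != tf.constant(3)
  <tf.Tensor: shape=(), dtype=bool, numpy=True>
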
1967  Purpose in the API:
1968
1969    This method is exposed in TensorFlow's API so that library developers
1970    can register dispatching for `Tensor.__ne__` to allow it to handle
1971    custom composite tensors & other custom objects.
1972
1973    The API symbol is not intended to be called by users directly and does
1974    not appear in TensorFlow's generated documentation.
1975
1976  Args:
1977    self: The left-hand side of the `!=` operator.
1978    other: The right-hand side of the `!=` operator.
1979
1980  Returns:
1981    The result of the elementwise `!=` operation, or `True` if the arguments
1982    are not broadcast-compatible.
1983  """
1984  if other is None:
1985    return True
1986  if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions():
1987    self, other = maybe_promote_tensors(self, other)
1988    return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
1989  else:
1990    # In legacy graph mode, tensor equality is object equality
1991    return self is not other
1992
1993
1994ops.Tensor._override_operator("__eq__", tensor_equals)
1995ops.Tensor._override_operator("__ne__", tensor_not_equals)
1996
1997
1998@tf_export("range")
1999@dispatch.add_dispatch_support
2000def range(start, limit=None, delta=1, dtype=None, name="range"):  # pylint: disable=redefined-builtin
2001  """Creates a sequence of numbers.
2002
2003  Creates a sequence of numbers that begins at `start` and extends by
2004  increments of `delta` up to but not including `limit`.
2005
2006  The dtype of the resulting tensor is inferred from the inputs unless
2007  it is provided explicitly.
2008
2009  Like the Python builtin `range`, `start` defaults to 0, so that
2010  `range(n) = range(0, n)`.
2011
2012  For example:
2013
2014  >>> start = 3
2015  >>> limit = 18
2016  >>> delta = 3
2017  >>> tf.range(start, limit, delta)
2018  <tf.Tensor: shape=(5,), dtype=int32,
2019  numpy=array([ 3,  6,  9, 12, 15], dtype=int32)>
2020
2021  >>> start = 3
2022  >>> limit = 1
2023  >>> delta = -0.5
2024  >>> tf.range(start, limit, delta)
2025  <tf.Tensor: shape=(4,), dtype=float32,
2026  numpy=array([3. , 2.5, 2. , 1.5], dtype=float32)>
2027
2028  >>> limit = 5
2029  >>> tf.range(limit)
2030  <tf.Tensor: shape=(5,), dtype=int32,
2031  numpy=array([0, 1, 2, 3, 4], dtype=int32)>
2032
2033  Args:
2034    start: A 0-D `Tensor` (scalar). Acts as first entry in the range if `limit`
2035      is not None; otherwise, acts as range limit and first entry defaults to 0.
2036    limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None,
2037      defaults to the value of `start` while the first entry of the range
2038      defaults to 0.
2039    delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to
2040      1.
2041    dtype: The type of the elements of the resulting tensor.
2042    name: A name for the operation. Defaults to "range".
2043
2044  Returns:
2045    A 1-D `Tensor` of type `dtype`.
2046
2047  @compatibility(numpy)
2048  Equivalent to np.arange
2049  @end_compatibility
2050  """
2051  if limit is None:
2052    start, limit = 0, start
2053
2054  with ops.name_scope(name, "Range", [start, limit, delta]) as name:
2055    if not isinstance(start, ops.Tensor):
2056      start = ops.convert_to_tensor(start, dtype=dtype, name="start")
2057    if not isinstance(limit, ops.Tensor):
2058      limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
2059    if not isinstance(delta, ops.Tensor):
2060      delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
2061
2062    # infer dtype if not explicitly provided
2063    if dtype is None:
2064      dtype_hierarchy = [
2065          dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
2066      ]
2067      assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
2068      inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],
2069                           key=dtype_hierarchy.index)
2070    else:
2071      inferred_dtype = dtype
2072    # Always try to perform a cast even when start/limit/delta are already
2073    # tensors. This resolves the case where the original dtype of
2074    # start/limit/delta differs from the provided dtype.
2075    start = cast(start, inferred_dtype)
2076    limit = cast(limit, inferred_dtype)
2077    delta = cast(delta, inferred_dtype)
2078
2079    return gen_math_ops._range(start, limit, delta, name=name)
2080
2081
2082def _range_tensor_conversion_function(value, dtype=None, name=None,
2083                                      as_ref=False):
2084  del as_ref
2085  return range(value.start, value.stop, value.step, dtype=dtype, name=name)
2086
2087
2088if not six.PY2:
2089  ops.register_tensor_conversion_function(builtins.range,
2090                                          _range_tensor_conversion_function)
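
# For example (illustrative): once this conversion function is registered, a
# Python `builtins.range` object can be passed wherever a `Tensor` is
# accepted; e.g. `tf.reduce_sum(range(5))` evaluates to 10.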
2091
2092# Reduction operations
2093def _ReductionDims(x, axis):  # pylint: disable=invalid-name
2094  """Returns range(0, rank(x)) if axis is None."""
2095  if axis is not None:
2096    return axis
2097  else:
2098    x_rank = None
2099    if isinstance(x, ops.Tensor):
2100      x_rank = x.shape.rank
2101    elif (isinstance(x, sparse_tensor.SparseTensor) and
2102          x.dense_shape.shape.is_fully_defined()):
2103      x_rank = x.dense_shape.shape.dims[0].value  # sparse.dense_shape is 1-D.
2104    # Fast path: avoid creating Rank and Range ops if ndims is known.
2105    if x_rank:
2106      return constant_op.constant(np.arange(x_rank, dtype=np.int32))
2107    else:
2108      # Otherwise, we rely on Range and Rank to do the right thing at run-time.
2109      return range(0, array_ops.rank(x))
2110
2111
2112def _has_fully_defined_shape(tensor):
2113  """Returns true if tensor has a fully defined shape."""
2114  return isinstance(tensor, ops.EagerTensor) or tensor.shape.is_fully_defined()
2115
2116
2117def _may_reduce_to_scalar(keepdims, axis, output):
2118  """Set a reduction's output shape to be a scalar if we are certain."""
2119  if not _has_fully_defined_shape(output) and (not keepdims) and (
2120      axis is None):
2121    output.set_shape(())
2122  return output
2123
2124
2125@tf_export(v1=["math.reduce_sum", "reduce_sum"])
2126@dispatch.add_dispatch_support
2127@deprecation.deprecated_args(None,
2128                             "keep_dims is deprecated, use keepdims instead",
2129                             "keep_dims")
2130def reduce_sum_v1(input_tensor,
2131                  axis=None,
2132                  keepdims=None,
2133                  name=None,
2134                  reduction_indices=None,
2135                  keep_dims=None):
2136  """Computes the sum of elements across dimensions of a tensor.
2137
2138  This is the reduction operation for the elementwise `tf.math.add` op.
2139
2140  Reduces `input_tensor` along the dimensions given in `axis`.
2141  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2142  of the entries in `axis`, which must be unique. If `keepdims` is true, the
2143  reduced dimensions are retained with length 1.
2144
2145  If `axis` is None, all dimensions are reduced, and a
2146  tensor with a single element is returned.
2147
2148  For example:
2149
2150    >>> # x has a shape of (2, 3) (two rows and three columns):
2151    >>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
2152    >>> x.numpy()
2153    array([[1, 1, 1],
2154           [1, 1, 1]], dtype=int32)
2155    >>> # sum all the elements
2156    >>> # 1 + 1 + 1 + 1 + 1 + 1 = 6
2157    >>> tf.reduce_sum(x).numpy()
2158    6
2159    >>> # reduce along the first dimension
2160    >>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
2161    >>> tf.reduce_sum(x, 0).numpy()
2162    array([2, 2, 2], dtype=int32)
2163    >>> # reduce along the second dimension
2164    >>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
2165    >>> tf.reduce_sum(x, 1).numpy()
2166    array([3, 3], dtype=int32)
2167    >>> # keep the original dimensions
2168    >>> tf.reduce_sum(x, 1, keepdims=True).numpy()
2169    array([[3],
2170           [3]], dtype=int32)
2171    >>> # reduce along both dimensions
2172    >>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
2173    >>> # or, equivalently, reduce along rows, then reduce the resultant array
2174    >>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
2175    >>> # 2 + 2 + 2 = 6
2176    >>> tf.reduce_sum(x, [0, 1]).numpy()
2177    6
2178
2179  Args:
2180    input_tensor: The tensor to reduce. Should have numeric type.
2181    axis: The dimensions to reduce. If `None` (the default), reduces all
2182      dimensions. Must be in the range `[-rank(input_tensor),
2183      rank(input_tensor))`.
2184    keepdims: If true, retains reduced dimensions with length 1.
2185    name: A name for the operation (optional).
2186    reduction_indices: The old (deprecated) name for axis.
2187    keep_dims: Deprecated alias for `keepdims`.
2188
2189  Returns:
2190    The reduced tensor, of the same dtype as the input_tensor.
2191
2192  @compatibility(numpy)
2193  Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and int32
2194  to int64 while tensorflow returns the same dtype as the input.
2195  @end_compatibility
2196  """
2197  axis = deprecation.deprecated_argument_lookup("axis", axis,
2198                                                "reduction_indices",
2199                                                reduction_indices)
2200  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
2201                                                    "keep_dims", keep_dims)
2202  return reduce_sum(input_tensor, axis, keepdims, name)
2203
2204
2205@tf_export("math.reduce_sum", "reduce_sum", v1=[])
2206@dispatch.add_dispatch_support
2207def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
2208  """Computes the sum of elements across dimensions of a tensor.
2209
2210  This is the reduction operation for the elementwise `tf.math.add` op.
2211
2212  Reduces `input_tensor` along the dimensions given in `axis`.
2213  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2214  of the entries in `axis`, which must be unique. If `keepdims` is true, the
2215  reduced dimensions are retained with length 1.
2216
2217  If `axis` is None, all dimensions are reduced, and a
2218  tensor with a single element is returned.
2219
2220  For example:
2221
2222    >>> # x has a shape of (2, 3) (two rows and three columns):
2223    >>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
2224    >>> x.numpy()
2225    array([[1, 1, 1],
2226           [1, 1, 1]], dtype=int32)
2227    >>> # sum all the elements
2228    >>> # 1 + 1 + 1 + 1 + 1 + 1 = 6
2229    >>> tf.reduce_sum(x).numpy()
2230    6
2231    >>> # reduce along the first dimension
2232    >>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
2233    >>> tf.reduce_sum(x, 0).numpy()
2234    array([2, 2, 2], dtype=int32)
2235    >>> # reduce along the second dimension
2236    >>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
2237    >>> tf.reduce_sum(x, 1).numpy()
2238    array([3, 3], dtype=int32)
2239    >>> # keep the original dimensions
2240    >>> tf.reduce_sum(x, 1, keepdims=True).numpy()
2241    array([[3],
2242           [3]], dtype=int32)
2243    >>> # reduce along both dimensions
2244    >>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
2245    >>> # or, equivalently, reduce along rows, then reduce the resultant array
2246    >>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
2247    >>> # 2 + 2 + 2 = 6
2248    >>> tf.reduce_sum(x, [0, 1]).numpy()
2249    6
2250
2251  Args:
2252    input_tensor: The tensor to reduce. Should have numeric type.
2253    axis: The dimensions to reduce. If `None` (the default), reduces all
2254      dimensions. Must be in the range `[-rank(input_tensor),
2255      rank(input_tensor))`.
2256    keepdims: If true, retains reduced dimensions with length 1.
2257    name: A name for the operation (optional).
2258
2259  Returns:
2260    The reduced tensor, of the same dtype as the input_tensor.
2261
2262  @compatibility(numpy)
2263  Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and int32
2264  to int64 while tensorflow returns the same dtype as the input.
2265  @end_compatibility
2266  """
2267
2268  return reduce_sum_with_dims(input_tensor, axis, keepdims, name,
2269                              _ReductionDims(input_tensor, axis))
2270
2271
2272def reduce_sum_with_dims(input_tensor,
2273                         axis=None,
2274                         keepdims=False,
2275                         name=None,
2276                         dims=None):
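  """Like `reduce_sum`, but takes the reduction axes precomputed as `dims`."""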
2277  keepdims = False if keepdims is None else bool(keepdims)
2278  return _may_reduce_to_scalar(
2279      keepdims, axis,
2280      gen_math_ops._sum(input_tensor, dims, keepdims, name=name))
2281
2282
2283@tf_export("math.reduce_euclidean_norm")
2284@dispatch.add_dispatch_support
2285def reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None):
2286  """Computes the Euclidean norm of elements across dimensions of a tensor.
2287
2288  Reduces `input_tensor` along the dimensions given in `axis`.
2289  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2290  of the entries in `axis`, which must be unique. If `keepdims` is true, the
2291  reduced dimensions are retained with length 1.
2292
2293  If `axis` is None, all dimensions are reduced, and a
2294  tensor with a single element is returned.
2295
2296  For example:
2297
2298  ```python
2299  x = tf.constant([[1, 2, 3], [1, 1, 1]]) # x.dtype is tf.int32
2300  tf.math.reduce_euclidean_norm(x)  # returns 4 as dtype is tf.int32
2301  y = tf.constant([[1, 2, 3], [1, 1, 1]], dtype = tf.float32)
2302  tf.math.reduce_euclidean_norm(y)  # returns 4.1231055 which is sqrt(17)
2303  tf.math.reduce_euclidean_norm(y, 0)  # [sqrt(2), sqrt(5), sqrt(10)]
2304  tf.math.reduce_euclidean_norm(y, 1)  # [sqrt(14), sqrt(3)]
2305  tf.math.reduce_euclidean_norm(y, 1, keepdims=True)  # [[sqrt(14)], [sqrt(3)]]
2306  tf.math.reduce_euclidean_norm(y, [0, 1])  # sqrt(17)
2307  ```
2308
2309  Args:
2310    input_tensor: The tensor to reduce. Should have numeric type.
2311    axis: The dimensions to reduce. If `None` (the default), reduces all
2312      dimensions. Must be in the range `[-rank(input_tensor),
2313      rank(input_tensor))`.
2314    keepdims: If true, retains reduced dimensions with length 1.
2315    name: A name for the operation (optional).
2316
2317  Returns:
2318    The reduced tensor, of the same dtype as the input_tensor.
2319  """
2320  keepdims = bool(keepdims)
2321  return _may_reduce_to_scalar(
2322      keepdims, axis,
2323      gen_math_ops.euclidean_norm(
2324          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
2325          name=name))
2326
2327
2328@tf_export(v1=["math.count_nonzero", "count_nonzero"])
2329@dispatch.add_dispatch_support
2330@deprecation.deprecated_args(None,
2331                             "keep_dims is deprecated, use keepdims instead",
2332                             "keep_dims")
2333@deprecation.deprecated_args(
2334    None, "reduction_indices is deprecated, use axis instead",
2335    "reduction_indices")
2336def count_nonzero(input_tensor=None,
2337                  axis=None,
2338                  keepdims=None,
2339                  dtype=dtypes.int64,
2340                  name=None,
2341                  reduction_indices=None,
2342                  keep_dims=None,
2343                  input=None):  # pylint: disable=redefined-builtin
2344  """Computes number of nonzero elements across dimensions of a tensor.
2345
2346  Reduces `input_tensor` along the dimensions given in `axis`.
2347  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2348  entry in `axis`. If `keepdims` is true, the reduced dimensions
2349  are retained with length 1.
2350
2351  If `axis` has no entries, all dimensions are reduced, and a
2352  tensor with a single element is returned.
2353
2354  **NOTE** Floating point comparison to zero is done by exact floating point
2355  equality check.  Small values are **not** rounded to zero for purposes of
2356  the nonzero check.
2357
2358  For example:
2359
2360  ```python
2361  x = tf.constant([[0, 1, 0], [1, 1, 0]])
2362  tf.math.count_nonzero(x)  # 3
2363  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
2364  tf.math.count_nonzero(x, 1)  # [1, 2]
2365  tf.math.count_nonzero(x, 1, keepdims=True)  # [[1], [2]]
2366  tf.math.count_nonzero(x, [0, 1])  # 3
2367  ```
2368
2369  **NOTE** Strings are compared against the zero-length empty string `""`. Any
2370  string with a size greater than zero is considered nonzero.
2371
2372  For example:
2373  ```python
2374  x = tf.constant(["", "a", "  ", "b", ""])
2375  tf.math.count_nonzero(x) # 3, with "a", "  ", and "b" as nonzero strings.
2376  ```
2377
2378  Args:
2379    input_tensor: The tensor to reduce. Should be of numeric type, `bool`, or
2380      `string`.
2381    axis: The dimensions to reduce. If `None` (the default), reduces all
2382      dimensions. Must be in the range `[-rank(input_tensor),
2383      rank(input_tensor))`.
2384    keepdims: If true, retains reduced dimensions with length 1.
2385    dtype: The output dtype; defaults to `tf.int64`.
2386    name: A name for the operation (optional).
2387    reduction_indices: The old (deprecated) name for axis.
2388    keep_dims: Deprecated alias for `keepdims`.
2389    input: Overrides input_tensor. For compatibility.
2390
2391  Returns:
2392    The reduced tensor (number of nonzero values).
2393  """
2394  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
2395                                                    "keep_dims", keep_dims)
2396  input_tensor = deprecation.deprecated_argument_lookup("input", input,
2397                                                        "input_tensor",
2398                                                        input_tensor)
2399  axis = deprecation.deprecated_argument_lookup("axis", axis,
2400                                                "reduction_indices",
2401                                                reduction_indices)
2402
2403  return count_nonzero_v2(input_tensor, axis, keepdims, dtype, name)
2404
2405
2406@tf_export("math.count_nonzero", v1=[])
2407@dispatch.add_dispatch_support
2408def count_nonzero_v2(
2409    input,  # pylint: disable=redefined-builtin
2410    axis=None,
2411    keepdims=None,
2412    dtype=dtypes.int64,
2413    name=None):
2414  """Computes number of nonzero elements across dimensions of a tensor.
2415
2416  Reduces `input` along the dimensions given in `axis`.
2417  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2418  entry in `axis`. If `keepdims` is true, the reduced dimensions
2419  are retained with length 1.
2420
2421  If `axis` has no entries, all dimensions are reduced, and a
2422  tensor with a single element is returned.
2423
2424  **NOTE** Floating point comparison to zero is done by exact floating point
2425  equality check.  Small values are **not** rounded to zero for purposes of
2426  the nonzero check.
2427
2428  For example:
2429
2430  ```python
2431  x = tf.constant([[0, 1, 0], [1, 1, 0]])
2432  tf.math.count_nonzero(x)  # 3
2433  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
2434  tf.math.count_nonzero(x, 1)  # [1, 2]
2435  tf.math.count_nonzero(x, 1, keepdims=True)  # [[1], [2]]
2436  tf.math.count_nonzero(x, [0, 1])  # 3
2437  ```
2438
2439  **NOTE** Strings are compared against the zero-length empty string `""`. Any
2440  string with a size greater than zero is considered nonzero.
2441
2442  For example:
2443  ```python
2444  x = tf.constant(["", "a", "  ", "b", ""])
2445  tf.math.count_nonzero(x) # 3, with "a", "  ", and "b" as nonzero strings.
2446  ```
2447
2448  Args:
2449    input: The tensor to reduce. Should be of numeric type, `bool`, or `string`.
2450    axis: The dimensions to reduce. If `None` (the default), reduces all
2451      dimensions. Must be in the range `[-rank(input), rank(input))`.
2452    keepdims: If true, retains reduced dimensions with length 1.
2453    dtype: The output dtype; defaults to `tf.int64`.
2454    name: A name for the operation (optional).
2455
2456  Returns:
2457    The reduced tensor (number of nonzero values).
2458  """
2459  if keepdims is None:
2460    keepdims = False
2461  with ops.name_scope(name, "count_nonzero", [input]):
2462    input = ops.convert_to_tensor(input, name="input")
2463    # A scalar of 'zero' is enough as `not_equal` will broadcast.
2464    zero = array_ops.zeros([], dtype=input.dtype)
2465    return cast(
2466        reduce_sum(
2467            # int64 reduction happens on GPU
2468            cast(gen_math_ops.not_equal(input, zero), dtypes.int64),
2469            axis=axis,
2470            keepdims=keepdims),
2471        dtype=dtype)
2472
2473
2474@tf_export(v1=["math.reduce_mean", "reduce_mean"])
2475@dispatch.add_dispatch_support
2476def reduce_mean_v1(input_tensor,
2477                   axis=None,
2478                   keepdims=None,
2479                   name=None,
2480                   reduction_indices=None,
2481                   keep_dims=None):
2482  """Computes the mean of elements across dimensions of a tensor.
2483
2484  Reduces `input_tensor` along the dimensions given in `axis` by computing the
2485  mean of elements across the dimensions in `axis`.
2486  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2487  of the entries in `axis`, which must be unique. If `keepdims` is true, the
2488  reduced dimensions are retained with length 1.
2489
2490  If `axis` is None, all dimensions are reduced, and a tensor with a single
2491  element is returned.
2492
2493  For example:
2494
2495  >>> x = tf.constant([[1., 1.], [2., 2.]])
2496  >>> tf.reduce_mean(x)
2497  <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
2498  >>> tf.reduce_mean(x, 0)
2499  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
2500  >>> tf.reduce_mean(x, 1)
2501  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>
2502
2503  Args:
2504    input_tensor: The tensor to reduce. Should have numeric type.
2505    axis: The dimensions to reduce. If `None` (the default), reduces all
2506      dimensions. Must be in the range `[-rank(input_tensor),
2507      rank(input_tensor))`.
2508    keepdims: If true, retains reduced dimensions with length 1.
2509    name: A name for the operation (optional).
2510    reduction_indices: The old (deprecated) name for axis.
2511    keep_dims: Deprecated alias for `keepdims`.
2512
2513  Returns:
2514    The reduced tensor.
2515
2516  @compatibility(numpy)
2517  Equivalent to np.mean
2518
2519  Please note that `np.mean` has a `dtype` parameter that could be used to
2520  specify the output type. By default this is `dtype=float64`. On the other
2521  hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
2522  for example:
2523
2524  >>> x = tf.constant([1, 0, 1, 0])
2525  >>> tf.reduce_mean(x)
2526  <tf.Tensor: shape=(), dtype=int32, numpy=0>
2527  >>> y = tf.constant([1., 0., 1., 0.])
2528  >>> tf.reduce_mean(y)
2529  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>
2530
2531  @end_compatibility
2532  """
2533  axis = deprecation.deprecated_argument_lookup("axis", axis,
2534                                                "reduction_indices",
2535                                                reduction_indices)
2536  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
2537                                                    "keep_dims", keep_dims)
2538  return reduce_mean(input_tensor, axis, keepdims, name)
2539
2540
2541@tf_export("math.reduce_mean", "reduce_mean", v1=[])
2542@dispatch.add_dispatch_support
2543def reduce_mean(input_tensor, axis=None, keepdims=False, name=None):
2544  """Computes the mean of elements across dimensions of a tensor.
2545
2546  Reduces `input_tensor` along the dimensions given in `axis` by computing the
2547  mean of elements across the dimensions in `axis`.
2548  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2549  of the entries in `axis`, which must be unique. If `keepdims` is true, the
2550  reduced dimensions are retained with length 1.
2551
2552  If `axis` is None, all dimensions are reduced, and a tensor with a single
2553  element is returned.
2554
2555  For example:
2556
2557  >>> x = tf.constant([[1., 1.], [2., 2.]])
2558  >>> tf.reduce_mean(x)
2559  <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
2560  >>> tf.reduce_mean(x, 0)
2561  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
2562  >>> tf.reduce_mean(x, 1)
2563  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>
2564
2565  Args:
2566    input_tensor: The tensor to reduce. Should have numeric type.
2567    axis: The dimensions to reduce. If `None` (the default), reduces all
2568      dimensions. Must be in the range `[-rank(input_tensor),
2569      rank(input_tensor))`.
2570    keepdims: If true, retains reduced dimensions with length 1.
2571    name: A name for the operation (optional).
2572
2573  Returns:
2574    The reduced tensor.
2575
2576  @compatibility(numpy)
2577  Equivalent to np.mean
2578
2579  Please note that `np.mean` has a `dtype` parameter that could be used to
2580  specify the output type. By default this is `dtype=float64`. On the other
2581  hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
2582  for example:
2583
2584  >>> x = tf.constant([1, 0, 1, 0])
2585  >>> tf.reduce_mean(x)
2586  <tf.Tensor: shape=(), dtype=int32, numpy=0>
2587  >>> y = tf.constant([1., 0., 1., 0.])
2588  >>> tf.reduce_mean(y)
2589  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>
2590
2591  @end_compatibility
2592  """
2593  keepdims = False if keepdims is None else bool(keepdims)
2594  return _may_reduce_to_scalar(
2595      keepdims, axis,
2596      gen_math_ops.mean(
2597          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
2598          name=name))
2599
2600
2601@tf_export("math.reduce_variance")
2602@dispatch.add_dispatch_support
2603def reduce_variance(input_tensor, axis=None, keepdims=False, name=None):
2604  """Computes the variance of elements across dimensions of a tensor.
2605
2606  Reduces `input_tensor` along the dimensions given in `axis`.
2607  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2608  of the entries in `axis`, which must be unique. If `keepdims` is true, the
2609  reduced dimensions are retained with length 1.
2610
2611  If `axis` is None, all dimensions are reduced, and a
2612  tensor with a single element is returned.
2613
2614  For example:
2615
2616  >>> x = tf.constant([[1., 2.], [3., 4.]])
2617  >>> tf.math.reduce_variance(x)
2618  <tf.Tensor: shape=(), dtype=float32, numpy=1.25>
2619  >>> tf.math.reduce_variance(x, 0)
2620  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], ...)>
2621  >>> tf.math.reduce_variance(x, 1)
2622  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.25, 0.25], ...)>
2623
2624  Args:
2625    input_tensor: The tensor to reduce. Should have real or complex type.
2626    axis: The dimensions to reduce. If `None` (the default), reduces all
2627      dimensions. Must be in the range `[-rank(input_tensor),
2628      rank(input_tensor))`.
2629    keepdims: If true, retains reduced dimensions with length 1.
2630    name: A name scope for the associated operations (optional).
2631
2632  Returns:
2633    The reduced tensor, of the same dtype as the input_tensor. Note that for
2634    `complex64` or `complex128` input, the returned `Tensor` will be of type
2635    `float32` or `float64`, respectively.
2636
2637  @compatibility(numpy)
2638  Equivalent to np.var
2639
2640  Please note `np.var` has a `dtype` parameter that could be used to specify the
2641  output type. By default this is `dtype=float64`. On the other hand,
2642  `tf.math.reduce_variance` has aggressive type inference from `input_tensor`.
2643  @end_compatibility
2644  """
2645  name = name if name else "reduce_variance"
2646  with ops.name_scope(name):
2647    input_tensor = ops.convert_to_tensor(input_tensor)
2648    means = reduce_mean(input_tensor, axis=axis, keepdims=True)
2649    if means.dtype.is_integer:
2650      raise TypeError(f"Input must be either real or complex. "
2651                      f"Received integer type {means.dtype}.")
2652    diff = input_tensor - means
2653    if diff.dtype.is_complex:
2654      # For complex values we need to take the absolute value before squaring.
2655      # This is achieved by multiplying with the conjugate.
2656      real_dtype = diff.dtype.real_dtype
2657      squared_deviations = gen_math_ops.real(
2658          gen_math_ops.mul(gen_math_ops.conj(diff), diff), Tout=real_dtype)
2659    else:
2660      squared_deviations = gen_math_ops.square(diff)
2661    return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims)
2662
2663
2664@tf_export("math.reduce_std")
2665@dispatch.add_dispatch_support
2666def reduce_std(input_tensor, axis=None, keepdims=False, name=None):
2667  """Computes the standard deviation of elements across dimensions of a tensor.
2668
2669  Reduces `input_tensor` along the dimensions given in `axis`.
2670  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2671  of the entries in `axis`, which must be unique. If `keepdims` is true, the
2672  reduced dimensions are retained with length 1.
2673
2674  If `axis` is None, all dimensions are reduced, and a
2675  tensor with a single element is returned.
2676
2677  For example:
2678
2679  >>> x = tf.constant([[1., 2.], [3., 4.]])
2680  >>> tf.math.reduce_std(x)
2681  <tf.Tensor: shape=(), dtype=float32, numpy=1.118034>
2682  >>> tf.math.reduce_std(x, 0)
2683  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)>
2684  >>> tf.math.reduce_std(x, 1)
2685  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.5, 0.5], dtype=float32)>
2686
2687  Args:
2688    input_tensor: The tensor to reduce. Should have real or complex type.
2689    axis: The dimensions to reduce. If `None` (the default), reduces all
2690      dimensions. Must be in the range `[-rank(input_tensor),
2691      rank(input_tensor))`.
2692    keepdims: If true, retains reduced dimensions with length 1.
2693    name: A name scope for the associated operations (optional).
2694
2695  Returns:
2696    The reduced tensor, of the same dtype as the input_tensor. Note that for
2697    `complex64` or `complex128` input, the returned `Tensor` will be of type
2698    `float32` or `float64`, respectively.
2699
2700  @compatibility(numpy)
2701  Equivalent to np.std
2702
2703  Please note `np.std` has a `dtype` parameter that could be used to specify the
2704  output type. By default this is `dtype=float64`. On the other hand,
2705  `tf.math.reduce_std` has aggressive type inference from `input_tensor`.
2706  @end_compatibility
2707  """
2708  name = name if name else "reduce_std"
2709  with ops.name_scope(name):
2710    input_tensor = ops.convert_to_tensor(input_tensor)
2711    variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)
2712    return gen_math_ops.sqrt(variance)
2713
2714
2715@tf_export("math.reduce_prod", "reduce_prod", v1=[])
2716@dispatch.add_dispatch_support
2717def reduce_prod(input_tensor, axis=None, keepdims=False, name=None):
2718  """Computes `tf.math.multiply` of elements across dimensions of a tensor.
2719
2720  This is the reduction operation for the elementwise `tf.math.multiply` op.
2721
2722  Reduces `input_tensor` along the dimensions given in `axis`.
2723  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2724  entry in `axis`. If `keepdims` is true, the reduced dimensions
2725  are retained with length 1.
2726
2727  If `axis` is None, all dimensions are reduced, and a
2728  tensor with a single element is returned.
2729
2730  For example:
2731
2732    >>> x = tf.constant([[1., 2.], [3., 4.]])
2733    >>> tf.math.reduce_prod(x)
2734    <tf.Tensor: shape=(), dtype=float32, numpy=24.>
2735    >>> tf.math.reduce_prod(x, 0)
2736    <tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
2737    >>> tf.math.reduce_prod(x, 1)
2738    <tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
2739    dtype=float32)>
2740
2741  Args:
2742    input_tensor: The tensor to reduce. Should have numeric type.
2743    axis: The dimensions to reduce. If `None` (the default), reduces all
2744      dimensions. Must be in the range `[-rank(input_tensor),
2745      rank(input_tensor))`.
2746    keepdims: If true, retains reduced dimensions with length 1.
2747    name: A name for the operation (optional).
2748
2749  Returns:
2750    The reduced tensor.
2751
2752  @compatibility(numpy)
2753  Equivalent to np.prod
2754  @end_compatibility
2755  """
2756  keepdims = False if keepdims is None else bool(keepdims)
2757  return _may_reduce_to_scalar(
2758      keepdims, axis,
2759      gen_math_ops.prod(
2760          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
2761          name=name))
2762
2763
2764@tf_export(v1=["math.reduce_prod", "reduce_prod"])
2765@dispatch.add_dispatch_support
2766@deprecation.deprecated_args(None,
2767                             "keep_dims is deprecated, use keepdims instead",
2768                             "keep_dims")
2769def reduce_prod_v1(input_tensor,
2770                   axis=None,
2771                   keepdims=None,
2772                   name=None,
2773                   reduction_indices=None,
2774                   keep_dims=None):
2775  """Computes `tf.math.multiply` of elements across dimensions of a tensor.
2776
2777  This is the reduction operation for the elementwise `tf.math.multiply` op.
2778
2779  Reduces `input_tensor` along the dimensions given in `axis`.
2780  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2781  of the entries in `axis`, which must be unique. If `keepdims` is true, the
2782  reduced dimensions are retained with length 1.
2783
2784  If `axis` is None, all dimensions are reduced, and a
2785  tensor with a single element is returned.
2786
2787  For example:
2788
2789    >>> x = tf.constant([[1., 2.], [3., 4.]])
2790    >>> tf.math.reduce_prod(x)
2791    <tf.Tensor: shape=(), dtype=float32, numpy=24.>
2792    >>> tf.math.reduce_prod(x, 0)
2793    <tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
2794    >>> tf.math.reduce_prod(x, 1)
2795    <tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
2796    dtype=float32)>
2797
2798  Args:
2799    input_tensor: The tensor to reduce. Should have numeric type.
2800    axis: The dimensions to reduce. If `None` (the default), reduces all
2801      dimensions. Must be in the range `[-rank(input_tensor),
2802      rank(input_tensor))`.
2803    keepdims: If true, retains reduced dimensions with length 1.
2804    name: A name for the operation (optional).
2805    reduction_indices: The old (deprecated) name for axis.
2806    keep_dims: Deprecated alias for `keepdims`.
2807
2808  Returns:
2809    The reduced tensor.
2810
2811  @compatibility(numpy)
2812  Equivalent to np.prod
2813  @end_compatibility
2814  """
2815  axis = deprecation.deprecated_argument_lookup("axis", axis,
2816                                                "reduction_indices",
2817                                                reduction_indices)
2818  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
2819                                                    "keep_dims", keep_dims)
2820  return reduce_prod(input_tensor, axis, keepdims, name)
2821
2822
2823@tf_export(v1=["math.reduce_min", "reduce_min"])
2824@dispatch.add_dispatch_support
2825@deprecation.deprecated_args(None,
2826                             "keep_dims is deprecated, use keepdims instead",
2827                             "keep_dims")
2828def reduce_min_v1(input_tensor,
2829                  axis=None,
2830                  keepdims=None,
2831                  name=None,
2832                  reduction_indices=None,
2833                  keep_dims=None):
2834  """Computes the `tf.math.minimum` of elements across dimensions of a tensor.
2835
2836  This is the reduction operation for the elementwise `tf.math.minimum` op.
2837
2838  Reduces `input_tensor` along the dimensions given in `axis`.
2839  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2840  of the entries in `axis`, which must be unique. If `keepdims` is true, the
2841  reduced dimensions are retained with length 1.
2842
2843  If `axis` is None, all dimensions are reduced, and a
2844  tensor with a single element is returned.
2845
2846  Usage example:
2847
2848    >>> x = tf.constant([5, 1, 2, 4])
2849    >>> tf.reduce_min(x)
2850    <tf.Tensor: shape=(), dtype=int32, numpy=1>
2851    >>> x = tf.constant([-5, -1, -2, -4])
2852    >>> tf.reduce_min(x)
2853    <tf.Tensor: shape=(), dtype=int32, numpy=-5>
2854    >>> x = tf.constant([4, float('nan')])
2855    >>> tf.reduce_min(x)
2856    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
2857    >>> x = tf.constant([float('nan'), float('nan')])
2858    >>> tf.reduce_min(x)
2859    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
2860    >>> x = tf.constant([float('-inf'), float('inf')])
2861    >>> tf.reduce_min(x)
2862    <tf.Tensor: shape=(), dtype=float32, numpy=-inf>
2863
2864  See the numpy docs for `np.amin` and `np.nanmin` behavior.
2865
2866  Args:
2867    input_tensor: The tensor to reduce. Should have real numeric type.
2868    axis: The dimensions to reduce. If `None` (the default), reduces all
2869      dimensions. Must be in the range `[-rank(input_tensor),
2870      rank(input_tensor))`.
2871    keepdims: If true, retains reduced dimensions with length 1.
2872    name: A name for the operation (optional).
2873    reduction_indices: The old (deprecated) name for axis.
2874    keep_dims: Deprecated alias for `keepdims`.
2875
2876  Returns:
2877    The reduced tensor.
2878  """
2879  axis = deprecation.deprecated_argument_lookup("axis", axis,
2880                                                "reduction_indices",
2881                                                reduction_indices)
2882  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
2883                                                    "keep_dims", keep_dims)
2884  return reduce_min(input_tensor, axis, keepdims, name)
2885
2886
2887@tf_export("math.reduce_min", "reduce_min", v1=[])
2888@dispatch.add_dispatch_support
2889def reduce_min(input_tensor, axis=None, keepdims=False, name=None):
2890  """Computes the `tf.math.minimum` of elements across dimensions of a tensor.
2891
2892  This is the reduction operation for the elementwise `tf.math.minimum` op.
2893
2894  Reduces `input_tensor` along the dimensions given in `axis`.
2895  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2896  of the entries in `axis`, which must be unique. If `keepdims` is true, the
2897  reduced dimensions are retained with length 1.
2898
2899  If `axis` is None, all dimensions are reduced, and a
2900  tensor with a single element is returned.
2901
2902  For example:
2903
2904  >>> a = tf.constant([
2905  ...   [[1, 2], [3, 4]],
2906  ...   [[1, 2], [3, 4]]
2907  ... ])
2908  >>> tf.reduce_min(a)
2909  <tf.Tensor: shape=(), dtype=int32, numpy=1>
2910
2911  Choosing a specific axis returns the minimum over that axis:
2912
2913  >>> b = tf.constant([[1, 2, 3], [4, 5, 6]])
2914  >>> tf.reduce_min(b, axis=0)
2915  <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 2, 3], dtype=int32)>
2916  >>> tf.reduce_min(b, axis=1)
2917  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 4], dtype=int32)>
2918
2919  Setting `keepdims` to `True` retains the dimension of `input_tensor`:
2920
2921  >>> tf.reduce_min(a, keepdims=True)
2922  <tf.Tensor: shape=(1, 1, 1), dtype=int32, numpy=array([[[1]]], dtype=int32)>
2923  >>> tf.math.reduce_min(a, axis=0, keepdims=True)
2924  <tf.Tensor: shape=(1, 2, 2), dtype=int32, numpy=
2925  array([[[1, 2],
2926          [3, 4]]], dtype=int32)>
2927
2928  Args:
2929    input_tensor: The tensor to reduce. Should have real numeric type.
2930    axis: The dimensions to reduce. If `None` (the default), reduces all
2931      dimensions. Must be in the range `[-rank(input_tensor),
2932      rank(input_tensor))`.
2933    keepdims: If true, retains reduced dimensions with length 1.
2934    name: A name for the operation (optional).
2935
2936  Returns:
2937    The reduced tensor.
2938
2939  @compatibility(numpy)
2940  Equivalent to np.min
2941  @end_compatibility
2942  """
2943  keepdims = False if keepdims is None else bool(keepdims)
2944  return _may_reduce_to_scalar(
2945      keepdims, axis,
2946      gen_math_ops._min(
2947          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
2948          name=name))
2949
2950
2951@tf_export(v1=["math.reduce_max", "reduce_max"])
2952@dispatch.add_dispatch_support
2953@deprecation.deprecated_args(None,
2954                             "keep_dims is deprecated, use keepdims instead",
2955                             "keep_dims")
2956def reduce_max_v1(input_tensor,
2957                  axis=None,
2958                  keepdims=None,
2959                  name=None,
2960                  reduction_indices=None,
2961                  keep_dims=None):
2962  """Computes `tf.math.maximum` of elements across dimensions of a tensor.
2963
2964  This is the reduction operation for the elementwise `tf.math.maximum` op.
2965
2966  Reduces `input_tensor` along the dimensions given in `axis`.
2967  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2968  of the entries in `axis`, which must be unique. If `keepdims` is true, the
2969  reduced dimensions are retained with length 1.
2970
2971  If `axis` is None, all dimensions are reduced, and a
2972  tensor with a single element is returned.
2973
2974  Usage example:
2975
2976    >>> x = tf.constant([5, 1, 2, 4])
2977    >>> tf.reduce_max(x)
2978    <tf.Tensor: shape=(), dtype=int32, numpy=5>
2979    >>> x = tf.constant([-5, -1, -2, -4])
2980    >>> tf.reduce_max(x)
2981    <tf.Tensor: shape=(), dtype=int32, numpy=-1>
2982    >>> x = tf.constant([4, float('nan')])
2983    >>> tf.reduce_max(x)
2984    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
2985    >>> x = tf.constant([float('nan'), float('nan')])
2986    >>> tf.reduce_max(x)
2987    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
2988    >>> x = tf.constant([float('-inf'), float('inf')])
2989    >>> tf.reduce_max(x)
2990    <tf.Tensor: shape=(), dtype=float32, numpy=inf>
2991
2992  See the numpy docs for `np.amax` and `np.nanmax` behavior.
2993
2994  Args:
2995    input_tensor: The tensor to reduce. Should have real numeric type.
2996    axis: The dimensions to reduce. If `None` (the default), reduces all
2997      dimensions. Must be in the range `[-rank(input_tensor),
2998      rank(input_tensor))`.
2999    keepdims: If true, retains reduced dimensions with length 1.
3000    name: A name for the operation (optional).
3001    reduction_indices: The old (deprecated) name for axis.
3002    keep_dims: Deprecated alias for `keepdims`.
3003
3004  Returns:
3005    The reduced tensor.
3006  """
3007  axis = deprecation.deprecated_argument_lookup("axis", axis,
3008                                                "reduction_indices",
3009                                                reduction_indices)
3010  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
3011                                                    "keep_dims", keep_dims)
3012  return reduce_max(input_tensor, axis, keepdims, name)
3013
3014
3015@tf_export("math.reduce_max", "reduce_max", v1=[])
3016@dispatch.add_dispatch_support
3017def reduce_max(input_tensor, axis=None, keepdims=False, name=None):
3018  """Computes `tf.math.maximum` of elements across dimensions of a tensor.
3019
3020  This is the reduction operation for the elementwise `tf.math.maximum` op.
3021
3022  Reduces `input_tensor` along the dimensions given in `axis`.
3023  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3024  of the entries in `axis`, which must be unique. If `keepdims` is true, the
3025  reduced dimensions are retained with length 1.
3026
3027  If `axis` is None, all dimensions are reduced, and a
3028  tensor with a single element is returned.
3029
3030  Usage example:
3031
3032    >>> x = tf.constant([5, 1, 2, 4])
3033    >>> tf.reduce_max(x)
3034    <tf.Tensor: shape=(), dtype=int32, numpy=5>
3035    >>> x = tf.constant([-5, -1, -2, -4])
3036    >>> tf.reduce_max(x)
3037    <tf.Tensor: shape=(), dtype=int32, numpy=-1>
3038    >>> x = tf.constant([4, float('nan')])
3039    >>> tf.reduce_max(x)
3040    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
3041    >>> x = tf.constant([float('nan'), float('nan')])
3042    >>> tf.reduce_max(x)
3043    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
3044    >>> x = tf.constant([float('-inf'), float('inf')])
3045    >>> tf.reduce_max(x)
3046    <tf.Tensor: shape=(), dtype=float32, numpy=inf>
3047
3048  See the numpy docs for `np.amax` and `np.nanmax` behavior.
3049
3050  Args:
3051    input_tensor: The tensor to reduce. Should have real numeric type.
3052    axis: The dimensions to reduce. If `None` (the default), reduces all
3053      dimensions. Must be in the range `[-rank(input_tensor),
3054      rank(input_tensor))`.
3055    keepdims: If true, retains reduced dimensions with length 1.
3056    name: A name for the operation (optional).
3057
3058  Returns:
3059    The reduced tensor.
3060  """
3061  return reduce_max_with_dims(input_tensor, axis, keepdims, name,
3062                              _ReductionDims(input_tensor, axis))
3063
3064
3065def reduce_max_with_dims(input_tensor,
3066                         axis=None,
3067                         keepdims=False,
3068                         name=None,
3069                         dims=None):
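  """Internal helper: `reduce_max` with a precomputed reduction `dims` tensor."""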
3070  keepdims = False if keepdims is None else bool(keepdims)
3071  return _may_reduce_to_scalar(
3072      keepdims, axis,
3073      gen_math_ops._max(input_tensor, dims, keepdims, name=name))
3074
3075
3076@tf_export(v1=["math.reduce_all", "reduce_all"])
3077@dispatch.add_dispatch_support
3078@deprecation.deprecated_args(None,
3079                             "keep_dims is deprecated, use keepdims instead",
3080                             "keep_dims")
3081def reduce_all_v1(input_tensor,
3082                  axis=None,
3083                  keepdims=None,
3084                  name=None,
3085                  reduction_indices=None,
3086                  keep_dims=None):
3087  """Computes `tf.math.logical_and` of elements across dimensions of a tensor.
3088
3089  This is the reduction operation for the elementwise `tf.math.logical_and` op.
3090
3091  Reduces `input_tensor` along the dimensions given in `axis`.
3092  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3093  of the entries in `axis`, which must be unique. If `keepdims` is true, the
3094  reduced dimensions are retained with length 1.
3095
3096  If `axis` is None, all dimensions are reduced, and a
3097  tensor with a single element is returned.
3098
3099  For example:
3100
3101    >>> x = tf.constant([[True,  True], [False, False]])
3102    >>> tf.math.reduce_all(x)
3103    <tf.Tensor: shape=(), dtype=bool, numpy=False>
3104    >>> tf.math.reduce_all(x, 0)
3105    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
3106    >>> tf.math.reduce_all(x, 1)
3107    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
3108
3109  Args:
3110    input_tensor: The boolean tensor to reduce.
3111    axis: The dimensions to reduce. If `None` (the default), reduces all
3112      dimensions. Must be in the range `[-rank(input_tensor),
3113      rank(input_tensor))`.
3114    keepdims: If true, retains reduced dimensions with length 1.
3115    name: A name for the operation (optional).
3116    reduction_indices: The old (deprecated) name for axis.
3117    keep_dims: Deprecated alias for `keepdims`.
3118
3119  Returns:
3120    The reduced tensor.
3121
3122  @compatibility(numpy)
3123  Equivalent to np.all
3124  @end_compatibility
3125  """
3126  axis = deprecation.deprecated_argument_lookup("axis", axis,
3127                                                "reduction_indices",
3128                                                reduction_indices)
3129  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
3130                                                    "keep_dims", keep_dims)
3131  return reduce_all(input_tensor, axis, keepdims, name)
3132
3133
3134@tf_export("math.reduce_all", "reduce_all", v1=[])
3135@dispatch.add_dispatch_support
3136def reduce_all(input_tensor, axis=None, keepdims=False, name=None):
3137  """Computes `tf.math.logical_and` of elements across dimensions of a tensor.
3138
3139  This is the reduction operation for the elementwise `tf.math.logical_and` op.
3140
3141  Reduces `input_tensor` along the dimensions given in `axis`.
3142  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3143  of the entries in `axis`, which must be unique. If `keepdims` is true, the
3144  reduced dimensions are retained with length 1.
3145
3146  If `axis` is None, all dimensions are reduced, and a
3147  tensor with a single element is returned.
3148
3149  For example:
3150
3151    >>> x = tf.constant([[True,  True], [False, False]])
3152    >>> tf.math.reduce_all(x)
3153    <tf.Tensor: shape=(), dtype=bool, numpy=False>
3154    >>> tf.math.reduce_all(x, 0)
3155    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
3156    >>> tf.math.reduce_all(x, 1)
3157    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
3158
3159  Args:
3160    input_tensor: The boolean tensor to reduce.
3161    axis: The dimensions to reduce. If `None` (the default), reduces all
3162      dimensions. Must be in the range `[-rank(input_tensor),
3163      rank(input_tensor))`.
3164    keepdims: If true, retains reduced dimensions with length 1.
3165    name: A name for the operation (optional).
3166
3167  Returns:
3168    The reduced tensor.
3169
3170  @compatibility(numpy)
3171  Equivalent to np.all
3172  @end_compatibility
3173  """
3174  keepdims = False if keepdims is None else bool(keepdims)
3175  return _may_reduce_to_scalar(
3176      keepdims, axis,
3177      gen_math_ops._all(
3178          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
3179          name=name))
3180
3181
3182@tf_export(v1=["math.reduce_any", "reduce_any"])
3183@dispatch.add_dispatch_support
3184@deprecation.deprecated_args(None,
3185                             "keep_dims is deprecated, use keepdims instead",
3186                             "keep_dims")
3187def reduce_any_v1(input_tensor,
3188                  axis=None,
3189                  keepdims=None,
3190                  name=None,
3191                  reduction_indices=None,
3192                  keep_dims=None):
3193  """Computes `tf.math.logical_or` of elements across dimensions of a tensor.
3194
3195  This is the reduction operation for the elementwise `tf.math.logical_or` op.
3196
3197  Reduces `input_tensor` along the dimensions given in `axis`.
3198  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3199  of the entries in `axis`, which must be unique. If `keepdims` is true, the
3200  reduced dimensions are retained with length 1.
3201
3202  If `axis` is None, all dimensions are reduced, and a
3203  tensor with a single element is returned.
3204
3205  For example:
3206
3207    >>> x = tf.constant([[True,  True], [False, False]])
3208    >>> tf.reduce_any(x)
3209    <tf.Tensor: shape=(), dtype=bool, numpy=True>
3210    >>> tf.reduce_any(x, 0)
3211    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True,  True])>
3212    >>> tf.reduce_any(x, 1)
3213    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
3214
3215  Args:
3216    input_tensor: The boolean tensor to reduce.
3217    axis: The dimensions to reduce. If `None` (the default), reduces all
3218      dimensions. Must be in the range `[-rank(input_tensor),
3219      rank(input_tensor))`.
3220    keepdims: If true, retains reduced dimensions with length 1.
3221    name: A name for the operation (optional).
3222    reduction_indices: The old (deprecated) name for axis.
3223    keep_dims: Deprecated alias for `keepdims`.
3224
3225  Returns:
3226    The reduced tensor.
3227
3228  @compatibility(numpy)
3229  Equivalent to np.any
3230  @end_compatibility
3231  """
3232  axis = deprecation.deprecated_argument_lookup("axis", axis,
3233                                                "reduction_indices",
3234                                                reduction_indices)
3235  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
3236                                                    "keep_dims", keep_dims)
3237  return reduce_any(input_tensor, axis, keepdims, name)
3238
3239
3240@tf_export("math.reduce_any", "reduce_any", v1=[])
3241@dispatch.add_dispatch_support
3242def reduce_any(input_tensor, axis=None, keepdims=False, name=None):
3243  """Computes `tf.math.logical_or` of elements across dimensions of a tensor.
3244
3245  This is the reduction operation for the elementwise `tf.math.logical_or` op.
3246
3247  Reduces `input_tensor` along the dimensions given in `axis`.
3248  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3249  of the entries in `axis`, which must be unique. If `keepdims` is true, the
3250  reduced dimensions are retained with length 1.
3251
3252  If `axis` is None, all dimensions are reduced, and a
3253  tensor with a single element is returned.
3254
3255  For example:
3256
3257    >>> x = tf.constant([[True,  True], [False, False]])
3258    >>> tf.reduce_any(x)
3259    <tf.Tensor: shape=(), dtype=bool, numpy=True>
3260    >>> tf.reduce_any(x, 0)
3261    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True,  True])>
3262    >>> tf.reduce_any(x, 1)
3263    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
3264
3265  Args:
3266    input_tensor: The boolean tensor to reduce.
3267    axis: The dimensions to reduce. If `None` (the default), reduces all
3268      dimensions. Must be in the range `[-rank(input_tensor),
3269      rank(input_tensor))`.
3270    keepdims: If true, retains reduced dimensions with length 1.
3271    name: A name for the operation (optional).
3272
3273  Returns:
3274    The reduced tensor.
3275
3276  @compatibility(numpy)
3277  Equivalent to np.any
3278  @end_compatibility
3279  """
3280  keepdims = False if keepdims is None else bool(keepdims)
3281  return _may_reduce_to_scalar(
3282      keepdims, axis,
3283      gen_math_ops._any(
3284          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
3285          name=name))
3286
3287
3288@tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"])
3289@dispatch.add_dispatch_support
3290@deprecation.deprecated_args(None,
3291                             "keep_dims is deprecated, use keepdims instead",
3292                             "keep_dims")
3293def reduce_logsumexp_v1(input_tensor,
3294                        axis=None,
3295                        keepdims=None,
3296                        name=None,
3297                        reduction_indices=None,
3298                        keep_dims=None):
3299  """Computes log(sum(exp(elements across dimensions of a tensor))).
3300
3301  Reduces `input_tensor` along the dimensions given in `axis`.
3302  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3303  of the entries in `axis`, which must be unique. If `keepdims` is true, the
3304  reduced dimensions are retained with length 1.
3305
3306  If `axis` has no entries, all dimensions are reduced, and a
3307  tensor with a single element is returned.
3308
3309  This function is more numerically stable than log(sum(exp(input))). It avoids
3310  overflows caused by taking the exp of large inputs and underflows caused by
3311  taking the log of small inputs.
3312
3313  For example:
3314
3315  ```python
3316  x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
3317  tf.reduce_logsumexp(x)  # log(6)
3318  tf.reduce_logsumexp(x, 0)  # [log(2), log(2), log(2)]
3319  tf.reduce_logsumexp(x, 1)  # [log(3), log(3)]
3320  tf.reduce_logsumexp(x, 1, keepdims=True)  # [[log(3)], [log(3)]]
3321  tf.reduce_logsumexp(x, [0, 1])  # log(6)
3322  ```
3323
3324  Args:
3325    input_tensor: The tensor to reduce. Should have numeric type.
3326    axis: The dimensions to reduce. If `None` (the default), reduces all
3327      dimensions. Must be in the range `[-rank(input_tensor),
3328      rank(input_tensor))`.
3329    keepdims: If true, retains reduced dimensions with length 1.
3330    name: A name for the operation (optional).
3331    reduction_indices: The old (deprecated) name for axis.
3332    keep_dims: Deprecated alias for `keepdims`.
3333
3334  Returns:
3335    The reduced tensor.
3336  """
3337  axis = deprecation.deprecated_argument_lookup("axis", axis,
3338                                                "reduction_indices",
3339                                                reduction_indices)
3340  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
3341                                                    "keep_dims", keep_dims)
3342  return reduce_logsumexp(input_tensor, axis, keepdims, name)
3343
3344
3345@tf_export("math.reduce_logsumexp", "reduce_logsumexp", v1=[])
3346@dispatch.add_dispatch_support
3347def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):
3348  """Computes log(sum(exp(elements across dimensions of a tensor))).
3349
3350  Reduces `input_tensor` along the dimensions given in `axis`.
3351  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3352  of the entries in `axis`, which must be unique. If `keepdims` is true, the
3353  reduced dimensions are retained with length 1.
3354
3355  If `axis` has no entries, all dimensions are reduced, and a
3356  tensor with a single element is returned.
3357
3358  This function is more numerically stable than log(sum(exp(input))). It avoids
3359  overflows caused by taking the exp of large inputs and underflows caused by
3360  taking the log of small inputs.
3361
3362  For example:
3363
3364  ```python
3365  x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
3366  tf.reduce_logsumexp(x)  # log(6)
3367  tf.reduce_logsumexp(x, 0)  # [log(2), log(2), log(2)]
3368  tf.reduce_logsumexp(x, 1)  # [log(3), log(3)]
3369  tf.reduce_logsumexp(x, 1, keepdims=True)  # [[log(3)], [log(3)]]
3370  tf.reduce_logsumexp(x, [0, 1])  # log(6)
3371  ```
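
  For instance, with large inputs the naive computation overflows while
  `reduce_logsumexp` does not (an illustrative sketch; the `inf` comes from
  `float32` overflow in `exp`):

  ```python
  x = tf.constant([1000., 1000., 1000.])
  tf.math.log(tf.reduce_sum(tf.exp(x)))  # inf, since exp(1000.) overflows
  tf.reduce_logsumexp(x)                 # ~1001.0986 == 1000 + log(3)
  ```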
3372
3373  Args:
3374    input_tensor: The tensor to reduce. Should have numeric type.
3375    axis: The dimensions to reduce. If `None` (the default), reduces all
3376      dimensions. Must be in the range `[-rank(input_tensor),
3377      rank(input_tensor))`.
3378    keepdims: If true, retains reduced dimensions with length 1.
3379    name: A name for the operation (optional).
3380
3381  Returns:
3382    The reduced tensor.
3383  """
  keepdims = False if keepdims is None else bool(keepdims)
3385  input_tensor = ops.convert_to_tensor(input_tensor)
3386  with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
3387    reduce_dim = _ReductionDims(input_tensor, axis)
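    # Log-sum-exp trick: log(sum(exp(x))) == log(sum(exp(x - m))) + m with
    # m = max(x), so `exp` never sees large positive arguments. Non-finite
    # maxima are replaced by zero below so inputs containing inf/-inf stay
    # well-defined.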
3388    raw_max = reduce_max_with_dims(
3389        input_tensor, axis=axis, keepdims=True, dims=reduce_dim)
3390    my_max = array_ops.stop_gradient(
3391        gen_math_ops.select(
3392            gen_math_ops.is_finite(raw_max), raw_max,
3393            gen_array_ops.zeros_like(raw_max)))
3394    result = gen_math_ops.log(
3395        reduce_sum_with_dims(
3396            gen_math_ops.exp(gen_math_ops.sub(input_tensor, my_max)),
3397            axis=axis,
3398            keepdims=keepdims,
3399            dims=reduce_dim))
3400    if not keepdims:
3401      my_max = array_ops.reshape(my_max, gen_array_ops.shape(result))
3402    result = _add_dispatch(result, my_max, name=name)
3403    return _may_reduce_to_scalar(keepdims, axis, result)
3404
3405
3406@tf_export("linalg.trace", v1=["linalg.trace", "trace"])
3407@dispatch.add_dispatch_support
3408@deprecation.deprecated_endpoints("trace")
3410def trace(x, name=None):
3411  """Compute the trace of a tensor `x`.
3412
  `trace(x)` returns the sum along the main diagonal of each inner-most matrix
  in `x`. If `x` is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then the
  output is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where
3416
3417  `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`
3418
3419  For example:
3420
3421  ```python
3422  x = tf.constant([[1, 2], [3, 4]])
3423  tf.linalg.trace(x)  # 5
3424
3425  x = tf.constant([[1, 2, 3],
3426                   [4, 5, 6],
3427                   [7, 8, 9]])
3428  tf.linalg.trace(x)  # 15
3429
3430  x = tf.constant([[[1, 2, 3],
3431                    [4, 5, 6],
3432                    [7, 8, 9]],
3433                   [[-1, -2, -3],
3434                    [-4, -5, -6],
3435                    [-7, -8, -9]]])
3436  tf.linalg.trace(x)  # [15, -15]
3437  ```
3438
3439  Args:
    x: A `Tensor` of rank >= 2.
3441    name: A name for the operation (optional).
3442
3443  Returns:
    The trace of the input tensor.
3445  """
3446  with ops.name_scope(name, "Trace", [x]) as name:
3447    x = ops.convert_to_tensor(x, name="x")
3448    return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
3449
3450
3451@tf_export("linalg.matmul", "matmul")
3452@dispatch.add_dispatch_support
3453def matmul(a,
3454           b,
3455           transpose_a=False,
3456           transpose_b=False,
3457           adjoint_a=False,
3458           adjoint_b=False,
3459           a_is_sparse=False,
3460           b_is_sparse=False,
3461           output_type=None,
3462           name=None):
3463  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
3464
3465  The inputs must, following any transpositions, be tensors of rank >= 2
3466  where the inner 2 dimensions specify valid matrix multiplication dimensions,
3467  and any further outer dimensions specify matching batch size.
3468
3469  Both matrices must be of the same type. The supported types are:
3470  `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`,
3471  `complex64`, `complex128`.
3472
  Either matrix can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flags to `True`. These are `False`
  by default.
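
  For example, transposing on the fly is equivalent to multiplying by an
  explicitly transposed matrix (a small illustrative sketch):

  ```python
  a = tf.constant([[1., 2.], [3., 4.]])
  b = tf.constant([[5., 6.], [7., 8.]])
  tf.matmul(a, b, transpose_a=True)  # [[26., 30.], [38., 44.]]
  tf.matmul(tf.transpose(a), b)      # same result
  ```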
3476
3477  If one or both of the matrices contain a lot of zeros, a more efficient
3478  multiplication algorithm can be used by setting the corresponding
3479  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
3480  This optimization is only available for plain matrices (rank-2 tensors) with
3481  datatypes `bfloat16` or `float32`.
3482
3483  A simple 2-D tensor matrix multiplication:
3484
3485  >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
3486  >>> a  # 2-D tensor
3487  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
3488  array([[1, 2, 3],
3489         [4, 5, 6]], dtype=int32)>
3490  >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
3491  >>> b  # 2-D tensor
3492  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
3493  array([[ 7,  8],
3494         [ 9, 10],
3495         [11, 12]], dtype=int32)>
3496  >>> c = tf.matmul(a, b)
3497  >>> c  # `a` * `b`
3498  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
3499  array([[ 58,  64],
3500         [139, 154]], dtype=int32)>
3501
3502  A batch matrix multiplication with batch shape [2]:
3503
3504  >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])
3505  >>> a  # 3-D tensor
3506  <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=
3507  array([[[ 1,  2,  3],
3508          [ 4,  5,  6]],
3509         [[ 7,  8,  9],
3510          [10, 11, 12]]], dtype=int32)>
3511  >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])
3512  >>> b  # 3-D tensor
3513  <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
3514  array([[[13, 14],
3515          [15, 16],
3516          [17, 18]],
3517         [[19, 20],
3518          [21, 22],
3519          [23, 24]]], dtype=int32)>
3520  >>> c = tf.matmul(a, b)
3521  >>> c  # `a` * `b`
3522  <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
3523  array([[[ 94, 100],
3524          [229, 244]],
3525         [[508, 532],
3526          [697, 730]]], dtype=int32)>
3527
  Since Python >= 3.5 the `@` operator is supported
  (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow,
  it simply calls the `tf.matmul()` function, so the following lines are
  equivalent:
3532
3533  >>> d = a @ b @ [[10], [11]]
3534  >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])
3535
3536  Args:
3537    a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,
3538      `complex64`, `complex128` and rank > 1.
3539    b: `tf.Tensor` with same type and rank as `a`.
3540    transpose_a: If `True`, `a` is transposed before multiplication.
3541    transpose_b: If `True`, `b` is transposed before multiplication.
3542    adjoint_a: If `True`, `a` is conjugated and transposed before
3543      multiplication.
3544    adjoint_b: If `True`, `b` is conjugated and transposed before
3545      multiplication.
3546    a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice, this
3547      **does not support `tf.sparse.SparseTensor`**, it just makes optimizations
3548      that assume most values in `a` are zero.
3549      See `tf.sparse.sparse_dense_matmul`
3550      for some support for `tf.sparse.SparseTensor` multiplication.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this
      **does not support `tf.sparse.SparseTensor`**, it just makes optimizations
      that assume most values in `b` are zero.
      See `tf.sparse.sparse_dense_matmul`
      for some support for `tf.sparse.SparseTensor` multiplication.
    output_type: The output datatype, if needed. Defaults to `None`, in which
      case the output type is the same as the input type. Currently only works
      when the input tensors are of type `(u)int8` and `output_type` is `int32`.
3559    name: Name for the operation (optional).
3560
3561  Returns:
3562    A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix
3563    is the product of the corresponding matrices in `a` and `b`, e.g. if all
3564    transpose or adjoint attributes are `False`:
3565
3566    `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,
3567    for all indices `i`, `j`.
3568
3569    Note: This is matrix product, not element-wise product.
3570
3571
3572  Raises:
3573    ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and
3574      `adjoint_b` are both set to `True`.
    TypeError: If `output_type` is specified but the types of `a`, `b`, and
      `output_type` are not `(u)int8`, `(u)int8`, and `int32`, respectively.
3577  """
3578
3579  with ops.name_scope(name, "MatMul", [a, b]) as name:
3580    if transpose_a and adjoint_a:
3581      raise ValueError(
3582          f"Only one of `transpose_a` and `adjoint_a` can be True. "
3583          f"Received `transpose_a`={transpose_a}, "
3584          f"`adjoint_a`={adjoint_a}.")
3585    if transpose_b and adjoint_b:
3586      raise ValueError(
3587          f"Only one of `transpose_b` and `adjoint_b` can be True. "
3588          f"Received `transpose_b`={transpose_b}, "
3589          f"`adjoint_b`={adjoint_b}.")
3590
3591    if context.executing_eagerly():
3592      if not isinstance(a, (ops.EagerTensor, _resource_variable_type)):
3593        a = ops.convert_to_tensor(a, name="a")
3594      if not isinstance(b, (ops.EagerTensor, _resource_variable_type)):
3595        b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")
3596    else:
3597      a = ops.convert_to_tensor(a, name="a")
3598      b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")
3599
3600    # TODO(apassos) remove _shape_tuple here when it is not needed.
3601    a_shape = a._shape_tuple()  # pylint: disable=protected-access
3602    b_shape = b._shape_tuple()  # pylint: disable=protected-access
3603
3604    output_may_have_non_empty_batch_shape = (
3605        (a_shape is None or len(a_shape) > 2) or
3606        (b_shape is None or len(b_shape) > 2))
3607
3608    # TODO(b/178749687): remove this boolean and all related branches once the
3609    # bridges are ready.
3610    # batch_matmul_v3 is for when input type is different from output type.
3611    use_batch_matmul_v3 = False
3612    if output_type and (output_type != a.dtype or output_type != b.dtype):
3613      use_batch_matmul_v3 = True
3614
3615    if (not a_is_sparse and
3616        not b_is_sparse) and output_may_have_non_empty_batch_shape:
3617      # BatchMatmul does not support transpose, so we conjugate the matrix and
3618      # use adjoint instead. Conj() is a noop for real matrices.
3619      if transpose_a:
3620        a = conj(a)
3621        adjoint_a = True
3622      if transpose_b:
3623        b = conj(b)
3624        adjoint_b = True
3625      if use_batch_matmul_v3:
3626        return gen_math_ops.batch_mat_mul_v3(
3627            a, b, adj_x=adjoint_a, adj_y=adjoint_b, Tout=output_type, name=name)
3628      else:
3629        return gen_math_ops.batch_mat_mul_v2(
3630            a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)
3631
3632    # Neither matmul nor sparse_matmul support adjoint, so we conjugate
3633    # the matrix and use transpose instead. Conj() is a noop for real
3634    # matrices.
3635    if adjoint_a:
3636      a = conj(a)
3637      transpose_a = True
3638    if adjoint_b:
3639      b = conj(b)
3640      transpose_b = True
3641
3642    use_sparse_matmul = False
3643    if a_is_sparse or b_is_sparse:
3644      sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
3645      use_sparse_matmul = (
3646          a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)
3647    if (((a.dtype == dtypes.bfloat16 and
3648          b.dtype not in (dtypes.int8, dtypes.uint8)) or
3649         (b.dtype == dtypes.bfloat16 and
3650          a.dtype not in (dtypes.int8, dtypes.uint8))) and a.dtype != b.dtype):
      # matmul currently doesn't handle mixed-precision inputs other than
      # bfloat16 * int8, which is supported in BatchMatMulV3 (consistent with
      # the bfloat16/int8 check in the condition above).
3653      use_sparse_matmul = True
3654    if use_sparse_matmul:
3655      ret = sparse_matmul(
3656          a,
3657          b,
3658          transpose_a=transpose_a,
3659          transpose_b=transpose_b,
3660          a_is_sparse=a_is_sparse,
3661          b_is_sparse=b_is_sparse,
3662          name=name)
      # sparse_matmul always returns float32, even with bfloat16 inputs. This
      # prevents us from configuring bfloat16 training. Casting to bfloat16
      # also matches non-sparse matmul behavior better.
3666      if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:
3667        ret = cast(ret, dtypes.bfloat16)
3668      return ret
3669    else:
3670      if use_batch_matmul_v3:
3671        adjoint_a = adjoint_a or transpose_a
3672        adjoint_b = adjoint_b or transpose_b
3673        return gen_math_ops.batch_mat_mul_v3(
3674            a, b, adj_x=adjoint_a, adj_y=adjoint_b, Tout=output_type, name=name)
3675      else:
3676        return gen_math_ops.mat_mul(
3677            a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
3678
3679
3680@tf_export("linalg.matvec")
3681@dispatch.add_dispatch_support
3682def matvec(a,
3683           b,
3684           transpose_a=False,
3685           adjoint_a=False,
3686           a_is_sparse=False,
3687           b_is_sparse=False,
3688           name=None):
3689  """Multiplies matrix `a` by vector `b`, producing `a` * `b`.
3690
3691  The matrix `a` must, following any transpositions, be a tensor of rank >= 2,
3692  with `shape(a)[-1] == shape(b)[-1]`, and `shape(a)[:-2]` able to broadcast
3693  with `shape(b)[:-1]`.
3694
3695  Both `a` and `b` must be of the same type. The supported types are:
3696  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
3697
3698  Matrix `a` can be transposed or adjointed (conjugated and transposed) on
3699  the fly by setting one of the corresponding flag to `True`. These are `False`
3700  by default.
3701
3702  If one or both of the inputs contain a lot of zeros, a more efficient
3703  multiplication algorithm can be used by setting the corresponding
3704  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
3705  This optimization is only available for plain matrices/vectors (rank-2/1
3706  tensors) with datatypes `bfloat16` or `float32`.
3707
3708  For example:
3709
3710  ```python
3711  # 2-D tensor `a`
3712  # [[1, 2, 3],
3713  #  [4, 5, 6]]
3714  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
3715
3716  # 1-D tensor `b`
3717  # [7, 9, 11]
3718  b = tf.constant([7, 9, 11], shape=[3])
3719
3720  # `a` * `b`
3721  # [ 58,  64]
3722  c = tf.linalg.matvec(a, b)
3723
3724
3725  # 3-D tensor `a`
3726  # [[[ 1,  2,  3],
3727  #   [ 4,  5,  6]],
3728  #  [[ 7,  8,  9],
3729  #   [10, 11, 12]]]
3730  a = tf.constant(np.arange(1, 13, dtype=np.int32),
3731                  shape=[2, 2, 3])
3732
3733  # 2-D tensor `b`
3734  # [[13, 14, 15],
3735  #  [16, 17, 18]]
3736  b = tf.constant(np.arange(13, 19, dtype=np.int32),
3737                  shape=[2, 3])
3738
3739  # `a` * `b`
3740  # [[ 86, 212],
3741  #  [410, 563]]
3742  c = tf.linalg.matvec(a, b)
3743  ```
3744
3745  Args:
3746    a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
3747      `complex128` and rank > 1.
3748    b: `Tensor` with same type as `a` and compatible dimensions.
3749    transpose_a: If `True`, `a` is transposed before multiplication.
3750    adjoint_a: If `True`, `a` is conjugated and transposed before
3751      multiplication.
3752    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
3753    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
3754    name: Name for the operation (optional).
3755
3756  Returns:
3757    A `Tensor` of the same type as `a` and `b` where each inner-most vector is
3758    the product of the corresponding matrices in `a` and vectors in `b`, e.g. if
3759    all transpose or adjoint attributes are `False`:
3760
3761    `output`[..., i] = sum_k (`a`[..., i, k] * `b`[..., k]), for all indices i.
3762
3763    Note: This is matrix-vector product, not element-wise product.
3764
3765
3766  Raises:
3767    ValueError: If transpose_a and adjoint_a are both set to True.
3768  """
3769  with ops.name_scope(name, "MatVec", [a, b]) as name:
3770    output = matmul(
3771        a,
3772        array_ops.expand_dims(b, axis=-1),
3773        transpose_a=transpose_a,
3774        adjoint_a=adjoint_a,
3775        a_is_sparse=a_is_sparse,
3776        b_is_sparse=b_is_sparse)
3777    return array_ops.squeeze(output, axis=-1)
3778
3779
3780# TODO(b/178650720): Also support numpy-style type promotion in freestanding TF
3781#   functions (e.g. tf.add).
3782def matmul_wrapper(a, b, name=None):  # pylint: disable=missing-function-docstring
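  # With numpy-style type promotion enabled, defer to the Tensor `_matmul`
  # method so the promoted result dtype is used; otherwise call `matmul`.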
3783  if ops._numpy_style_type_promotion:
3784    return a._matmul(b)
3785  return matmul(a, b, name=name)
3786matmul_wrapper.__doc__ = matmul.__doc__
3787_OverrideBinaryOperatorHelper(matmul_wrapper, "matmul")
3788
3789sparse_matmul = deprecation.deprecated(None, "Use `tf.linalg.matmul` instead")(
3790    gen_math_ops.sparse_mat_mul)
3791tf_export(v1=["sparse_matmul"])(sparse_matmul)
3793
3794
3795@ops.RegisterStatistics("MatMul", "flops")
3796def _calc_mat_mul_flops(graph, node):
3797  """Calculates the compute resources needed for MatMul."""
3798  transpose_a = node.attr["transpose_a"].b
3799  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
3800  a_shape.assert_is_fully_defined()
3801  if transpose_a:
3802    k = int(a_shape[0])
3803  else:
3804    k = int(a_shape[1])
3805  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
3806  output_shape.assert_is_fully_defined()
3807  output_count = np.prod(output_shape.as_list())
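  # Each output element takes k multiplies and k adds, i.e. 2 * k flops.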
3808  return ops.OpStats("flops", (k * output_count * 2))
3809
3810
3811@ops.RegisterStatistics("BatchMatMul", "flops")
3812@ops.RegisterStatistics("BatchMatMulV2", "flops")
3813@ops.RegisterStatistics("BatchMatMulV3", "flops")
3814def _calc_batch_mat_mul_flops(graph, node):
3815  """Calculates the compute resources needed for BatchMatMul."""
3816  transpose_a = node.attr["transpose_a"].b
3817  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
3818  a_shape.assert_is_fully_defined()
3819  if transpose_a:
3820    k = int(a_shape[-2])
3821  else:
3822    k = int(a_shape[-1])
3823  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
3824  output_shape.assert_is_fully_defined()
3825  output_count = np.prod(output_shape.as_list())
3826  return ops.OpStats("flops", (k * output_count * 2))
3827
3828
3829def _as_indexed_slices(x, optimize=True):
3830  """Convert 'x' to IndexedSlices.
3831
3832  Convert a dense Tensor to a block-sparse IndexedSlices.
3833
3834  Args:
3835    x: Either a Tensor object, or an IndexedSlices object.
3836    optimize: if true, attempt to optimize the conversion of 'x'.
3837
3838  Returns:
3839    An IndexedSlices object.
3840
3841  Raises:
3842    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
3843  """
3844  # TODO(touts): op_scope
3845  if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
3846    raise TypeError(f"Not a Tensor or IndexedSlices: {type(x)}.")
3847  if isinstance(x, ops.IndexedSlices):
3848    return x
3849  x_shape = array_ops.shape_internal(x, optimize=optimize)
3850  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
3851
3852
3853def _as_indexed_slices_list(inputs, optimize=True):
3854  """Convert all elements of 'inputs' to IndexedSlices.
3855
3856  Additionally, homogenize the types of all the indices to
3857  either int32 or int64.
3858
3859  Args:
3860    inputs: List containing either Tensor or IndexedSlices objects.
3861    optimize: if true, attempt to optimize the conversion of each input.
3862
3863  Returns:
3864    A list of IndexedSlices objects.
3865
3866  Raises:
3867    TypeError: If 'inputs' is not a list or a tuple.
3868  """
3869  if not isinstance(inputs, (list, tuple)):
3870    raise TypeError(f"Expected a list or tuple, not {type(inputs)}.")
3871  outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
3872  with_int32_index = [
3873      o.indices for o in outputs if o.indices.dtype == dtypes.int32
3874  ]
3875  if not with_int32_index or len(with_int32_index) == len(outputs):
3876    return outputs
3877  casted_outputs = []
3878  for o in outputs:
3879    if o.indices.dtype == dtypes.int32:
3880      casted_outputs.append(
3881          ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
3882                            o.dense_shape))
3883    else:
3884      casted_outputs.append(o)
3885  return casted_outputs
3886
3887
3888@tf_export("math.add", "add")
3889@dispatch.add_dispatch_support
3890def add(x, y, name=None):
3891  """Returns x + y element-wise.
3892
3893  Example usages below.
3894
3895  Add a scalar and a list:
3896
3897  >>> x = [1, 2, 3, 4, 5]
3898  >>> y = 1
3899  >>> tf.add(x, y)
3900  <tf.Tensor: shape=(5,), dtype=int32, numpy=array([2, 3, 4, 5, 6],
3901  dtype=int32)>
3902
  Note that the binary `+` operator can be used instead:
3904
3905  >>> x = tf.convert_to_tensor([1, 2, 3, 4, 5])
3906  >>> y = tf.convert_to_tensor(1)
3907  >>> x + y
3908  <tf.Tensor: shape=(5,), dtype=int32, numpy=array([2, 3, 4, 5, 6],
3909  dtype=int32)>
3910
3911  Add a tensor and a list of same shape:
3912
3913  >>> x = [1, 2, 3, 4, 5]
3914  >>> y = tf.constant([1, 2, 3, 4, 5])
3915  >>> tf.add(x, y)
3916  <tf.Tensor: shape=(5,), dtype=int32,
3917  numpy=array([ 2,  4,  6,  8, 10], dtype=int32)>
3918
  **Warning**: If one of the inputs (`x` or `y`) is a tensor and the other is a
  non-tensor, the non-tensor input will be cast to the data type of the tensor
  input. This can potentially cause unwanted overflow or underflow.
3923
3924  For example,
3925
3926  >>> x = tf.constant([1, 2], dtype=tf.int8)
3927  >>> y = [2**7 + 1, 2**7 + 2]
3928  >>> tf.add(x, y)
3929  <tf.Tensor: shape=(2,), dtype=int8, numpy=array([-126, -124], dtype=int8)>
3930
3931  When adding two input values of different shapes, `Add` follows NumPy
3932  broadcasting rules. The two input array shapes are compared element-wise.
3933  Starting with the trailing dimensions, the two dimensions either have to be
3934  equal or one of them needs to be `1`.
3935
3936  For example,
3937
3938  >>> x = np.ones(6).reshape(1, 2, 1, 3)
3939  >>> y = np.ones(6).reshape(2, 1, 3, 1)
3940  >>> tf.add(x, y).shape.as_list()
3941  [2, 2, 3, 3]
3942
  Another example, with two arrays of different rank:
3944
3945  >>> x = np.ones([1, 2, 1, 4])
3946  >>> y = np.ones([3, 4])
3947  >>> tf.add(x, y).shape.as_list()
3948  [1, 2, 3, 4]
3949
  The reduction version of this elementwise operation is `tf.math.reduce_sum`.
3951
3952  Args:
3953    x: A `tf.Tensor`. Must be one of the following types: bfloat16, half,
3954      float32, float64, uint8, int8, int16, int32, int64, complex64, complex128,
3955      string.
3956    y: A `tf.Tensor`. Must have the same type as x.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` with the same type as `x`.
  """
3959  with ops.name_scope(name, "Add", [x]) as name:
3960    x = ops.convert_to_tensor(x, name="x")
3961    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
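    # AddV2 has no string kernel; the original Add op handles string inputs,
    # hence the branch below.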
3962    if x.dtype == dtypes.string:
3963      return gen_math_ops.add(x, y, name=name)
3964    else:
3965      return gen_math_ops.add_v2(x, y, name=name)
3966
3967
3968@tf_export("math.add_n", "add_n")
3969@dispatch.add_dispatch_support
3970def add_n(inputs, name=None):
3971  """Adds all input tensors element-wise.
3972
3973  `tf.math.add_n` performs the same operation as `tf.math.accumulate_n`, but it
3974  waits for all of its inputs to be ready before beginning to sum.
3975  This buffering can result in higher memory consumption when inputs are ready
3976  at different times, since the minimum temporary storage required is
3977  proportional to the input size rather than the output size.
3978
3979  This op does not [broadcast](
3980  https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html)
3981  its inputs. If you need broadcasting, use `tf.math.add` (or the `+` operator)
3982  instead.
3983
3984  For example:
3985
3986  >>> a = tf.constant([[3, 5], [4, 8]])
3987  >>> b = tf.constant([[1, 6], [2, 9]])
3988  >>> tf.math.add_n([a, b, a])
3989  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
3990  array([[ 7, 16],
3991         [10, 25]], dtype=int32)>
3992
3993  Args:
3994    inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with the
3995      same shape and type. `tf.IndexedSlices` objects will be converted into
3996      dense tensors prior to adding.
3997    name: A name for the operation (optional).
3998
3999  Returns:
4000    A `tf.Tensor` of the same shape and type as the elements of `inputs`.
4001
4002  Raises:
4003    ValueError: If `inputs` don't all have same shape and dtype or the shape
4004    cannot be inferred.
4005  """
4006  if not inputs or not isinstance(inputs, collections_abc.Iterable):
4007    raise ValueError("Inputs must be an iterable of at least one "
4008                     "Tensor/IndexedSlices with the same dtype and shape.")
4009  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
4010  if not all(isinstance(x, (ops.Tensor, ops.IndexedSlices)) for x in inputs):
4011    raise ValueError("Inputs must be an iterable of at least one "
4012                     "Tensor/IndexedSlices with the same dtype and shape.")
4013
4014  if len(inputs) == 1:
4015    if isinstance(inputs[0], ops.IndexedSlices):
4016      values = ops.convert_to_tensor(inputs[0])
4017    else:
4018      values = inputs[0]
4019    if name:
4020      return array_ops.identity(values, name=name)
4021    return values
4022  return gen_math_ops.add_n(inputs, name=name)
4023
4024
4025@tf_export("math.accumulate_n", v1=["math.accumulate_n", "accumulate_n"])
4026@dispatch.add_dispatch_support
4027@deprecation.deprecated_endpoints("accumulate_n")
4028def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
4029  """Returns the element-wise sum of a list of tensors.
4030
4031  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
4032  otherwise, these are inferred.
4033
4034  `accumulate_n` performs the same operation as `tf.math.add_n`.
4035
4036  For example:
4037
4038  ```python
4039  a = tf.constant([[1, 2], [3, 4]])
4040  b = tf.constant([[5, 0], [0, 6]])
4041  tf.math.accumulate_n([a, b, a])  # [[7, 4], [6, 14]]
4042
4043  # Explicitly pass shape and type
4044  tf.math.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
4045                                                                 # [[7,  4],
4046                                                                 #  [6, 14]]
4047  ```
4048
4049  Args:
4050    inputs: A list of `Tensor` objects, each with same shape and type.
4051    shape: Expected shape of elements of `inputs` (optional). Also controls the
4052      output shape of this op, which may affect type inference in other ops. A
4053      value of `None` means "infer the input shape from the shapes in `inputs`".
4054    tensor_dtype: Expected data type of `inputs` (optional). A value of `None`
4055      means "infer the input dtype from `inputs[0]`".
4056    name: A name for the operation (optional).
4057
4058  Returns:
4059    A `Tensor` of same shape and type as the elements of `inputs`.
4060
4061  Raises:
4062    ValueError: If `inputs` don't all have same shape and dtype or the shape
4063    cannot be inferred.
4064  """
4065
4066  def _input_error():
4067    return ValueError("inputs must be a list of at least one Tensor with the "
4068                      "same dtype and shape")
4069
4070  if not inputs or not isinstance(inputs, (list, tuple)):
4071    raise _input_error()
4072  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
4073  if not all(isinstance(x, ops.Tensor) for x in inputs):
4074    raise _input_error()
4075  if not all(x.dtype == inputs[0].dtype for x in inputs):
4076    raise _input_error()
4077  if shape is not None:
4078    shape = tensor_shape.as_shape(shape)
4079  else:
4080    shape = tensor_shape.unknown_shape()
4081  for input_tensor in inputs:
4082    if isinstance(input_tensor, ops.Tensor):
4083      shape = shape.merge_with(input_tensor.get_shape())
4084
4085  # tensor_dtype is for safety only; operator's output type computed in C++
4086  if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:
4087    raise TypeError(
4088        f"The `tensor_dtype` argument is {tensor_dtype}, but `input` is of type "
4089        f"{inputs[0].dtype}. These must be equal. Try casting the input to the "
4090        f"desired type.")
4091
4092  if len(inputs) == 1 and name is None:
4093    return inputs[0]
4094  elif len(inputs) == 1 and name is not None:
4095    return array_ops.identity(inputs[0], name=name)
4096  return add_n(inputs, name=name)
4097
4098
4099@ops.RegisterGradient("AccumulateNV2")
4100def _accumulate_n_grad(op, grad):
4101  """Same as gradient for AddN. Copies the gradient to all inputs."""
4102  # Not broadcasting.
4103  return [grad] * len(op.inputs)
4104
4105
4106@tf_export("math.sigmoid", "nn.sigmoid", "sigmoid")
4107@dispatch.add_dispatch_support
4108def sigmoid(x, name=None):
4109  r"""Computes sigmoid of `x` element-wise.
4110
  Computed as $\mathrm{sigmoid}(x) = y = 1 / (1 + \exp(-x))$.
4112
4113  For $x \in (-\infty, \infty)$, $\mathrm{sigmoid}(x) \in (0, 1)$.
4114
4115  Example Usage:
4116
  If a positive number is large, then its sigmoid will approach 1 since the
  formula becomes `y = <large_num> / (1 + <large_num>)`
4119
4120  >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
4121  >>> tf.math.sigmoid(x)
4122  <tf.Tensor: shape=(4,), dtype=float32,
4123  numpy=array([0.5      , 0.7310586, 1.       , 1.       ], dtype=float32)>
4124
  If a negative number is large in magnitude, its sigmoid will approach 0 since
  the formula becomes `y = 1 / (1 + <large_num>)`
4127
4128  >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
4129  >>> tf.math.sigmoid(x)
4130  <tf.Tensor: shape=(4,), dtype=float32, numpy=
4131  array([0.0000000e+00, 1.9287499e-22, 2.6894143e-01, 0.5],
4132        dtype=float32)>
4133
4134  Args:
4135    x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or
4136      `complex128`.
4137    name: A name for the operation (optional).
4138
4139  Returns:
4140    A Tensor with the same type as `x`.
4141
4142  Usage Example:
4143
4144  >>> x = tf.constant([-128.0, 0.0, 128.0], dtype=tf.float32)
4145  >>> tf.sigmoid(x)
4146  <tf.Tensor: shape=(3,), dtype=float32,
4147  numpy=array([0. , 0.5, 1. ], dtype=float32)>
4148
4149  @compatibility(scipy)
4150  Equivalent to scipy.special.expit
4151  @end_compatibility
4152  """
4153  with ops.name_scope(name, "Sigmoid", [x]) as name:
4154    x = ops.convert_to_tensor(x, name="x")
4155    return gen_math_ops.sigmoid(x, name=name)
4156
4157
4158@tf_export("math.log_sigmoid", v1=["math.log_sigmoid", "log_sigmoid"])
4159@dispatch.add_dispatch_support
4160@deprecation.deprecated_endpoints("log_sigmoid")
4161def log_sigmoid(x, name=None):
4162  """Computes log sigmoid of `x` element-wise.
4163
4164  Specifically, `y = log(1 / (1 + exp(-x)))`.  For numerical stability,
4165  we use `y = -tf.nn.softplus(-x)`.
4166
4167  Args:
4168    x: A Tensor with type `float32` or `float64`.
4169    name: A name for the operation (optional).
4170
4171  Returns:
4172    A Tensor with the same type as `x`.
4173
4174  Usage Example:
4175
  If a positive number is large, then its log_sigmoid will approach 0 since
  the formula becomes `y = log( <large_num> / (1 + <large_num>) )`, which
  approaches `log(1)`, which is 0.
4179
4180  >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
4181  >>> tf.math.log_sigmoid(x)
4182  <tf.Tensor: shape=(4,), dtype=float32, numpy=
4183  array([-6.9314718e-01, -3.1326169e-01, -1.9287499e-22, -0.0000000e+00],
4184        dtype=float32)>
4185
  If a negative number is large in magnitude, its log_sigmoid will approach
  the number itself. Here `<large_num>` is `exp(-x)`, and the formula becomes
  `y = log( 1 / (1 + <large_num>) )`, which is `-log(1 + <large_num>)`,
  approximately `-log(<large_num>) = x`, i.e. the number itself.
4190
4191  >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
4192  >>> tf.math.log_sigmoid(x)
4193  <tf.Tensor: shape=(4,), dtype=float32, numpy=
4194  array([-100.       ,  -50.       ,   -1.3132616,   -0.6931472],
4195        dtype=float32)>
4196  """
4197  with ops.name_scope(name, "LogSigmoid", [x]) as name:
4198    x = ops.convert_to_tensor(x, name="x")
4199    return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name)  # pylint: disable=invalid-unary-operand-type
4200
4201
4202@tf_export("math.cumsum", "cumsum")
4203@dispatch.add_dispatch_support
4204def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
4205  """Compute the cumulative sum of the tensor `x` along `axis`.
4206
4207  By default, this op performs an inclusive cumsum, which means that the first
  element of the input is identical to the first element of the output.
  For example:
4210
4211  >>> # tf.cumsum([a, b, c])   # [a, a + b, a + b + c]
4212  >>> x = tf.constant([2, 4, 6, 8])
4213  >>> tf.cumsum(x)
4214  <tf.Tensor: shape=(4,), dtype=int32,
4215  numpy=array([ 2,  6, 12, 20], dtype=int32)>
4216
4217  >>> # using varying `axis` values
  >>> y = tf.constant([[2, 4, 6, 8], [1, 3, 5, 7]])
4219  >>> tf.cumsum(y, axis=0)
4220  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
4221  array([[ 2,  4,  6,  8],
4222         [ 3,  7, 11, 15]], dtype=int32)>
4223  >>> tf.cumsum(y, axis=1)
4224  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
4225  array([[ 2,  6, 12, 20],
4226         [ 1,  4,  9, 16]], dtype=int32)>
4227
4228  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
4229  instead:
4230
  >>> # tf.cumsum([a, b, c], exclusive=True)  # [0, a, a + b]
4232  >>> x = tf.constant([2, 4, 6, 8])
4233  >>> tf.cumsum(x, exclusive=True)
4234  <tf.Tensor: shape=(4,), dtype=int32,
4235  numpy=array([ 0,  2,  6, 12], dtype=int32)>
4236
4237  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
4238  opposite direction:
4239
4240  >>> # tf.cumsum([a, b, c], reverse=True)  # [a + b + c, b + c, c]
4241  >>> x = tf.constant([2, 4, 6, 8])
4242  >>> tf.cumsum(x, reverse=True)
4243  <tf.Tensor: shape=(4,), dtype=int32,
4244  numpy=array([20, 18, 14,  8], dtype=int32)>
4245
4246  This is more efficient than using separate `tf.reverse` ops.
4247  The `reverse` and `exclusive` kwargs can also be combined:
4248
4249  >>> # tf.cumsum([a, b, c], exclusive=True, reverse=True)  # [b + c, c, 0]
4250  >>> x = tf.constant([2, 4, 6, 8])
4251  >>> tf.cumsum(x, exclusive=True, reverse=True)
4252  <tf.Tensor: shape=(4,), dtype=int32,
4253  numpy=array([18, 14,  8,  0], dtype=int32)>
4254
4255  Args:
4256    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
4257      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
4258      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
4259    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
4260      `[-rank(x), rank(x))`.
4261    exclusive: If `True`, perform exclusive cumsum.
4262    reverse: A `bool` (default: False).
4263    name: A name for the operation (optional).
4264
4265  Returns:
4266    A `Tensor`. Has the same type as `x`.
4267  """
4268  with ops.name_scope(name, "Cumsum", [x]) as name:
4269    x = ops.convert_to_tensor(x, name="x")
4270    return gen_math_ops.cumsum(
4271        x, axis, exclusive=exclusive, reverse=reverse, name=name)
4272
4273
4274@tf_export("math.cumprod", v1=["math.cumprod", "cumprod"])
4275@dispatch.add_dispatch_support
4276@deprecation.deprecated_endpoints("cumprod")
4277def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
4278  """Compute the cumulative product of the tensor `x` along `axis`.
4279
4280  By default, this op performs an inclusive cumprod, which means that the
4281  first element of the input is identical to the first element of the output:
4282
4283  ```python
4284  tf.math.cumprod([a, b, c])  # [a, a * b, a * b * c]
4285  ```
4286
  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
  performed instead:
4290
4291  ```python
4292  tf.math.cumprod([a, b, c], exclusive=True)  # [1, a, a * b]
4293  ```
4294
4295  By setting the `reverse` kwarg to `True`, the cumprod is performed in the
4296  opposite direction:
4297
4298  ```python
4299  tf.math.cumprod([a, b, c], reverse=True)  # [a * b * c, b * c, c]
4300  ```
4301
4302  This is more efficient than using separate `tf.reverse` ops.
4303  The `reverse` and `exclusive` kwargs can also be combined:
4304
4305  ```python
4306  tf.math.cumprod([a, b, c], exclusive=True, reverse=True)  # [b * c, c, 1]
4307  ```
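
  A small numeric example of the default (inclusive) form:

  ```python
  x = tf.constant([2, 4, 6])
  tf.math.cumprod(x)  # [2, 8, 48]
  ```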
4308
4309  Args:
4310    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
4311      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
4312      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
4313    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
4314      `[-rank(x), rank(x))`.
4315    exclusive: If `True`, perform exclusive cumprod.
4316    reverse: A `bool` (default: False).
4317    name: A name for the operation (optional).
4318
4319  Returns:
4320    A `Tensor`. Has the same type as `x`.
4321  """
4322  with ops.name_scope(name, "Cumprod", [x]) as name:
4323    x = ops.convert_to_tensor(x, name="x")
4324    return gen_math_ops.cumprod(
4325        x, axis, exclusive=exclusive, reverse=reverse, name=name)
4326
4327
4328@tf_export("math.cumulative_logsumexp", v1=["math.cumulative_logsumexp"])
4329@dispatch.add_dispatch_support
4330def cumulative_logsumexp(x, axis=0, exclusive=False, reverse=False, name=None):
4331  """Compute the cumulative log-sum-exp of the tensor `x` along `axis`.
4332
4333  By default, this op performs an inclusive cumulative log-sum-exp, which means
4334  that the first element of the input is identical to the first element of
4335  the output.
4336
  This operation is significantly more numerically stable than the equivalent
  TensorFlow expression `tf.math.log(tf.math.cumsum(tf.math.exp(x)))`, although
  it computes the same result given infinite numerical precision. However, note
  that in some cases, it may be less stable than `tf.math.reduce_logsumexp`
  for a given element, as it applies the "log-sum-exp trick" in a different
  way.
4343
4344  More precisely, where `tf.math.reduce_logsumexp` uses the following trick:
4345
4346  ```
4347  log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x)
4348  ```
4349
4350  it cannot be directly used here as there is no fast way of applying it
4351  to each prefix `x[:i]`. Instead, this function implements a prefix
4352  scan using pairwise log-add-exp, which is a commutative and associative
4353  (up to floating point precision) operator:
4354
4355  ```
4356  log_add_exp(x, y) = log(exp(x) + exp(y))
4357                    = log(1 + exp(min(x, y) - max(x, y))) + max(x, y)
4358  ```
4359
4360  However, reducing using the above operator leads to a different computation
4361  tree (logs are taken repeatedly instead of only at the end), and the maximum
4362  is only computed pairwise instead of over the entire prefix. In general, this
4363  leads to a different and slightly less precise computation.
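
  An illustrative sketch of the stability difference (in `float32`, `exp(100.)`
  overflows to `inf`):

  ```python
  x = tf.constant([100.0, 100.0])
  tf.math.log(tf.math.cumsum(tf.math.exp(x)))  # [inf, inf]
  tf.math.cumulative_logsumexp(x)              # [100.0, ~100.69315]
  ```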

  Args:
    x: A `Tensor`. Must be one of the following types: `float16`, `float32`,
      `float64`.
    axis: A `Tensor` of type `int32` or `int64` (default: 0). Must be in the
      range `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumulative log-sum-exp.
    reverse: If `True`, performs the cumulative log-sum-exp in the reverse
      direction.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same shape and type as `x`.
  """
  with ops.name_scope(name, "CumulativeLogsumexp", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumulative_logsumexp(
        x, axis, exclusive=exclusive, reverse=reverse, name=name)


@tf_export("math.conj", v1=["math.conj", "conj"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("conj")
def conj(x, name=None):
  r"""Returns the complex conjugate of a complex number.

  Given a tensor `x` of complex numbers, this operation returns a tensor of
  complex numbers that are the complex conjugate of each element in `x`. The
  complex numbers in `x` must be of the form \\(a + bj\\), where `a` is the
  real part and `b` is the imaginary part.

  The complex conjugate returned by this operation is of the form \\(a - bj\\).

  For example:

  >>> x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  >>> tf.math.conj(x)
  <tf.Tensor: shape=(2,), dtype=complex128,
  numpy=array([-2.25-4.75j,  3.25-5.75j])>

  If `x` is real, it is returned unchanged.

  For example:

  >>> x = tf.constant([-2.25, 3.25])
  >>> tf.math.conj(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([-2.25,  3.25], dtype=float32)>

  Args:
    x: `Tensor` to conjugate.  Must have numeric or variant type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that is the conjugate of `x` (with the same type).

  Raises:
    TypeError: If `x` is not a numeric tensor.

  @compatibility(numpy)
  Equivalent to numpy.conj.
  @end_compatibility
  """
  if isinstance(x, ops.Tensor):
    dt = x.dtype
    if dt.is_floating or dt.is_integer:
      return x
  with ops.name_scope(name, "Conj", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_complex or x.dtype == dtypes.variant:
      return gen_math_ops.conj(x, name=name)
    elif x.dtype.is_floating or x.dtype.is_integer:
      return x
    else:
      raise TypeError(
          f"Expected numeric or variant tensor, got dtype {x.dtype!r}.")


def reduced_shape(input_shape, axes):
  """Helper function for reduction ops.

  Args:
    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
    axes: 1-D Tensor, the reduction axes.

  Returns:
    A 1-D Tensor, the output shape as if keepdims were set to True.
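
  For example, a minimal sketch (mirroring the worked example in the body
  below):

  ```python
  # reduced_shape([2, 3, 5, 7], axes=[1, 2])  ==>  [2, 1, 1, 7]
  ```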
4451  """
4452  # TODO(allenl): Refactor `reduced_shape` to take the tensor corresponding to
4453  # `input_shape` rather than `tf.shape` of it. Then we can check if the shape
4454  # is fully defined here, which may be faster executing eagerly than running
4455  # `tf.shape` and then fetching its constant value.
4456  constant_input_shape = tensor_util.constant_value(input_shape)
4457  if constant_input_shape is not None:
4458    constant_axes = tensor_util.constant_value(axes)
4459    if constant_axes is not None:
4460      constant_axes = np.array(constant_axes, dtype=np.int32)
4461      constant_input_shape = np.array(constant_input_shape, dtype=np.int32)
4462      constant_input_shape[constant_axes] = 1
4463      return constant_input_shape
4464
4465  # Example:
4466  # cast needed for SparseTensor reductions
4467  input_shape = cast(input_shape, dtypes.int32)  # [2, 3, 5, 7]
4468  axes = cast(axes, dtypes.int32)  # [1, 2]
4469
4470  input_rank = array_ops.size(input_shape)  # 4
4471  axes = (axes + input_rank) % input_rank
4472  axes_shape = array_ops.shape(axes)  # [2]
4473  return gen_data_flow_ops.dynamic_stitch(  # [2, 1, 1, 7]
4474      [
4475          range(input_rank),  # [0, 1, 2, 3]
4476          axes
4477      ],  # [1, 2]
4478      [
4479          input_shape,  # [2, 3, 5, 7]
4480          array_ops.ones(axes_shape, dtype=dtypes.int32)
4481      ])  # [1, 1]


def _unsorted_segment_N(data, segment_ids, num_segments):
  """Helper function for unsorted_segment_mean/_sqrtN.

  Computes the number of entries in each segment, with zero counts replaced
  by 1 to allow division by N.
  """
  num_segments = ops.convert_to_tensor(num_segments)
  # bincount doesn't support negative indices so we use unsorted_segment_sum
  segment_ids_shape = array_ops.shape_internal(segment_ids)
  ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)
  n = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)
  # add dimensions for all non-reduced axes
  broadcastable_shape = array_ops.concat(
      [num_segments[array_ops.newaxis],
       array_ops.ones([array_ops.rank(data)
                       - array_ops.rank(segment_ids)],
                      dtype=num_segments.dtype)],
      axis=0)
  n = array_ops.reshape(n, broadcastable_shape)
  return gen_math_ops.maximum(n, 1)


@tf_export(
    "math.unsorted_segment_mean",
    v1=["math.unsorted_segment_mean", "unsorted_segment_mean"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("unsorted_segment_mean")
def unsorted_segment_mean(data, segment_ids, num_segments, name=None):
  r"""Computes the mean along segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  This operator is similar to the `tf.math.unsorted_segment_sum` operator.
  Instead of computing the sum over segments, it computes the mean of all
  entries belonging to a segment such that:

  \\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples
  `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the number of
  occurrences of id \\(i\\).

  If there is no entry for a given segment ID `i`, it outputs 0.

  If the given segment ID `i` is negative, the value is dropped and will not
  be added to the sum of the segment.
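
  For example, a minimal sketch:

  ```python
  c = tf.constant([[1.0, 2, 3, 4], [5, 6, 7, 8], [4, 3, 2, 1]])
  tf.math.unsorted_segment_mean(c, tf.constant([0, 1, 0]), num_segments=2)
  # ==> [[2.5, 2.5, 2.5, 2.5],
  #      [5.0, 6.0, 7.0, 8.0]]
  ```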

  Args:
    data: A `Tensor` with floating point or complex dtype.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
    num_segments: An integer scalar `Tensor`.  The number of distinct segment
      IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has the same shape as `data`, except for the first
    `segment_ids.rank` dimensions, which are replaced with a single dimension
    of size `num_segments`.
  """
  with ops.name_scope(name, "UnsortedSegmentMean"):
    data = ops.convert_to_tensor(data)
    segment_ids = ops.convert_to_tensor(segment_ids)
    N = _unsorted_segment_N(data, segment_ids, num_segments)
    summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
    return summed / N


@tf_export(
    "math.unsorted_segment_sqrt_n",
    v1=["math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("unsorted_segment_sqrt_n")
def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):
  r"""Computes the sum along segments of a tensor divided by the sqrt(N).

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  This operator is similar to the `tf.math.unsorted_segment_sum` operator.
  In addition to computing the sum over segments, it divides the results by
  sqrt(N).

  \\(output_i = 1/sqrt(N_i) \sum_{j...} data[j...]\\) where the sum is over
  tuples `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the
  number of occurrences of id \\(i\\).

  If there is no entry for a given segment ID `i`, it outputs 0.

  Note that this op only supports floating point and complex dtypes,
  due to tf.sqrt only supporting these types.

  If the given segment ID `i` is negative, the value is dropped and will not
  be added to the sum of the segment.
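
  For example, a minimal sketch (rows 0 and 2 fall in segment 0, so their sum
  is divided by sqrt(2)):

  ```python
  c = tf.constant([[1.0, 2, 3, 4], [5, 6, 7, 8], [3, 2, 1, 0]])
  tf.math.unsorted_segment_sqrt_n(c, tf.constant([0, 1, 0]), num_segments=2)
  # ==> approximately [[2.828, 2.828, 2.828, 2.828],
  #                    [5.0, 6.0, 7.0, 8.0]]
  ```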

  Args:
    data: A `Tensor` with floating point or complex dtype.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
    num_segments: An integer scalar `Tensor`.  The number of distinct segment
      IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has the same shape as `data`, except for the first
    `segment_ids.rank` dimensions, which are replaced with a single dimension
    of size `num_segments`.
  """
  with ops.name_scope(name, "UnsortedSegmentSqrtN"):
    data = ops.convert_to_tensor(data)
    segment_ids = ops.convert_to_tensor(segment_ids)
    N = _unsorted_segment_N(data, segment_ids, num_segments)
    summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
    return summed / gen_math_ops.sqrt(N)


@tf_export(v1=["sparse.segment_sum", "sparse_segment_sum"])
@deprecation.deprecated_endpoints("sparse_segment_sum")
def sparse_segment_sum(data,
                       indices,
                       segment_ids,
                       name=None,
                       num_segments=None):
  r"""Computes the sum along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s
  first dimension, selecting a subset of dimension 0, specified by `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.

  For example:

  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

  # Select two rows, one segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[0 0 0 0]]

  # Select two rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  # => [[ 1  2  3  4]
  #     [-1 -2 -3 -4]]

  # With missing segment ids.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
                        num_segments=4)
  # => [[ 1  2  3  4]
  #     [ 0  0  0  0]
  #     [-1 -2 -3 -4]
  #     [ 0  0  0  0]]

  # Select all rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0 0 0 0]
  #     [5 6 7 8]]

  # Which is equivalent to:
  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
  ```

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.

  Returns:
    A `Tensor` of the same shape as `data`, except for dimension 0, which has
    size `k`, the number of segments specified via `num_segments` or inferred
    from the last element in `segment_ids`.
  """
  if num_segments is not None:
    return gen_math_ops.sparse_segment_sum_with_num_segments(
        data=data,
        indices=indices,
        segment_ids=segment_ids,
        num_segments=num_segments,
        name=name)
  else:
    return gen_math_ops.sparse_segment_sum(
        data=data, indices=indices, segment_ids=segment_ids, name=name)


@tf_export("sparse.segment_sum", v1=[])
def sparse_segment_sum_v2(data,
                          indices,
                          segment_ids,
                          num_segments=None,
                          name=None):
  r"""Computes the sum along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s
  first dimension, selecting a subset of dimension 0, specified by `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.

  For example:

  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

  # Select two rows, one segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[0 0 0 0]]

  # Select two rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  # => [[ 1  2  3  4]
  #     [-1 -2 -3 -4]]

  # With missing segment ids.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
                        num_segments=4)
  # => [[ 1  2  3  4]
  #     [ 0  0  0  0]
  #     [-1 -2 -3 -4]
  #     [ 0  0  0  0]]

  # Select all rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0 0 0 0]
  #     [5 6 7 8]]

  # Which is equivalent to:
  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
  ```

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `data`, except for dimension 0, which has
    size `k`, the number of segments specified via `num_segments` or inferred
    from the last element in `segment_ids`.
  """
  return sparse_segment_sum(
      data, indices, segment_ids, name=name, num_segments=num_segments)


@tf_export(v1=["sparse.segment_mean", "sparse_segment_mean"])
@deprecation.deprecated_endpoints("sparse_segment_mean")
def sparse_segment_mean(data,
                        indices,
                        segment_ids,
                        name=None,
                        num_segments=None):
  r"""Computes the mean along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.
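
  For example, a minimal sketch:

  ```python
  c = tf.constant([[1.0, 2, 3, 4], [-1, -2, -3, -4], [6, 7, 8, 9]])

  # Mean of rows 0 and 2, one segment.
  tf.sparse.segment_mean(c, tf.constant([0, 2]), tf.constant([0, 0]))
  # => [[3.5 4.5 5.5 6.5]]
  ```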

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.

  Returns:
    A `Tensor` of the same shape as `data`, except for dimension 0, which has
    size `k`, the number of segments specified via `num_segments` or inferred
    from the last element in `segment_ids`.
  """
  if num_segments is not None:
    return gen_math_ops.sparse_segment_mean_with_num_segments(
        data=data,
        indices=indices,
        segment_ids=segment_ids,
        num_segments=num_segments,
        name=name)
  else:
    return gen_math_ops.sparse_segment_mean(
        data=data, indices=indices, segment_ids=segment_ids, name=name)


@tf_export("sparse.segment_mean", v1=[])
def sparse_segment_mean_v2(data,
                           indices,
                           segment_ids,
                           num_segments=None,
                           name=None):
  r"""Computes the mean along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `data`, except for dimension 0, which has
    size `k`, the number of segments specified via `num_segments` or inferred
    from the last element in `segment_ids`.
  """
  return sparse_segment_mean(
      data, indices, segment_ids, name=name, num_segments=num_segments)


@tf_export(v1=["sparse.segment_sqrt_n", "sparse_segment_sqrt_n"])
@deprecation.deprecated_endpoints("sparse_segment_sqrt_n")
def sparse_segment_sqrt_n(data,
                          indices,
                          segment_ids,
                          name=None,
                          num_segments=None):
  r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).

  `N` is the size of the segment being reduced.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.

  Returns:
    A `Tensor` of the same shape as `data`, except for dimension 0, which has
    size `k`, the number of segments specified via `num_segments` or inferred
    from the last element in `segment_ids`.
  """
  if num_segments is not None:
    return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(
        data=data,
        indices=indices,
        segment_ids=segment_ids,
        num_segments=num_segments,
        name=name)
  else:
    return gen_math_ops.sparse_segment_sqrt_n(
        data=data, indices=indices, segment_ids=segment_ids, name=name)


@tf_export("sparse.segment_sqrt_n", v1=[])
def sparse_segment_sqrt_n_v2(data,
                             indices,
                             segment_ids,
                             num_segments=None,
                             name=None):
  r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.sparse.segment_mean`, but instead of dividing by the size of the
  segment, `N`, divide by `sqrt(N)` instead.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `data`, except for dimension 0, which has
    size `k`, the number of segments specified via `num_segments` or inferred
    from the last element in `segment_ids`.
  """
  return sparse_segment_sqrt_n(
      data, indices, segment_ids, name=name, num_segments=num_segments)


@tf_export("tensordot", "linalg.tensordot")
@dispatch.add_dispatch_support
def tensordot(a, b, axes, name=None):
  r"""Tensor contraction of a and b along specified axes and outer product.

  Tensordot (also known as tensor contraction) sums the product of elements
  from `a` and `b` over the indices specified by `axes`.

  This operation corresponds to `numpy.tensordot(a, b, axes)`.

  Example 1: When `a` and `b` are matrices (order 2), the case `axes=1`
  is equivalent to matrix multiplication.

  Example 2: When `a` and `b` are matrices (order 2), the case
  `axes = [[1], [0]]` is equivalent to matrix multiplication.

  Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives
  the outer product, a tensor of order 4.

  Example 4: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
  tensors of order 3. Then, `tensordot(a, b, [[0], [2]])` is the order 4 tensor
  \\(c_{jklm}\\) whose entry
  corresponding to the indices \\((j,k,l,m)\\) is given by:

  \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).

  In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.
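
  For example, a minimal sketch of the matrix case (`axes=1`), which reduces
  to ordinary matrix multiplication:

  ```python
  a = tf.reshape(tf.range(6.0), [2, 3])
  b = tf.reshape(tf.range(6.0), [3, 2])
  tf.tensordot(a, b, axes=1)  # same result as tf.matmul(a, b)
  # ==> [[10. 13.]
  #      [28. 40.]]
  ```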

  Args:
    a: `Tensor` of type `float32` or `float64`.
    b: `Tensor` with the same type as `a`.
    axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
      If axes is a scalar, sum over the last N axes of a and the first N axes of
      b in order. If axes is a list or `Tensor` the first and second row contain
      the set of unique integers specifying axes along which the contraction is
      computed, for `a` and `b`, respectively. The number of axes for `a` and
      `b` must be equal. If `axes=0`, computes the outer product between `a` and
      `b`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `a`.

  Raises:
    ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
    IndexError: If the values in axes exceed the rank of the corresponding
      tensor.
  """

  def _tensordot_reshape(a, axes, flipped=False):
4953    """Helper method to perform transpose and reshape for contraction op.
4954
4955    This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
4956    using `array_ops.transpose` and `array_ops.reshape`. The method takes a
4957    tensor and performs the correct transpose and reshape operation for a given
4958    set of indices. It returns the reshaped tensor as well as a list of indices
4959    necessary to reshape the tensor again after matrix multiplication.
4960
4961    Args:
4962      a: `Tensor`.
4963      axes: List or `int32` `Tensor` of unique indices specifying valid axes of
4964        `a`.
4965      flipped: An optional `bool`. Defaults to `False`. If `True`, the method
4966        assumes that `a` is the second argument in the contraction operation.
4967
4968    Returns:
4969      A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
4970      the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
4971      either a list of integers or an `int32` `Tensor`, depending on whether
4972      the shape of a is fully specified, and free_dims_static is either a list
4973      of integers and None values, or None, representing the inferred
4974      static shape of the free dimensions
4975    """
4976    if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
4977      shape_a = a.get_shape().as_list()
4978      axes = [i if i >= 0 else i + len(shape_a) for i in axes]
4979      free = [i for i in xrange(len(shape_a)) if i not in axes]
4980      free_dims = [shape_a[i] for i in free]
4981      prod_free = int(np.prod([shape_a[i] for i in free]))
4982      prod_axes = int(np.prod([shape_a[i] for i in axes]))
4983      perm = list(axes) + free if flipped else free + list(axes)
4984      new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
4985      if (perm != np.arange(len(shape_a))).any():
4986        a_trans = array_ops.transpose(a, perm)
4987      else:
4988        a_trans = a
4989      if a_trans.get_shape().as_list() != new_shape:
4990        reshaped_a = array_ops.reshape(a_trans, new_shape)
4991      else:
4992        reshaped_a = a_trans
4993      return reshaped_a, free_dims, free_dims
4994    else:
4995      if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
4996        shape_a = a.get_shape().as_list()
4997        axes = [i if i >= 0 else i + len(shape_a) for i in axes]
4998        free = [i for i in xrange(len(shape_a)) if i not in axes]
4999        axes_dims = [shape_a[i] for i in axes]
5000        free_dims = [shape_a[i] for i in free]
5001        free_dims_static = free_dims
5002        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
5003        free = ops.convert_to_tensor(free, dtype=dtypes.int32, name="free")
5004        shape_a = array_ops.shape(a)
5005      else:
5006        free_dims_static = None
5007        shape_a = array_ops.shape(a)
5008        rank_a = array_ops.rank(a)
5009        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
5010        axes = array_ops.where(axes >= 0, axes, axes + rank_a)
5011        free, _ = gen_array_ops.list_diff(range(rank_a), axes, dtypes.int32)
5012      free_dims = array_ops.gather(shape_a, free)
5013      axes_dims = array_ops.gather(shape_a, axes)
5014      prod_free_dims = reduce_prod(free_dims)
5015      prod_axes_dims = reduce_prod(axes_dims)
5016      if flipped:
5017        perm = array_ops.concat([axes, free], 0)
5018        new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
5019      else:
5020        perm = array_ops.concat([free, axes], 0)
5021        new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
5022      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
5023      return reshaped_a, free_dims, free_dims_static

  def _tensordot_axes(a, axes):
    """Generates two sets of contraction axes for the two tensor arguments."""
    a_shape = a.get_shape()
    if isinstance(axes, compat.integral_types):
      if axes < 0:
        raise ValueError(f"`axes` must be at least 0. Received: {axes}.")
      if a_shape.ndims is not None:
        if axes > a_shape.ndims:
          raise ValueError(f"`axes` must not be larger than the number of "
                           f"dimensions of tensor {a}.  Received {axes}, vs "
                           f"tensor dimensions {a_shape.ndims}.")
        return (list(xrange(a_shape.ndims - axes,
                            a_shape.ndims)), list(xrange(axes)))
      else:
        rank = array_ops.rank(a)
        return (range(rank - axes, rank,
                      dtype=dtypes.int32), range(axes, dtype=dtypes.int32))
    elif isinstance(axes, (list, tuple)):
      if len(axes) != 2:
        raise ValueError(
            f"`axes` must be an integer or have length 2. Received {axes}.")
      a_axes = axes[0]
      b_axes = axes[1]
      if isinstance(a_axes, compat.integral_types) and \
          isinstance(b_axes, compat.integral_types):
        a_axes = [a_axes]
        b_axes = [b_axes]
      if len(a_axes) != len(b_axes):
        raise ValueError(f"Different number of contraction axes `a` and `b`, "
                         f"{len(a_axes)} != {len(b_axes)}.")
      return a_axes, b_axes
    else:
      axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
      return axes[0], axes[1]

  with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_axes, b_axes = _tensordot_axes(a, axes)
    a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
    b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(
        b, b_axes, True)
    ab_matmul = matmul(a_reshape, b_reshape)
    if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
      if (ab_matmul.get_shape().is_fully_defined() and
          ab_matmul.get_shape().as_list() == a_free_dims + b_free_dims):
        return ab_matmul
      else:
        return array_ops.reshape(
            ab_matmul, a_free_dims + b_free_dims, name=name)
    else:
      a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
      b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
      product = array_ops.reshape(
          ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
      if a_free_dims_static is not None and b_free_dims_static is not None:
        product.set_shape(a_free_dims_static + b_free_dims_static)
      return product


@tf_export("math.polyval")
@dispatch.add_dispatch_support
def polyval(coeffs, x, name=None):
  r"""Computes the elementwise value of a polynomial.

  If `x` is a tensor and `coeffs` is a list of n + 1 tensors,
  this function returns the value of the n-th order polynomial

  `p(x) = coeffs[n] + coeffs[n-1] * x + ... + coeffs[0] * x**n`

  evaluated using Horner's method, i.e.

  ```python
  p(x) = coeffs[n] + x * (coeffs[n-1] + ... + x * (coeffs[1] + x * coeffs[0]))
  ```

  Usage Example:

  >>> coefficients = [1.0, 2.5, -4.2]
  >>> x = 5.0
  >>> y = tf.math.polyval(coefficients, x)
  >>> y
  <tf.Tensor: shape=(), dtype=float32, numpy=33.3>

  Usage Example:

  >>> tf.math.polyval([2, 1, 0], 3) # evaluates 2 * (3**2) + 1 * (3**1) + 0 * (3**0)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>

  `tf.math.polyval` can also be used in polynomial regression. Taking
  advantage of this function can facilitate writing a polynomial equation
  as compared to explicitly writing it out, especially for higher degree
  polynomials.

  >>> x = tf.constant(3)
  >>> theta1 = tf.Variable(2)
  >>> theta2 = tf.Variable(1)
  >>> theta3 = tf.Variable(0)
  >>> tf.math.polyval([theta1, theta2, theta3], x)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>

  Args:
    coeffs: A list of `Tensor` representing the coefficients of the polynomial.
    x: A `Tensor` representing the variable of the polynomial.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as the expression p(x), with the usual
    broadcasting rules for element-wise addition and multiplication applied.

  @compatibility(numpy)
  Equivalent to numpy.polyval.
  @end_compatibility
  """
  if not isinstance(coeffs, list):
    raise ValueError(
        f"Argument coeffs must be list type. Received type {type(coeffs)}.")

  with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if len(coeffs) < 1:
      return array_ops.zeros_like(x, name=name)
    coeffs = [
        ops.convert_to_tensor(coeff, name=("coeff_%d" % index))
        for index, coeff in enumerate(coeffs)
    ]
    p = coeffs[0]
    for c in coeffs[1:]:
      p = c + p * x
    return p


@tf_export("math.reciprocal_no_nan")
@dispatch.add_dispatch_support
def reciprocal_no_nan(x, name=None):
  """Performs a safe reciprocal operation, element wise.

  If a particular element is zero, the reciprocal for that element is
  also set to zero.

  For example:
  ```python
  x = tf.constant([2.0, 0.5, 0, 1], dtype=tf.float32)
  tf.math.reciprocal_no_nan(x)  # [ 0.5, 2, 0.0, 1.0 ]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `complex64` or
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.

  Raises:
    TypeError: If `x` is not of a valid dtype.

  """

  with ops.name_scope(name, "reciprocal_no_nan", [x]) as scope:
    x = ops.convert_to_tensor(x, name="x")
    one = constant_op.constant(1, dtype=x.dtype.base_dtype, name="one")
    return gen_math_ops.div_no_nan(one, x, name=scope)


@tf_export("math.xlog1py")
@dispatch.add_dispatch_support
def xlog1py(x, y, name=None):
  r"""Compute x * log1p(y).

  Given `x` and `y`, compute `x * log1p(y)`. This function safely returns
  zero when `x = 0`, no matter what the value of `y` is.

  Example:

  >>> tf.math.xlog1py(0., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.>
  >>> tf.math.xlog1py(1., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.6931472>
  >>> tf.math.xlog1py(2., 2.)
  <tf.Tensor: shape=(), dtype=float32, numpy=2.1972246>
  >>> tf.math.xlog1py(0., -1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.>

  Args:
    x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    y: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    name: A name for the operation (optional).

  Returns:
    `x * log1p(y)`.

  @compatibility(scipy)
  Equivalent to scipy.special.xlog1py
  @end_compatibility
  """
  with ops.name_scope(name, "xlog1py", [x]):
    return gen_math_ops.xlog1py(x, y)


@tf_export("math.erfinv")
@dispatch.add_dispatch_support
def erfinv(x, name=None):
  """Compute inverse error function.

  Given `x`, compute the inverse error function of `x`. This function
  is the inverse of `tf.math.erf`.
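
  For example, a minimal sketch (values rounded):

  ```python
  tf.math.erfinv([0.0, 0.5, -0.5])
  # ==> approximately [0.0, 0.4769, -0.4769]
  ```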

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    Inverse error function of `x`.
  """
  with ops.name_scope(name, "erfinv", [x]):
    return gen_math_ops.erfinv(x)


@tf_export("math.ndtri")
@dispatch.add_dispatch_support
def ndtri(x, name=None):
  """Compute quantile of Standard Normal.

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    The quantile of the standard normal distribution evaluated at `x`, i.e.
    the inverse of the standard normal CDF.
  """
  with ops.name_scope(name, "ndtri", [x]):
    return gen_math_ops.ndtri(x)


@tf_export("math.erfcinv")
@dispatch.add_dispatch_support
def erfcinv(x, name=None):
  """Computes the inverse of complementary error function.

  Given `x`, compute the inverse complementary error function of `x`.
  This function is the inverse of `tf.math.erfc`, and is defined on
  `[0, 2]`.

  >>> tf.math.erfcinv([0., 0.2, 1., 1.5, 2.])
  <tf.Tensor: shape=(5,), dtype=float32, numpy=
  array([       inf,  0.9061935, -0.       , -0.4769363,       -inf],
        dtype=float32)>

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    Inverse complementary error function of `x`.

  @compatibility(scipy)
  Equivalent to scipy.special.erfcinv
  @end_compatibility
  """
  with ops.name_scope(name, "erfcinv", [x]):
    x = ops.convert_to_tensor(x, name="start")
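    # Uses the identity erfcinv(x) = -ndtri(x / 2) / sqrt(2), which relates
    # the complementary error function to the standard normal quantile.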
    return -ndtri(0.5 * x) * np.sqrt(0.5)


@tf_export("math.ceil", v1=["math.ceil", "ceil"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("ceil")
def ceil(x, name=None):
  """Return the ceiling of the input, element-wise.

  For example:

  >>> tf.math.ceil([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
  <tf.Tensor: shape=(7,), dtype=float32,
  numpy=array([-1., -1., -0.,  1.,  2.,  2.,  2.], dtype=float32)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `int32`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to np.ceil
  @end_compatibility
  """
  return gen_math_ops.ceil(x, name)


@tf_export("math.sqrt", "sqrt")
@dispatch.add_dispatch_support
def sqrt(x, name=None):  # pylint: disable=redefined-builtin
  r"""Computes element-wise square root of the input tensor.

  Note: This operation does not support integer types.

  >>> x = tf.constant([[4.0], [16.0]])
  >>> tf.sqrt(x)
  <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
    array([[2.],
           [4.]], dtype=float32)>
  >>> y = tf.constant([[-4.0], [16.0]])
  >>> tf.sqrt(y)
  <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
    array([[nan],
           [ 4.]], dtype=float32)>
  >>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128)
  >>> tf.sqrt(z)
  <tf.Tensor: shape=(2, 1), dtype=complex128, numpy=
    array([[0.0+1.j],
           [4.0+0.j]])>

  Note: In order to support complex type, please provide an input tensor
  of `complex64` or `complex128`.

  Args:
    x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of same size, type and sparsity as `x`.
  """
  return gen_math_ops.sqrt(x, name)


# pylint: disable=g-docstring-has-escape
@tf_export("math.exp", "exp")
@dispatch.add_dispatch_support
def exp(x, name=None):
  r"""Computes exponential of x element-wise.  \\(y = e^x\\).

  This function computes the exponential of the input tensor element-wise.
  i.e. `math.exp(x)` or \\(e^x\\), where `x` is the input tensor.
  \\(e\\) denotes Euler's number and is approximately equal to 2.718281.
  Output is positive for any real input.

  >>> x = tf.constant(2.0)
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=7.389056>

  >>> x = tf.constant([2.0, 8.0])
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([   7.389056, 2980.958   ], dtype=float32)>

  For complex numbers, the exponential value is calculated as
  $$
  e^{x+iy} = {e^x} {e^{iy}} = {e^x} ({\cos (y) + i \sin (y)})
  $$

  For `1+1j` the value would be computed as:
  $$
  e^1 (\cos (1) + i \sin (1)) = 2.7182817 \times (0.5403023+0.84147096j)
  $$

  >>> x = tf.constant(1 + 1j)
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(), dtype=complex128,
  numpy=(1.4686939399158851+2.2873552871788423j)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to np.exp
  @end_compatibility
  """
  return gen_math_ops.exp(x, name)


# pylint: enable=g-docstring-has-escape


@tf_export("math.sobol_sample")
@dispatch.add_dispatch_support
def sobol_sample(dim, num_results, skip=0, dtype=dtypes.float32, name=None):
  """Generates points from the Sobol sequence.

  Creates a Sobol sequence with `num_results` samples. Each sample has dimension
  `dim`. Skips the first `skip` samples.
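
  For example, a minimal usage sketch (only the shape is asserted here;
  entries lie in `[0, 1)`):

  ```python
  samples = tf.math.sobol_sample(dim=2, num_results=3)
  samples.shape  # ==> TensorShape([3, 2])
  ```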

  Args:
    dim: Positive scalar `Tensor` representing each sample's dimension.
    num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol
      points to return in the output.
    skip: (Optional) Positive scalar `Tensor` of dtype int32. The number of
      initial points of the Sobol sequence to skip. Default value is 0.
    dtype: (Optional) The `tf.Dtype` of the sample. One of: `tf.float32` or
      `tf.float64`. Defaults to `tf.float32`.
    name: (Optional) Python `str` name prefixed to ops created by this function.

  Returns:
    `Tensor` of samples from Sobol sequence with `shape` [num_results, dim].
  """
  with ops.name_scope(name, "sobol", [dim, num_results, skip]):
    return gen_math_ops.sobol_sample(dim, num_results, skip, dtype=dtype)


@tf_export("math.rsqrt", v1=["math.rsqrt", "rsqrt"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("rsqrt")
def rsqrt(x, name=None):
  """Computes reciprocal of square root of x element-wise.

  For example:

  >>> x = tf.constant([2., 0., -2.])
  >>> tf.math.rsqrt(x)
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([0.707, inf, nan], dtype=float32)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.
  """
  return gen_math_ops.rsqrt(x, name)


@tf_export("math.acos", "acos")
@dispatch.add_dispatch_support
def acos(x, name=None):
  """Computes acos of x element-wise.

  Provided an input tensor, the `tf.math.acos` operation
  returns the inverse cosine of each element of the tensor.
  If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`.

  Input range is `[-1, 1]` and the output has a range of `[0, pi]`.

  For example:

  >>> x = tf.constant([1.0, -0.5, 3.4, 0.2, 0.0, -2], dtype = tf.float32)
  >>> tf.math.acos(x)
  <tf.Tensor: shape=(6,), dtype=float32,
  numpy= array([0. , 2.0943952, nan, 1.3694383, 1.5707964, nan],
  dtype=float32)>

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as x.
  """
  return gen_math_ops.acos(x, name)


@tf_export("math.floor", "floor")
@dispatch.add_dispatch_support
def floor(x, name=None):
  """Returns element-wise largest integer not greater than x.

  The input range is `(-inf, inf)` and the
  output range consists of all integer values.

  For example:

  >>> x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float("inf")])
  >>> tf.floor(x).numpy()
  array([ 1., -2.,  5., -3.,  0., inf], dtype=float32)

  Args:
    x:  A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as x.
  """
  return gen_math_ops.floor(x, name)