# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Math Operations.

Note: Functions taking `Tensor` arguments can also take anything accepted by
`tf.convert_to_tensor`.

Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).

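For instance, a minimal sketch of broadcasting (the scalar operand behaves as
if expanded to the tensor's shape):

```python
tf.add([[1, 2, 3], [4, 5, 6]], 10)
# ==> [[11, 12, 13],
#      [14, 15, 16]]
```
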
TensorFlow provides a variety of math functions including:

* Basic arithmetic operators and trigonometric functions.
* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)
* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)
* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)
* Segment functions (like: `tf.math.segment_sum`)

See: `tf.linalg` for matrix and tensor functions.

<a id=Segmentation></a>

## About Segmentation

TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here, a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.

For example:

```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
#  ==>  [[0 0 0 0]
#        [5 6 7 8]]
```

The standard `segment_*` functions assert that the segment indices are sorted.
If you have unsorted indices, use the equivalent `unsorted_segment_*` function.
These functions take an additional argument, `num_segments`, so that the output
tensor can be efficiently allocated.

```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 6,  8, 10, 12],
#      [-1, -2, -3, -4]]
```

"""
import builtins
import numbers
import numpy as np

from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import traceback_utils
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export


np_dtypes = LazyLoader(
    "np_dtypes", globals(),
    "tensorflow.python.ops.numpy_ops.np_dtypes")


# Aliases for some automatically-generated names.
nextafter = gen_math_ops.next_after


@tf_export("linspace", v1=["lin_space", "linspace"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("lin_space")
def linspace_nd(start, stop, num, name=None, axis=0):
  r"""Generates evenly-spaced values in an interval along a given axis.

  A sequence of `num` evenly-spaced values is generated beginning at `start`
  along a given `axis`.
  If `num > 1`, the values in the sequence increase by
  `(stop - start) / (num - 1)`, so that the last one is exactly `stop`.
  If `num <= 0`, `ValueError` is raised.

  Matches
  [np.linspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)'s
  behaviour except when `num == 0`.

  For example:

  ```
  tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
  ```

  `start` and `stop` can be tensors of arbitrary size:

  >>> tf.linspace([0., 5.], [10., 40.], 5, axis=0)
  <tf.Tensor: shape=(5, 2), dtype=float32, numpy=
  array([[ 0.  ,  5.  ],
         [ 2.5 , 13.75],
         [ 5.  , 22.5 ],
         [ 7.5 , 31.25],
         [10.  , 40.  ]], dtype=float32)>

  `axis` selects the dimension along which the values are generated; the size
  of that dimension in the returned tensor equals `num`:

  >>> tf.linspace([0., 5.], [10., 40.], 5, axis=-1)
  <tf.Tensor: shape=(2, 5), dtype=float32, numpy=
  array([[ 0.  ,  2.5 ,  5.  ,  7.5 , 10.  ],
         [ 5.  , 13.75, 22.5 , 31.25, 40.  ]], dtype=float32)>

  Args:
    start: A `Tensor`. Must be one of the following types: `bfloat16`,
      `float32`, `float64`. N-D tensor. First entry in the range.
    stop: A `Tensor`. Must have the same type and shape as `start`. N-D tensor.
      Last entry in the range.
    num: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D
      tensor. Number of values to generate.
    name: A name for the operation (optional).
    axis: Axis along which the operation is performed (used only when N-D
      tensors are provided).

  Returns:
    A `Tensor`. Has the same type as `start`.
  """

  with ops.name_scope(name, "linspace", [start, stop]):
    start = ops.convert_to_tensor(start, name="start")
    # stop must be convertible to the same dtype as start
    stop = ops.convert_to_tensor(stop, name="stop", dtype=start.dtype)
    num_int = array_ops.convert_to_int_tensor(num, name="num")
    num = cast(num_int, dtype=start.dtype)

    broadcast_shape = array_ops.broadcast_dynamic_shape(
        array_ops.shape(start), array_ops.shape(stop))
    start = array_ops.broadcast_to(start, broadcast_shape)
    stop = array_ops.broadcast_to(stop, broadcast_shape)

    expanded_start = array_ops.expand_dims(start, axis=axis)
    expanded_stop = array_ops.expand_dims(stop, axis=axis)

    shape = array_ops.shape(expanded_start)
    ndims = array_ops.shape(shape)[0]

    axis = array_ops.where_v2(axis >= 0, axis, ndims + axis)
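    # (E.g. with ndims == 3, a negative axis of -1 was just normalized to 2.)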

    # Keep the fill count non-negative (num may be 0 or 1, making num - 2
    # negative).
    num_fill = gen_math_ops.maximum(num_int - 2, 0)
    # Clamp the step count to at least 1 to avoid dividing by zero; the result
    # is sliced at the end, so the output is still correct for num == 1 and
    # num == 0.
    n_steps = gen_math_ops.maximum(num_int - 1, 1)
    delta = (expanded_stop - expanded_start) / cast(n_steps,
                                                    expanded_stop.dtype)
    # Re-cast tensors to delta's dtype.
    expanded_start = cast(expanded_start, delta.dtype)
    expanded_stop = cast(expanded_stop, delta.dtype)
    # If num < 0, the range op will raise an exception; otherwise it uses the
    # same step count that was used for delta.
    range_end = array_ops.where_v2(num_int >= 0, n_steps, -1)
    # Even though range supports an output dtype, it's limited
    # (e.g. it doesn't support half at the moment).
    desired_range = cast(range(1, range_end, dtype=dtypes.int64), delta.dtype)
    mask = gen_math_ops.equal(axis, range(ndims))
    # desired_range_shape is [1, 1, ..., 1, num_fill, 1, ..., 1], where the
    # index of num_fill is equal to axis.
    desired_range_shape = array_ops.where_v2(mask, num_fill, 1)
    desired_range = array_ops.reshape(desired_range, desired_range_shape)

    res = expanded_start + delta * desired_range

    # Add the start and endpoints to the result, and slice out the desired
    # portion.
    all_tensors = (expanded_start, res, expanded_stop)
    concatenated = array_ops.concat(all_tensors, axis=axis)
    begin = array_ops.zeros_like(shape)
    size = array_ops.where_v2(mask, num_int, shape)

    return array_ops.slice(concatenated, begin, size)


linspace = linspace_nd

arg_max = deprecation.deprecated(None, "Use `tf.math.argmax` instead")(arg_max)  # pylint: disable=used-before-assignment
arg_min = deprecation.deprecated(None, "Use `tf.math.argmin` instead")(arg_min)  # pylint: disable=used-before-assignment
tf_export(v1=["arg_max"])(dispatch.add_dispatch_support(arg_max))
tf_export(v1=["arg_min"])(dispatch.add_dispatch_support(arg_min))


# This is set by resource_variable_ops.py. It is included in this way since
# there is a circular dependency between math_ops and resource_variable_ops.
_resource_variable_type = None


def _set_doc(doc):

  def _decorator(func):
    func.__doc__ = doc
    return func

  return _decorator

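# `_set_doc` is used below to reuse a generated op's docstring with small
# edits, e.g. (abridged from the `argmax` wrapper further down):
#
#   @_set_doc(gen_math_ops.arg_max.__doc__.replace("dimensions", "axes"))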

# pylint: disable=redefined-builtin
@tf_export(v1=["math.argmax", "argmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_max.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmax(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmax_v2(input, axis, output_type, name)


@tf_export("math.argmax", "argmax", v1=[])
@dispatch.add_dispatch_support
def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None):
  """Returns the index with the largest value across axes of a tensor.

  In case of ties, the smallest index is returned.

  For example:

  >>> A = tf.constant([2, 20, 30, 3, 6])
  >>> tf.math.argmax(A)  # A[2] is maximum in tensor A
  <tf.Tensor: shape=(), dtype=int64, numpy=2>
  >>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8],
  ...                  [14, 45, 23, 5, 27]])
  >>> tf.math.argmax(B, 0)
  <tf.Tensor: shape=(5,), dtype=int64, numpy=array([2, 2, 0, 2, 2])>
  >>> tf.math.argmax(B, 1)
  <tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 2, 1])>
  >>> C = tf.constant([0, 0, 0, 0])
  >>> tf.math.argmax(C) # Returns smallest index in case of ties
  <tf.Tensor: shape=(), dtype=int64, numpy=0>

  Args:
    input: A `Tensor`.
    axis: An integer, the axis to reduce across. Defaults to 0.
    output_type: An optional output dtype (`tf.int32` or `tf.int64`). Defaults
      to `tf.int64`.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of type `output_type`.
  """
  if axis is None:
    axis = 0
  return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)


@tf_export(v1=["math.argmin", "argmin"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_min.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmin(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmin_v2(input, axis, output_type, name)


@tf_export("math.argmin", "argmin", v1=[])
@dispatch.add_dispatch_support
def argmin_v2(input, axis=None, output_type=dtypes.int64, name=None):
  """Returns the index with the smallest value across axes of a tensor.

  Returns the smallest index in case of ties.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`,
      `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`,
      `uint64`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      int32 or int64, must be in the range `[-rank(input), rank(input))`.
      Describes which axis of the input Tensor to reduce across. For vectors,
      use axis = 0.
    output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to
      `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `output_type`.

  Usage:
  ```python
  import tensorflow as tf
  a = [1, 10, 26.9, 2.8, 166.32, 62.3]
  b = tf.math.argmin(input = a)
  c = tf.keras.backend.eval(b)
  # c = 0
  # here a[0] = 1 which is the smallest element of a across axis 0
  ```
  """
  if axis is None:
    axis = 0
  return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)


# pylint: enable=redefined-builtin


# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
@tf_export("math.abs", "abs")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def abs(x, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the absolute value of a tensor.

  Given a tensor of integer or floating-point values, this operation returns a
  tensor of the same type, where each element contains the absolute value of the
  corresponding element in the input.

  Given a tensor `x` of complex numbers, this operation returns a tensor of type
  `float32` or `float64` that is the absolute value of each element in `x`. For
  a complex number \\(a + bj\\), its absolute value is computed as
  \\(\sqrt{a^2 + b^2}\\).

  For example:

  >>> # real number
  >>> x = tf.constant([-2.25, 3.25])
  >>> tf.abs(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([2.25, 3.25], dtype=float32)>

  >>> # complex number
  >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
  >>> tf.abs(x)
  <tf.Tensor: shape=(2, 1), dtype=float64, numpy=
  array([[5.25594901],
         [6.60492241]])>

  Args:
    x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,
      `int32`, `int64`, `complex64` or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`,
      with absolute values. Note, for `complex64` or `complex128` input, the
      returned `Tensor` will be of type `float32` or `float64`, respectively.
  """
  with ops.name_scope(name, "Abs", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_complex:
      return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
    return gen_math_ops._abs(x, name=name)


# pylint: enable=g-docstring-has-escape


# pylint: disable=redefined-builtin
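# A thin private wrapper around `gen_math_ops.bucketize`: for each element of
# `input`, it returns the index of the bucket (delimited by the sorted list
# `boundaries`) into which the element falls.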
def _bucketize(input, boundaries, name=None):
  return gen_math_ops.bucketize(input=input, boundaries=boundaries, name=name)


# pylint: enable=redefined-builtin


class DivideDelegateWithName:
  """Use Python2/Python3 division delegation to implement divide for tensors."""

  def __init__(self, x, name):
    """Construct DivideDelegateWithName.

    Args:
      x: Tensor to use as the left operand in operator overloads.
      name: The name that is preferred for the op created.
    """
    self.x = x
    self.name = name

  def __truediv__(self, y):
    return _truediv_python3(self.x, y, self.name)

  def __floordiv__(self, y):
    return floordiv(self.x, y, self.name)

  def __div__(self, y):
    return _div_python2(self.x, y, self.name)


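# A minimal sketch of the delegation above (illustrative only; `t` is a
# hypothetical tensor):
#
#   DivideDelegateWithName(t, "my_div") / 2.0
#
# behaves like `t / 2.0` but names the created op "my_div"; `divide` below
# relies on this to honor an explicit `name` argument.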
@tf_export("math.divide", "divide")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def divide(x, y, name=None):
  """Computes Python style division of `x` by `y`.

  For example:

  >>> x = tf.constant([16, 12, 11])
  >>> y = tf.constant([4, 6, 2])
  >>> tf.divide(x,y)
  <tf.Tensor: shape=(3,), dtype=float64,
  numpy=array([4. , 2. , 5.5])>

  Args:
    x: A `Tensor`.
    y: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same shape as the input.
  """

  if name is not None:
    # We cannot use the tensor operator overload here, because it has no way
    # to track override names. Use a delegate class to track the runtime
    # division behavior instead.
    return DivideDelegateWithName(x, name) / y
  else:
    # We do conversion here to make sure at least x is a tensor.
    if not tensor_util.is_tf_type(x):
      dtype = y.dtype.base_dtype if tensor_util.is_tf_type(y) else None
      x = ops.convert_to_tensor(x, dtype=dtype)
    return x / y


@tf_export("math.multiply", "multiply")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def multiply(x, y, name=None):
  """Returns an element-wise x * y.

  For example:

  >>> x = tf.constant(([1, 2, 3, 4]))
  >>> tf.math.multiply(x, x)
  <tf.Tensor: shape=(4,), dtype=..., numpy=array([ 1,  4,  9, 16], dtype=int32)>

  Since `tf.math.multiply` will convert its arguments to `Tensor`s, you can also
  pass in non-`Tensor` arguments:

  >>> tf.math.multiply(7,6)
  <tf.Tensor: shape=(), dtype=int32, numpy=42>

  If `x.shape` is not the same as `y.shape`, they will be broadcast to a
  compatible shape. (More about broadcasting
  [here](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).)

  For example:

  >>> x = tf.ones([1, 2])
  >>> y = tf.ones([2, 1])
  >>> x * y  # Taking advantage of operator overriding
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
  array([[1., 1.],
         [1., 1.]], dtype=float32)>

  The reduction version of this elementwise operation is `tf.math.reduce_prod`.

  Args:
    x: A Tensor. Must be one of the following types: `bfloat16`,
      `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`,
      `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has the same type as `x`.

  Raises:
    InvalidArgumentError: When `x` and `y` have incompatible shapes or types.
  """

  return gen_math_ops.mul(x, y, name)


# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
    "2016-12-30",
    "`tf.mul(x, y)` is deprecated; use `tf.math.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
  return gen_math_ops.mul(x, y, name)


_mul.__doc__ = (
    gen_math_ops.mul.__doc__ + ("" if _mul.__doc__ is None else _mul.__doc__))


@tf_export("math.subtract", "subtract")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def subtract(x, y, name=None):
  return gen_math_ops.sub(x, y, name)


subtract.__doc__ = gen_math_ops.sub.__doc__


# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
    "2016-12-30",
    "`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
  return gen_math_ops.sub(x, y, name)


_sub.__doc__ = (
    gen_math_ops.sub.__doc__ + ("" if _sub.__doc__ is None else _sub.__doc__))

negative = gen_math_ops.neg


# pylint: disable=g-docstring-has-escape
@deprecation.deprecated(
    "2016-12-30",
    "`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
  """Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  return negative(x, name)


# pylint: enable=g-docstring-has-escape


@tf_export(v1=["math.scalar_mul", "scalar_mul"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def scalar_mul(scalar, x, name=None):
  """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.

  This is a special case of `tf.math.multiply`, where the first value must be a
  `scalar`. Unlike the general form of `tf.math.multiply`, this operation is
  guaranteed to be efficient for `tf.IndexedSlices`.

  >>> x = tf.reshape(tf.range(30, dtype=tf.float32), [10, 3])
  >>> with tf.GradientTape() as g:
  ...   g.watch(x)
  ...   y = tf.gather(x, [1, 2])  # IndexedSlices
  ...   z = tf.math.scalar_mul(10.0, y)

  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.
    name: A name for the operation (optional).

  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.

  Raises:
    ValueError: If `scalar` is not a 0-D `Tensor`.
617  """
618  base_dtype = dtypes.as_dtype(x.dtype).base_dtype
619  scalar = ops.convert_to_tensor(
620      scalar, dtype=base_dtype, name="scalar")
621  shape = scalar.get_shape()
622  if shape.ndims == 0:
623    if isinstance(x, indexed_slices.IndexedSlices):
624      return indexed_slices.IndexedSlices(
625          gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)
626    else:
627      return gen_math_ops.mul(scalar, x, name)
628  else:
629    raise ValueError(
630        f"The input scalar must be a 0-D value. Received shape {shape}.")
631
632
633@tf_export("math.softplus", "nn.softplus", v1=["math.softplus", "nn.softplus"])
634@dispatch.register_unary_elementwise_api
635@dispatch.add_dispatch_support
636def softplus(features, name=None):
637  """Computes elementwise softplus: `softplus(x) = log(exp(x) + 1)`.
638
639  `softplus` is a smooth approximation of `relu`. Like `relu`, `softplus` always
640  takes on positive values.
641
642  <img style="width:100%" src="https://www.tensorflow.org/images/softplus.png">
643
644  Example:
645
646  >>> import tensorflow as tf
647  >>> tf.math.softplus(tf.range(0, 2, dtype=tf.float32)).numpy()
648  array([0.6931472, 1.3132616], dtype=float32)
649
  Args:
    features: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
655  """
656  return gen_nn_ops.softplus(features, name)
657
658
659@tf_export("math.scalar_mul", "scalar_mul", v1=[])
660@dispatch.register_binary_elementwise_api
661@dispatch.add_dispatch_support
662@_set_doc(scalar_mul.__doc__)
663def scalar_mul_v2(scalar, x, name=None):
664  with ops.name_scope(name, "scalar_mul", [x]) as name:
665    return scalar_mul(scalar, x, name)
666
667
668@tf_export("math.pow", "pow")
669@dispatch.register_binary_elementwise_api
670@dispatch.add_dispatch_support
671def pow(x, y, name=None):  # pylint: disable=redefined-builtin
672  r"""Computes the power of one value to another.
673
674  Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
675  corresponding elements in `x` and `y`. For example:
676
677  ```python
678  x = tf.constant([[2, 2], [3, 3]])
679  y = tf.constant([[8, 16], [2, 3]])
680  tf.pow(x, y)  # [[256, 65536], [9, 27]]
681  ```
682
683  Args:
684    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
685      `complex64`, or `complex128`.
686    y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
687      `complex64`, or `complex128`.
688    name: A name for the operation (optional).
689
690  Returns:
691    A `Tensor`.
692  """
693  with ops.name_scope(name, "Pow", [x]) as name:
694    return gen_math_ops._pow(x, y, name=name)
695
696
697# pylint: disable=redefined-builtin,redefined-outer-name
698@tf_export("dtypes.complex", "complex")
699@dispatch.register_binary_elementwise_api
700@dispatch.add_dispatch_support
701def complex(real, imag, name=None):
702  r"""Converts two real numbers to a complex number.
703
704  Given a tensor `real` representing the real part of a complex number, and a
705  tensor `imag` representing the imaginary part of a complex number, this
706  operation returns complex numbers elementwise of the form \\(a + bj\\), where
707  *a* represents the `real` part and *b* represents the `imag` part.
708
709  The input tensors `real` and `imag` must have the same shape.
710
711  For example:
712
713  ```python
714  real = tf.constant([2.25, 3.25])
715  imag = tf.constant([4.75, 5.75])
716  tf.complex(real, imag)  # [[2.25 + 4.75j], [3.25 + 5.75j]]
717  ```
718
719  Args:
720    real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
721    imag: A `Tensor`. Must have the same type as `real`.
722    name: A name for the operation (optional).
723
724  Returns:
725    A `Tensor` of type `complex64` or `complex128`.
726
727  Raises:
    TypeError: If `real` and `imag` have incorrect or mismatched dtypes.
729  """
730  real = ops.convert_to_tensor(real, name="real")
731  imag = ops.convert_to_tensor(imag, name="imag")
732  with ops.name_scope(name, "Complex", [real, imag]) as name:
733    input_types = (real.dtype, imag.dtype)
734    if input_types == (dtypes.float64, dtypes.float64):
735      Tout = dtypes.complex128
736    elif input_types == (dtypes.float32, dtypes.float32):
737      Tout = dtypes.complex64
738    else:
739      raise TypeError(
740          f"The `real` and `imag` components have incorrect types: "
741          f"{real.dtype.name} {imag.dtype.name}. They must be consistent, and "
742          f"one of {[dtypes.float32, dtypes.float64]}")
743    return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
744
745
746@tf_export("math.sign", "sign")
747@dispatch.register_unary_elementwise_api
748@dispatch.add_dispatch_support
749def sign(x, name=None):
750  r"""Returns an element-wise indication of the sign of a number.
751
752  `y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0`.
753
754  For complex numbers, `y = sign(x) = x / |x| if x != 0, otherwise y = 0`.
755
756  Example usage:
757
758  >>> # real number
759  >>> tf.math.sign([0., 2., -3.])
760  <tf.Tensor: shape=(3,), dtype=float32,
761  numpy=array([ 0.,  1., -1.], dtype=float32)>
762
763  >>> # complex number
764  >>> tf.math.sign([1 + 1j, 0 + 0j])
765  <tf.Tensor: shape=(2,), dtype=complex128,
766  numpy=array([0.70710678+0.70710678j, 0.        +0.j        ])>
767
768  Args:
769   x: A Tensor. Must be one of the following types: bfloat16, half, float32,
770     float64, int32, int64, complex64, complex128.
771   name: A name for the operation (optional).
772
773  Returns:
774   A Tensor. Has the same type as x.
775
776   If x is a SparseTensor, returns SparseTensor(x.indices,
777     tf.math.sign(x.values, ...), x.dense_shape).
778  """
779  x = ops.convert_to_tensor(x)
780  if x.dtype.is_complex:
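    # For complex inputs, compute sign(x) = x / |x|; div_no_nan maps the
    # x == 0 case to 0.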
    return gen_math_ops.div_no_nan(
        x,
        cast(
            gen_math_ops.complex_abs(
                x,
                Tout=dtypes.float32
                if x.dtype == dtypes.complex64 else dtypes.float64),
            dtype=x.dtype),
        name=name)
  return gen_math_ops.sign(x, name=name)


@tf_export("math.real", v1=["math.real", "real"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("real")
def real(input, name=None):
  r"""Returns the real part of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the real part of each element in `input` considered as a complex number.

  For example:

  ```python
  x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  tf.math.real(x)  # [-2.25, 3.25]
  ```

  If `input` is already real, it is returned unchanged.

  Args:
    input: A `Tensor`. Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Real", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      real_dtype = input.dtype.real_dtype
      return gen_math_ops.real(input, Tout=real_dtype, name=name)
    else:
      return input


@tf_export("math.imag", v1=["math.imag", "imag"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("imag")
def imag(input, name=None):
  r"""Returns the imaginary part of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the imaginary part of each element in `input` considered as a complex
  number. If `input` is real, a tensor of all zeros is returned.

  For example:

  ```python
  x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  tf.math.imag(x)  # [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float`, `double`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Imag", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
    else:
      return array_ops.zeros_like(input)


@tf_export("math.angle", v1=["math.angle", "angle"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("angle")
def angle(input, name=None):
  r"""Returns the element-wise argument of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the argument of each element in `input` considered as a complex number.

  The elements in `input` are considered to be complex numbers of the form
  \\(a + bj\\), where *a* is the real part and *b* is the imaginary part.
  If `input` is real, then *b* is zero by definition.

  The argument returned by this function is of the form \\(atan2(b, a)\\).
  If `input` is real, a tensor of all zeros is returned.

  For example:

  ```
  input = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex64)
  tf.math.angle(input).numpy()
  # ==> array([2.0131705, 1.056345 ], dtype=float32)
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float`, `double`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Angle", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)
    else:
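      # The argument of a negative real number is pi; for a non-negative real
      # number it is 0.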
      return array_ops.where(input < 0, np.pi * array_ops.ones_like(input),
                             array_ops.zeros_like(input))


# pylint: enable=redefined-outer-name,redefined-builtin


@tf_export("math.round", "round")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def round(x, name=None):  # pylint: disable=redefined-builtin
  """Rounds the values of a tensor to the nearest integer, element-wise.

  Rounds half to even, also known as banker's rounding. If you want to round
  according to the current system rounding mode, use tf::cint.
  For example:

  ```python
  x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
  tf.round(x)  # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.is_integer:
    return x
  else:
    return gen_math_ops.round(x, name=name)


# TODO(mdan): Include a full_type argument to replace dtype.
@tf_export("cast", "dtypes.cast")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor` or `IndexedSlices`) to `dtype`.

  For example:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  Notice `tf.cast` has an alias `tf.dtypes.cast`:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.dtypes.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  The operation supports data types (for `x` and `dtype`) of
  `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
  `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.
  In case of casting from complex types (`complex64`, `complex128`) to real
  types, only the real part of `x` is returned. In case of casting from real
  types to complex types (`complex64`, `complex128`), the imaginary part of the
  returned value is set to `0`. The handling of complex types here matches the
  behavior of numpy.

  Note that casting nan and inf values to integral types has undefined behavior.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could
      be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,
      `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,
      `bfloat16`.
    dtype: The destination type. The list of supported dtypes is the same as
      `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and
      same type as `dtype`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  base_type = dtypes.as_dtype(dtype).base_dtype
  if isinstance(x,
                (ops.Tensor, _resource_variable_type)) and base_type == x.dtype:
    return x
  with ops.name_scope(name, "Cast", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      values_cast = cast(x.values, base_type, name=name)
      x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
    elif isinstance(x, indexed_slices.IndexedSlices):
      values_cast = cast(x.values, base_type, name=name)
      x = indexed_slices.IndexedSlices(values_cast, x.indices, x.dense_shape)
    else:
      # TODO(josh11b): If x is not already a Tensor, we could return
      # ops.convert_to_tensor(x, dtype=dtype, ...)  here, but that
      # allows some conversions that cast() can't do, e.g. casting numbers to
      # strings.
      x = ops.convert_to_tensor(x, name="x")
      if x.dtype != base_type:
        x = gen_math_ops.cast(x, base_type, name=name)
    if x.dtype.is_complex and base_type.is_floating:
      logging.warn("Casting complex to real discards imaginary part.")
    return x


@tf_export("dtypes.saturate_cast", "saturate_cast")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without applying any scaling.  If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast.

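  For example (an illustrative sketch; values outside the target dtype's range
  clamp to its bounds):

  ```python
  x = tf.constant([-1500.0, 200.0, 1500.0])
  tf.saturate_cast(x, tf.int8)  # ==> [-128, 127, 127]
  ```
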
  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  # When casting to a type with smaller representable range, clamp.
  # Note that this covers casting to unsigned types as well.
  with ops.name_scope(name, "saturate_cast", [value]) as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    if value.dtype.min < dtype.min:
      value = gen_math_ops.maximum(
          value,
          ops.convert_to_tensor(dtype.min, dtype=value.dtype, name="min"))
    if value.dtype.max > dtype.max:
      value = gen_math_ops.minimum(
          value,
          ops.convert_to_tensor(dtype.max, dtype=value.dtype, name="max"))
    return cast(value, dtype, name=name)


@tf_export(v1=["to_float"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to the `float32`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.float32)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_float(tf.constant(3.14, dtype=tf.double))
  <tf.Tensor: shape=(), dtype=float32, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.double), tf.float32)
  <tf.Tensor: shape=(), dtype=float32, numpy=3.14>

  @end_compatibility

  """
  return cast(x, dtypes.float32, name=name)


@tf_export(v1=["to_double"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to the `float64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.double)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_double(tf.constant(3.14, dtype=tf.float32))
  <tf.Tensor: shape=(), dtype=float64, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.float32), tf.double)
  <tf.Tensor: shape=(), dtype=float64, numpy=3.14>

  @end_compatibility

  """
  return cast(x, dtypes.float64, name=name)


@tf_export(v1=["to_int32"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to the `int32`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.int32)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_int32(tf.constant(1, dtype=tf.int64))
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  After:

  >>> tf.cast(tf.constant(1, dtype=tf.int64), tf.int32)
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  @end_compatibility

  """
  return cast(x, dtypes.int32, name=name)


@tf_export(v1=["to_int64"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to the `int64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.int64)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_int64(tf.constant(1, dtype=tf.int32))
  <tf.Tensor: shape=(), dtype=int64, numpy=1>

  After:

  >>> tf.cast(tf.constant(1, dtype=tf.int32), tf.int64)
  <tf.Tensor: shape=(), dtype=int64, numpy=1>

  @end_compatibility

  """
  return cast(x, dtypes.int64, name=name)


@tf_export(v1=["to_bfloat16"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to the `bfloat16`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.bfloat16)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_bfloat16(tf.constant(3.14, dtype=tf.float32))
  <tf.Tensor: shape=(), dtype=bfloat16, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.float32), tf.bfloat16)
  <tf.Tensor: shape=(), dtype=bfloat16, numpy=3.14>

  @end_compatibility

  """
  return cast(x, dtypes.bfloat16, name=name)


@tf_export(v1=["to_complex64"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_complex64(x, name="ToComplex64"):
  """Casts a tensor to type `complex64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex64`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.complex64)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_complex64(tf.constant(1. + 2.j, dtype=tf.complex128))
  <tf.Tensor: shape=(), dtype=complex64, numpy=(1+2j)>

  After:

  >>> tf.cast(tf.constant(1. + 2.j, dtype=tf.complex128), tf.complex64)
  <tf.Tensor: shape=(), dtype=complex64, numpy=(1+2j)>

  @end_compatibility

  """
  return cast(x, dtypes.complex64, name=name)


@tf_export(v1=["to_complex128"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_complex128(x, name="ToComplex128"):
  """Casts a tensor to type `complex128`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex128`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex128`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.complex128)`. There are no further issues with eager
  execution or tf.function.

  Before:

  >>> tf.compat.v1.to_complex128(tf.constant(1. + 2.j, dtype=tf.complex64))
  <tf.Tensor: shape=(), dtype=complex128, numpy=(1+2j)>

  After:

  >>> tf.cast(tf.constant(1. + 2.j, dtype=tf.complex64), tf.complex128)
  <tf.Tensor: shape=(), dtype=complex128, numpy=(1+2j)>

  @end_compatibility

  """
  return cast(x, dtypes.complex128, name=name)


ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)


def _maybe_get_dtype(x):
  """Returns a numpy type if available from x. Skips if x is numpy.ndarray."""
  # Don't put np.ndarray in this list, because np.result_type looks at the
  # value (not just dtype) of np.ndarray to decide the result type.
  if isinstance(x, numbers.Real):
    return x
  if isinstance(x, ops.Tensor):
    return x.dtype.as_numpy_dtype
  if isinstance(x, dtypes.DType):
    return x.as_numpy_dtype
  if isinstance(x, tensor_shape.TensorShape):
    return np.int32
  if isinstance(x, (list, tuple)):
    raise ValueError(f"Cannot determine dtype.  Got sequence {x}.")
  return x


def maybe_promote_tensors(*tensors, force_same_dtype=False):
  """Promotes tensors if numpy style promotion is enabled.

  This function promotes `tensors` according to numpy promotion rules
  if numpy style promotion is enabled.  Otherwise, if
  `force_same_dtype` is `True`, it force-casts `tensors[1:]` to
  `tensors[0]`'s dtype. Note that this force-cast can be problematic:
  some `tensors[1:]` elements can be silently downcast.

  Args:
    *tensors: the list of tensors to promote.
    force_same_dtype: bool (optional, defaults to `False`). When numpy
      style promotion is disabled and `force_same_dtype` is `True`,
      this function will force-cast `tensors[1:]` to `tensors[0]`'s
      dtype (which could be problematic).

  Returns:
    The promoted list of tensors.
  """
  if not tensors:
    return tensors
  if not ops._numpy_style_type_promotion:
    if not force_same_dtype:
      return tensors
    promoted_tensors = []
    promoted_tensors.append(tensors[0])
    dtype = tensors[0].dtype.base_dtype
    for tensor in tensors[1:]:
      promoted_tensors.append(
          ops.convert_to_tensor(tensor, dtype, name="x"))
    return promoted_tensors
  result_type = np_dtypes._result_type(
      *[_maybe_get_dtype(x) for x in nest.flatten(tensors)])
  def _promote_or_cast(x):
    if isinstance(x, ops.Tensor):
      x = cast(x, result_type)
    else:
      x = ops.convert_to_tensor(x, result_type)
    return x
  return [_promote_or_cast(x) for x in tensors]


def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
  """Register operators with different tensor and scalar versions.

  If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
  sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.

  Args:
    func: the operator
    op_name: name of the operator being overridden
    clazz_object: class to override for.  Either `Tensor` or `SparseTensor`.
  """

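  # A hedged sketch of how this helper is used elsewhere in the module (the
  # exact call sites may differ):
  #
  #   _OverrideBinaryOperatorHelper(gen_math_ops.add_v2, "add")
  #
  # after which `a + b` on Tensors dispatches through binary_op_wrapper below.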
  @traceback_utils.filter_traceback
  def binary_op_wrapper(x, y):
    with ops.name_scope(None, op_name, [x, y]) as name:
      try:
        # force_same_dtype=False to preserve existing TF behavior
        # TODO(b/178860388): Figure out why binary_op_wrapper and
        #   r_binary_op_wrapper use different force_same_dtype values.
        x, y = maybe_promote_tensors(x, y)
        return func(x, y, name=name)
      except (TypeError, ValueError) as e:
        # Even if dispatching the op failed, the RHS may be a tensor aware
        # object that can implement the operator with knowledge of itself
        # and the tensor.
        # If the RHS is not tensor aware we still want to raise the
        # original error from the LHS, because it may be more
        # informative.
        if hasattr(type(y), "__r%s__" % op_name):
          try:
            r_op = getattr(y, "__r%s__" % op_name)
            out = r_op(x)
            if out is NotImplemented:
              raise
            return out
          except (TypeError, ValueError):
            raise e
        else:
          raise

  @traceback_utils.filter_traceback
  def binary_op_wrapper_sparse(sp_x, y):
    with ops.name_scope(None, op_name, [sp_x, y]) as name:
      y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
      return sparse_tensor.SparseTensor(
          sp_x.indices,
          func(sp_x.indices, sp_x.values, sp_x.dense_shape, y, name=name),
          sp_x.dense_shape)

  @traceback_utils.filter_traceback
  def r_binary_op_wrapper(y, x):
    with ops.name_scope(None, op_name, [x, y]) as name:
      # TODO(b/178860388): Figure out why binary_op_wrapper and
      #   r_binary_op_wrapper use different force_same_dtype values.
      y, x = maybe_promote_tensors(y, x, force_same_dtype=True)
      return func(x, y, name=name)

  # Propagate func.__doc__ to the wrappers
  try:
    doc = func.__doc__
  except AttributeError:
    doc = None
  binary_op_wrapper.__doc__ = doc
  r_binary_op_wrapper.__doc__ = doc
  binary_op_wrapper_sparse.__doc__ = doc

  if clazz_object is ops.Tensor:
    clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper
    clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper
  else:
    clazz_object._override_operator("__%s__" % op_name,
                                    binary_op_wrapper_sparse)
    del binary_op_wrapper_sparse


# Conversion table for __truediv__.  None entries mean no conversion required.
_TRUEDIV_TABLE = {
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.uint32: dtypes.float64,
    dtypes.int32: dtypes.float64,
    dtypes.uint64: dtypes.float64,
    dtypes.int64: dtypes.float64,
    dtypes.bfloat16: None,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}


# NOTE: the support of "sparse (true)div dense" is currently not baked into
# "tf.(true_)div()".  Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
  """Internal helper function for 'sp_t / dense_t'."""
  with ops.name_scope(name, "truediv",
                      [sp_indices, sp_values, sp_shape, y]) as name:
    sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = sp_values.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError(
          f"Invalid dtype {x_dtype!r} in __truediv__. Expected one "
          f"of {{{', '.join([repr(x) for x in _TRUEDIV_TABLE.keys()])}}}.")
    if dtype is not None:
      sp_values = cast(sp_values, dtype)
      y = cast(y, dtype)
    return gen_sparse_ops.sparse_dense_cwise_div(
        sp_indices, sp_values, sp_shape, y, name=name)


def _truediv_python3(x, y, name=None):
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError(
          f"Invalid dtype {x_dtype!r} in __truediv__. Expected one "
          f"of {{{', '.join([repr(x) for x in _TRUEDIV_TABLE.keys()])}}}.")
    if dtype is not None:
      x = cast(x, dtype)
      y = cast(y, dtype)
    return gen_math_ops.real_div(x, y, name=name)


def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics.

  Used for Tensor.__div__.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """

  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    if x_dtype.is_floating or x_dtype.is_complex:
      return gen_math_ops.real_div(x, y, name=name)
    else:
      return gen_math_ops.floor_div(x, y, name=name)


@tf_export("math.truediv", "truediv")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide, which obey Python
  division operator semantics.

  This function forces Python 3 division operator semantics, where all integer
  arguments are cast to floating types first. This op is generated by normal
  `x / y` division in Python 3 and in Python 2.7 with
  `from __future__ import division`.  If you want integer division that rounds
  down, use `x // y` or `tf.math.floordiv`.

  `x` and `y` must have the same numeric type.  If the inputs are floating
  point, the output will have the same type.  If the inputs are integral, the
  inputs are cast to `float32` for `int8` and `int16`, and to `float64` for
  `int32` and `int64` (matching the behavior of NumPy).

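  For example (an illustrative doctest; the `int32` inputs below are promoted
  to `float64` before the division, as described above):

  >>> x = tf.constant([16, 12, 11])
  >>> y = tf.constant([4, 6, 2])
  >>> tf.math.truediv(x, y)
  <tf.Tensor: shape=(3,), dtype=float64, numpy=array([4. , 2. , 5.5])>
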
  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  return _truediv_python3(x, y, name)


@tf_export(v1=["div"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(
    date=None,
    instructions="Deprecated in favor of operator or tf.math.divide.")
def div(x, y, name=None):
  """Divides x / y elementwise (using Python 2 division operator semantics).

  @compatibility(TF2)
  This function is deprecated in TF2. Prefer using the Tensor division operator,
  `tf.divide`, or `tf.math.divide`, which obey the Python 3 division operator
  semantics.
  @end_compatibility

  This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x`
  and `y` are both integers then the result will be an integer. This is in
  contrast to Python 3, where division with `/` is always a float while division
  with `//` is always an integer.

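  For example (an illustrative doctest; integer inputs floor-divide, while
  floating-point inputs divide exactly):

  >>> tf.compat.v1.div(tf.constant(7), tf.constant(2))
  <tf.Tensor: shape=(), dtype=int32, numpy=3>
  >>> tf.compat.v1.div(tf.constant(7.0), tf.constant(2.0))
  <tf.Tensor: shape=(), dtype=float32, numpy=3.5>
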
  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  return _div_python2(x, y, name)


@tf_export("math.divide_no_nan", v1=["math.divide_no_nan", "div_no_nan"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("div_no_nan")
def div_no_nan(x, y, name=None):
  """Computes a safe divide which returns 0 if `y` (denominator) is zero.

  For example:

  >>> tf.constant(3.0) / 0.0
  <tf.Tensor: shape=(), dtype=float32, numpy=inf>
  >>> tf.math.divide_no_nan(3.0, 0.0)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.0>

  Note that 0 is returned if `y` is 0 even if `x` is nonfinite:

  >>> tf.math.divide_no_nan(np.nan, 0.0)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.0>

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    y: A `Tensor` whose dtype is compatible with `x`.
    name: A name for the operation (optional).

  Returns:
    The element-wise value of x divided by y.
  """

  with ops.name_scope(name, "div_no_nan", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    return gen_math_ops.div_no_nan(x, y, name=name)


@tf_export("math.multiply_no_nan")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def multiply_no_nan(x, y, name=None):
  """Computes the product of x and y and returns 0 if y is zero, even if x is NaN or infinite.

  Note this is noncommutative: if y is NaN or infinite and x is 0, the result
  will be NaN.

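  For example (an illustrative doctest):

  >>> x = tf.constant([2.0, np.nan])
  >>> y = tf.constant([3.0, 0.0])
  >>> tf.math.multiply_no_nan(x, y)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([6., 0.], dtype=float32)>
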
  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    y: A `Tensor` whose dtype is compatible with `x`.
    name: A name for the operation (optional).

  Returns:
    The element-wise value of x times y.
  """

  with ops.name_scope(name, "multiply_no_nan", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}")
    return gen_math_ops.mul_no_nan(x, y, name=name)


# TODO(aselle): This should be removed
mod = gen_math_ops.floor_mod


@tf_export("math.floordiv", v1=["math.floordiv", "floordiv"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("floordiv")
def floordiv(x, y, name=None):
  """Divides `x / y` elementwise, rounding toward the most negative integer.

  Mathematically, this is equivalent to floor(x / y). For example:
    floor(8.4 / 4.0) = floor(2.1) = 2.0
    floor(-8.4 / 4.0) = floor(-2.1) = -3.0
  This is equivalent to the '//' operator in Python.

  Note: `x` and `y` must have the same type, and the result will have the same
  type as well.

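  For example (an illustrative doctest):

  >>> tf.math.floordiv(tf.constant(8.4), tf.constant(4.0))
  <tf.Tensor: shape=(), dtype=float32, numpy=2.0>
  >>> tf.math.floordiv(tf.constant(-8.4), tf.constant(4.0))
  <tf.Tensor: shape=(), dtype=float32, numpy=-3.0>
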
  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded toward -infinity.

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.name_scope(name, "floordiv", [x, y]) as name:
    return gen_math_ops.floor_div(x, y, name=name)


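# Convenience aliases for generated elementwise division/modulo kernels.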
realdiv = gen_math_ops.real_div
truncatediv = gen_math_ops.truncate_div
floor_div = gen_math_ops.floor_div
truncatemod = gen_math_ops.truncate_mod
floormod = gen_math_ops.floor_mod


@tf_export("__operators__.add", v1=[])
@dispatch.add_dispatch_support
def _add_dispatch(x, y, name=None):
  """The operation invoked by the `Tensor.__add__` operator.

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__add__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly, although it
    does appear in TensorFlow's generated documentation.

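  For example (an illustrative doctest; `string` tensors dispatch to the
  legacy `add` kernel, all other dtypes to `add_v2`):

  >>> x = tf.constant(["hello "])
  >>> y = tf.constant(["world"])
  >>> x + y
  <tf.Tensor: shape=(1,), dtype=string, numpy=array([b'hello world'], dtype=object)>
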
  Args:
    x: The left-hand side of the `+` operator.
    y: The right-hand side of the `+` operator.
    name: An optional name for the operation.

  Returns:
    The result of the elementwise `+` operation.
  """
  if not isinstance(y, ops.Tensor) and not isinstance(
      y, sparse_tensor.SparseTensor):
    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
  if x.dtype == dtypes.string:
    return gen_math_ops.add(x, y, name=name)
  else:
    return gen_math_ops.add_v2(x, y, name=name)


def _mul_dispatch(x, y, name=None):
  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
  if isinstance(y, sparse_tensor.SparseTensor):  # Case: Dense * Sparse.
    new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
                                                     y.dense_shape, x, name)
    return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
  else:
    return multiply(x, y, name=name)


# NOTE(aselle): When integer division is added for sparse_dense_cwise,
# div, truediv, and floordiv should be delegated appropriately for
# Python semantics, analogous to dense cwise tensor operations.
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
                              sparse_tensor.SparseTensor)

_OverrideBinaryOperatorHelper(_add_dispatch, "add")
_OverrideBinaryOperatorHelper(subtract, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")


@tf_export("math.logical_xor", v1=["math.logical_xor", "logical_xor"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("logical_xor")
def logical_xor(x, y, name="LogicalXor"):
  """Logical XOR function.

  x ^ y = (x | y) & ~(x & y)

  Requires that `x` and `y` have the same shape or have
  [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  shapes. For example, `x` and `y` can be:

  - Two single elements of type `bool`
  - One `tf.Tensor` of type `bool` and one single `bool`, where the result will
    be calculated by applying logical XOR with the single element to each
    element in the larger Tensor.
  - Two `tf.Tensor` objects of type `bool` of the same shape. In this case,
    the result will be the element-wise logical XOR of the two input tensors.

  Usage:

  >>> a = tf.constant([True])
  >>> b = tf.constant([False])
  >>> tf.math.logical_xor(a, b)
  <tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>

  >>> c = tf.constant([True])
  >>> x = tf.constant([False, True, True, False])
  >>> tf.math.logical_xor(c, x)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([ True, False, False,  True])>

  >>> y = tf.constant([False, False, True, True])
  >>> z = tf.constant([False, True, False, True])
  >>> tf.math.logical_xor(y, z)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False,  True,  True, False])>

  Args:
      x: A `tf.Tensor` of type bool.
      y: A `tf.Tensor` of type bool.
      name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.
  """
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  return gen_math_ops.logical_and(
      gen_math_ops.logical_or(x, y),
      gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
      name=name)


def and_(x, y, name=None):
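  """Dispatches to logical_and for bool tensors, bitwise_and otherwise."""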
  if x.dtype == dtypes.bool:
    return gen_math_ops.logical_and(x, y, name)
  return gen_bitwise_ops.bitwise_and(x, y)


def or_(x, y, name=None):
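  """Dispatches to logical_or for bool tensors, bitwise_or otherwise."""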
  if x.dtype == dtypes.bool:
    return gen_math_ops.logical_or(x, y, name)
  return gen_bitwise_ops.bitwise_or(x, y)


def xor_(x, y, name=None):
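  """Dispatches to logical_xor for bool tensors, bitwise_xor otherwise."""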
  if x.dtype == dtypes.bool:
    return logical_xor(x, y, name)
  return gen_bitwise_ops.bitwise_xor(x, y)


def invert_(x, name=None):
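  """Dispatches to logical_not for bool tensors, bitwise invert otherwise."""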
  if x.dtype == dtypes.bool:
    return gen_math_ops.logical_not(x, name=name)
  return gen_bitwise_ops.invert(x, name=name)


_OverrideBinaryOperatorHelper(and_, "and")
_OverrideBinaryOperatorHelper(or_, "or")
_OverrideBinaryOperatorHelper(xor_, "xor")
ops.Tensor._override_operator("__invert__", invert_)


def _promote_dtypes_decorator(fn):
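  """Wraps `fn` so its first two arguments are promoted to a common dtype."""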
  def wrapper(x, y, *args, **kwargs):
    x, y = maybe_promote_tensors(x, y)
    return fn(x, y, *args, **kwargs)
  return tf_decorator.make_decorator(fn, wrapper)


ops.Tensor._override_operator("__lt__", _promote_dtypes_decorator(
    gen_math_ops.less))
ops.Tensor._override_operator("__le__", _promote_dtypes_decorator(
    gen_math_ops.less_equal))
ops.Tensor._override_operator("__gt__", _promote_dtypes_decorator(
    gen_math_ops.greater))
ops.Tensor._override_operator("__ge__", _promote_dtypes_decorator(
    gen_math_ops.greater_equal))


@tf_export("math.equal", "equal")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def equal(x, y, name=None):
  """Returns the truth value of (x == y) element-wise.

  Performs a [broadcast](
  https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
  arguments and then an element-wise equality comparison, returning a Tensor of
  boolean values.

  For example:

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant(2)
  >>> tf.math.equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant([2, 4])
  >>> tf.math.equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True,  True])>

  Args:
    x: A `tf.Tensor`.
    y: A `tf.Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.

  Raises:
    `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible.
  """
  return gen_math_ops.equal(x, y, name=name)


@tf_export("math.not_equal", "not_equal")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def not_equal(x, y, name=None):
  """Returns the truth value of (x != y) element-wise.

  Performs a [broadcast](
  https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
  arguments and then an element-wise inequality comparison, returning a Tensor
  of boolean values.

  For example:

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant(2)
  >>> tf.math.not_equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  True])>

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant([2, 4])
  >>> tf.math.not_equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>

  Args:
    x: A `tf.Tensor`.
    y: A `tf.Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.

  Raises:
    `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible.
  """
  return gen_math_ops.not_equal(x, y, name=name)


@tf_export("__operators__.eq", v1=[])
@dispatch.add_dispatch_support
def tensor_equals(self, other):
  """The operation invoked by the `Tensor.__eq__` operator.

  Compares two tensors element-wise for equality if they are
  broadcast-compatible; or returns False if they are not broadcast-compatible.
  (Note that this behavior differs from `tf.math.equal`, which raises an
  exception if the two tensors are not broadcast-compatible.)

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__eq__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly, although it
    does appear in TensorFlow's generated documentation.

  Args:
    self: The left-hand side of the `==` operator.
    other: The right-hand side of the `==` operator.

  Returns:
    The result of the elementwise `==` operation, or `False` if the arguments
    are not broadcast-compatible.
  """
  if other is None:
    return False
  g = getattr(self, "graph", None)
  if (ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions() and
      (g is None or g.building_function)):
    self, other = maybe_promote_tensors(self, other)
    return gen_math_ops.equal(self, other, incompatible_shape_error=False)
  else:
    # In legacy graph mode, tensor equality is object equality
    return self is other


@tf_export("__operators__.ne", v1=[])
@dispatch.add_dispatch_support
def tensor_not_equals(self, other):
  """The operation invoked by the `Tensor.__ne__` operator.

  Compares two tensors element-wise for inequality if they are
  broadcast-compatible; or returns True if they are not broadcast-compatible.
  (Note that this behavior differs from `tf.math.not_equal`, which raises an
  exception if the two tensors are not broadcast-compatible.)

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__ne__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly, although it
    does appear in TensorFlow's generated documentation.

  Args:
    self: The left-hand side of the `!=` operator.
    other: The right-hand side of the `!=` operator.

  Returns:
    The result of the elementwise `!=` operation, or `True` if the arguments
    are not broadcast-compatible.
  """
  if other is None:
    return True
  if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions():
    self, other = maybe_promote_tensors(self, other)
    return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
  else:
    # In legacy graph mode, tensor equality is object equality
    return self is not other


ops.Tensor._override_operator("__eq__", tensor_equals)
ops.Tensor._override_operator("__ne__", tensor_not_equals)


@tf_export("range")
@dispatch.add_dispatch_support
def range(start, limit=None, delta=1, dtype=None, name="range"):  # pylint: disable=redefined-builtin
  """Creates a sequence of numbers.

  Creates a sequence of numbers that begins at `start` and extends by
  increments of `delta` up to but not including `limit`.

  The dtype of the resulting tensor is inferred from the inputs unless
  it is provided explicitly.

  Like the Python builtin `range`, `start` defaults to 0, so that
  `range(n) = range(0, n)`.

  For example:

  >>> start = 3
  >>> limit = 18
  >>> delta = 3
  >>> tf.range(start, limit, delta)
  <tf.Tensor: shape=(5,), dtype=int32,
  numpy=array([ 3,  6,  9, 12, 15], dtype=int32)>

  >>> start = 3
  >>> limit = 1
  >>> delta = -0.5
  >>> tf.range(start, limit, delta)
  <tf.Tensor: shape=(4,), dtype=float32,
  numpy=array([3. , 2.5, 2. , 1.5], dtype=float32)>

  >>> limit = 5
  >>> tf.range(limit)
  <tf.Tensor: shape=(5,), dtype=int32,
  numpy=array([0, 1, 2, 3, 4], dtype=int32)>

  Args:
    start: A 0-D `Tensor` (scalar). Acts as first entry in the range if `limit`
      is not None; otherwise, acts as range limit and first entry defaults to 0.
    limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None,
      defaults to the value of `start` while the first entry of the range
      defaults to 0.
    delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to
      1.
    dtype: The type of the elements of the resulting tensor.
    name: A name for the operation. Defaults to "range".

  Returns:
    A 1-D `Tensor` of type `dtype`.

  @compatibility(numpy)
  Equivalent to np.arange
  @end_compatibility
  """
  if limit is None:
    start, limit = 0, start

  with ops.name_scope(name, "Range", [start, limit, delta]) as name:
    if not isinstance(start, ops.Tensor):
      start = ops.convert_to_tensor(start, dtype=dtype, name="start")
    if not isinstance(limit, ops.Tensor):
      limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
    if not isinstance(delta, ops.Tensor):
      delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")

    # infer dtype if not explicitly provided
    if dtype is None:
      dtype_hierarchy = [
          dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
      ]
      assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
      inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],
                           key=dtype_hierarchy.index)
    else:
      inferred_dtype = dtype
    # Always try to perform a cast even when start/limit/delta are already
    # tensors. This resolves the case where the original dtype of
    # start/limit/delta differs from the provided dtype.
    start = cast(start, inferred_dtype)
    limit = cast(limit, inferred_dtype)
    delta = cast(delta, inferred_dtype)

    return gen_math_ops._range(start, limit, delta, name=name)


def _range_tensor_conversion_function(value, dtype=None, name=None,
                                      as_ref=False):
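  """Converts a Python `builtins.range` to a 1-D `Tensor` via `tf.range`."""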
  del as_ref
  return range(value.start, value.stop, value.step, dtype=dtype, name=name)


ops.register_tensor_conversion_function(builtins.range,
                                        _range_tensor_conversion_function)


# Reduction operations
def _ReductionDims(x, axis):  # pylint: disable=invalid-name
  """Returns range(0, rank(x)) if axis is None."""
  if axis is not None:
    return axis
  else:
    try:
      x_rank = x.shape.rank
    except AttributeError:
      x_rank = None

    # Fast path: avoid creating Rank and Range ops if ndims is known.
    if x_rank:
      return constant_op.constant(np.arange(x_rank, dtype=np.int32))
    else:
      # Otherwise, we rely on Range and Rank to do the right thing at run-time.
      return range(0, array_ops.rank(x))


def _has_fully_defined_shape(tensor):
  """Returns true if tensor has a fully defined shape."""
  return isinstance(tensor, ops.EagerTensor) or tensor.shape.is_fully_defined()


def _may_reduce_to_scalar(keepdims, axis, output):
  """Set a reduction's output shape to be a scalar if we are certain."""
  if not _has_fully_defined_shape(output) and (not keepdims) and (
      axis is None):
    output.set_shape(())
  return output


@tf_export(v1=["math.reduce_sum", "reduce_sum"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_sum_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes the sum of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.add` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

    >>> # x has a shape of (2, 3) (two rows and three columns):
    >>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
    >>> x.numpy()
    array([[1, 1, 1],
           [1, 1, 1]], dtype=int32)
    >>> # sum all the elements
    >>> # 1 + 1 + 1 + 1 + 1 + 1 = 6
    >>> tf.reduce_sum(x).numpy()
    6
    >>> # reduce along the first dimension
    >>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
    >>> tf.reduce_sum(x, 0).numpy()
    array([2, 2, 2], dtype=int32)
    >>> # reduce along the second dimension
    >>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
    >>> tf.reduce_sum(x, 1).numpy()
    array([3, 3], dtype=int32)
    >>> # keep the original dimensions
    >>> tf.reduce_sum(x, 1, keepdims=True).numpy()
    array([[3],
           [3]], dtype=int32)
    >>> # reduce along both dimensions
    >>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
    >>> # or, equivalently, reduce along rows, then reduce the resultant array
    >>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
    >>> # 2 + 2 + 2 = 6
    >>> tf.reduce_sum(x, [0, 1]).numpy()
    6

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and int32
  to int64, while tensorflow returns the same dtype as the input.
  @end_compatibility
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_sum(input_tensor, axis, keepdims, name)


@tf_export("math.reduce_sum", "reduce_sum", v1=[])
@dispatch.add_dispatch_support
def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the sum of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.add` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

    >>> # x has a shape of (2, 3) (two rows and three columns):
    >>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
    >>> x.numpy()
    array([[1, 1, 1],
           [1, 1, 1]], dtype=int32)
    >>> # sum all the elements
    >>> # 1 + 1 + 1 + 1 + 1 + 1 = 6
    >>> tf.reduce_sum(x).numpy()
    6
    >>> # reduce along the first dimension
    >>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
    >>> tf.reduce_sum(x, 0).numpy()
    array([2, 2, 2], dtype=int32)
    >>> # reduce along the second dimension
    >>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
    >>> tf.reduce_sum(x, 1).numpy()
    array([3, 3], dtype=int32)
    >>> # keep the original dimensions
    >>> tf.reduce_sum(x, 1, keepdims=True).numpy()
    array([[3],
           [3]], dtype=int32)
    >>> # reduce along both dimensions
    >>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
    >>> # or, equivalently, reduce along rows, then reduce the resultant array
    >>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
    >>> # 2 + 2 + 2 = 6
    >>> tf.reduce_sum(x, [0, 1]).numpy()
    6

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and int32
  to int64, while tensorflow returns the same dtype as the input.
  @end_compatibility
  """

  return reduce_sum_with_dims(input_tensor, axis, keepdims, name,
                              _ReductionDims(input_tensor, axis))


def reduce_sum_with_dims(input_tensor,
                         axis=None,
                         keepdims=False,
                         name=None,
                         dims=None):
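  """Implementation of reduce_sum that takes precomputed reduction dims."""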
  keepdims = False if keepdims is None else bool(keepdims)
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops._sum(input_tensor, dims, keepdims, name=name))


@tf_export("math.reduce_euclidean_norm")
@dispatch.add_dispatch_support
def reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the Euclidean norm of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  x = tf.constant([[1, 2, 3], [1, 1, 1]]) # x.dtype is tf.int32
  tf.math.reduce_euclidean_norm(x)  # returns 4 as dtype is tf.int32
  y = tf.constant([[1, 2, 3], [1, 1, 1]], dtype = tf.float32)
  tf.math.reduce_euclidean_norm(y)  # returns 4.1231055 which is sqrt(17)
  tf.math.reduce_euclidean_norm(y, 0)  # [sqrt(2), sqrt(5), sqrt(10)]
  tf.math.reduce_euclidean_norm(y, 1)  # [sqrt(14), sqrt(3)]
  tf.math.reduce_euclidean_norm(y, 1, keepdims=True)  # [[sqrt(14)], [sqrt(3)]]
  tf.math.reduce_euclidean_norm(y, [0, 1])  # sqrt(17)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.
  """
  keepdims = bool(keepdims)
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops.euclidean_norm(
          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))


@tf_export(v1=["math.count_nonzero", "count_nonzero"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
@deprecation.deprecated_args(
    None, "reduction_indices is deprecated, use axis instead",
    "reduction_indices")
def count_nonzero(input_tensor=None,
                  axis=None,
                  keepdims=None,
                  dtype=dtypes.int64,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None,
                  input=None):  # pylint: disable=redefined-builtin
  """Computes number of nonzero elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  **NOTE** Floating point comparison to zero is done by an exact floating
  point equality check.  Small values are **not** rounded to zero for the
  purposes of the nonzero check.

  For example:

  ```python
  x = tf.constant([[0, 1, 0], [1, 1, 0]])
  tf.math.count_nonzero(x)  # 3
  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
  tf.math.count_nonzero(x, 1)  # [1, 2]
  tf.math.count_nonzero(x, 1, keepdims=True)  # [[1], [2]]
  tf.math.count_nonzero(x, [0, 1])  # 3
  ```

  **NOTE** Strings are compared against the zero-length empty string `""`. Any
  string with a size greater than zero is considered nonzero.

  For example:
  ```python
  x = tf.constant(["", "a", "  ", "b", ""])
  tf.math.count_nonzero(x) # 3, with "a", "  ", and "b" as nonzero strings.
  ```

  Args:
    input_tensor: The tensor to reduce. Should be of numeric type, `bool`, or
      `string`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.
    input: Overrides input_tensor. For compatibility.

  Returns:
    The reduced tensor (number of nonzero values).
  """
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  input_tensor = deprecation.deprecated_argument_lookup("input", input,
                                                        "input_tensor",
                                                        input_tensor)
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)

  return count_nonzero_v2(input_tensor, axis, keepdims, dtype, name)


@tf_export("math.count_nonzero", v1=[])
@dispatch.add_dispatch_support
def count_nonzero_v2(
    input,  # pylint: disable=redefined-builtin
    axis=None,
    keepdims=None,
    dtype=dtypes.int64,
    name=None):
  """Computes number of nonzero elements across dimensions of a tensor.

  Reduces `input` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  **NOTE** Floating point comparison to zero is done by an exact floating
  point equality check.  Small values are **not** rounded to zero for the
  purposes of the nonzero check.

  For example:

  ```python
  x = tf.constant([[0, 1, 0], [1, 1, 0]])
  tf.math.count_nonzero(x)  # 3
  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
  tf.math.count_nonzero(x, 1)  # [1, 2]
  tf.math.count_nonzero(x, 1, keepdims=True)  # [[1], [2]]
  tf.math.count_nonzero(x, [0, 1])  # 3
  ```

  **NOTE** Strings are compared against the zero-length empty string `""`. Any
  string with a size greater than zero is considered nonzero.

  For example:
  ```python
  x = tf.constant(["", "a", "  ", "b", ""])
  tf.math.count_nonzero(x) # 3, with "a", "  ", and "b" as nonzero strings.
  ```

  Args:
    input: The tensor to reduce. Should be of numeric type, `bool`, or `string`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input), rank(input))`.
    keepdims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor (number of nonzero values).
  """
  if keepdims is None:
    keepdims = False
  with ops.name_scope(name, "count_nonzero", [input]):
    input = ops.convert_to_tensor(input, name="input")
    # A scalar of 'zero' is enough as `not_equal` will broadcast.
    zero = array_ops.zeros([], dtype=input.dtype)
    return cast(
        reduce_sum(
            # int64 reduction happens on GPU
            cast(gen_math_ops.not_equal(input, zero), dtypes.int64),
            axis=axis,
            keepdims=keepdims),
        dtype=dtype)


@tf_export(v1=["math.reduce_mean", "reduce_mean"])
@dispatch.add_dispatch_support
def reduce_mean_v1(input_tensor,
                   axis=None,
                   keepdims=None,
                   name=None,
                   reduction_indices=None,
                   keep_dims=None):
  """Computes the mean of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis` by computing the
  mean of elements across the dimensions in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  For example:

  >>> x = tf.constant([[1., 1.], [2., 2.]])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
  >>> tf.reduce_mean(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
  >>> tf.reduce_mean(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean

  Please note that `np.mean` has a `dtype` parameter that could be used to
  specify the output type. By default this is `dtype=float64`. On the other
  hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
  for example:

  >>> x = tf.constant([1, 0, 1, 0])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>
  >>> y = tf.constant([1., 0., 1., 0.])
  >>> tf.reduce_mean(y)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>

  @end_compatibility
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_mean(input_tensor, axis, keepdims, name)


@tf_export("math.reduce_mean", "reduce_mean", v1=[])
@dispatch.add_dispatch_support
def reduce_mean(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the mean of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis` by computing the
  mean of elements across the dimensions in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  For example:

  >>> x = tf.constant([[1., 1.], [2., 2.]])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
  >>> tf.reduce_mean(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
  >>> tf.reduce_mean(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean

  Please note that `np.mean` has a `dtype` parameter that could be used to
  specify the output type. By default this is `dtype=float64`. On the other
  hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
  for example:

  >>> x = tf.constant([1, 0, 1, 0])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>
  >>> y = tf.constant([1., 0., 1., 0.])
  >>> tf.reduce_mean(y)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>

  @end_compatibility
  """
  keepdims = False if keepdims is None else bool(keepdims)
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops.mean(
          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))


@tf_export("math.reduce_variance")
@dispatch.add_dispatch_support
def reduce_variance(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the variance of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.math.reduce_variance(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.25>
  >>> tf.math.reduce_variance(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], ...)>
  >>> tf.math.reduce_variance(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.25, 0.25], ...)>

  Args:
    input_tensor: The tensor to reduce. Should have real or complex type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name scope for the associated operations (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor. Note, for
    `complex64` or `complex128` input, the returned `Tensor` will be of type
    `float32` or `float64`, respectively.

  @compatibility(numpy)
  Equivalent to np.var

  Please note `np.var` has a `dtype` parameter that could be used to specify the
  output type. By default this is `dtype=float64`. On the other hand,
  `tf.math.reduce_variance` has aggressive type inference from `input_tensor`.
  @end_compatibility
  """
  name = name if name else "reduce_variance"
  with ops.name_scope(name):
    input_tensor = ops.convert_to_tensor(input_tensor)
    means = reduce_mean(input_tensor, axis=axis, keepdims=True)
    if means.dtype.is_integer:
      raise TypeError(f"Input must be either real or complex. "
                      f"Received integer type {means.dtype}.")
    diff = input_tensor - means
    if diff.dtype.is_complex:
      # For complex values we need to take the absolute value before squaring.
      # This is achieved by multiplying with the conjugate.
      real_dtype = diff.dtype.real_dtype
      squared_deviations = gen_math_ops.real(
          gen_math_ops.mul(gen_math_ops.conj(diff), diff), Tout=real_dtype)
    else:
      squared_deviations = gen_math_ops.square(diff)
    return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims)


@tf_export("math.reduce_std")
@dispatch.add_dispatch_support
def reduce_std(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the standard deviation of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.math.reduce_std(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.118034>
  >>> tf.math.reduce_std(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)>
  >>> tf.math.reduce_std(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.5, 0.5], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have real or complex type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name scope for the associated operations (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor. Note, for
    `complex64` or `complex128` input, the returned `Tensor` will be of type
    `float32` or `float64`, respectively.

  @compatibility(numpy)
  Equivalent to np.std

  Please note `np.std` has a `dtype` parameter that could be used to specify the
  output type. By default this is `dtype=float64`. On the other hand,
  `tf.math.reduce_std` has aggressive type inference from `input_tensor`.
  @end_compatibility
  """
  name = name if name else "reduce_std"
  with ops.name_scope(name):
    input_tensor = ops.convert_to_tensor(input_tensor)
    variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)
    return gen_math_ops.sqrt(variance)


@tf_export("math.reduce_prod", "reduce_prod", v1=[])
@dispatch.add_dispatch_support
def reduce_prod(input_tensor, axis=None, keepdims=False, name=None):
  """Computes `tf.math.multiply` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.multiply` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

    >>> x = tf.constant([[1., 2.], [3., 4.]])
    >>> tf.math.reduce_prod(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=24.>
    >>> tf.math.reduce_prod(x, 0)
    <tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
    >>> tf.math.reduce_prod(x, 1)
    <tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
    dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  """
  keepdims = False if keepdims is None else bool(keepdims)
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops.prod(
          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))


@tf_export(v1=["math.reduce_prod", "reduce_prod"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_prod_v1(input_tensor,
                   axis=None,
                   keepdims=None,
                   name=None,
                   reduction_indices=None,
                   keep_dims=None):
  """Computes `tf.math.multiply` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.multiply` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

    >>> x = tf.constant([[1., 2.], [3., 4.]])
    >>> tf.math.reduce_prod(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=24.>
    >>> tf.math.reduce_prod(x, 0)
    <tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
    >>> tf.math.reduce_prod(x, 1)
    <tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
    dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_prod(input_tensor, axis, keepdims, name)


@tf_export(v1=["math.reduce_min", "reduce_min"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_min_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes the `tf.math.minimum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.minimum` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  Usage example:

    >>> x = tf.constant([5, 1, 2, 4])
    >>> tf.reduce_min(x)
    <tf.Tensor: shape=(), dtype=int32, numpy=1>
    >>> x = tf.constant([-5, -1, -2, -4])
    >>> tf.reduce_min(x)
    <tf.Tensor: shape=(), dtype=int32, numpy=-5>
    >>> x = tf.constant([4, float('nan')])
    >>> tf.reduce_min(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
    >>> x = tf.constant([float('nan'), float('nan')])
    >>> tf.reduce_min(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
    >>> x = tf.constant([float('-inf'), float('inf')])
    >>> tf.reduce_min(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=-inf>

  See the numpy docs for `np.amin` and `np.nanmin` behavior.

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_min(input_tensor, axis, keepdims, name)


@tf_export("math.reduce_min", "reduce_min", v1=[])
@dispatch.add_dispatch_support
def reduce_min(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the `tf.math.minimum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.minimum` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  >>> a = tf.constant([
  ...   [[1, 2], [3, 4]],
  ...   [[1, 2], [3, 4]]
  ... ])
  >>> tf.reduce_min(a)
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  Choosing a specific axis returns the minimum element in the given axis:

  >>> b = tf.constant([[1, 2, 3], [4, 5, 6]])
  >>> tf.reduce_min(b, axis=0)
  <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 2, 3], dtype=int32)>
  >>> tf.reduce_min(b, axis=1)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 4], dtype=int32)>

  Setting `keepdims` to `True` retains the dimension of `input_tensor`:

  >>> tf.reduce_min(a, keepdims=True)
  <tf.Tensor: shape=(1, 1, 1), dtype=int32, numpy=array([[[1]]], dtype=int32)>
  >>> tf.math.reduce_min(a, axis=0, keepdims=True)
  <tf.Tensor: shape=(1, 2, 2), dtype=int32, numpy=
  array([[[1, 2],
          [3, 4]]], dtype=int32)>

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.min
  @end_compatibility
  """
  keepdims = False if keepdims is None else bool(keepdims)
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops._min(
          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))


@tf_export(v1=["math.reduce_max", "reduce_max"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_max_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes `tf.math.maximum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.maximum` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  Usage example:

    >>> x = tf.constant([5, 1, 2, 4])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=int32, numpy=5>
    >>> x = tf.constant([-5, -1, -2, -4])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=int32, numpy=-1>
    >>> x = tf.constant([4, float('nan')])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
    >>> x = tf.constant([float('nan'), float('nan')])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
    >>> x = tf.constant([float('-inf'), float('inf')])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=inf>

  See the numpy docs for `np.amax` and `np.nanmax` behavior.

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
3053                                                reduction_indices)
3054  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
3055                                                    "keep_dims", keep_dims)
3056  return reduce_max(input_tensor, axis, keepdims, name)
3057
3058
3059@tf_export("math.reduce_max", "reduce_max", v1=[])
3060@dispatch.add_dispatch_support
3061def reduce_max(input_tensor, axis=None, keepdims=False, name=None):
3062  """Computes `tf.math.maximum` of elements across dimensions of a tensor.
3063
3064  This is the reduction operation for the elementwise `tf.math.maximum` op.
3065
3066  Reduces `input_tensor` along the dimensions given in `axis`.
3067  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3068  of the entries in `axis`, which must be unique. If `keepdims` is true, the
3069  reduced dimensions are retained with length 1.
3070
3071  If `axis` is None, all dimensions are reduced, and a
3072  tensor with a single element is returned.
3073
3074  Usage example:
3075
3076    >>> x = tf.constant([5, 1, 2, 4])
3077    >>> tf.reduce_max(x)
3078    <tf.Tensor: shape=(), dtype=int32, numpy=5>
3079    >>> x = tf.constant([-5, -1, -2, -4])
3080    >>> tf.reduce_max(x)
3081    <tf.Tensor: shape=(), dtype=int32, numpy=-1>
3082    >>> x = tf.constant([4, float('nan')])
3083    >>> tf.reduce_max(x)
3084    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
3085    >>> x = tf.constant([float('nan'), float('nan')])
3086    >>> tf.reduce_max(x)
3087    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
3088    >>> x = tf.constant([float('-inf'), float('inf')])
3089    >>> tf.reduce_max(x)
3090    <tf.Tensor: shape=(), dtype=float32, numpy=inf>
3091
3092  See the numpy docs for `np.amax` and `np.nanmax` behavior.
3093
3094  Args:
3095    input_tensor: The tensor to reduce. Should have real numeric type.
3096    axis: The dimensions to reduce. If `None` (the default), reduces all
3097      dimensions. Must be in the range `[-rank(input_tensor),
3098      rank(input_tensor))`.
3099    keepdims: If true, retains reduced dimensions with length 1.
3100    name: A name for the operation (optional).
3101
3102  Returns:
3103    The reduced tensor.
3104  """
3105  return reduce_max_with_dims(input_tensor, axis, keepdims, name,
3106                              _ReductionDims(input_tensor, axis))
3107
3108
3109def reduce_max_with_dims(input_tensor,
3110                         axis=None,
3111                         keepdims=False,
3112                         name=None,
3113                         dims=None):
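  """`reduce_max` helper that reuses a precomputed reduction `dims` tensor."""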
3114  keepdims = False if keepdims is None else bool(keepdims)
3115  return _may_reduce_to_scalar(
3116      keepdims, axis,
3117      gen_math_ops._max(input_tensor, dims, keepdims, name=name))
3118
3119
3120@tf_export(v1=["math.reduce_all", "reduce_all"])
3121@dispatch.add_dispatch_support
3122@deprecation.deprecated_args(None,
3123                             "keep_dims is deprecated, use keepdims instead",
3124                             "keep_dims")
3125def reduce_all_v1(input_tensor,
3126                  axis=None,
3127                  keepdims=None,
3128                  name=None,
3129                  reduction_indices=None,
3130                  keep_dims=None):
3131  """Computes `tf.math.logical_and` of elements across dimensions of a tensor.
3132
3133  This is the reduction operation for the elementwise `tf.math.logical_and` op.
3134
3135  Reduces `input_tensor` along the dimensions given in `axis`.
3136  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3137  of the entries in `axis`, which must be unique. If `keepdims` is true, the
3138  reduced dimensions are retained with length 1.
3139
3140  If `axis` is None, all dimensions are reduced, and a
3141  tensor with a single element is returned.
3142
3143  For example:
3144
3145    >>> x = tf.constant([[True,  True], [False, False]])
3146    >>> tf.math.reduce_all(x)
3147    <tf.Tensor: shape=(), dtype=bool, numpy=False>
3148    >>> tf.math.reduce_all(x, 0)
3149    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
3150    >>> tf.math.reduce_all(x, 1)
3151    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
3152
3153  Args:
3154    input_tensor: The boolean tensor to reduce.
3155    axis: The dimensions to reduce. If `None` (the default), reduces all
3156      dimensions. Must be in the range `[-rank(input_tensor),
3157      rank(input_tensor))`.
3158    keepdims: If true, retains reduced dimensions with length 1.
3159    name: A name for the operation (optional).
3160    reduction_indices: The old (deprecated) name for axis.
3161    keep_dims: Deprecated alias for `keepdims`.
3162
3163  Returns:
3164    The reduced tensor.
3165
3166  @compatibility(numpy)
3167  Equivalent to np.all
3168  @end_compatibility
3169  """
3170  axis = deprecation.deprecated_argument_lookup("axis", axis,
3171                                                "reduction_indices",
3172                                                reduction_indices)
3173  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
3174                                                    "keep_dims", keep_dims)
3175  return reduce_all(input_tensor, axis, keepdims, name)
3176
3177
3178@tf_export("math.reduce_all", "reduce_all", v1=[])
3179@dispatch.add_dispatch_support
3180def reduce_all(input_tensor, axis=None, keepdims=False, name=None):
3181  """Computes `tf.math.logical_and` of elements across dimensions of a tensor.
3182
3183  This is the reduction operation for the elementwise `tf.math.logical_and` op.
3184
3185  Reduces `input_tensor` along the dimensions given in `axis`.
3186  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3187  of the entries in `axis`, which must be unique. If `keepdims` is true, the
3188  reduced dimensions are retained with length 1.
3189
3190  If `axis` is None, all dimensions are reduced, and a
3191  tensor with a single element is returned.
3192
3193  For example:
3194
3195    >>> x = tf.constant([[True,  True], [False, False]])
3196    >>> tf.math.reduce_all(x)
3197    <tf.Tensor: shape=(), dtype=bool, numpy=False>
3198    >>> tf.math.reduce_all(x, 0)
3199    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
3200    >>> tf.math.reduce_all(x, 1)
3201    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
3202
3203  Args:
3204    input_tensor: The boolean tensor to reduce.
3205    axis: The dimensions to reduce. If `None` (the default), reduces all
3206      dimensions. Must be in the range `[-rank(input_tensor),
3207      rank(input_tensor))`.
3208    keepdims: If true, retains reduced dimensions with length 1.
3209    name: A name for the operation (optional).
3210
3211  Returns:
3212    The reduced tensor.
3213
3214  @compatibility(numpy)
3215  Equivalent to np.all
3216  @end_compatibility
3217  """
3218  keepdims = False if keepdims is None else bool(keepdims)
3219  return _may_reduce_to_scalar(
3220      keepdims, axis,
3221      gen_math_ops._all(
3222          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
3223          name=name))
3224
3225
3226@tf_export(v1=["math.reduce_any", "reduce_any"])
3227@dispatch.add_dispatch_support
3228@deprecation.deprecated_args(None,
3229                             "keep_dims is deprecated, use keepdims instead",
3230                             "keep_dims")
3231def reduce_any_v1(input_tensor,
3232                  axis=None,
3233                  keepdims=None,
3234                  name=None,
3235                  reduction_indices=None,
3236                  keep_dims=None):
3237  """Computes `tf.math.logical_or` of elements across dimensions of a tensor.
3238
3239  This is the reduction operation for the elementwise `tf.math.logical_or` op.
3240
3241  Reduces `input_tensor` along the dimensions given in `axis`.
3242  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3243  of the entries in `axis`, which must be unique. If `keepdims` is true, the
3244  reduced dimensions are retained with length 1.
3245
3246  If `axis` is None, all dimensions are reduced, and a
3247  tensor with a single element is returned.
3248
3249  For example:
3250
3251    >>> x = tf.constant([[True,  True], [False, False]])
3252    >>> tf.reduce_any(x)
3253    <tf.Tensor: shape=(), dtype=bool, numpy=True>
3254    >>> tf.reduce_any(x, 0)
3255    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True,  True])>
3256    >>> tf.reduce_any(x, 1)
3257    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
3258
3259  Args:
3260    input_tensor: The boolean tensor to reduce.
3261    axis: The dimensions to reduce. If `None` (the default), reduces all
3262      dimensions. Must be in the range `[-rank(input_tensor),
3263      rank(input_tensor))`.
3264    keepdims: If true, retains reduced dimensions with length 1.
3265    name: A name for the operation (optional).
3266    reduction_indices: The old (deprecated) name for axis.
3267    keep_dims: Deprecated alias for `keepdims`.
3268
3269  Returns:
3270    The reduced tensor.
3271
3272  @compatibility(numpy)
3273  Equivalent to np.any
3274  @end_compatibility
3275  """
3276  axis = deprecation.deprecated_argument_lookup("axis", axis,
3277                                                "reduction_indices",
3278                                                reduction_indices)
3279  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
3280                                                    "keep_dims", keep_dims)
3281  return reduce_any(input_tensor, axis, keepdims, name)
3282
3283
3284@tf_export("math.reduce_any", "reduce_any", v1=[])
3285@dispatch.add_dispatch_support
3286def reduce_any(input_tensor, axis=None, keepdims=False, name=None):
3287  """Computes `tf.math.logical_or` of elements across dimensions of a tensor.
3288
3289  This is the reduction operation for the elementwise `tf.math.logical_or` op.
3290
3291  Reduces `input_tensor` along the dimensions given in `axis`.
3292  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3293  of the entries in `axis`, which must be unique. If `keepdims` is true, the
3294  reduced dimensions are retained with length 1.
3295
3296  If `axis` is None, all dimensions are reduced, and a
3297  tensor with a single element is returned.
3298
3299  For example:
3300
3301    >>> x = tf.constant([[True,  True], [False, False]])
3302    >>> tf.reduce_any(x)
3303    <tf.Tensor: shape=(), dtype=bool, numpy=True>
3304    >>> tf.reduce_any(x, 0)
3305    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True,  True])>
3306    >>> tf.reduce_any(x, 1)
3307    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
3308
3309  Args:
3310    input_tensor: The boolean tensor to reduce.
3311    axis: The dimensions to reduce. If `None` (the default), reduces all
3312      dimensions. Must be in the range `[-rank(input_tensor),
3313      rank(input_tensor))`.
3314    keepdims: If true, retains reduced dimensions with length 1.
3315    name: A name for the operation (optional).
3316
3317  Returns:
3318    The reduced tensor.
3319
3320  @compatibility(numpy)
3321  Equivalent to np.any
3322  @end_compatibility
3323  """
3324  keepdims = False if keepdims is None else bool(keepdims)
3325  return _may_reduce_to_scalar(
3326      keepdims, axis,
3327      gen_math_ops._any(
3328          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
3329          name=name))
3330
3331
3332@tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"])
3333@dispatch.add_dispatch_support
3334@deprecation.deprecated_args(None,
3335                             "keep_dims is deprecated, use keepdims instead",
3336                             "keep_dims")
3337def reduce_logsumexp_v1(input_tensor,
3338                        axis=None,
3339                        keepdims=None,
3340                        name=None,
3341                        reduction_indices=None,
3342                        keep_dims=None):
3343  """Computes log(sum(exp(elements across dimensions of a tensor))).
3344
3345  Reduces `input_tensor` along the dimensions given in `axis`.
3346  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3347  of the entries in `axis`, which must be unique. If `keepdims` is true, the
3348  reduced dimensions are retained with length 1.
3349
3350  If `axis` has no entries, all dimensions are reduced, and a
3351  tensor with a single element is returned.
3352
3353  This function is more numerically stable than log(sum(exp(input))). It avoids
3354  overflows caused by taking the exp of large inputs and underflows caused by
3355  taking the log of small inputs.
3356
3357  For example:
3358
3359  ```python
3360  x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
3361  tf.reduce_logsumexp(x)  # log(6)
3362  tf.reduce_logsumexp(x, 0)  # [log(2), log(2), log(2)]
3363  tf.reduce_logsumexp(x, 1)  # [log(3), log(3)]
3364  tf.reduce_logsumexp(x, 1, keepdims=True)  # [[log(3)], [log(3)]]
3365  tf.reduce_logsumexp(x, [0, 1])  # log(6)
3366  ```
3367
3368  Args:
3369    input_tensor: The tensor to reduce. Should have numeric type.
3370    axis: The dimensions to reduce. If `None` (the default), reduces all
3371      dimensions. Must be in the range `[-rank(input_tensor),
3372      rank(input_tensor))`.
3373    keepdims: If true, retains reduced dimensions with length 1.
3374    name: A name for the operation (optional).
3375    reduction_indices: The old (deprecated) name for axis.
3376    keep_dims: Deprecated alias for `keepdims`.
3377
3378  Returns:
3379    The reduced tensor.
3380  """
3381  axis = deprecation.deprecated_argument_lookup("axis", axis,
3382                                                "reduction_indices",
3383                                                reduction_indices)
3384  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
3385                                                    "keep_dims", keep_dims)
3386  return reduce_logsumexp(input_tensor, axis, keepdims, name)
3387
3388
3389@tf_export("math.reduce_logsumexp", "reduce_logsumexp", v1=[])
3390@dispatch.add_dispatch_support
3391def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):
3392  """Computes log(sum(exp(elements across dimensions of a tensor))).
3393
3394  Reduces `input_tensor` along the dimensions given in `axis`.
3395  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3396  of the entries in `axis`, which must be unique. If `keepdims` is true, the
3397  reduced dimensions are retained with length 1.
3398
3399  If `axis` has no entries, all dimensions are reduced, and a
3400  tensor with a single element is returned.
3401
3402  This function is more numerically stable than log(sum(exp(input))). It avoids
3403  overflows caused by taking the exp of large inputs and underflows caused by
3404  taking the log of small inputs.
3405
3406  For example:
3407
3408  ```python
3409  x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
3410  tf.reduce_logsumexp(x)  # log(6)
3411  tf.reduce_logsumexp(x, 0)  # [log(2), log(2), log(2)]
3412  tf.reduce_logsumexp(x, 1)  # [log(3), log(3)]
3413  tf.reduce_logsumexp(x, 1, keepdims=True)  # [[log(3)], [log(3)]]
3414  tf.reduce_logsumexp(x, [0, 1])  # log(6)
3415  ```
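
  The implementation relies on the standard max-shift identity; a minimal
  sketch of the idea (not the exact code, which also guards against
  non-finite maxima) is:

  ```python
  m = tf.reduce_max(x)
  # log(sum(exp(x))) == log(sum(exp(x - m))) + m, with exp kept in range.
  result = tf.math.log(tf.reduce_sum(tf.exp(x - m))) + m
  ```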
3416
3417  Args:
3418    input_tensor: The tensor to reduce. Should have numeric type.
3419    axis: The dimensions to reduce. If `None` (the default), reduces all
3420      dimensions. Must be in the range `[-rank(input_tensor),
3421      rank(input_tensor))`.
3422    keepdims: If true, retains reduced dimensions with length 1.
3423    name: A name for the operation (optional).
3424
3425  Returns:
3426    The reduced tensor.
3427  """
3428  with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
3429    raw_max = reduce_max(input_tensor, axis=axis, keepdims=True)
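    # Replace non-finite maxima with zeros so the shift below stays finite;
    # stop_gradient keeps the shift out of the backward pass.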
3430    my_max = array_ops.stop_gradient(
3431        gen_math_ops.select(
3432            gen_math_ops.is_finite(raw_max), raw_max,
3433            gen_array_ops.zeros_like(raw_max)))
3434    result = gen_math_ops.log(
3435        reduce_sum(
3436            exp(subtract(input_tensor, my_max)),
3437            axis=axis,
3438            keepdims=keepdims))
3439    if not keepdims:
3440      my_max = array_ops.reshape(my_max, gen_array_ops.shape(result))
3441    result = add(result, my_max, name=name)
3442    return _may_reduce_to_scalar(keepdims, axis, result)
3443
3444
3445@tf_export("linalg.trace", v1=["linalg.trace", "trace"])
3446@dispatch.add_dispatch_support
3447@deprecation.deprecated_endpoints("trace")
3448def trace(x, name=None):
3449  """Compute the trace of a tensor `x`.
3450
3451  `trace(x)` returns the sum along the main diagonal of each inner-most matrix
3452  in `x`. If `x` is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
3453  is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where
3454
3455  `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`
3456
3457  For example:
3458
3459  ```python
3460  x = tf.constant([[1, 2], [3, 4]])
3461  tf.linalg.trace(x)  # 5
3462
3463  x = tf.constant([[1, 2, 3],
3464                   [4, 5, 6],
3465                   [7, 8, 9]])
3466  tf.linalg.trace(x)  # 15
3467
3468  x = tf.constant([[[1, 2, 3],
3469                    [4, 5, 6],
3470                    [7, 8, 9]],
3471                   [[-1, -2, -3],
3472                    [-4, -5, -6],
3473                    [-7, -8, -9]]])
3474  tf.linalg.trace(x)  # [15, -15]
3475  ```
3476
3477  Args:
3478    x: tensor.
3479    name: A name for the operation (optional).
3480
3481  Returns:
3482    The trace of input tensor.
3483  """
3484  with ops.name_scope(name, "Trace", [x]) as name:
3485    x = ops.convert_to_tensor(x, name="x")
3486    return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
3487
3488
3489@tf_export("linalg.matmul", "matmul")
3490@dispatch.add_dispatch_support
3491def matmul(a,
3492           b,
3493           transpose_a=False,
3494           transpose_b=False,
3495           adjoint_a=False,
3496           adjoint_b=False,
3497           a_is_sparse=False,
3498           b_is_sparse=False,
3499           output_type=None,
3500           name=None):
3501  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
3502
3503  The inputs must, following any transpositions, be tensors of rank >= 2
3504  where the inner 2 dimensions specify valid matrix multiplication dimensions,
3505  and any further outer dimensions specify matching batch size.
3506
3507  Both matrices must be of the same type. The supported types are:
3508  `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`,
3509  `complex64`, `complex128`.
3510
3511  Either matrix can be transposed or adjointed (conjugated and transposed) on
3512  the fly by setting one of the corresponding flags to `True`. These are `False`
3513  by default.
3514
3515  If one or both of the matrices contain a lot of zeros, a more efficient
3516  multiplication algorithm can be used by setting the corresponding
3517  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
3518  This optimization is only available for plain matrices (rank-2 tensors) with
3519  datatypes `bfloat16` or `float32`.
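
  For instance, a minimal sketch using the sparse flag on a mostly-zero
  rank-2 `float32` matrix (the values here are illustrative only):

  ```python
  a = tf.constant([[1., 0., 0.], [0., 0., 2.]])
  b = tf.ones([3, 4])
  c = tf.matmul(a, b, a_is_sparse=True)  # same values as tf.matmul(a, b)
  ```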
3520
3521  A simple 2-D tensor matrix multiplication:
3522
3523  >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
3524  >>> a  # 2-D tensor
3525  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
3526  array([[1, 2, 3],
3527         [4, 5, 6]], dtype=int32)>
3528  >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
3529  >>> b  # 2-D tensor
3530  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
3531  array([[ 7,  8],
3532         [ 9, 10],
3533         [11, 12]], dtype=int32)>
3534  >>> c = tf.matmul(a, b)
3535  >>> c  # `a` * `b`
3536  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
3537  array([[ 58,  64],
3538         [139, 154]], dtype=int32)>
3539
3540  A batch matrix multiplication with batch shape [2]:
3541
3542  >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])
3543  >>> a  # 3-D tensor
3544  <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=
3545  array([[[ 1,  2,  3],
3546          [ 4,  5,  6]],
3547         [[ 7,  8,  9],
3548          [10, 11, 12]]], dtype=int32)>
3549  >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])
3550  >>> b  # 3-D tensor
3551  <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
3552  array([[[13, 14],
3553          [15, 16],
3554          [17, 18]],
3555         [[19, 20],
3556          [21, 22],
3557          [23, 24]]], dtype=int32)>
3558  >>> c = tf.matmul(a, b)
3559  >>> c  # `a` * `b`
3560  <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
3561  array([[[ 94, 100],
3562          [229, 244]],
3563         [[508, 532],
3564          [697, 730]]], dtype=int32)>
3565
3566  Since Python 3.5, the `@` operator has been supported
3567  (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow,
3568  it simply calls the `tf.matmul()` function, so the following lines are
3569  equivalent:
3570
3571  >>> d = a @ b @ [[10], [11]]
3572  >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])
3573
3574  Args:
3575    a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,
3576      `complex64`, `complex128` and rank > 1.
3577    b: `tf.Tensor` with same type and rank as `a`.
3578    transpose_a: If `True`, `a` is transposed before multiplication.
3579    transpose_b: If `True`, `b` is transposed before multiplication.
3580    adjoint_a: If `True`, `a` is conjugated and transposed before
3581      multiplication.
3582    adjoint_b: If `True`, `b` is conjugated and transposed before
3583      multiplication.
3584    a_is_sparse: If `True`, `a` is treated as a sparse matrix. Note that this
3585      **does not support `tf.sparse.SparseTensor`**; it just makes optimizations
3586      that assume most values in `a` are zero.
3587      See `tf.sparse.sparse_dense_matmul`
3588      for some support for `tf.sparse.SparseTensor` multiplication.
3589    b_is_sparse: If `True`, `b` is treated as a sparse matrix. Note that this
3590      **does not support `tf.sparse.SparseTensor`**; it just makes optimizations
3591      that assume most values in `b` are zero.
3592      See `tf.sparse.sparse_dense_matmul`
3593      for some support for `tf.sparse.SparseTensor` multiplication.
3594    output_type: The output datatype, if needed. Defaults to `None`, in which
3595      case the output type is the same as the input type. Currently this only
3596      works when the input tensors are of type (u)int8 and `output_type` is int32.
3597    name: Name for the operation (optional).
3598
3599  Returns:
3600    A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix
3601    is the product of the corresponding matrices in `a` and `b`, e.g. if all
3602    transpose or adjoint attributes are `False`:
3603
3604    `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,
3605    for all indices `i`, `j`.
3606
3607    Note: This is a matrix product, not an element-wise product.
3608
3609
3610  Raises:
3611    ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and
3612      `adjoint_b` are both set to `True`.
3613    TypeError: If `output_type` is specified but the types of `a`, `b` and
3614      `output_type` are not (u)int8, (u)int8 and int32.
3615  """
3616
3617  with ops.name_scope(name, "MatMul", [a, b]) as name:
3618    if transpose_a and adjoint_a:
3619      raise ValueError(
3620          f"Only one of `transpose_a` and `adjoint_a` can be True. "
3621          f"Received `transpose_a`={transpose_a}, "
3622          f"`adjoint_a`={adjoint_a}.")
3623    if transpose_b and adjoint_b:
3624      raise ValueError(
3625          f"Only one of `transpose_b` and `adjoint_b` can be True. "
3626          f"Received `transpose_b`={transpose_b}, "
3627          f"`adjoint_b`={adjoint_b}.")
3628
3629    if context.executing_eagerly():
3630      if not isinstance(a, (ops.EagerTensor, _resource_variable_type)):
3631        a = ops.convert_to_tensor(a, name="a")
3632      if not isinstance(b, (ops.EagerTensor, _resource_variable_type)):
3633        b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")
3634    else:
3635      a = ops.convert_to_tensor(a, name="a")
3636      b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")
3637
3638    # TODO(apassos) remove _shape_tuple here when it is not needed.
3639    a_shape = a._shape_tuple()  # pylint: disable=protected-access
3640    b_shape = b._shape_tuple()  # pylint: disable=protected-access
3641
3642    output_may_have_non_empty_batch_shape = (
3643        (a_shape is None or len(a_shape) > 2) or
3644        (b_shape is None or len(b_shape) > 2))
3645
3646    # TODO(b/178749687): remove this boolean and all related branches once the
3647    # bridges are ready.
3648    # batch_matmul_v3 is for when input type is different from output type.
3649    use_batch_matmul_v3 = False
3650    if output_type and (output_type != a.dtype or output_type != b.dtype):
3651      use_batch_matmul_v3 = True
3652
3653    if (not a_is_sparse and
3654        not b_is_sparse) and output_may_have_non_empty_batch_shape:
3655      # BatchMatmul does not support transpose, so we conjugate the matrix and
3656      # use adjoint instead. Conj() is a noop for real matrices.
3657      if transpose_a:
3658        a = conj(a)
3659        adjoint_a = True
3660      if transpose_b:
3661        b = conj(b)
3662        adjoint_b = True
3663      if use_batch_matmul_v3:
3664        return gen_math_ops.batch_mat_mul_v3(
3665            a, b, adj_x=adjoint_a, adj_y=adjoint_b, Tout=output_type, name=name)
3666      else:
3667        return gen_math_ops.batch_mat_mul_v2(
3668            a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)
3669
3670    # Neither matmul nor sparse_matmul support adjoint, so we conjugate
3671    # the matrix and use transpose instead. Conj() is a noop for real
3672    # matrices.
3673    if adjoint_a:
3674      a = conj(a)
3675      transpose_a = True
3676    if adjoint_b:
3677      b = conj(b)
3678      transpose_b = True
3679
3680    use_sparse_matmul = False
3681    if a_is_sparse or b_is_sparse:
3682      sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
3683      use_sparse_matmul = (
3684          a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)
3685    if (((a.dtype == dtypes.bfloat16 and
3686          b.dtype not in (dtypes.int8, dtypes.uint8)) or
3687         (b.dtype == dtypes.bfloat16 and
3688          a.dtype not in (dtypes.int8, dtypes.uint8))) and a.dtype != b.dtype):
3689      # matmul currently doesn't handle mixed-precision inputs other than
3690      # bfloat16 * (u)int8, which is supported in BatchMatMulV3.
3691      use_sparse_matmul = True
3692    if use_sparse_matmul:
3693      ret = sparse_matmul(
3694          a,
3695          b,
3696          transpose_a=transpose_a,
3697          transpose_b=transpose_b,
3698          a_is_sparse=a_is_sparse,
3699          b_is_sparse=b_is_sparse,
3700          name=name)
3701      # sparse_matmul always returns float32, even with
3702      # bfloat16 inputs, which gets in the way of bfloat16 training.
3703      # Casting back to bfloat16 also matches non-sparse matmul behavior better.
3704      if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:
3705        ret = cast(ret, dtypes.bfloat16)
3706      return ret
3707    else:
3708      if use_batch_matmul_v3:
3709        adjoint_a = adjoint_a or transpose_a
3710        adjoint_b = adjoint_b or transpose_b
3711        return gen_math_ops.batch_mat_mul_v3(
3712            a, b, adj_x=adjoint_a, adj_y=adjoint_b, Tout=output_type, name=name)
3713      else:
3714        return gen_math_ops.mat_mul(
3715            a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
3716
3717
3718@tf_export("linalg.matvec")
3719@dispatch.add_dispatch_support
3720def matvec(a,
3721           b,
3722           transpose_a=False,
3723           adjoint_a=False,
3724           a_is_sparse=False,
3725           b_is_sparse=False,
3726           name=None):
3727  """Multiplies matrix `a` by vector `b`, producing `a` * `b`.
3728
3729  The matrix `a` must, following any transpositions, be a tensor of rank >= 2,
3730  with `shape(a)[-1] == shape(b)[-1]`, and `shape(a)[:-2]` able to broadcast
3731  with `shape(b)[:-1]`.
3732
3733  Both `a` and `b` must be of the same type. The supported types are:
3734  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
3735
3736  Matrix `a` can be transposed or adjointed (conjugated and transposed) on
3737  the fly by setting one of the corresponding flags to `True`. These are `False`
3738  by default.
3739
3740  If one or both of the inputs contain a lot of zeros, a more efficient
3741  multiplication algorithm can be used by setting the corresponding
3742  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
3743  This optimization is only available for plain matrices/vectors (rank-2/1
3744  tensors) with datatypes `bfloat16` or `float32`.
3745
3746  For example:
3747
3748  ```python
3749  # 2-D tensor `a`
3750  # [[1, 2, 3],
3751  #  [4, 5, 6]]
3752  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
3753
3754  # 1-D tensor `b`
3755  # [7, 9, 11]
3756  b = tf.constant([7, 9, 11], shape=[3])
3757
3758  # `a` * `b`
3759  # [ 58, 139]
3760  c = tf.linalg.matvec(a, b)
3761
3762
3763  # 3-D tensor `a`
3764  # [[[ 1,  2,  3],
3765  #   [ 4,  5,  6]],
3766  #  [[ 7,  8,  9],
3767  #   [10, 11, 12]]]
3768  a = tf.constant(np.arange(1, 13, dtype=np.int32),
3769                  shape=[2, 2, 3])
3770
3771  # 2-D tensor `b`
3772  # [[13, 14, 15],
3773  #  [16, 17, 18]]
3774  b = tf.constant(np.arange(13, 19, dtype=np.int32),
3775                  shape=[2, 3])
3776
3777  # `a` * `b`
3778  # [[ 86, 212],
3779  #  [410, 563]]
3780  c = tf.linalg.matvec(a, b)
3781  ```
3782
3783  Args:
3784    a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
3785      `complex128` and rank > 1.
3786    b: `Tensor` with same type as `a` and compatible dimensions.
3787    transpose_a: If `True`, `a` is transposed before multiplication.
3788    adjoint_a: If `True`, `a` is conjugated and transposed before
3789      multiplication.
3790    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
3791    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
3792    name: Name for the operation (optional).
3793
3794  Returns:
3795    A `Tensor` of the same type as `a` and `b` where each inner-most vector is
3796    the product of the corresponding matrices in `a` and vectors in `b`, e.g. if
3797    all transpose or adjoint attributes are `False`:
3798
3799    `output`[..., i] = sum_k (`a`[..., i, k] * `b`[..., k]), for all indices i.
3800
3801    Note: This is a matrix-vector product, not an element-wise product.
3802
3803
3804  Raises:
3805    ValueError: If transpose_a and adjoint_a are both set to True.
3806  """
3807  with ops.name_scope(name, "MatVec", [a, b]) as name:
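    # Treat the vector as a trailing column matrix, multiply, then squeeze the
    # extra dimension away.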
3808    output = matmul(
3809        a,
3810        array_ops.expand_dims(b, axis=-1),
3811        transpose_a=transpose_a,
3812        adjoint_a=adjoint_a,
3813        a_is_sparse=a_is_sparse,
3814        b_is_sparse=b_is_sparse)
3815    return array_ops.squeeze(output, axis=-1)
3816
3817
3818# TODO(b/178650720): Also support numpy-style type promotion in freestanding TF
3819#   functions (e.g. tf.add).
3820def matmul_wrapper(a, b, name=None):  # pylint: disable=missing-function-docstring
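  # With numpy-style type promotion enabled, defer to the Tensor method so the
  # promotion rules apply; otherwise fall back to the regular matmul path.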
3821  if ops._numpy_style_type_promotion:
3822    return a._matmul(b)
3823  return matmul(a, b, name=name)
3824matmul_wrapper.__doc__ = matmul.__doc__
3825_OverrideBinaryOperatorHelper(matmul_wrapper, "matmul")
3826
3827sparse_matmul = deprecation.deprecated(None, "Use `tf.linalg.matmul` instead")(
3828    gen_math_ops.sparse_mat_mul)
3829sparse_matmul = dispatch.add_dispatch_support(sparse_matmul)
3830tf_export(v1=["sparse_matmul"])(sparse_matmul)
3831
3832
3833@ops.RegisterStatistics("MatMul", "flops")
3834def _calc_mat_mul_flops(graph, node):
3835  """Calculates the compute resources needed for MatMul."""
3836  transpose_a = node.attr["transpose_a"].b
3837  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
3838  a_shape.assert_is_fully_defined()
3839  if transpose_a:
3840    k = int(a_shape[0])
3841  else:
3842    k = int(a_shape[1])
3843  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
3844  output_shape.assert_is_fully_defined()
3845  output_count = np.prod(output_shape.as_list())
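  # Each output element takes k multiplies and k adds, hence 2 * k flops.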
3846  return ops.OpStats("flops", (k * output_count * 2))
3847
3848
3849@ops.RegisterStatistics("BatchMatMul", "flops")
3850@ops.RegisterStatistics("BatchMatMulV2", "flops")
3851@ops.RegisterStatistics("BatchMatMulV3", "flops")
3852def _calc_batch_mat_mul_flops(graph, node):
3853  """Calculates the compute resources needed for BatchMatMul."""
3854  transpose_a = node.attr["transpose_a"].b
3855  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
3856  a_shape.assert_is_fully_defined()
3857  if transpose_a:
3858    k = int(a_shape[-2])
3859  else:
3860    k = int(a_shape[-1])
3861  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
3862  output_shape.assert_is_fully_defined()
3863  output_count = np.prod(output_shape.as_list())
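  # As in MatMul: 2 * k flops (k multiplies plus k adds) per output element,
  # with batching already accounted for by output_count.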
3864  return ops.OpStats("flops", (k * output_count * 2))
3865
3866
3867def _as_indexed_slices(x, optimize=True):
3868  """Convert 'x' to IndexedSlices.
3869
3870  Convert a dense Tensor to a block-sparse IndexedSlices.
3871
3872  Args:
3873    x: Either a Tensor object, or an IndexedSlices object.
3874    optimize: if true, attempt to optimize the conversion of 'x'.
3875
3876  Returns:
3877    An IndexedSlices object.
3878
3879  Raises:
3880    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
3881  """
3882  # TODO(touts): op_scope
3883  if not isinstance(x, (ops.Tensor, indexed_slices.IndexedSlices)):
3884    raise TypeError(f"Not a Tensor or IndexedSlices: {type(x)}.")
3885  if isinstance(x, indexed_slices.IndexedSlices):
3886    return x
3887  x_shape = array_ops.shape_internal(x, optimize=optimize)
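  # `range` here is math_ops' own `range` (tf.range), so a dense tensor is
  # represented as slices covering rows 0 .. d0 - 1.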
3888  return indexed_slices.IndexedSlices(x, range(0, x_shape[0]), x_shape)
3889
3890
3891def _as_indexed_slices_list(inputs, optimize=True):
3892  """Convert all elements of 'inputs' to IndexedSlices.
3893
3894  Additionally, homogenize the types of all the indices to
3895  either int32 or int64.
3896
3897  Args:
3898    inputs: List containing either Tensor or IndexedSlices objects.
3899    optimize: if true, attempt to optimize the conversion of each input.
3900
3901  Returns:
3902    A list of IndexedSlices objects.
3903
3904  Raises:
3905    TypeError: If 'inputs' is not a list or a tuple.
3906  """
3907  if not isinstance(inputs, (list, tuple)):
3908    raise TypeError(f"Expected a list or tuple, not {type(inputs)}.")
3909  outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
3910  with_int32_index = [
3911      o.indices for o in outputs if o.indices.dtype == dtypes.int32
3912  ]
3913  if not with_int32_index or len(with_int32_index) == len(outputs):
3914    return outputs
3915  casted_outputs = []
3916  for o in outputs:
3917    if o.indices.dtype == dtypes.int32:
3918      casted_outputs.append(
3919          indexed_slices.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
3920                                       o.dense_shape))
3921    else:
3922      casted_outputs.append(o)
3923  return casted_outputs
3924
3925
3926@tf_export("math.add", "add")
3927@dispatch.register_binary_elementwise_api
3928@dispatch.add_dispatch_support
3929def add(x, y, name=None):
3930  """Returns x + y element-wise.
3931
3932  Example usages below.
3933
3934  Add a scalar and a list:
3935
3936  >>> x = [1, 2, 3, 4, 5]
3937  >>> y = 1
3938  >>> tf.add(x, y)
3939  <tf.Tensor: shape=(5,), dtype=int32, numpy=array([2, 3, 4, 5, 6],
3940  dtype=int32)>
3941
3942  Note that the binary `+` operator can be used instead:
3943
3944  >>> x = tf.convert_to_tensor([1, 2, 3, 4, 5])
3945  >>> y = tf.convert_to_tensor(1)
3946  >>> x + y
3947  <tf.Tensor: shape=(5,), dtype=int32, numpy=array([2, 3, 4, 5, 6],
3948  dtype=int32)>
3949
3950  Add a tensor and a list of same shape:
3951
3952  >>> x = [1, 2, 3, 4, 5]
3953  >>> y = tf.constant([1, 2, 3, 4, 5])
3954  >>> tf.add(x, y)
3955  <tf.Tensor: shape=(5,), dtype=int32,
3956  numpy=array([ 2,  4,  6,  8, 10], dtype=int32)>
3957
3958  **Warning**: If one of the inputs (`x` or `y`) is a tensor and the other is a
3959  non-tensor, the non-tensor input will adopt (or be cast to) the data type
3960  of the tensor input. This can potentially cause unwanted overflow or
3961  underflow.
3962
3963  For example,
3964
3965  >>> x = tf.constant([1, 2], dtype=tf.int8)
3966  >>> y = [2**7 + 1, 2**7 + 2]
3967  >>> tf.add(x, y)
3968  <tf.Tensor: shape=(2,), dtype=int8, numpy=array([-126, -124], dtype=int8)>
3969
3970  When adding two input values of different shapes, `Add` follows NumPy
3971  broadcasting rules. The two input array shapes are compared element-wise.
3972  Starting with the trailing dimensions, the two dimensions either have to be
3973  equal or one of them needs to be `1`.
3974
3975  For example,
3976
3977  >>> x = np.ones(6).reshape(1, 2, 1, 3)
3978  >>> y = np.ones(6).reshape(2, 1, 3, 1)
3979  >>> tf.add(x, y).shape.as_list()
3980  [2, 2, 3, 3]
3981
3982  Another example with two arrays of different ranks:
3983
3984  >>> x = np.ones([1, 2, 1, 4])
3985  >>> y = np.ones([3, 4])
3986  >>> tf.add(x, y).shape.as_list()
3987  [1, 2, 3, 4]
3988
3989  The reduction version of this elementwise operation is `tf.math.reduce_sum`.
3990
3991  Args:
3992    x: A `tf.Tensor`. Must be one of the following types: bfloat16, half,
3993      float32, float64, uint8, int8, int16, int32, int64, complex64, complex128,
3994      string.
3995    y: A `tf.Tensor`. Must have the same type as x.
3996    name: A name for the operation (optional).
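
  Returns:
    A `tf.Tensor`. Has the same type as `x`.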
3997  """
3998  with ops.name_scope(name, "Add", [x]) as name:
3999    x = ops.convert_to_tensor(x, name="x")
4000    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
4001    if x.dtype == dtypes.string:
4002      return gen_math_ops.add(x, y, name=name)
4003    else:
4004      return gen_math_ops.add_v2(x, y, name=name)
4005
4006
4007@tf_export("math.add_n", "add_n")
4008@dispatch.add_dispatch_support(iterable_parameters=["inputs"])
4009def add_n(inputs, name=None):
4010  """Returns the element-wise sum of a list of tensors.
4011
4012  All inputs in the list must have the same shape. This op does not
4013  [broadcast](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html)
4014  its inputs. If you need broadcasting, use `tf.math.add` (or the `+` operator)
4015  instead.
4016
4017  For example:
4018
4019  >>> a = tf.constant([[3, 5], [4, 8]])
4020  >>> b = tf.constant([[1, 6], [2, 9]])
4021  >>> tf.math.add_n([a, b, a]).numpy()
4022  array([[ 7, 16],
4023         [10, 25]], dtype=int32)
4024
4025  See Also:
4026
4027  * `tf.reduce_sum(inputs, axis=0)` - This performs the same mathematical
4028    operation, but `tf.add_n` may be more efficient because it sums the
4029    tensors directly. `reduce_sum`, on the other hand, calls
4030    `tf.convert_to_tensor` on the list of tensors, unnecessarily stacking them
4031    into a single tensor before summing; see the sketch below.
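
  For example, a minimal sketch of the difference (values are illustrative
  only):

  ```python
  x = [tf.constant([1., 2.]), tf.constant([3., 4.])]
  tf.add_n(x)               # [4., 6.] - sums the tensors directly
  tf.reduce_sum(x, axis=0)  # [4., 6.] - first stacks the list into one tensor
  ```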
4032
4033  Args:
4034    inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with the
4035      same shape and type. `tf.IndexedSlices` objects will be converted into
4036      dense tensors prior to adding.
4037    name: A name for the operation (optional).
4038
4039  Returns:
4040    A `tf.Tensor` of the same shape and type as the elements of `inputs`.
4041
4042  Raises:
4043    ValueError: If `inputs` don't all have same shape and dtype or the shape
4044    cannot be inferred.
4045  """
4046  if not inputs or not isinstance(inputs, collections_abc.Iterable):
4047    raise ValueError("Inputs must be an iterable of at least one "
4048                     "Tensor/IndexedSlices with the same dtype and shape.")
4049  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
4050  if not all(
4051      isinstance(x, (ops.Tensor, indexed_slices.IndexedSlices))
4052      for x in inputs):
4053    raise ValueError("Inputs must be an iterable of at least one "
4054                     "Tensor/IndexedSlices with the same dtype and shape.")
4055
4056  if len(inputs) == 1:
4057    if isinstance(inputs[0], indexed_slices.IndexedSlices):
4058      values = ops.convert_to_tensor(inputs[0])
4059    else:
4060      values = inputs[0]
4061    if name:
4062      return array_ops.identity(values, name=name)
4063    return values
4064  return gen_math_ops.add_n(inputs, name=name)
4065
4066
4068@tf_export("math.accumulate_n", v1=["math.accumulate_n", "accumulate_n"])
4069@dispatch.add_dispatch_support
4070@deprecation.deprecated(None, "Use `tf.math.add_n` Instead")
4071def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
4072  """Returns the element-wise sum of a list of tensors.
4073
4074  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
4075  otherwise, these are inferred.
4076
4077  For example:
4078
4079  >>> a = tf.constant([[1, 2], [3, 4]])
4080  >>> b = tf.constant([[5, 0], [0, 6]])
4081  >>> tf.math.accumulate_n([a, b, a]).numpy()
4082  array([[ 7,  4],
4083         [ 6, 14]], dtype=int32)
4084
4085  >>> # Explicitly pass shape and type
4086  >>> tf.math.accumulate_n(
4087  ...     [a, b, a], shape=[2, 2], tensor_dtype=tf.int32).numpy()
4088  array([[ 7,  4],
4089         [ 6, 14]], dtype=int32)
4090
4091  Note: The input must be a list or tuple. This function does not handle
4092  `IndexedSlices`.
4093
4094  See Also:
4095
4096  * `tf.reduce_sum(inputs, axis=0)` - This performs the same mathematical
4097    operation, but `tf.add_n` may be more efficient because it sums the
4098    tensors directly. `reduce_sum`, on the other hand, calls
4099    `tf.convert_to_tensor` on the list of tensors, unnecessarily stacking them
4100    into a single tensor before summing.
4101  * `tf.add_n` - This is another python wrapper for the same Op. It has
4102    nearly identical functionality.
4103
4104  Args:
4105    inputs: A list of `Tensor` objects, each with same shape and type.
4106    shape: Expected shape of elements of `inputs` (optional). Also controls the
4107      output shape of this op, which may affect type inference in other ops. A
4108      value of `None` means "infer the input shape from the shapes in `inputs`".
4109    tensor_dtype: Expected data type of `inputs` (optional). A value of `None`
4110      means "infer the input dtype from `inputs[0]`".
4111    name: A name for the operation (optional).
4112
4113  Returns:
4114    A `Tensor` of same shape and type as the elements of `inputs`.
4115
4116  Raises:
4117    ValueError: If `inputs` don't all have same shape and dtype or the shape
4118    cannot be inferred.
4119  """
4120
4121  def _input_error():
4122    return ValueError("inputs must be a list of at least one Tensor with the "
4123                      "same dtype and shape")
4124
4125  if not inputs or not isinstance(inputs, (list, tuple)):
4126    raise _input_error()
4127  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
4128  if not all(isinstance(x, ops.Tensor) for x in inputs):
4129    raise _input_error()
4130  if not all(x.dtype == inputs[0].dtype for x in inputs):
4131    raise _input_error()
4132  if shape is not None:
4133    shape = tensor_shape.as_shape(shape)
4134  else:
4135    shape = tensor_shape.unknown_shape()
4136  for input_tensor in inputs:
4137    if isinstance(input_tensor, ops.Tensor):
4138      shape = shape.merge_with(input_tensor.get_shape())
4139
4140  # tensor_dtype is for safety only; operator's output type computed in C++
4141  if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:
4142    raise TypeError(
4143        f"The `tensor_dtype` argument is {tensor_dtype}, but `input` is of "
4144        f"type {inputs[0].dtype}. These must be equal. Try casting the input "
4145        f"to the desired type.")
4146
4147  if len(inputs) == 1 and name is None:
4148    return inputs[0]
4149  elif len(inputs) == 1 and name is not None:
4150    return array_ops.identity(inputs[0], name=name)
4151  return add_n(inputs, name=name)
4152
4153
4154@ops.RegisterGradient("AccumulateNV2")
4155def _accumulate_n_grad(op, grad):
4156  """Same as gradient for AddN. Copies the gradient to all inputs."""
4157  # Not broadcasting.
4158  return [grad] * len(op.inputs)
4159
4160
4161@tf_export("math.sigmoid", "nn.sigmoid", "sigmoid")
4162@dispatch.register_unary_elementwise_api
4163@dispatch.add_dispatch_support
4164def sigmoid(x, name=None):
4165  r"""Computes sigmoid of `x` element-wise.
4166
4167  Computed as $\mathrm{sigmoid}(x) = y = 1 / (1 + \exp(-x))$.
4168
4169  For $x \in (-\infty, \infty)$, $\mathrm{sigmoid}(x) \in (0, 1)$.
4170
4171  Example Usage:
4172
4173  If a positive number is large, then its sigmoid will approach 1, since the
4174  formula will be `y = <large_num> / (1 + <large_num>)`:
4175
4176  >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
4177  >>> tf.math.sigmoid(x)
4178  <tf.Tensor: shape=(4,), dtype=float32,
4179  numpy=array([0.5, 0.7310586, 1.0, 1.0], dtype=float32)>
4180
4181  If a negative number is large in magnitude, its sigmoid will approach 0,
4182  since the formula will be `y = 1 / (1 + <large_num>)`:
4183
4184  >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
4185  >>> tf.math.sigmoid(x)
4186  <tf.Tensor: shape=(4,), dtype=float32, numpy=
4187  array([0.0000000e+00, 1.9287499e-22, 2.6894143e-01, 0.5],
4188        dtype=float32)>
4189
4190  Args:
4191    x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or
4192      `complex128`.
4193    name: A name for the operation (optional).
4194
4195  Returns:
4196    A Tensor with the same type as `x`.
4197
4198  Usage Example:
4199
4200  >>> x = tf.constant([-128.0, 0.0, 128.0], dtype=tf.float32)
4201  >>> tf.sigmoid(x)
4202  <tf.Tensor: shape=(3,), dtype=float32,
4203  numpy=array([0. , 0.5, 1. ], dtype=float32)>
4204
4205  @compatibility(scipy)
4206  Equivalent to scipy.special.expit
4207  @end_compatibility
4208  """
4209  with ops.name_scope(name, "Sigmoid", [x]) as name:
4210    x = ops.convert_to_tensor(x, name="x")
4211    return gen_math_ops.sigmoid(x, name=name)
4212
4213
4214@tf_export("math.log_sigmoid", v1=["math.log_sigmoid", "log_sigmoid"])
4215@dispatch.register_unary_elementwise_api
4216@dispatch.add_dispatch_support
4217@deprecation.deprecated_endpoints("log_sigmoid")
4218def log_sigmoid(x, name=None):
4219  """Computes log sigmoid of `x` element-wise.
4220
4221  Specifically, `y = log(1 / (1 + exp(-x)))`.  For numerical stability,
4222  we use `y = -tf.nn.softplus(-x)`.
4223
4224  Args:
4225    x: A Tensor with type `float32` or `float64`.
4226    name: A name for the operation (optional).
4227
4228  Returns:
4229    A Tensor with the same type as `x`.
4230
4231  Usage Example:
4232
4233  If a positive number is large, then its log_sigmoid will approach 0, since
4234  the formula will be `y = log( <large_num> / (1 + <large_num>) )`, which
4235  approximates `log(1)`, which is 0.
4236
4237  >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
4238  >>> tf.math.log_sigmoid(x)
4239  <tf.Tensor: shape=(4,), dtype=float32, numpy=
4240  array([-6.9314718e-01, -3.1326169e-01, -1.9287499e-22, -0.0000000e+00],
4241        dtype=float32)>
4242
4243  If a negative number is large in magnitude, its log_sigmoid will approach
4244  the number itself, since the formula will be `y = log( 1 / (1 + <large_num>) )`,
4245  which is `log(1) - log(1 + <large_num>)` and approximates `-<large_num>`,
4246  i.e. the number itself.
4247
4248  >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
4249  >>> tf.math.log_sigmoid(x)
4250  <tf.Tensor: shape=(4,), dtype=float32, numpy=
4251  array([-100.       ,  -50.       ,   -1.3132616,   -0.6931472],
4252        dtype=float32)>
4253  """
4254  with ops.name_scope(name, "LogSigmoid", [x]) as name:
4255    x = ops.convert_to_tensor(x, name="x")
4256    return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name)  # pylint: disable=invalid-unary-operand-type
4257
4258
4259@tf_export("math.cumsum", "cumsum")
4260@dispatch.add_dispatch_support
4261def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
4262  """Compute the cumulative sum of the tensor `x` along `axis`.
4263
4264  By default, this op performs an inclusive cumsum, which means that the first
4265  element of the input is identical to the first element of the output.
4266  For example:
4267
4268  >>> # tf.cumsum([a, b, c])   # [a, a + b, a + b + c]
4269  >>> x = tf.constant([2, 4, 6, 8])
4270  >>> tf.cumsum(x)
4271  <tf.Tensor: shape=(4,), dtype=int32,
4272  numpy=array([ 2,  6, 12, 20], dtype=int32)>
4273
4274  >>> # using varying `axis` values
4275  >>> y = tf.constant([[2, 4, 6, 8], [1,3,5,7]])
4276  >>> tf.cumsum(y, axis=0)
4277  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
4278  array([[ 2,  4,  6,  8],
4279         [ 3,  7, 11, 15]], dtype=int32)>
4280  >>> tf.cumsum(y, axis=1)
4281  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
4282  array([[ 2,  6, 12, 20],
4283         [ 1,  4,  9, 16]], dtype=int32)>
4284
4285  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
4286  instead:
4287
4288  >>> # tf.cumsum([a, b, c], exclusive=True)  => [0, a, a + b]
4289  >>> x = tf.constant([2, 4, 6, 8])
4290  >>> tf.cumsum(x, exclusive=True)
4291  <tf.Tensor: shape=(4,), dtype=int32,
4292  numpy=array([ 0,  2,  6, 12], dtype=int32)>
4293
4294  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
4295  opposite direction:
4296
4297  >>> # tf.cumsum([a, b, c], reverse=True)  # [a + b + c, b + c, c]
4298  >>> x = tf.constant([2, 4, 6, 8])
4299  >>> tf.cumsum(x, reverse=True)
4300  <tf.Tensor: shape=(4,), dtype=int32,
4301  numpy=array([20, 18, 14,  8], dtype=int32)>
4302
4303  This is more efficient than using separate `tf.reverse` ops.
4304  The `reverse` and `exclusive` kwargs can also be combined:
4305
4306  >>> # tf.cumsum([a, b, c], exclusive=True, reverse=True)  # [b + c, c, 0]
4307  >>> x = tf.constant([2, 4, 6, 8])
4308  >>> tf.cumsum(x, exclusive=True, reverse=True)
4309  <tf.Tensor: shape=(4,), dtype=int32,
4310  numpy=array([18, 14,  8,  0], dtype=int32)>
4311
4312  Args:
4313    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
4314      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
4315      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
4316    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
4317      `[-rank(x), rank(x))`.
4318    exclusive: If `True`, perform exclusive cumsum.
4319    reverse: A `bool` (default: False).
4320    name: A name for the operation (optional).
4321
4322  Returns:
4323    A `Tensor`. Has the same type as `x`.
4324  """
4325  with ops.name_scope(name, "Cumsum", [x]) as name:
4326    x = ops.convert_to_tensor(x, name="x")
4327    return gen_math_ops.cumsum(
4328        x, axis, exclusive=exclusive, reverse=reverse, name=name)
4329
4330
4331@tf_export("math.cumprod", v1=["math.cumprod", "cumprod"])
4332@dispatch.add_dispatch_support
4333@deprecation.deprecated_endpoints("cumprod")
4334def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
4335  """Compute the cumulative product of the tensor `x` along `axis`.
4336
4337  By default, this op performs an inclusive cumprod, which means that the
4338  first element of the input is identical to the first element of the output:
4339
4340  ```python
4341  tf.math.cumprod([a, b, c])  # [a, a * b, a * b * c]
4342  ```
4343
4344  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
4345  performed
4346  instead:
4347
4348  ```python
4349  tf.math.cumprod([a, b, c], exclusive=True)  # [1, a, a * b]
4350  ```
4351
4352  By setting the `reverse` kwarg to `True`, the cumprod is performed in the
4353  opposite direction:
4354
4355  ```python
4356  tf.math.cumprod([a, b, c], reverse=True)  # [a * b * c, b * c, c]
4357  ```
4358
4359  This is more efficient than using separate `tf.reverse` ops.
4360  The `reverse` and `exclusive` kwargs can also be combined:
4361
4362  ```python
4363  tf.math.cumprod([a, b, c], exclusive=True, reverse=True)  # [b * c, c, 1]
4364  ```
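
  A concrete sketch with numeric values:

  ```python
  x = tf.constant([2, 3, 4])
  tf.math.cumprod(x)                  # [2, 6, 24]
  tf.math.cumprod(x, exclusive=True)  # [1, 2, 6]
  ```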

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumprod(
        x, axis, exclusive=exclusive, reverse=reverse, name=name)


@tf_export("math.cumulative_logsumexp", v1=["math.cumulative_logsumexp"])
@dispatch.add_dispatch_support
def cumulative_logsumexp(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative log-sum-exp of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumulative log-sum-exp, which means
  that the first element of the input is identical to the first element of
  the output.

  This operation is significantly more numerically stable than the equivalent
  tensorflow operation `tf.math.log(tf.math.cumsum(tf.math.exp(x)))`, although
  it computes the same result given infinite numerical precision. However, note
  that in some cases, it may be less stable than `tf.math.reduce_logsumexp`
  for a given element, as it applies the "log-sum-exp trick" in a different
  way.

  More precisely, where `tf.math.reduce_logsumexp` uses the following trick:

  ```
  log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x)
  ```

  it cannot be directly used here as there is no fast way of applying it
  to each prefix `x[:i]`. Instead, this function implements a prefix
  scan using pairwise log-add-exp, which is a commutative and associative
  (up to floating point precision) operator:

  ```
  log_add_exp(x, y) = log(exp(x) + exp(y))
                    = log(1 + exp(min(x, y) - max(x, y))) + max(x, y)
  ```

  However, reducing using the above operator leads to a different computation
  tree (logs are taken repeatedly instead of only at the end), and the maximum
  is only computed pairwise instead of over the entire prefix. In general, this
  leads to a different and slightly less precise computation.
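
  For example, an inclusive scan over zeros accumulates the log of the running
  count, since the log-sum-exp of `k` zeros is `log(k)`:

  ```python
  x = tf.constant([0.0, 0.0, 0.0])
  tf.math.cumulative_logsumexp(x)  # [0.0, log(2.0), log(3.0)]
  ```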

  Args:
    x: A `Tensor`. Must be one of the following types: `float16`, `float32`,
      `float64`.
    axis: A `Tensor` of type `int32` or `int64` (default: 0). Must be in the
      range `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumulative log-sum-exp.
    reverse: If `True`, performs the cumulative log-sum-exp in the reverse
      direction.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same shape and type as `x`.
  """
  with ops.name_scope(name, "CumulativeLogsumexp", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumulative_logsumexp(
        x, axis, exclusive=exclusive, reverse=reverse, name=name)


@tf_export("math.conj", v1=["math.conj", "conj"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("conj")
def conj(x, name=None):
  r"""Returns the complex conjugate of a complex number.

  Given a tensor `x` of complex numbers, this operation returns a tensor of
  complex numbers that are the complex conjugate of each element in `x`. The
  complex numbers in `x` must be of the form \\(a + bj\\), where `a` is the
  real part and `b` is the imaginary part.

  The complex conjugate returned by this operation is of the form \\(a - bj\\).

  For example:

  >>> x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  >>> tf.math.conj(x)
  <tf.Tensor: shape=(2,), dtype=complex128,
  numpy=array([-2.25-4.75j,  3.25-5.75j])>

  If `x` is real, it is returned unchanged.

  For example:

  >>> x = tf.constant([-2.25, 3.25])
  >>> tf.math.conj(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([-2.25,  3.25], dtype=float32)>

  Args:
    x: `Tensor` to conjugate.  Must have numeric or variant type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that is the conjugate of `x` (with the same type).

  Raises:
    TypeError: If `x` is not a numeric tensor.

  @compatibility(numpy)
  Equivalent to numpy.conj.
  @end_compatibility
  """
  if isinstance(x, ops.Tensor):
    dt = x.dtype
    if dt.is_floating or dt.is_integer:
      return x
  with ops.name_scope(name, "Conj", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_complex or x.dtype == dtypes.variant:
      return gen_math_ops.conj(x, name=name)
    elif x.dtype.is_floating or x.dtype.is_integer:
      return x
    else:
      raise TypeError(
          f"Expected numeric or variant tensor, got dtype {x.dtype!r}.")


def reduced_shape(input_shape, axes):
  """Helper function for reduction ops.

  Args:
    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
    axes: 1-D Tensor, the reduction axes.

  Returns:
    A 1-D Tensor, the output shape as if keepdims were set to True.
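
  For example:

    reduced_shape(tf.constant([2, 3, 5, 7]), tf.constant([1, 2]))
    # ==> [2, 1, 1, 7]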
  """
  # TODO(allenl): Refactor `reduced_shape` to take the tensor corresponding to
  # `input_shape` rather than `tf.shape` of it. Then we can check if the shape
  # is fully defined here, which may be faster executing eagerly than running
  # `tf.shape` and then fetching its constant value.
  constant_input_shape = tensor_util.constant_value(input_shape)
  if constant_input_shape is not None:
    constant_axes = tensor_util.constant_value(axes)
    if constant_axes is not None:
      constant_axes = np.array(constant_axes, dtype=np.int32)
      constant_input_shape = np.array(constant_input_shape, dtype=np.int32)
      constant_input_shape[constant_axes] = 1
      return constant_input_shape

  # Example:
  # cast needed for SparseTensor reductions
  input_shape = cast(input_shape, dtypes.int32)  # [2, 3, 5, 7]
  axes = cast(axes, dtypes.int32)  # [1, 2]

  input_rank = array_ops.size(input_shape)  # 4
  axes = (axes + input_rank) % input_rank
  axes_shape = array_ops.shape(axes)  # [2]
  return gen_data_flow_ops.dynamic_stitch(  # [2, 1, 1, 7]
      [
          range(input_rank),  # [0, 1, 2, 3]
          axes
      ],  # [1, 2]
      [
          input_shape,  # [2, 3, 5, 7]
          array_ops.ones(axes_shape, dtype=dtypes.int32)
      ])  # [1, 1]


def _unsorted_segment_N(data, segment_ids, num_segments):
  """Helper function for unsorted_segment_mean/_sqrtN.

  Computes the number of entries in each segment, with zero counts set to 1
  so that the caller can safely divide by N.
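
  For example:

    _unsorted_segment_N(data, segment_ids=[0, 0, 2], num_segments=3)
    # counts are [2, 0, 1]; the empty segment is clamped to give [2, 1, 1]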
  """
  num_segments = ops.convert_to_tensor(num_segments)
  # bincount doesn't support negative indices so we use unsorted_segment_sum
  segment_ids_shape = array_ops.shape_internal(segment_ids)
  ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)
  n = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)
  # add dimensions for all non-reduced axes
  broadcastable_shape = array_ops.concat(
      [num_segments[array_ops.newaxis],
       array_ops.ones([array_ops.rank(data)
                       - array_ops.rank(segment_ids)],
                      dtype=num_segments.dtype)],
      axis=0)
  n = array_ops.reshape(n, broadcastable_shape)
  return gen_math_ops.maximum(n, 1)


@tf_export(
    "math.unsorted_segment_mean",
    v1=["math.unsorted_segment_mean", "unsorted_segment_mean"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("unsorted_segment_mean")
def unsorted_segment_mean(data, segment_ids, num_segments, name=None):
  r"""Computes the mean along segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  This operator is similar to the `tf.math.unsorted_segment_sum` operator.
  Instead of computing the sum over segments, it computes the mean of all
  entries belonging to a segment such that:

  \\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples
  `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the number of
  occurrences of id \\(i\\).

  If there is no entry for a given segment ID `i`, it outputs 0.

  If the given segment ID `i` is negative, the value is dropped and will not
  be added to the sum of the segment.
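
  For example:

  ```python
  c = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 8.0]])
  tf.math.unsorted_segment_mean(c, tf.constant([0, 1, 0]), num_segments=2)
  # ==> [[3.0, 5.0],
  #      [3.0, 4.0]]
  ```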

  Caution: On CPU, values in `segment_ids` are always validated to be less than
  `num_segments`, and an error is thrown for out-of-bound indices. On GPU, no
  error is thrown for out-of-bound indices; instead they result in safe but
  unspecified behavior, which may include ignoring out-of-bound indices or
  outputting a tensor with a 0 stored in the first dimension of its shape if
  `num_segments` is 0.

  Args:
    data: A `Tensor` with floating point or complex dtype.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
      The values must be less than `num_segments`.
      The values are always validated to be in range on CPU,
      never validated on GPU.
    num_segments: An integer scalar `Tensor`.  The number of distinct segment
      IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same shape as `data`, except for the first
    `segment_ids.rank` dimensions, which are replaced with a single dimension
    of size `num_segments`.
  """
  with ops.name_scope(name, "UnsortedSegmentMean"):
    data = ops.convert_to_tensor(data)
    segment_ids = ops.convert_to_tensor(segment_ids)
    N = _unsorted_segment_N(data, segment_ids, num_segments)
    summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
    return summed / N


@tf_export(
    "math.unsorted_segment_sqrt_n",
    v1=["math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("unsorted_segment_sqrt_n")
def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):
  r"""Computes the sum along segments of a tensor divided by the sqrt(N).

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  This operator is similar to the `tf.math.unsorted_segment_sum` operator.
  In addition to computing the sum over segments, it divides the results by
  sqrt(N).

  \\(output_i = 1/sqrt(N_i) \sum_{j...} data[j...]\\) where the sum is over
  tuples `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the
  number of occurrences of id \\(i\\).

  If there is no entry for a given segment ID `i`, it outputs 0.

  Note that this op only supports floating point and complex dtypes,
  due to tf.sqrt only supporting these types.

  If the given segment ID `i` is negative, the value is dropped and will not
  be added to the sum of the segment.
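
  For example:

  ```python
  c = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 8.0]])
  tf.math.unsorted_segment_sqrt_n(c, tf.constant([0, 1, 0]), num_segments=2)
  # ==> [[6.0 / sqrt(2.0), 10.0 / sqrt(2.0)],
  #      [3.0, 4.0]]
  ```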

  Caution: On CPU, values in `segment_ids` are always validated to be less than
  `num_segments`, and an error is thrown for out-of-bound indices. On GPU, no
  error is thrown for out-of-bound indices; instead they result in safe but
  unspecified behavior, which may include ignoring out-of-bound indices or
  outputting a tensor with a 0 stored in the first dimension of its shape if
  `num_segments` is 0.

  Args:
    data: A `Tensor` with floating point or complex dtype.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
      The values must be in the range `[0, num_segments)`.
      The values are always validated to be in range on CPU,
      never validated on GPU.
    num_segments: An integer scalar `Tensor`.  The number of distinct segment
      IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same shape as `data`, except for the first
    `segment_ids.rank` dimensions, which are replaced with a single dimension
    of size `num_segments`.
  """
  with ops.name_scope(name, "UnsortedSegmentSqrtN"):
    data = ops.convert_to_tensor(data)
    segment_ids = ops.convert_to_tensor(segment_ids)
    N = _unsorted_segment_N(data, segment_ids, num_segments)
    summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
    return summed / gen_math_ops.sqrt(N)


@tf_export(v1=["sparse.segment_sum", "sparse_segment_sum"])
@deprecation.deprecated_endpoints("sparse_segment_sum")
def sparse_segment_sum(data,
                       indices,
                       segment_ids,
                       name=None,
                       num_segments=None):
  r"""Computes the sum along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_sum`, but `segment_ids` can have size less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.

  For example:

  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

  # Select two rows, one segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[0 0 0 0]]

  # Select two rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  # => [[ 1  2  3  4]
  #     [-1 -2 -3 -4]]

  # With missing segment ids.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
                        num_segments=4)
  # => [[ 1  2  3  4]
  #     [ 0  0  0  0]
  #     [-1 -2 -3 -4]
  #     [ 0  0  0  0]]

  # Select all rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0 0 0 0]
  #     [5 6 7 8]]

  # Which is equivalent to:
  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
  ```

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.

  Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
  """
  if num_segments is not None:
    return gen_math_ops.sparse_segment_sum_with_num_segments(
        data=data,
        indices=indices,
        segment_ids=segment_ids,
        num_segments=num_segments,
        name=name)
  else:
    return gen_math_ops.sparse_segment_sum(
        data=data, indices=indices, segment_ids=segment_ids, name=name)


@tf_export("sparse.segment_sum", v1=[])
def sparse_segment_sum_v2(data,
                          indices,
                          segment_ids,
                          num_segments=None,
                          name=None):
  r"""Computes the sum along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_sum`, but `segment_ids` can have size less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.

  For example:

  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

  # Select two rows, one segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[0 0 0 0]]

  # Select two rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  # => [[ 1  2  3  4]
  #     [-1 -2 -3 -4]]

  # With missing segment ids.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
                        num_segments=4)
  # => [[ 1  2  3  4]
  #     [ 0  0  0  0]
  #     [-1 -2 -3 -4]
  #     [ 0  0  0  0]]

  # Select all rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0 0 0 0]
  #     [5 6 7 8]]

  # Which is equivalent to:
  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
  ```

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
  """
  return sparse_segment_sum(
      data, indices, segment_ids, name=name, num_segments=num_segments)


@tf_export(v1=["sparse.segment_mean", "sparse_segment_mean"])
@deprecation.deprecated_endpoints("sparse_segment_mean")
def sparse_segment_mean(data,
                        indices,
                        segment_ids,
                        name=None,
                        num_segments=None):
  r"""Computes the mean along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_mean`, but `segment_ids` can have size less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.
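
  For example:

  ```python
  c = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 8.0]])
  tf.sparse.segment_mean(c, tf.constant([0, 2]), tf.constant([0, 0]))
  # ==> [[3.0, 5.0]]
  ```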

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.

  Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
  """
  if num_segments is not None:
    return gen_math_ops.sparse_segment_mean_with_num_segments(
        data=data,
        indices=indices,
        segment_ids=segment_ids,
        num_segments=num_segments,
        name=name)
  else:
    return gen_math_ops.sparse_segment_mean(
        data=data, indices=indices, segment_ids=segment_ids, name=name)


@tf_export("sparse.segment_mean", v1=[])
def sparse_segment_mean_v2(data,
                           indices,
                           segment_ids,
                           num_segments=None,
                           name=None):
  r"""Computes the mean along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_mean`, but `segment_ids` can have size less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
  """
  return sparse_segment_mean(
      data, indices, segment_ids, name=name, num_segments=num_segments)


@tf_export(v1=["sparse.segment_sqrt_n", "sparse_segment_sqrt_n"])
@deprecation.deprecated_endpoints("sparse_segment_sqrt_n")
def sparse_segment_sqrt_n(data,
                          indices,
                          segment_ids,
                          name=None,
                          num_segments=None):
  r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).

  `N` is the size of the segment being reduced.
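
  For example:

  ```python
  c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
  tf.sparse.segment_sqrt_n(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # ==> [[4.0 / sqrt(2.0), 6.0 / sqrt(2.0)]]
  ```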

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.

  Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
  """
  if num_segments is not None:
    return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(
        data=data,
        indices=indices,
        segment_ids=segment_ids,
        num_segments=num_segments,
        name=name)
  else:
    return gen_math_ops.sparse_segment_sqrt_n(
        data=data, indices=indices, segment_ids=segment_ids, name=name)


@tf_export("sparse.segment_sqrt_n", v1=[])
def sparse_segment_sqrt_n_v2(data,
                             indices,
                             segment_ids,
                             num_segments=None,
                             name=None):
  r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.sparse.segment_mean`, but instead of dividing by the size of the
  segment, `N`, divides by `sqrt(N)`.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
  """
  return sparse_segment_sqrt_n(
      data, indices, segment_ids, name=name, num_segments=num_segments)


@tf_export("tensordot", "linalg.tensordot")
@dispatch.add_dispatch_support
def tensordot(a, b, axes, name=None):
  r"""Tensor contraction of a and b along specified axes, or outer product.

  Tensordot (also known as tensor contraction) sums the product of elements
  from `a` and `b` over the indices specified by `axes`.

  This operation corresponds to `numpy.tensordot(a, b, axes)`.

  Example 1: When `a` and `b` are matrices (order 2), the case `axes=1`
  is equivalent to matrix multiplication.

  Example 2: When `a` and `b` are matrices (order 2), the case
  `axes = [[1], [0]]` is equivalent to matrix multiplication.

  Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives
  the outer product, a tensor of order 4.

  Example 4: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
  tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor
  \\(c_{jklm}\\) whose entry
  corresponding to the indices \\((j,k,l,m)\\) is given by:

  \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).

  In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.
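
  Examples 1-3 expressed in code (only the output shapes are shown):

  ```python
  a = tf.ones([3, 4])
  b = tf.ones([4, 5])
  tf.tensordot(a, b, axes=1).shape           # ==> [3, 5]
  tf.tensordot(a, b, axes=[[1], [0]]).shape  # ==> [3, 5]
  tf.tensordot(a, b, axes=0).shape           # ==> [3, 4, 4, 5]
  ```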

  Args:
    a: `Tensor` of type `float32` or `float64`.
    b: `Tensor` with the same type as `a`.
    axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
      If axes is a scalar, sum over the last N axes of a and the first N axes of
      b in order. If axes is a list or `Tensor` the first and second row contain
      the set of unique integers specifying axes along which the contraction is
      computed, for `a` and `b`, respectively. The number of axes for `a` and
      `b` must be equal. If `axes=0`, computes the outer product between `a` and
      `b`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `a`.

  Raises:
    ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
    IndexError: If the values in axes exceed the rank of the corresponding
      tensor.
  """

  def _tensordot_reshape(a, axes, flipped=False):
    """Helper method to perform transpose and reshape for contraction op.

    This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
    using `array_ops.transpose` and `array_ops.reshape`. The method takes a
    tensor and performs the correct transpose and reshape operation for a given
    set of indices. It returns the reshaped tensor as well as a list of indices
    necessary to reshape the tensor again after matrix multiplication.

    Args:
      a: `Tensor`.
      axes: List or `int32` `Tensor` of unique indices specifying valid axes of
        `a`.
      flipped: An optional `bool`. Defaults to `False`. If `True`, the method
        assumes that `a` is the second argument in the contraction operation.

    Returns:
      A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
      the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
      either a list of integers or an `int32` `Tensor`, depending on whether
      the shape of `a` is fully specified, and `free_dims_static` is either a
      list of integers and None values, or None, representing the inferred
      static shape of the free dimensions.
    """
    if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
      shape_a = a.get_shape().as_list()
      axes = [i if i >= 0 else i + len(shape_a) for i in axes]
      free = [i for i in builtins.range(len(shape_a)) if i not in axes]
      free_dims = [shape_a[i] for i in free]
      prod_free = int(np.prod([shape_a[i] for i in free]))
      prod_axes = int(np.prod([shape_a[i] for i in axes]))
      perm = list(axes) + free if flipped else free + list(axes)
      new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
      if (perm != np.arange(len(shape_a))).any():
        a_trans = array_ops.transpose(a, perm)
      else:
        a_trans = a
      if a_trans.get_shape().as_list() != new_shape:
        reshaped_a = array_ops.reshape(a_trans, new_shape)
      else:
        reshaped_a = a_trans
      return reshaped_a, free_dims, free_dims
    else:
      if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
        shape_a = a.get_shape().as_list()
        axes = [i if i >= 0 else i + len(shape_a) for i in axes]
        free = [i for i in builtins.range(len(shape_a)) if i not in axes]
        axes_dims = [shape_a[i] for i in axes]
        free_dims = [shape_a[i] for i in free]
        free_dims_static = free_dims
        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
        free = ops.convert_to_tensor(free, dtype=dtypes.int32, name="free")
        shape_a = array_ops.shape(a)
      else:
        free_dims_static = None
        shape_a = array_ops.shape(a)
        rank_a = array_ops.rank(a)
        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
        axes = array_ops.where(axes >= 0, axes, axes + rank_a)
        free, _ = gen_array_ops.list_diff(range(rank_a), axes, dtypes.int32)
      free_dims = array_ops.gather(shape_a, free)
      axes_dims = array_ops.gather(shape_a, axes)
      prod_free_dims = reduce_prod(free_dims)
      prod_axes_dims = reduce_prod(axes_dims)
      if flipped:
        perm = array_ops.concat([axes, free], 0)
        new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
      else:
        perm = array_ops.concat([free, axes], 0)
        new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims, free_dims_static

  def _tensordot_axes(a, axes):
    """Generates two sets of contraction axes for the two tensor arguments."""
    a_shape = a.get_shape()
    if isinstance(axes, compat.integral_types):
      if axes < 0:
        raise ValueError(f"`axes` must be at least 0. Received: {axes}.")
      if a_shape.ndims is not None:
        if axes > a_shape.ndims:
          raise ValueError(f"`axes` must not be larger than the number of "
                           f"dimensions of tensor {a}.  Received {axes}, vs "
                           f"tensor dimensions {a_shape.ndims}.")
        return (list(builtins.range(a_shape.ndims - axes,
                                    a_shape.ndims)), list(builtins.range(axes)))
      else:
        rank = array_ops.rank(a)
        return (range(rank - axes, rank,
                      dtype=dtypes.int32), range(axes, dtype=dtypes.int32))
    elif isinstance(axes, (list, tuple)):
      if len(axes) != 2:
        raise ValueError(
            f"`axes` must be an integer or have length 2. Received {axes}.")
      a_axes = axes[0]
      b_axes = axes[1]
      if isinstance(a_axes, compat.integral_types) and \
          isinstance(b_axes, compat.integral_types):
        a_axes = [a_axes]
        b_axes = [b_axes]
      if len(a_axes) != len(b_axes):
        raise ValueError(f"Different number of contraction axes `a` and `b`, "
                         f"{len(a_axes)} != {len(b_axes)}.")
      return a_axes, b_axes
    else:
      axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
      return axes[0], axes[1]

  with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_axes, b_axes = _tensordot_axes(a, axes)
    a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
    b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(
        b, b_axes, True)
    ab_matmul = matmul(a_reshape, b_reshape)
    if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
      if (ab_matmul.get_shape().is_fully_defined() and
          ab_matmul.get_shape().as_list() == a_free_dims + b_free_dims):
        return ab_matmul
      else:
        return array_ops.reshape(
            ab_matmul, a_free_dims + b_free_dims, name=name)
    else:
      a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
      b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
      product = array_ops.reshape(
          ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
      if a_free_dims_static is not None and b_free_dims_static is not None:
        product.set_shape(a_free_dims_static + b_free_dims_static)
      return product


@tf_export("math.polyval")
@dispatch.add_dispatch_support
def polyval(coeffs, x, name=None):
  r"""Computes the elementwise value of a polynomial.

  If `x` is a tensor and `coeffs` is a list of n + 1 tensors,
  this function returns the value of the n-th order polynomial

  `p(x) = coeffs[n] + coeffs[n-1] * x + ... + coeffs[0] * x**n`

  evaluated using Horner's method, i.e.

  ```python
  p(x) = coeffs[n] + x * (coeffs[n-1] + ... + x * (coeffs[1] + x * coeffs[0]))
  ```

  Usage Example:

  >>> coefficients = [1.0, 2.5, -4.2]
  >>> x = 5.0
  >>> y = tf.math.polyval(coefficients, x)
  >>> y
  <tf.Tensor: shape=(), dtype=float32, numpy=33.3>

  Usage Example:

  >>> tf.math.polyval([2, 1, 0], 3) # evaluates 2 * (3**2) + 1 * (3**1) + 0 * (3**0)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>

  `tf.math.polyval` can also be used in polynomial regression. Taking
  advantage of this function can facilitate writing a polynomial equation
  as compared to explicitly writing it out, especially for higher degree
  polynomials.

  >>> x = tf.constant(3)
  >>> theta1 = tf.Variable(2)
  >>> theta2 = tf.Variable(1)
  >>> theta3 = tf.Variable(0)
  >>> tf.math.polyval([theta1, theta2, theta3], x)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>

  Args:
    coeffs: A list of `Tensor` representing the coefficients of the polynomial.
    x: A `Tensor` representing the variable of the polynomial.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the shape of the expression p(x), with the usual
    broadcasting rules for element-wise addition and multiplication applied.

  @compatibility(numpy)
  Equivalent to numpy.polyval.
  @end_compatibility
  """
  if not isinstance(coeffs, list):
    raise ValueError(
        f"Argument coeffs must be list type. Received type {type(coeffs)}.")

  with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if len(coeffs) < 1:
      return array_ops.zeros_like(x, name=name)
    coeffs = [
        ops.convert_to_tensor(coeff, name=("coeff_%d" % index))
        for index, coeff in enumerate(coeffs)
    ]
    p = coeffs[0]
    for c in coeffs[1:]:
      p = c + p * x
    return p


@tf_export("math.reciprocal_no_nan")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def reciprocal_no_nan(x, name=None):
  """Performs a safe reciprocal operation, element-wise.

  If a particular element is zero, the reciprocal for that element is
  also set to zero.

  For example:

  ```python
  x = tf.constant([2.0, 0.5, 0, 1], dtype=tf.float32)
  tf.math.reciprocal_no_nan(x)  # [ 0.5, 2, 0.0, 1.0 ]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `complex64` or
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.

  Raises:
    TypeError: If `x` is not of a valid dtype.

  """

  with ops.name_scope(name, "reciprocal_no_nan", [x]) as scope:
    x = ops.convert_to_tensor(x, name="x")
    one = constant_op.constant(1, dtype=x.dtype.base_dtype, name="one")
    return gen_math_ops.div_no_nan(one, x, name=scope)


@tf_export("math.xlog1py")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def xlog1py(x, y, name=None):
  r"""Compute x * log1p(y).

  Given `x` and `y`, compute `x * log1p(y)`. This function safely returns
  zero when `x = 0`, no matter what the value of `y` is.

  Example:

  >>> tf.math.xlog1py(0., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.>
  >>> tf.math.xlog1py(1., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.6931472>
  >>> tf.math.xlog1py(2., 2.)
  <tf.Tensor: shape=(), dtype=float32, numpy=2.1972246>
  >>> tf.math.xlog1py(0., -1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.>

  Args:
    x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    y: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    name: A name for the operation (optional).

  Returns:
    `x * log1p(y)`.

  @compatibility(scipy)
  Equivalent to scipy.special.xlog1py
  @end_compatibility
  """
  with ops.name_scope(name, "xlog1py", [x]):
    return gen_math_ops.xlog1py(x, y)


@tf_export("math.erfinv")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def erfinv(x, name=None):
  """Compute inverse error function.

  Given `x`, compute the inverse error function of `x`. This function
  is the inverse of `tf.math.erf`.
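
  For example (values shown to float32 precision):

  ```python
  tf.math.erfinv([0.0, 0.5, -0.5])  # ==> [0.0, 0.4769363, -0.4769363]
  ```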

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    Inverse error function of `x`.
  """
  with ops.name_scope(name, "erfinv", [x]):
    return gen_math_ops.erfinv(x)


@tf_export("math.ndtri")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def ndtri(x, name=None):
  """Computes the quantile function (inverse CDF) of the standard normal.
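
  For example:

  ```python
  tf.math.ndtri([0.5, 0.975])  # ==> [0.0, ~1.96]
  ```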

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    The quantile of the standard normal distribution evaluated at `x`.
  """
  with ops.name_scope(name, "ndtri", [x]):
    return gen_math_ops.ndtri(x)


@tf_export("math.erfcinv")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def erfcinv(x, name=None):
  """Computes the inverse of complementary error function.

  Given `x`, compute the inverse complementary error function of `x`.
  This function is the inverse of `tf.math.erfc`, and is defined on
  `[0, 2]`.

  >>> tf.math.erfcinv([0., 0.2, 1., 1.5, 2.])
  <tf.Tensor: shape=(5,), dtype=float32, numpy=
  array([       inf,  0.9061935, -0.       , -0.4769363,       -inf],
        dtype=float32)>

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    Inverse complementary error function of `x`.

  @compatibility(scipy)
  Equivalent to scipy.special.erfcinv
  @end_compatibility
  """
  with ops.name_scope(name, "erfcinv", [x]):
    x = ops.convert_to_tensor(x, name="x")
    return -ndtri(0.5 * x) * np.sqrt(0.5)


@tf_export("math.ceil", v1=["math.ceil", "ceil"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("ceil")
def ceil(x, name=None):
  """Return the ceiling of the input, element-wise.

  For example:

  >>> tf.math.ceil([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
  <tf.Tensor: shape=(7,), dtype=float32,
  numpy=array([-1., -1., -0.,  1.,  2.,  2.,  2.], dtype=float32)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to np.ceil
  @end_compatibility
  """
  return gen_math_ops.ceil(x, name)


@tf_export("math.sqrt", "sqrt")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def sqrt(x, name=None):  # pylint: disable=redefined-builtin
  r"""Computes element-wise square root of the input tensor.

  Note: This operation does not support integer types.

  >>> x = tf.constant([[4.0], [16.0]])
  >>> tf.sqrt(x)
  <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
    array([[2.],
           [4.]], dtype=float32)>
  >>> y = tf.constant([[-4.0], [16.0]])
  >>> tf.sqrt(y)
  <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
    array([[nan],
           [ 4.]], dtype=float32)>
  >>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128)
  >>> tf.sqrt(z)
  <tf.Tensor: shape=(2, 1), dtype=complex128, numpy=
    array([[0.0+1.j],
           [4.0+0.j]])>

  Note: In order to support complex type, please provide an input tensor
  of `complex64` or `complex128`.

  Args:
    x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of same size, type and sparsity as `x`.
  """
  return gen_math_ops.sqrt(x, name)


# pylint: disable=g-docstring-has-escape
@tf_export("math.exp", "exp")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def exp(x, name=None):
  r"""Computes exponential of x element-wise.  \\(y = e^x\\).

  This function computes the exponential of the input tensor element-wise.
  i.e. `math.exp(x)` or \\(e^x\\), where `x` is the input tensor.
  \\(e\\) denotes Euler's number and is approximately equal to 2.718281.
  Output is positive for any real input.

  >>> x = tf.constant(2.0)
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=7.389056>

  >>> x = tf.constant([2.0, 8.0])
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([   7.389056, 2980.958   ], dtype=float32)>

  For complex numbers, the exponential value is calculated as
  $$
  e^{x+iy} = {e^x} {e^{iy}} = {e^x} ({\cos (y) + i \sin (y)})
  $$

  For `1+1j` the value would be computed as:
  $$
  e^1 (\cos (1) + i \sin (1)) = 2.7182817 \times (0.5403023+0.84147096j)
  $$

  >>> x = tf.constant(1 + 1j)
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(), dtype=complex128,
  numpy=(1.4686939399158851+2.2873552871788423j)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to np.exp
  @end_compatibility
  """
  return gen_math_ops.exp(x, name)


# pylint: enable=g-docstring-has-escape


@tf_export("math.sobol_sample")
@dispatch.add_dispatch_support
def sobol_sample(dim, num_results, skip=0, dtype=dtypes.float32, name=None):
  """Generates points from the Sobol sequence.

  Creates a Sobol sequence with `num_results` samples. Each sample has dimension
  `dim`. Skips the first `skip` samples.

  Args:
    dim: Positive scalar `Tensor` representing each sample's dimension.
    num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol
        points to return in the output.
    skip: (Optional) Positive scalar `Tensor` of dtype int32. The number of
        initial points of the Sobol sequence to skip. Default value is 0.
    dtype: (Optional) The `tf.Dtype` of the sample. One of: `tf.float32` or
        `tf.float64`. Defaults to `tf.float32`.
    name: (Optional) Python `str` name prefixed to ops created by this function.

  Returns:
    `Tensor` of samples from Sobol sequence with `shape` [num_results, dim].
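
  For example (illustrative; Sobol points lie in the unit hypercube):

  ```python
  samples = tf.math.sobol_sample(dim=2, num_results=3)
  samples.shape  # ==> [3, 2]; all values lie in [0, 1)
  ```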
  """
  with ops.name_scope(name, "sobol", [dim, num_results, skip]):
    return gen_math_ops.sobol_sample(dim, num_results, skip, dtype=dtype)


@tf_export("math.rsqrt", v1=["math.rsqrt", "rsqrt"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("rsqrt")
def rsqrt(x, name=None):
  """Computes reciprocal of square root of x element-wise.

  For example:

  >>> x = tf.constant([2., 0., -2.])
  >>> tf.math.rsqrt(x)
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([0.707, inf, nan], dtype=float32)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.
  """
  return gen_math_ops.rsqrt(x, name)


@tf_export("math.acos", "acos")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def acos(x, name=None):
  """Computes acos of x element-wise.

  Provided an input tensor, the `tf.math.acos` operation
  returns the inverse cosine of each element of the tensor.
  If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`.

  Input range is `[-1, 1]` and the output has a range of `[0, pi]`.

  For example:

  >>> x = tf.constant([1.0, -0.5, 3.4, 0.2, 0.0, -2], dtype = tf.float32)
  >>> tf.math.acos(x)
  <tf.Tensor: shape=(6,), dtype=float32,
  numpy= array([0. , 2.0943952, nan, 1.3694383, 1.5707964, nan],
  dtype=float32)>

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  return gen_math_ops.acos(x, name)


@tf_export("math.floor", "floor")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def floor(x, name=None):
  """Returns element-wise largest integer not greater than x.

  The input range is `(-inf, inf)` and the
  output range consists of all integer values.

  For example:

  >>> x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float("inf")])
  >>> tf.floor(x).numpy()
  array([ 1., -2.,  5., -3.,  0., inf], dtype=float32)

  Args:
    x:  A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  return gen_math_ops.floor(x, name)


# Register elementwise ops that don't have Python wrappers.
# Binary elementwise ops.
dispatch.register_binary_elementwise_api(gen_bitwise_ops.bitwise_and)
dispatch.register_binary_elementwise_api(gen_bitwise_ops.bitwise_or)
dispatch.register_binary_elementwise_api(gen_bitwise_ops.bitwise_xor)
dispatch.register_binary_elementwise_api(gen_bitwise_ops.left_shift)
dispatch.register_binary_elementwise_api(gen_bitwise_ops.right_shift)
dispatch.register_binary_elementwise_api(gen_math_ops.atan2)
dispatch.register_binary_elementwise_api(gen_math_ops.floor_div)
dispatch.register_binary_elementwise_api(gen_math_ops.floor_mod)
dispatch.register_binary_elementwise_api(gen_math_ops.greater)
dispatch.register_binary_elementwise_api(gen_math_ops.greater_equal)
dispatch.register_binary_elementwise_api(gen_math_ops.less)
dispatch.register_binary_elementwise_api(gen_math_ops.less_equal)
dispatch.register_binary_elementwise_api(gen_math_ops.logical_and)
dispatch.register_binary_elementwise_api(gen_math_ops.logical_or)
dispatch.register_binary_elementwise_api(gen_math_ops.maximum)
dispatch.register_binary_elementwise_api(gen_math_ops.minimum)
# NextAfter(x1, x2) takes two arguments, so it is registered as binary.
dispatch.register_binary_elementwise_api(gen_math_ops.next_after)
dispatch.register_binary_elementwise_api(gen_math_ops.real_div)
dispatch.register_binary_elementwise_api(gen_math_ops.squared_difference)
dispatch.register_binary_elementwise_api(gen_math_ops.truncate_div)
dispatch.register_binary_elementwise_api(gen_math_ops.truncate_mod)
dispatch.register_binary_elementwise_api(gen_math_ops.xdivy)
dispatch.register_binary_elementwise_api(gen_math_ops.xlogy)
dispatch.register_binary_elementwise_api(gen_math_ops.zeta)

# Unary elementwise ops.
dispatch.register_unary_elementwise_api(gen_bitwise_ops.invert)
dispatch.register_unary_elementwise_api(gen_math_ops.acosh)
dispatch.register_unary_elementwise_api(gen_math_ops.asin)
dispatch.register_unary_elementwise_api(gen_math_ops.asinh)
dispatch.register_unary_elementwise_api(gen_math_ops.atan)
dispatch.register_unary_elementwise_api(gen_math_ops.atanh)
dispatch.register_unary_elementwise_api(gen_math_ops.cos)
dispatch.register_unary_elementwise_api(gen_math_ops.cosh)
dispatch.register_unary_elementwise_api(gen_math_ops.digamma)
dispatch.register_unary_elementwise_api(gen_math_ops.erf)
dispatch.register_unary_elementwise_api(gen_math_ops.erfc)
dispatch.register_unary_elementwise_api(gen_math_ops.expm1)
dispatch.register_unary_elementwise_api(gen_math_ops.is_finite)
dispatch.register_unary_elementwise_api(gen_math_ops.is_inf)
dispatch.register_unary_elementwise_api(gen_math_ops.is_nan)
dispatch.register_unary_elementwise_api(gen_math_ops.lgamma)
dispatch.register_unary_elementwise_api(gen_math_ops.log)
dispatch.register_unary_elementwise_api(gen_math_ops.log1p)
dispatch.register_unary_elementwise_api(gen_math_ops.logical_not)
dispatch.register_unary_elementwise_api(gen_math_ops.neg)
dispatch.register_unary_elementwise_api(gen_math_ops.reciprocal)
dispatch.register_unary_elementwise_api(gen_math_ops.rint)
dispatch.register_unary_elementwise_api(gen_math_ops.sin)
dispatch.register_unary_elementwise_api(gen_math_ops.sinh)
dispatch.register_unary_elementwise_api(gen_math_ops.square)
dispatch.register_unary_elementwise_api(gen_math_ops.tan)
dispatch.register_unary_elementwise_api(gen_math_ops.tanh)