# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Math Operations.

Note: Functions taking `Tensor` arguments can also take anything accepted by
`tf.convert_to_tensor`.

Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).

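For example, two tensors with broadcast-compatible shapes combine
elementwise; here a `(2, 1)` column broadcasts against a length-2 row:

```python
tf.constant([[1], [2]]) + tf.constant([10, 20])  # ==> [[11, 21], [12, 22]]
```
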
TensorFlow provides a variety of math functions including:

* Basic arithmetic operators and trigonometric functions.
* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)
* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)
* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)
* Segment functions (like: `tf.math.segment_sum`)

See: `tf.linalg` for matrix and tensor functions.

<a id=Segmentation></a>

## About Segmentation

TensorFlow provides several operations that you can use to perform common
math computations on tensor segments. Here a segmentation is a partitioning
of a tensor along its first dimension, i.e. it defines a mapping from the
first dimension onto `segment_ids`. The `segment_ids` tensor should be the
size of the first dimension, `d0`, with consecutive IDs in the range `0` to
`k`, where `k<d0`. In particular, a segmentation of a matrix tensor is a
mapping of rows to segments.

For example:

```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
#  ==>  [[0 0 0 0]
#        [5 6 7 8]]
```

The standard `segment_*` functions assert that the segment indices are
sorted. If you have unsorted indices, use the equivalent `unsorted_segment_*`
functions. These functions take an additional argument `num_segments` so that
the output tensor can be efficiently allocated.

```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 6,  8, 10, 12],
#      [-1, -2, -3, -4]]
```

"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import six
from six.moves import builtins
from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export

# Aliases for some automatically-generated names.
linspace = gen_math_ops.lin_space
nextafter = gen_math_ops.next_after

arg_max = deprecation.deprecated(None, "Use `tf.math.argmax` instead")(arg_max)  # pylint: disable=used-before-assignment
arg_min = deprecation.deprecated(None, "Use `tf.math.argmin` instead")(arg_min)  # pylint: disable=used-before-assignment
tf_export(v1=["arg_max"])(arg_max)
tf_export(v1=["arg_min"])(arg_min)


# This is set by resource_variable_ops.py. It is included in this way since
# there is a circular dependency between math_ops and resource_variable_ops
_resource_variable_type = None


def _set_doc(doc):

  def _decorator(func):
    func.__doc__ = doc
    return func

  return _decorator


# pylint: disable=redefined-builtin
@tf_export(v1=["math.argmax", "argmax"])
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_max.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmax(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmax_v2(input, axis, output_type, name)


@tf_export("math.argmax", "argmax", v1=[])
def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None):
  """Returns the index with the largest value across axes of a tensor.

  Note that in case of ties the identity of the return value is not guaranteed.

  For example:

  >>> A = tf.constant([2, 20, 30, 3, 6])
  >>> tf.math.argmax(A)  # A[2] is maximum in tensor A
  <tf.Tensor: shape=(), dtype=int64, numpy=2>
  >>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8],
  ...                  [14, 45, 23, 5, 27]])
  >>> tf.math.argmax(B, 0)
  <tf.Tensor: shape=(5,), dtype=int64, numpy=array([2, 2, 0, 2, 2])>
  >>> tf.math.argmax(B, 1)
  <tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 2, 1])>

  Args:
    input: A `Tensor`.
    axis: An integer, the axis to reduce across. Defaults to 0.
    output_type: An optional output dtype (`tf.int32` or `tf.int64`). Defaults
      to `tf.int64`.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of type `output_type`.
  """
  if axis is None:
    axis = 0
  return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)


@tf_export(v1=["math.argmin", "argmin"])
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_min.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmin(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmin_v2(input, axis, output_type, name)


@tf_export("math.argmin", "argmin", v1=[])
def argmin_v2(input, axis=None, output_type=dtypes.int64, name=None):
  """Returns the index with the smallest value across axes of a tensor.

  Note that in case of ties the identity of the return value is not guaranteed.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`,
      `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`,
      `uint64`.
    axis: A `Tensor` of type `int32` or `int64`. Must be in the range
      `[-rank(input), rank(input))`. Describes which axis of the input
      Tensor to reduce across. For vectors, use axis = 0.
    output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to
      `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `output_type`.

  Usage:
  ```python
  import tensorflow as tf
  a = [1, 10, 26.9, 2.8, 166.32, 62.3]
  b = tf.math.argmin(input=a)
  c = tf.keras.backend.eval(b)
  # c = 0
  # here a[0] = 1 which is the smallest element of a across axis 0
  ```
  """
  if axis is None:
    axis = 0
  return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)


# pylint: enable=redefined-builtin


# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
@tf_export("math.abs", "abs")
@dispatch.add_dispatch_support
def abs(x, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the absolute value of a tensor.

  Given a tensor of integer or floating-point values, this operation returns a
  tensor of the same type, where each element contains the absolute value of the
  corresponding element in the input.

  Given a tensor `x` of complex numbers, this operation returns a tensor of type
  `float32` or `float64` that is the absolute value of each element in `x`. For
  a complex number \\(a + bj\\), its absolute value is computed as \\(\sqrt{a^2
  + b^2}\\).  For example:

  >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
  >>> tf.abs(x)
  <tf.Tensor: shape=(2, 1), dtype=float64, numpy=
  array([[5.25594901],
         [6.60492241]])>

  Args:
    x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,
      `int32`, `int64`, `complex64` or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`,
      with absolute values. Note, for `complex64` or `complex128` input, the
      returned `Tensor` will be of type `float32` or `float64`, respectively.
  """
  with ops.name_scope(name, "Abs", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_complex:
      return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
    return gen_math_ops._abs(x, name=name)


# pylint: enable=g-docstring-has-escape


# pylint: disable=redefined-builtin
def _bucketize(input, boundaries, name=None):
  return gen_math_ops.bucketize(input=input, boundaries=boundaries, name=name)

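# Illustrative sketch of the bucketize semantics: `boundaries` define
# half-open intervals, so boundaries=[0, 10, 100] yields the buckets
# (-inf, 0), [0, 10), [10, 100), [100, +inf), numbered 0 to 3.
#
#   _bucketize(tf.constant([-5, 13, 100]), boundaries=[0, 10, 100])
#   # ==> [0, 2, 3]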

# pylint: enable=redefined-builtin


class DivideDelegateWithName(object):
  """Use Python2/Python3 division delegation to implement divide for tensors."""

  def __init__(self, x, name):
    """Construct DivideDelegateWithName.

    Args:
      x: Tensor to use as left operand in operator overloads
      name: The name that is preferred for the op created.
    """
    self.x = x
    self.name = name

  def __truediv__(self, y):
    return _truediv_python3(self.x, y, self.name)

  def __floordiv__(self, y):
    return floordiv(self.x, y, self.name)

  def __div__(self, y):
    return _div_python2(self.x, y, self.name)


@tf_export("math.divide", "divide")
@dispatch.add_dispatch_support
def divide(x, y, name=None):
  """Computes Python style division of `x` by `y`.

  For example:

  >>> x = tf.constant([16, 12, 11])
  >>> y = tf.constant([4, 6, 2])
  >>> tf.divide(x,y)
  <tf.Tensor: shape=(3,), dtype=float64,
  numpy=array([4. , 2. , 5.5])>

  Args:
    x: A `Tensor`
    y: A `Tensor`
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with same shape as input
  """

  if name is not None:
    # Cannot use the tensor's operator overload, because it has no way to
    # track override names. Use a dummy class to track the runtime division
    # behavior.
    return DivideDelegateWithName(x, name) / y
  else:
    return x / y


@tf_export("math.multiply", "multiply")
@dispatch.add_dispatch_support
def multiply(x, y, name=None):
  return gen_math_ops.mul(x, y, name)


multiply.__doc__ = gen_math_ops.mul.__doc__.replace("Multiply", "tf.multiply")


# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
    "2016-12-30",
    "`tf.mul(x, y)` is deprecated, please use `tf.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
  return gen_math_ops.mul(x, y, name)


_mul.__doc__ = (
    gen_math_ops.mul.__doc__ + ("" if _mul.__doc__ is None else _mul.__doc__))


@tf_export("math.subtract", "subtract")
@dispatch.add_dispatch_support
def subtract(x, y, name=None):
  return gen_math_ops.sub(x, y, name)


subtract.__doc__ = gen_math_ops.sub.__doc__.replace("`Sub`", "`tf.subtract`")


# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
    "2016-12-30",
    "`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
  return gen_math_ops.sub(x, y, name)


_sub.__doc__ = (
    gen_math_ops.sub.__doc__ + ("" if _sub.__doc__ is None else _sub.__doc__))

negative = gen_math_ops.neg


# pylint: disable=g-docstring-has-escape
@deprecation.deprecated(
    "2016-12-30",
    "`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
  """Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  return negative(x, name)


# pylint: enable=g-docstring-has-escape


@tf_export(v1=["math.scalar_mul", "scalar_mul"])
def scalar_mul(scalar, x, name=None):
  """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.

  Intended for use in gradient code which might deal with `IndexedSlices`
  objects, which are easy to multiply by a scalar but more expensive to
  multiply with arbitrary tensors.

  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.
    name: A name for the operation (optional).

  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.

  Raises:
    ValueError: If `scalar` is not a 0-D scalar.
420  """
421  scalar = ops.convert_to_tensor(
422      scalar, dtype=x.dtype.base_dtype, name="scalar")
423  shape = scalar.get_shape()
424  if shape.ndims == 0:
425    if isinstance(x, ops.IndexedSlices):
426      return ops.IndexedSlices(
427          gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)
428    else:
429      return gen_math_ops.mul(scalar, x, name)
430  else:
431    raise ValueError("Only scalar multiply works, got shape %s" % shape)
432
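# Usage sketch (illustrative): a plain Python number is converted to a 0-D
# tensor of x.dtype before multiplying.
#
#   tf.math.scalar_mul(2.0, tf.constant([1.0, 2.0, 3.0]))
#   # ==> [2.0, 4.0, 6.0]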

@tf_export("math.scalar_mul", "scalar_mul", v1=[])
@_set_doc(scalar_mul.__doc__)
def scalar_mul_v2(scalar, x, name=None):
  with ops.name_scope(name, "scalar_mul", [x]) as name:
    return scalar_mul(scalar, x, name)


@tf_export("math.pow", "pow")
@dispatch.add_dispatch_support
def pow(x, y, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
  corresponding elements in `x` and `y`. For example:

  ```python
  x = tf.constant([[2, 2], [3, 3]])
  y = tf.constant([[8, 16], [2, 3]])
  tf.pow(x, y)  # [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  with ops.name_scope(name, "Pow", [x]) as name:
    return gen_math_ops._pow(x, y, name=name)


# pylint: disable=redefined-builtin,redefined-outer-name
@tf_export("dtypes.complex", "complex")
@dispatch.add_dispatch_support
def complex(real, imag, name=None):
  r"""Converts two real numbers to a complex number.

  Given a tensor `real` representing the real part of a complex number, and a
  tensor `imag` representing the imaginary part of a complex number, this
  operation returns complex numbers elementwise of the form \\(a + bj\\), where
  *a* represents the `real` part and *b* represents the `imag` part.

  The input tensors `real` and `imag` must have the same shape.

  For example:

  ```python
  real = tf.constant([2.25, 3.25])
  imag = tf.constant([4.75, 5.75])
  tf.complex(real, imag)  # [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64` or `complex128`.

  Raises:
    TypeError: If `real` and `imag` are not both `float32` or both `float64`.
500  """
501  real = ops.convert_to_tensor(real, name="real")
502  imag = ops.convert_to_tensor(imag, name="imag")
503  with ops.name_scope(name, "Complex", [real, imag]) as name:
504    input_types = (real.dtype, imag.dtype)
505    if input_types == (dtypes.float64, dtypes.float64):
506      Tout = dtypes.complex128
507    elif input_types == (dtypes.float32, dtypes.float32):
508      Tout = dtypes.complex64
509    else:
510      raise TypeError("real and imag have incorrect types: "
511                      "{} {}".format(real.dtype.name, imag.dtype.name))
512    return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
513
514
515@tf_export("math.sign", "sign")
516@dispatch.add_dispatch_support
517def sign(x, name=None):
518  """Returns an element-wise indication of the sign of a number.
519
520  y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0.
521
522  For complex numbers, y = sign(x) = x / |x| if x != 0, otherwise y = 0.
523
524  Example usage:
525
526  >>> tf.math.sign([0., 2., -3.])
527  <tf.Tensor: ... numpy=array([ 0.,  1., -1.], dtype=float32)>
528
529  Args:
530   x: A Tensor. Must be one of the following types: bfloat16, half, float32,
531      float64, int32, int64, complex64, complex128.
532   name: A name for the operation (optional).
533
534  Returns:
535   A Tensor. Has the same type as x.
536
537   If x is a SparseTensor, returns SparseTensor(x.indices,
538     tf.math.sign(x.values, ...), x.dense_shape).
539  """
540  x = ops.convert_to_tensor(x)
541  if x.dtype in (dtypes.complex64, dtypes.complex128):
542    return gen_math_ops.div_no_nan(
543        x,
544        cast(
545            gen_math_ops.complex_abs(
546                x,
547                Tout=dtypes.float32
548                if x.dtype == dtypes.complex64 else dtypes.float64),
549            dtype=x.dtype),
550        name=name)
551  return gen_math_ops.sign(x, name=name)
552
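# Illustrative example of the complex branch above: sign(x) = x / |x|, so
# for 3 + 4j (where |x| = 5) the result is 0.6 + 0.8j.
#
#   tf.math.sign(tf.constant(3.0 + 4.0j))  # ==> (0.6+0.8j)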

@tf_export("math.real", v1=["math.real", "real"])
@deprecation.deprecated_endpoints("real")
@dispatch.add_dispatch_support
def real(input, name=None):
  r"""Returns the real part of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the real part of each element in `input` considered as a complex number.

  For example:

  ```python
  x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  tf.math.real(x)  # [-2.25, 3.25]
  ```

  If `input` is already real, it is returned unchanged.

  Args:
    input: A `Tensor`. Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Real", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      real_dtype = input.dtype.real_dtype
      return gen_math_ops.real(input, Tout=real_dtype, name=name)
    else:
      return input


@tf_export("math.imag", v1=["math.imag", "imag"])
@deprecation.deprecated_endpoints("imag")
@dispatch.add_dispatch_support
def imag(input, name=None):
  r"""Returns the imaginary part of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the imaginary part of each element in `input` considered as a complex
  number. If `input` is real, a tensor of all zeros is returned.

  For example:

  ```python
  x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  tf.math.imag(x)  # [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float`, `double`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Imag", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
    else:
      return array_ops.zeros_like(input)


@tf_export("math.angle", v1=["math.angle", "angle"])
@deprecation.deprecated_endpoints("angle")
@dispatch.add_dispatch_support
def angle(input, name=None):
  r"""Returns the element-wise argument of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the argument of each element in `input` considered as a complex number.

  The elements in `input` are considered to be complex numbers of the form
  \\(a + bj\\), where *a* is the real part and *b* is the imaginary part.
  If `input` is real then *b* is zero by definition.

  The argument returned by this function is of the form \\(atan2(b, a)\\).
  If `input` is real, a tensor of all zeros is returned.

  For example:

  ```python
  input = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex64)
  tf.math.angle(input).numpy()
  # ==> array([2.0131705, 1.056345 ], dtype=float32)
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float`, `double`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Angle", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)
    else:
      return array_ops.zeros_like(input)


# pylint: enable=redefined-outer-name,redefined-builtin


@tf_export("math.round", "round")
@dispatch.add_dispatch_support
def round(x, name=None):  # pylint: disable=redefined-builtin
  """Rounds the values of a tensor to the nearest integer, element-wise.
  Rounds half to even, also known as banker's rounding. If you want to round
  according to the current system rounding mode, use `tf::cint`.
  For example:

  ```python
  x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
  tf.round(x)  # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.is_integer:
    return x
  else:
    return gen_math_ops.round(x, name=name)


@tf_export("cast", "dtypes.cast")
@dispatch.add_dispatch_support
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor` or `IndexedSlices`) to `dtype`.

  For example:

  ```python
  x = tf.constant([1.8, 2.2], dtype=tf.float32)
  tf.dtypes.cast(x, tf.int32)  # [1, 2], dtype=tf.int32
  ```

  The operation supports data types (for `x` and `dtype`) of
  `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
  `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.
  In case of casting from complex types (`complex64`, `complex128`) to real
  types, only the real part of `x` is returned. In case of casting from real
  types to complex types (`complex64`, `complex128`), the imaginary part of the
  returned value is set to `0`. The handling of complex types here matches the
  behavior of numpy.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could
      be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,
      `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,
      `bfloat16`.
    dtype: The destination type. The list of supported dtypes is the same as
      `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and
      same type as `dtype`.

  Raises:
    TypeError: If `x` cannot be cast to `dtype`.
731  """
732  base_type = dtypes.as_dtype(dtype).base_dtype
733  if isinstance(x,
734                (ops.Tensor, _resource_variable_type)) and base_type == x.dtype:
735    return x
736  with ops.name_scope(name, "Cast", [x]) as name:
737    if isinstance(x, sparse_tensor.SparseTensor):
738      values_cast = cast(x.values, base_type, name=name)
739      x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
740    elif isinstance(x, ops.IndexedSlices):
741      values_cast = cast(x.values, base_type, name=name)
742      x = ops.IndexedSlices(values_cast, x.indices, x.dense_shape)
743    else:
744      # TODO(josh11b): If x is not already a Tensor, we could return
745      # ops.convert_to_tensor(x, dtype=dtype, ...)  here, but that
746      # allows some conversions that cast() can't do, e.g. casting numbers to
747      # strings.
748      x = ops.convert_to_tensor(x, name="x")
749      if x.dtype.base_dtype != base_type:
750        x = gen_math_ops.cast(x, base_type, name=name)
751    if x.dtype.is_complex and base_type.is_floating:
752      logging.warn("Casting complex to real discards imaginary part.")
753    return x
754
755
756@tf_export("dtypes.saturate_cast", "saturate_cast")
757@dispatch.add_dispatch_support
758def saturate_cast(value, dtype, name=None):
759  """Performs a safe saturating cast of `value` to `dtype`.
760
761  This function casts the input to `dtype` without applying any scaling.  If
762  there is a danger that values would over or underflow in the cast, this op
763  applies the appropriate clamping before the cast.
764
765  Args:
766    value: A `Tensor`.
767    dtype: The desired output `DType`.
768    name: A name for the operation (optional).
769
770  Returns:
771    `value` safely cast to `dtype`.
772  """
773  # When casting to a type with smaller representable range, clamp.
774  # Note that this covers casting to unsigned types as well.
775  with ops.name_scope(name, "saturate_cast", [value]) as name:
776    value = ops.convert_to_tensor(value, name="value")
777    dtype = dtypes.as_dtype(dtype).base_dtype
778    if value.dtype.min < dtype.min:
779      value = gen_math_ops.maximum(
780          value,
781          ops.convert_to_tensor(dtype.min, dtype=value.dtype, name="min"))
782    if value.dtype.max > dtype.max:
783      value = gen_math_ops.minimum(
784          value,
785          ops.convert_to_tensor(dtype.max, dtype=value.dtype, name="max"))
786    return cast(value, dtype, name=name)
787
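# Illustrative example of the clamping above: casting float32 values to
# uint8 saturates at the [0, 255] range instead of wrapping around.
#
#   tf.dtypes.saturate_cast(tf.constant([-1.0, 300.0]), tf.uint8)
#   # ==> [0, 255]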

@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_float"])
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to `float32`.
  """
  return cast(x, dtypes.float32, name=name)


@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_double"])
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to `float64`.
  """
  return cast(x, dtypes.float64, name=name)


@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_int32"])
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to `int32`.
  """
  return cast(x, dtypes.int32, name=name)


@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_int64"])
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to `int64`.
  """
  return cast(x, dtypes.int64, name=name)


@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_bfloat16"])
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to `bfloat16`.
  """
  return cast(x, dtypes.bfloat16, name=name)


@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_complex64"])
def to_complex64(x, name="ToComplex64"):
  """Casts a tensor to type `complex64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex64`.

  Raises:
    TypeError: If `x` cannot be cast to `complex64`.
  """
  return cast(x, dtypes.complex64, name=name)


@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_complex128"])
def to_complex128(x, name="ToComplex128"):
  """Casts a tensor to type `complex128`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex128`.

  Raises:
    TypeError: If `x` cannot be cast to `complex128`.
  """
  return cast(x, dtypes.complex128, name=name)


ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator.  Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse.  This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)


def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
  """Register operators with different tensor and scalar versions.

  If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
  sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.

  Args:
    func: the operator
    op_name: name of the operator being overridden
    clazz_object: class to override for.  Either `Tensor` or `SparseTensor`.
  """

  def binary_op_wrapper(x, y):
    with ops.name_scope(None, op_name, [x, y]) as name:
      if isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor):
        return func(x, y, name=name)
      elif not isinstance(y, sparse_tensor.SparseTensor):
        try:
          y = ops.convert_to_tensor_v2(
              y, dtype_hint=x.dtype.base_dtype, name="y")
        except TypeError:
          # If the RHS is not a tensor, it might be a tensor aware object
          # that can implement the operator with knowledge of itself
          # and the tensor.
          if hasattr(type(y), "__r%s__" % op_name):
            return NotImplemented
          else:
            raise
      return func(x, y, name=name)

  def binary_op_wrapper_sparse(sp_x, y):
    with ops.name_scope(None, op_name, [sp_x, y]) as name:
      y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
      return sparse_tensor.SparseTensor(
          sp_x.indices,
          func(sp_x.indices, sp_x.values, sp_x.dense_shape, y, name=name),
          sp_x.dense_shape)

  def r_binary_op_wrapper(y, x):
    with ops.name_scope(None, op_name, [x, y]) as name:
      x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
      return func(x, y, name=name)

  # Propagate func.__doc__ to the wrappers
  try:
    doc = func.__doc__
  except AttributeError:
    doc = None
  binary_op_wrapper.__doc__ = doc
  r_binary_op_wrapper.__doc__ = doc
  binary_op_wrapper_sparse.__doc__ = doc

  if clazz_object is ops.Tensor:
    clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper
    clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper
  else:
    clazz_object._override_operator("__%s__" % op_name,
                                    binary_op_wrapper_sparse)
    del binary_op_wrapper_sparse


# Conversion table for __truediv__.  None entries mean no conversion required.
_TRUEDIV_TABLE = {
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.int32: dtypes.float64,
    dtypes.int64: dtypes.float64,
    dtypes.bfloat16: None,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}


# NOTE: the support of "sparse (true)div dense" is currently not baked into
# "tf.(true_)div()".  Until such an API decision is made, the supported usage
# is to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
  """Internal helper function for 'sp_t / dense_t'."""
  with ops.name_scope(name, "truediv",
                      [sp_indices, sp_values, sp_shape, y]) as name:
    sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = sp_values.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    if dtype is not None:
      sp_values = cast(sp_values, dtype)
      y = cast(y, dtype)
    return gen_sparse_ops.sparse_dense_cwise_div(
        sp_indices, sp_values, sp_shape, y, name=name)


def _truediv_python3(x, y, name=None):
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    if dtype is not None:
      x = cast(x, dtype)
      y = cast(y, dtype)
    return gen_math_ops.real_div(x, y, name=name)


def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics.

  Used for Tensor.__div__.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """

  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    if x_dtype.is_floating or x_dtype.is_complex:
      return gen_math_ops.real_div(x, y, name=name)
    else:
      return gen_math_ops.floor_div(x, y, name=name)


@tf_export("math.truediv", "truediv")
@dispatch.add_dispatch_support
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide which obey Python
  division operator semantics.

  This function forces Python 3 division operator semantics where all integer
  arguments are cast to floating types first.  This op is generated by normal
  `x / y` division in Python 3 and in Python 2.7 with
  `from __future__ import division`.  If you want integer division that rounds
  down, use `x // y` or `tf.math.floordiv`.

  `x` and `y` must have the same numeric type.  If the inputs are floating
  point, the output will have the same type.  If the inputs are integral, the
  inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
  and `int64` (matching the behavior of Numpy).

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  return _truediv_python3(x, y, name)

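# Illustrative example: per _TRUEDIV_TABLE, int32 inputs are cast to
# float64 before dividing, so the result is floating point.
#
#   tf.math.truediv(tf.constant([3, 4]), tf.constant([2, 2]))
#   # ==> [1.5, 2.0]  (dtype=float64)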

@deprecation.deprecated(
    date=None,
    instructions="Deprecated in favor of operator or tf.math.divide.")
@tf_export(v1=["div"])
def div(x, y, name=None):
  """Divides x / y elementwise (using Python 2 division operator semantics).

  NOTE: Prefer using the Tensor division operator or tf.divide which obey Python
  3 division operator semantics.

  This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x`
  and `y` are both integers then the result will be an integer. This is in
  contrast to Python 3, where division with `/` is always a float while division
  with `//` is always an integer.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  return _div_python2(x, y, name)


@tf_export("math.divide_no_nan", v1=["math.divide_no_nan", "div_no_nan"])
@deprecation.deprecated_endpoints("div_no_nan")
@dispatch.add_dispatch_support
def div_no_nan(x, y, name=None):
1145  """Computes a safe divide which returns 0 if the y is zero.

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    y: A `Tensor` whose dtype is compatible with `x`.
    name: A name for the operation (optional).

  Returns:
    The element-wise value of `x` divided by `y`.
  """

  with ops.name_scope(name, "div_no_nan", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    return gen_math_ops.div_no_nan(x, y, name=name)

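# Illustrative example: a zero denominator yields 0 rather than inf or nan.
#
#   tf.math.divide_no_nan(tf.constant([3.0, 1.0]), tf.constant([2.0, 0.0]))
#   # ==> [1.5, 0.0]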

@tf_export("math.multiply_no_nan")
@dispatch.add_dispatch_support
def multiply_no_nan(x, y, name=None):
1165  """Computes the product of x and y and returns 0 if the y is zero, even if x is NaN or infinite.

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    y: A `Tensor` whose dtype is compatible with `x`.
    name: A name for the operation (optional).

  Returns:
    The element-wise value of `x` times `y`.
  """

  with ops.name_scope(name, "multiply_no_nan", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    return gen_math_ops.mul_no_nan(x, y, name=name)

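# Illustrative example: wherever y is 0 the result is 0, even though
# ordinary multiplication of inf by 0 would give nan.
#
#   x = tf.constant([float("inf"), 2.0])
#   y = tf.constant([0.0, 3.0])
#   tf.math.multiply_no_nan(x, y)  # ==> [0.0, 6.0]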

# TODO(aselle): This should be removed
mod = gen_math_ops.floor_mod


# TODO(aselle): Deprecate this once all internal functionality uses
# tf.truncatediv
@tf_export("math.floordiv", v1=["math.floordiv", "floordiv"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("floordiv")
def floordiv(x, y, name=None):
  """Divides `x / y` elementwise, rounding toward the most negative integer.

  The same as `tf.compat.v1.div(x,y)` for integers, but uses
  `tf.floor(tf.compat.v1.div(x,y))` for
  floating point arguments so that the result is always an integer (though
  possibly an integer represented as floating point).  This op is generated by
  `x // y` floor division in Python 3 and in Python 2.7 with
  `from __future__ import division`.

  `x` and `y` must have the same type, and the result will have the same type
  as well.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded down.

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.name_scope(name, "floordiv", [x, y]) as name:
    return gen_math_ops.floor_div(x, y, name=name)

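# Illustrative example: rounding is toward the most negative integer,
# matching Python's // operator for negative operands.
#
#   tf.math.floordiv(tf.constant([7, -7]), tf.constant([2, 2]))
#   # ==> [3, -4]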

realdiv = gen_math_ops.real_div
truncatediv = gen_math_ops.truncate_div
# TODO(aselle): Rename this to floordiv when we can.
floor_div = gen_math_ops.floor_div
truncatemod = gen_math_ops.truncate_mod
floormod = gen_math_ops.floor_mod


def _add_dispatch(x, y, name=None):
  """Dispatches to add for strings and add_v2 for all other types."""
  if x.dtype == dtypes.string:
    return gen_math_ops.add(x, y, name=name)
  else:
    return gen_math_ops.add_v2(x, y, name=name)


def _mul_dispatch(x, y, name=None):
  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
  is_tensor_y = isinstance(y, ops.Tensor)
  if is_tensor_y:
    return gen_math_ops.mul(x, y, name=name)
  else:
    assert isinstance(y, sparse_tensor.SparseTensor)  # Case: Dense * Sparse.
    new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
                                                     y.dense_shape, x, name)
    return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)

# NOTE(aselle): When integer division is added for sparse_dense_cwise,
# div, truediv, and floordiv should be delegated appropriately for
# Python semantics, analogous to dense cwise tensor operations.
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
                              sparse_tensor.SparseTensor)

_OverrideBinaryOperatorHelper(_add_dispatch, "add")
_OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(_div_python2, "div")
_OverrideBinaryOperatorHelper(_truediv_python3, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")

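# Illustrative sketch of what the registrations above enable: infix
# operators on tensors now dispatch to the functions registered here.
#
#   a = tf.constant([1, 2])
#   b = tf.constant([3, 4])
#   a + b   # -> _add_dispatch(a, b)   ==> [4, 6]
#   a * b   # -> _mul_dispatch(a, b)   ==> [3, 8]
#   a // b  # -> floordiv(a, b)        ==> [0, 0]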

@tf_export("math.logical_xor", v1=["math.logical_xor", "logical_xor"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("logical_xor")
def logical_xor(x, y, name="LogicalXor"):
  """Logical XOR function.

  x ^ y = (x | y) & ~(x & y)

  The operation works for the following input types:

  - Two single elements of type `bool`
  - One `tf.Tensor` of type `bool` and one single `bool`, where the result will
    be calculated by applying logical XOR with the single element to each
    element in the larger Tensor.
  - Two `tf.Tensor` objects of type `bool` of the same shape. In this case,
    the result will be the element-wise logical XOR of the two input tensors.

  Usage:

  >>> a = tf.constant([True])
  >>> b = tf.constant([False])
  >>> tf.math.logical_xor(a, b)
  <tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>

  >>> c = tf.constant([True])
  >>> x = tf.constant([False, True, True, False])
  >>> tf.math.logical_xor(c, x)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([ True, False, False,  True])>

  >>> y = tf.constant([False, False, True, True])
  >>> z = tf.constant([False, True, False, True])
  >>> tf.math.logical_xor(y, z)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False,  True,  True, False])>

  Args:
      x: A `tf.Tensor` of type bool.
      y: A `tf.Tensor` of type bool.
      name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.
  """
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  return gen_math_ops.logical_and(
      gen_math_ops.logical_or(x, y),
      gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
      name=name)


@tf_export("math.logical_and", "logical_and")
@dispatch.add_dispatch_support
def logical_and(x, y, name=None):
  """Logical AND function.

  The operation works for the following input types:

  - Two single elements of type `bool`
  - One `tf.Tensor` of type `bool` and one single `bool`, where the result will
    be calculated by applying logical AND with the single element to each
    element in the larger Tensor.
  - Two `tf.Tensor` objects of type `bool` of the same shape. In this case,
    the result will be the element-wise logical AND of the two input tensors.

  Usage:

  >>> a = tf.constant([True])
  >>> b = tf.constant([False])
  >>> tf.math.logical_and(a, b)
  <tf.Tensor: shape=(1,), dtype=bool, numpy=array([False])>

  >>> c = tf.constant([True])
  >>> x = tf.constant([False, True, True, False])
  >>> tf.math.logical_and(c, x)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False,  True,  True, False])>

  >>> y = tf.constant([False, False, True, True])
  >>> z = tf.constant([False, True, False, True])
  >>> tf.math.logical_and(y, z)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, False, False,  True])>

  Args:
      x: A `tf.Tensor` of type bool.
      y: A `tf.Tensor` of type bool.
      name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.
  """
  return gen_math_ops.logical_and(x, y, name)


_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")

ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)


@tf_export("math.equal", "equal")
@dispatch.add_dispatch_support
def equal(x, y, name=None):
  """Returns the truth value of (x == y) element-wise.

  Performs a [broadcast](
  https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
  arguments and then an element-wise equality comparison, returning a Tensor of
  boolean values.

  For example:

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant(2)
  >>> tf.math.equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant([2, 4])
  >>> tf.math.equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True,  True])>

  Args:
    x: A `tf.Tensor` or `tf.SparseTensor` or `tf.IndexedSlices`.
    y: A `tf.Tensor` or `tf.SparseTensor` or `tf.IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.

  Raises:
    `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible.
  """
  return gen_math_ops.equal(x, y, name=name)


@tf_export("math.not_equal", "not_equal")
@dispatch.add_dispatch_support
def not_equal(x, y, name=None):
  """Returns the truth value of (x != y) element-wise.

  Performs a [broadcast](
  https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
  arguments and then an element-wise inequality comparison, returning a Tensor
  of boolean values.

  For example:

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant(2)
  >>> tf.math.not_equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  True])>

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant([2, 4])
  >>> tf.math.not_equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>

  Args:
    x: A `tf.Tensor` or `tf.SparseTensor` or `tf.IndexedSlices`.
    y: A `tf.Tensor` or `tf.SparseTensor` or `tf.IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.

  Raises:
    `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible.
  """
  return gen_math_ops.not_equal(x, y, name=name)


def tensor_equals(self, other):
  """Compares two tensors element-wise for equality."""
  if other is None:
    return False
  g = getattr(self, "graph", None)
  if (ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions() and
      (g is None or g._building_function)):  # pylint: disable=protected-access
    return gen_math_ops.equal(self, other, incompatible_shape_error=False)
  else:
    # In legacy graph mode, tensor equality is object equality
    return self is other


def tensor_not_equals(self, other):
  """Compares two tensors element-wise for inequality."""
  if other is None:
    return True
  if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions():
    return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
  else:
    # In legacy graph mode, tensor equality is object equality
    return self is not other


ops.Tensor._override_operator("__eq__", tensor_equals)
ops.Tensor._override_operator("__ne__", tensor_not_equals)

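# Illustrative sketch: with __eq__ overridden and _USE_EQUALITY enabled (the
# TF2 default), == on eager tensors is elementwise rather than object
# identity, while comparing against None still returns a plain Python bool.
#
#   tf.constant([1, 2]) == tf.constant([1, 3])
#   # ==> <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>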

@tf_export("range")
def range(start, limit=None, delta=1, dtype=None, name="range"):  # pylint: disable=redefined-builtin
  """Creates a sequence of numbers.

  Creates a sequence of numbers that begins at `start` and extends by
  increments of `delta` up to but not including `limit`.

  The dtype of the resulting tensor is inferred from the inputs unless
  it is provided explicitly.

  Like the Python builtin `range`, `start` defaults to 0, so that
  `range(n) = range(0, n)`.

  For example:

  >>> start = 3
  >>> limit = 18
  >>> delta = 3
  >>> tf.range(start, limit, delta)
  <tf.Tensor: shape=(5,), dtype=int32,
  numpy=array([ 3,  6,  9, 12, 15], dtype=int32)>

  >>> start = 3
  >>> limit = 1
  >>> delta = -0.5
  >>> tf.range(start, limit, delta)
  <tf.Tensor: shape=(4,), dtype=float32,
  numpy=array([3. , 2.5, 2. , 1.5], dtype=float32)>

  >>> limit = 5
  >>> tf.range(limit)
  <tf.Tensor: shape=(5,), dtype=int32,
  numpy=array([0, 1, 2, 3, 4], dtype=int32)>

  Args:
    start: A 0-D `Tensor` (scalar). Acts as first entry in the range if `limit`
      is not None; otherwise, acts as range limit and first entry defaults to 0.
    limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None,
      defaults to the value of `start` while the first entry of the range
      defaults to 0.
    delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to
      1.
    dtype: The type of the elements of the resulting tensor.
    name: A name for the operation. Defaults to "range".

  Returns:
    A 1-D `Tensor` of type `dtype`.

  @compatibility(numpy)
  Equivalent to np.arange
  @end_compatibility
  """
  if limit is None:
    start, limit = 0, start

  with ops.name_scope(name, "Range", [start, limit, delta]) as name:
    if not isinstance(start, ops.Tensor):
      start = ops.convert_to_tensor(start, dtype=dtype, name="start")
    if not isinstance(limit, ops.Tensor):
      limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
    if not isinstance(delta, ops.Tensor):
      delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")

    # infer dtype if not explicitly provided
    if dtype is None:
      dtype_hierarchy = [
          dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
      ]
      assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
      inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],
                           key=dtype_hierarchy.index)
    else:
      inferred_dtype = dtype
1546    # Always try perform a cast even start/limit/delta are already tensors.
1547    # This will revole the case where start/limit/delta's original's dtype
1548    # is different from provided dtype.
1549    start = cast(start, inferred_dtype)
1550    limit = cast(limit, inferred_dtype)
1551    delta = cast(delta, inferred_dtype)
1552
1553    return gen_math_ops._range(start, limit, delta, name=name)
1554
1555
def _range_tensor_conversion_function(value, dtype=None, name=None,
                                      as_ref=False):
  del as_ref
  return range(value.start, value.stop, value.step, dtype=dtype, name=name)


if not six.PY2:
  ops.register_tensor_conversion_function(builtins.range,
                                          _range_tensor_conversion_function)
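# With this conversion registered, a Python 3 `range` object can be passed
# anywhere a tensor is expected; for instance (illustrative),
# `ops.convert_to_tensor(builtins.range(4))` yields the rank-1 int32 tensor
# [0, 1, 2, 3].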

# Reduction operations
def _ReductionDims(x, axis, reduction_indices=None):  # pylint: disable=invalid-name
  """Returns range(0, rank(x)) if reduction_indices is None."""
  # TODO(aselle): Remove this after deprecation
  if reduction_indices is not None:
    if axis is not None:
      raise ValueError("Can't specify both 'axis' and 'reduction_indices'.")
    axis = reduction_indices
  if axis is not None:
    return axis
  else:
    # Fast path: avoid creating Rank and Range ops if ndims is known.
    if isinstance(x, ops.Tensor):
      rank = x.shape.rank
      if rank is not None:
        return constant_op.constant(np.arange(rank, dtype=np.int32))
    elif (isinstance(x, sparse_tensor.SparseTensor) and
          x.dense_shape.shape.is_fully_defined()):
      rank = x.dense_shape.shape.dims[0].value  # sparse.dense_shape is 1-D.
      return constant_op.constant(np.arange(rank, dtype=np.int32))

    # Otherwise, we rely on Range and Rank to do the right thing at run-time.
    return range(0, array_ops.rank(x))


def _has_fully_defined_shape(tensor):
  """Returns true if tensor has a fully defined shape."""
  return isinstance(tensor, ops.EagerTensor) or tensor.shape.is_fully_defined()


def _may_reduce_to_scalar(keepdims, axis, output):
  """Set a reduction's output shape to be a scalar if we are certain."""
  if not _has_fully_defined_shape(output) and (not keepdims) and (
      axis is None):
    output.set_shape(())
  return output


@tf_export(v1=["math.reduce_sum", "reduce_sum"])
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_sum_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes the sum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  x = tf.constant([[1, 1, 1], [1, 1, 1]])
  tf.reduce_sum(x)  # 6
  tf.reduce_sum(x, 0)  # [2, 2, 2]
  tf.reduce_sum(x, 1)  # [3, 3]
  tf.reduce_sum(x, 1, keepdims=True)  # [[3], [3]]
  tf.reduce_sum(x, [0, 1])  # 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and int32
  to int64 while tensorflow returns the same dtype as the input.
  @end_compatibility
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_sum(input_tensor, axis, keepdims, name)


@tf_export("math.reduce_sum", "reduce_sum", v1=[])
@dispatch.add_dispatch_support
def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the sum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  x = tf.constant([[1, 1, 1], [1, 1, 1]])
  tf.reduce_sum(x)  # 6
  tf.reduce_sum(x, 0)  # [2, 2, 2]
  tf.reduce_sum(x, 1)  # [3, 3]
  tf.reduce_sum(x, 1, keepdims=True)  # [[3], [3]]
  tf.reduce_sum(x, [0, 1])  # 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and int32
  to int64 while tensorflow returns the same dtype as the input.
  @end_compatibility
  """

  return reduce_sum_with_dims(input_tensor, axis, keepdims, name,
                              _ReductionDims(input_tensor, axis))


def reduce_sum_with_dims(input_tensor,
                         axis=None,
                         keepdims=False,
                         name=None,
                         dims=None):
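  """Like `reduce_sum`, but takes the reduction axes precomputed as `dims`."""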
  keepdims = False if keepdims is None else keepdims
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops._sum(input_tensor, dims, keepdims, name=name))


@tf_export("math.reduce_euclidean_norm")
def reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the Euclidean norm of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  x = tf.constant([[1, 2, 3], [1, 1, 1]])
  tf.math.reduce_euclidean_norm(x)  # sqrt(17)
  tf.math.reduce_euclidean_norm(x, 0)  # [sqrt(2), sqrt(5), sqrt(10)]
  tf.math.reduce_euclidean_norm(x, 1)  # [sqrt(14), sqrt(3)]
  tf.math.reduce_euclidean_norm(x, 1, keepdims=True)  # [[sqrt(14)], [sqrt(3)]]
  tf.math.reduce_euclidean_norm(x, [0, 1])  # sqrt(17)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.
  """
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops.euclidean_norm(
          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))


@tf_export(v1=["math.count_nonzero", "count_nonzero"])
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
@deprecation.deprecated_args(
    None, "reduction_indices is deprecated, use axis instead",
    "reduction_indices")
def count_nonzero(input_tensor=None,
                  axis=None,
                  keepdims=None,
                  dtype=dtypes.int64,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None,
                  input=None):  # pylint: disable=redefined-builtin
  """Computes number of nonzero elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check.  Small values are **not** rounded to zero for purposes of
  the nonzero check.

  For example:

  ```python
  x = tf.constant([[0, 1, 0], [1, 1, 0]])
  tf.math.count_nonzero(x)  # 3
  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
  tf.math.count_nonzero(x, 1)  # [1, 2]
  tf.math.count_nonzero(x, 1, keepdims=True)  # [[1], [2]]
  tf.math.count_nonzero(x, [0, 1])  # 3
  ```

  **NOTE** Strings are compared against zero-length empty string `""`. Any
  string with a size greater than zero is considered nonzero.

  For example:
  ```python
  x = tf.constant(["", "a", "  ", "b", ""])
  tf.math.count_nonzero(x)  # 3, with "a", "  ", and "b" as nonzero strings.
  ```

  Args:
    input_tensor: The tensor to reduce. Should be of numeric type, `bool`, or
      `string`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.
    input: Overrides input_tensor. For compatibility.

  Returns:
    The reduced tensor (number of nonzero values).
  """
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  input_tensor = deprecation.deprecated_argument_lookup("input", input,
                                                        "input_tensor",
                                                        input_tensor)
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)

  return count_nonzero_v2(input_tensor, axis, keepdims, dtype, name)


@tf_export("math.count_nonzero", v1=[])
def count_nonzero_v2(
    input,  # pylint: disable=redefined-builtin
    axis=None,
    keepdims=None,
    dtype=dtypes.int64,
    name=None):
  """Computes number of nonzero elements across dimensions of a tensor.

  Reduces `input` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check.  Small values are **not** rounded to zero for purposes of
  the nonzero check.

  For example:

  ```python
  x = tf.constant([[0, 1, 0], [1, 1, 0]])
  tf.math.count_nonzero(x)  # 3
  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
  tf.math.count_nonzero(x, 1)  # [1, 2]
  tf.math.count_nonzero(x, 1, keepdims=True)  # [[1], [2]]
  tf.math.count_nonzero(x, [0, 1])  # 3
  ```

  **NOTE** Strings are compared against zero-length empty string `""`. Any
  string with a size greater than zero is considered nonzero.

  For example:
  ```python
  x = tf.constant(["", "a", "  ", "b", ""])
  tf.math.count_nonzero(x)  # 3, with "a", "  ", and "b" as nonzero strings.
  ```

  Args:
    input: The tensor to reduce. Should be of numeric type, `bool`, or `string`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input), rank(input))`.
    keepdims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor (number of nonzero values).
  """
  if keepdims is None:
    keepdims = False
  with ops.name_scope(name, "count_nonzero", [input]):
    input = ops.convert_to_tensor(input, name="input")
    # A scalar of 'zero' is enough as `not_equal` will broadcast.
    zero = array_ops.zeros([], dtype=input.dtype)
    return cast(
        reduce_sum(
            # int64 reduction happens on GPU
            cast(gen_math_ops.not_equal(input, zero), dtypes.int64),
            axis=axis,
            keepdims=keepdims),
        dtype=dtype)


@tf_export(v1=["math.reduce_mean", "reduce_mean"])
def reduce_mean_v1(input_tensor,
                   axis=None,
                   keepdims=None,
                   name=None,
                   reduction_indices=None,
                   keep_dims=None):
  """Computes the mean of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis` by computing the
  mean of elements across the dimensions in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  For example:

  >>> x = tf.constant([[1., 1.], [2., 2.]])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
  >>> tf.reduce_mean(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
  >>> tf.reduce_mean(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean

  Please note that `np.mean` has a `dtype` parameter that could be used to
  specify the output type. By default this is `dtype=float64`. On the other
  hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
  for example:

  >>> x = tf.constant([1, 0, 1, 0])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>
  >>> y = tf.constant([1., 0., 1., 0.])
  >>> tf.reduce_mean(y)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>

  @end_compatibility
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_mean(input_tensor, axis, keepdims, name)


@tf_export("math.reduce_mean", "reduce_mean", v1=[])
@dispatch.add_dispatch_support
def reduce_mean(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the mean of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis` by computing the
  mean of elements across the dimensions in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions are retained
  with length 1.

  If `axis` is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  For example:

  >>> x = tf.constant([[1., 1.], [2., 2.]])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
  >>> tf.reduce_mean(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
  >>> tf.reduce_mean(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean

  Please note that `np.mean` has a `dtype` parameter that could be used to
  specify the output type. By default this is `dtype=float64`. On the other
  hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
  for example:

  >>> x = tf.constant([1, 0, 1, 0])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>
  >>> y = tf.constant([1., 0., 1., 0.])
  >>> tf.reduce_mean(y)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>

  @end_compatibility
  """
  keepdims = False if keepdims is None else keepdims
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops.mean(
          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))


@tf_export("math.reduce_variance")
def reduce_variance(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the variance of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  x = tf.constant([[1., 2.], [3., 4.]])
  tf.math.reduce_variance(x)  # 1.25
  tf.math.reduce_variance(x, 0)  # [1., 1.]
  tf.math.reduce_variance(x, 1)  # [0.25,  0.25]
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name scope for the associated operations (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.var

  Please note that `np.var` has a `dtype` parameter that could be used to
  specify the output type. By default this is `dtype=float64`. On the other
  hand, `tf.math.reduce_variance` has an aggressive type inference from
  `input_tensor`.
  @end_compatibility
  """
  name = name if name else "reduce_variance"
  with ops.name_scope(name):
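    # var(x) = mean(squared deviation from the mean). `means` is computed
    # with keepdims=True so it broadcasts cleanly against `input_tensor`.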
    means = reduce_mean(input_tensor, axis=axis, keepdims=True)
    squared_deviations = gen_math_ops.square(input_tensor - means)
    return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims)


@tf_export("math.reduce_std")
def reduce_std(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the standard deviation of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  x = tf.constant([[1., 2.], [3., 4.]])
  tf.math.reduce_std(x)  # 1.1180339887498949
  tf.math.reduce_std(x, 0)  # [1., 1.]
  tf.math.reduce_std(x, 1)  # [0.5,  0.5]
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name scope for the associated operations (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.std

  Please note that `np.std` has a `dtype` parameter that could be used to
  specify the output type. By default this is `dtype=float64`. On the other
  hand, `tf.math.reduce_std` has an aggressive type inference from
  `input_tensor`.
  @end_compatibility
  """
  name = name if name else "reduce_std"
  with ops.name_scope(name):
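    # std(x) = sqrt(var(x)); the reduction itself is done by reduce_variance.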
    variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)
    return gen_math_ops.sqrt(variance)


@tf_export("math.reduce_prod", "reduce_prod", v1=[])
@dispatch.add_dispatch_support
def reduce_prod(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the product of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

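  For example:

  ```python
  x = tf.constant([[1., 2.], [3., 4.]])
  tf.reduce_prod(x)  # 24.
  tf.reduce_prod(x, 0)  # [3., 8.]
  tf.reduce_prod(x, 1)  # [2., 12.]
  ```
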
  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  """
  keepdims = False if keepdims is None else keepdims
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops.prod(
          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))


@tf_export(v1=["math.reduce_prod", "reduce_prod"])
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_prod_v1(input_tensor,
                   axis=None,
                   keepdims=None,
                   name=None,
                   reduction_indices=None,
                   keep_dims=None):
  """Computes the product of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_prod(input_tensor, axis, keepdims, name)


@tf_export(v1=["math.reduce_min", "reduce_min"])
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_min_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes the minimum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.min
  @end_compatibility
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_min(input_tensor, axis, keepdims, name)


@tf_export("math.reduce_min", "reduce_min", v1=[])
@dispatch.add_dispatch_support
def reduce_min(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the minimum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  >>> a = tf.constant([[1, 2], [3, 4]])
  >>> tf.reduce_min(a)
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.min
  @end_compatibility
  """
  keepdims = False if keepdims is None else keepdims
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops._min(
          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))


@tf_export(v1=["math.reduce_max", "reduce_max"])
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_max_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes the maximum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.max
  @end_compatibility
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_max(input_tensor, axis, keepdims, name)


@tf_export("math.reduce_max", "reduce_max", v1=[])
@dispatch.add_dispatch_support
def reduce_max(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the maximum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

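  For example:

  ```python
  x = tf.constant([[1., 2.], [3., 4.]])
  tf.reduce_max(x)  # 4.
  tf.reduce_max(x, 0)  # [3., 4.]
  tf.reduce_max(x, 1)  # [2., 4.]
  ```
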
  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.max
  @end_compatibility
  """
  return reduce_max_with_dims(input_tensor, axis, keepdims, name,
                              _ReductionDims(input_tensor, axis))


def reduce_max_with_dims(input_tensor,
                         axis=None,
                         keepdims=False,
                         name=None,
                         dims=None):
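  """Like `reduce_max`, but takes the reduction axes precomputed as `dims`."""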
  keepdims = False if keepdims is None else keepdims
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops._max(input_tensor, dims, keepdims, name=name))


@tf_export(v1=["math.reduce_all", "reduce_all"])
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_all_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes the "logical and" of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  x = tf.constant([[True,  True], [False, False]])
  tf.reduce_all(x)  # False
  tf.reduce_all(x, 0)  # [False, False]
  tf.reduce_all(x, 1)  # [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.all
  @end_compatibility
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_all(input_tensor, axis, keepdims, name)


@tf_export("reduce_all", "math.reduce_all", v1=[])
@dispatch.add_dispatch_support
def reduce_all(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the "logical and" of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  x = tf.constant([[True,  True], [False, False]])
  tf.reduce_all(x)  # False
  tf.reduce_all(x, 0)  # [False, False]
  tf.reduce_all(x, 1)  # [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.all
  @end_compatibility
  """
  keepdims = False if keepdims is None else keepdims
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops._all(
          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))


@tf_export(v1=["math.reduce_any", "reduce_any"])
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_any_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes the "logical or" of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  x = tf.constant([[True,  True], [False, False]])
  tf.reduce_any(x)  # True
  tf.reduce_any(x, 0)  # [True, True]
  tf.reduce_any(x, 1)  # [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.any
  @end_compatibility
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_any(input_tensor, axis, keepdims, name)


@tf_export("math.reduce_any", "reduce_any", v1=[])
@dispatch.add_dispatch_support
def reduce_any(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the "logical or" of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  x = tf.constant([[True,  True], [False, False]])
  tf.reduce_any(x)  # True
  tf.reduce_any(x, 0)  # [True, True]
  tf.reduce_any(x, 1)  # [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.any
  @end_compatibility
  """
  keepdims = False if keepdims is None else keepdims
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops._any(
          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))


@tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"])
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_logsumexp_v1(input_tensor,
                        axis=None,
                        keepdims=None,
                        name=None,
                        reduction_indices=None,
                        keep_dims=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  This function is more numerically stable than log(sum(exp(input))). It avoids
  overflows caused by taking the exp of large inputs and underflows caused by
  taking the log of small inputs.

  For example:

  ```python
  x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
  tf.reduce_logsumexp(x)  # log(6)
  tf.reduce_logsumexp(x, 0)  # [log(2), log(2), log(2)]
  tf.reduce_logsumexp(x, 1)  # [log(3), log(3)]
  tf.reduce_logsumexp(x, 1, keepdims=True)  # [[log(3)], [log(3)]]
  tf.reduce_logsumexp(x, [0, 1])  # log(6)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_logsumexp(input_tensor, axis, keepdims, name)


@tf_export("math.reduce_logsumexp", "reduce_logsumexp", v1=[])
def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  This function is more numerically stable than log(sum(exp(input))). It avoids
  overflows caused by taking the exp of large inputs and underflows caused by
  taking the log of small inputs.

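  Internally the reduction uses the standard shift-by-max identity,
  log(sum(exp(x))) = m + log(sum(exp(x - m))) with m = max(x), so `exp` is
  only ever applied to non-positive values.
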
  For example:

  ```python
  x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
  tf.reduce_logsumexp(x)  # log(6)
  tf.reduce_logsumexp(x, 0)  # [log(2), log(2), log(2)]
  tf.reduce_logsumexp(x, 1)  # [log(3), log(3)]
  tf.reduce_logsumexp(x, 1, keepdims=True)  # [[log(3)], [log(3)]]
  tf.reduce_logsumexp(x, [0, 1])  # log(6)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  keepdims = False if keepdims is None else keepdims
  input_tensor = ops.convert_to_tensor(input_tensor)
  with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
    reduce_dim = _ReductionDims(input_tensor, axis)
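    # Shift by the per-reduction max before exponentiating so exp() cannot
    # overflow. Non-finite maxes are replaced by zero to avoid NaNs from
    # `inf - inf` in the subtraction; the max is added back after the log.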
    raw_max = reduce_max_with_dims(
        input_tensor, axis=axis, keepdims=True, dims=reduce_dim)
    my_max = array_ops.stop_gradient(
        gen_math_ops.select(
            gen_math_ops.is_finite(raw_max), raw_max,
            gen_array_ops.zeros_like(raw_max)))
    result = gen_math_ops.log(
        reduce_sum_with_dims(
            gen_math_ops.exp(gen_math_ops.sub(input_tensor, my_max)),
            axis=axis,
            keepdims=keepdims,
            dims=reduce_dim))
    if not keepdims:
      my_max = array_ops.reshape(my_max, gen_array_ops.shape(result))
    result = gen_math_ops.add(result, my_max)
    return _may_reduce_to_scalar(keepdims, axis, result)


@tf_export("linalg.trace", v1=["linalg.trace", "trace"])
@deprecation.deprecated_endpoints("trace")
@dispatch.add_dispatch_support
def trace(x, name=None):
  """Compute the trace of a tensor `x`.

  `trace(x)` returns the sum along the main diagonal of each inner-most matrix
  in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
  is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where

  `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`

  For example:

  ```python
  x = tf.constant([[1, 2], [3, 4]])
  tf.linalg.trace(x)  # 5

  x = tf.constant([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
  tf.linalg.trace(x)  # 15

  x = tf.constant([[[1, 2, 3],
                    [4, 5, 6],
                    [7, 8, 9]],
                   [[-1, -2, -3],
                    [-4, -5, -6],
                    [-7, -8, -9]]])
  tf.linalg.trace(x)  # [15, -15]
  ```

  Args:
    x: tensor.
    name: A name for the operation (optional).

  Returns:
    The trace of input tensor.
  """
  with ops.name_scope(name, "Trace", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)


@tf_export("linalg.matmul", "matmul")
@dispatch.add_dispatch_support
def matmul(a,
           b,
           transpose_a=False,
           transpose_b=False,
           adjoint_a=False,
           adjoint_b=False,
           a_is_sparse=False,
           b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.

  The inputs must, following any transpositions, be tensors of rank >= 2
  where the inner 2 dimensions specify valid matrix multiplication dimensions,
  and any further outer dimensions specify matching batch size.

  Both matrices must be of the same type. The supported types are:
  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.

  Either matrix can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flags to `True`. These are
  `False` by default.

  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
  This optimization is only available for plain matrices (rank-2 tensors) with
  datatypes `bfloat16` or `float32`.

  A simple 2-D tensor matrix multiplication:

  >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
  >>> a  # 2-D tensor
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[1, 2, 3],
         [4, 5, 6]], dtype=int32)>
  >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
  >>> b  # 2-D tensor
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
  array([[ 7,  8],
         [ 9, 10],
         [11, 12]], dtype=int32)>
  >>> c = tf.matmul(a, b)
  >>> c  # `a` * `b`
  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[ 58,  64],
         [139, 154]], dtype=int32)>

  A batch matrix multiplication with batch shape [2]:

  >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])
  >>> a  # 3-D tensor
  <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=
  array([[[ 1,  2,  3],
          [ 4,  5,  6]],
         [[ 7,  8,  9],
          [10, 11, 12]]], dtype=int32)>
  >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])
  >>> b  # 3-D tensor
  <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
  array([[[13, 14],
          [15, 16],
          [17, 18]],
         [[19, 20],
          [21, 22],
          [23, 24]]], dtype=int32)>
  >>> c = tf.matmul(a, b)
  >>> c  # `a` * `b`
  <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
  array([[[ 94, 100],
          [229, 244]],
         [[508, 532],
          [697, 730]]], dtype=int32)>

  Since Python >= 3.5 the `@` operator is supported
  (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow,
  it simply calls the `tf.matmul()` function, so the following lines are
  equivalent:

  >>> d = a @ b @ [[10], [11]]
  >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])

  Args:
    a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,
      `complex64`, `complex128` and rank > 1.
    b: `tf.Tensor` with same type and rank as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    adjoint_a: If `True`, `a` is conjugated and transposed before
      multiplication.
    adjoint_b: If `True`, `b` is conjugated and transposed before
      multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix
    is the product of the corresponding matrices in `a` and `b`, e.g. if all
    transpose or adjoint attributes are `False`:

    `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,
    for all indices `i`, `j`.

    Note: This is matrix product, not element-wise product.


  Raises:
    ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and
      `adjoint_b` are both set to `True`.
  """
2847  with ops.name_scope(name, "MatMul", [a, b]) as name:
2848    if transpose_a and adjoint_a:
2849      raise ValueError("Only one of transpose_a and adjoint_a can be True.")
2850    if transpose_b and adjoint_b:
2851      raise ValueError("Only one of transpose_b and adjoint_b can be True.")
2852
2853    if context.executing_eagerly():
2854      if not isinstance(a, (ops.EagerTensor, _resource_variable_type)):
2855        a = ops.convert_to_tensor(a, name="a")
2856      if not isinstance(b, (ops.EagerTensor, _resource_variable_type)):
2857        b = ops.convert_to_tensor(b, name="b")
2858    else:
2859      a = ops.convert_to_tensor(a, name="a")
2860      b = ops.convert_to_tensor(b, name="b")
2861
2862    # TODO(apassos) remove _shape_tuple here when it is not needed.
2863    a_shape = a._shape_tuple()  # pylint: disable=protected-access
2864    b_shape = b._shape_tuple()  # pylint: disable=protected-access
2865
2866    output_may_have_non_empty_batch_shape = (
2867        (a_shape is None or len(a_shape) > 2) or
2868        (b_shape is None or len(b_shape) > 2))
2869
2870    if (not a_is_sparse and
2871        not b_is_sparse) and output_may_have_non_empty_batch_shape:
2872      # BatchMatmul does not support transpose, so we conjugate the matrix and
2873      # use adjoint instead. Conj() is a noop for real matrices.
2874      if transpose_a:
2875        a = conj(a)
2876        adjoint_a = True
2877      if transpose_b:
2878        b = conj(b)
2879        adjoint_b = True
2880      return gen_math_ops.batch_mat_mul_v2(
2881          a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)
2882
2883    # Neither matmul nor sparse_matmul support adjoint, so we conjugate
2884    # the matrix and use transpose instead. Conj() is a noop for real
2885    # matrices.
2886    if adjoint_a:
2887      a = conj(a)
2888      transpose_a = True
2889    if adjoint_b:
2890      b = conj(b)
2891      transpose_b = True
2892
2893    use_sparse_matmul = False
2894    if a_is_sparse or b_is_sparse:
2895      sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
2896      use_sparse_matmul = (
2897          a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)
2898    if ((a.dtype == dtypes.bfloat16 or b.dtype == dtypes.bfloat16) and
2899        a.dtype != b.dtype):
2900      # matmul currently doesn't handle mixed-precision inputs.
2901      use_sparse_matmul = True
2902    if use_sparse_matmul:
2903      ret = sparse_matmul(
2904          a,
2905          b,
2906          transpose_a=transpose_a,
2907          transpose_b=transpose_b,
2908          a_is_sparse=a_is_sparse,
2909          b_is_sparse=b_is_sparse,
2910          name=name)
2911      # sparse_matmul always returns float32, even with
2912      # bfloat16 inputs. This prevents us from configuring bfloat16 training.
2913      # casting to bfloat16 also matches non-sparse matmul behavior better.
2914      if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:
2915        ret = cast(ret, dtypes.bfloat16)
2916      return ret
2917    else:
2918      return gen_math_ops.mat_mul(
2919          a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
2920
2921
2922@tf_export("linalg.matvec")
2923def matvec(a,
2924           b,
2925           transpose_a=False,
2926           adjoint_a=False,
2927           a_is_sparse=False,
2928           b_is_sparse=False,
2929           name=None):
2930  """Multiplies matrix `a` by vector `b`, producing `a` * `b`.
2931
2932  The matrix `a` must, following any transpositions, be a tensor of rank >= 2,
2933  with `shape(a)[-1] == shape(b)[-1]`, and `shape(a)[:-2]` able to broadcast
2934  with `shape(b)[:-1]`.
2935
2936  Both `a` and `b` must be of the same type. The supported types are:
2937  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
2938
2939  Matrix `a` can be transposed or adjointed (conjugated and transposed) on
2940  the fly by setting the corresponding flag to `True`. These flags are `False`
2941  by default.
2942
2943  If one or both of the inputs contain a lot of zeros, a more efficient
2944  multiplication algorithm can be used by setting the corresponding
2945  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
2946  This optimization is only available for plain matrices/vectors (rank-2/1
2947  tensors) with datatypes `bfloat16` or `float32`.
2948
2949  For example:
2950
2951  ```python
2952  # 2-D tensor `a`
2953  # [[1, 2, 3],
2954  #  [4, 5, 6]]
2955  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
2956
2957  # 1-D tensor `b`
2958  # [7, 9, 11]
2959  b = tf.constant([7, 9, 11], shape=[3])
2960
2961  # `a` * `b`
2962  # [ 58, 139]
2963  c = tf.linalg.matvec(a, b)
2964
2965
2966  # 3-D tensor `a`
2967  # [[[ 1,  2,  3],
2968  #   [ 4,  5,  6]],
2969  #  [[ 7,  8,  9],
2970  #   [10, 11, 12]]]
2971  a = tf.constant(np.arange(1, 13, dtype=np.int32),
2972                  shape=[2, 2, 3])
2973
2974  # 2-D tensor `b`
2975  # [[13, 14, 15],
2976  #  [16, 17, 18]]
2977  b = tf.constant(np.arange(13, 19, dtype=np.int32),
2978                  shape=[2, 3])
2979
2980  # `a` * `b`
2981  # [[ 86, 212],
2982  #  [410, 563]]
2983  c = tf.linalg.matvec(a, b)
2984  ```
2985
2986  Args:
2987    a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
2988      `complex128` and rank > 1.
2989    b: `Tensor` with same type as `a` and compatible dimensions.
2990    transpose_a: If `True`, `a` is transposed before multiplication.
2991    adjoint_a: If `True`, `a` is conjugated and transposed before
2992      multiplication.
2993    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
2994    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
2995    name: Name for the operation (optional).
2996
2997  Returns:
2998    A `Tensor` of the same type as `a` and `b` where each inner-most vector is
2999    the product of the corresponding matrices in `a` and vectors in `b`, e.g. if
3000    all transpose or adjoint attributes are `False`:
3001
3002    `output`[..., i] = sum_k (`a`[..., i, k] * `b`[..., k]), for all indices i.
3003
3004    Note: This is a matrix-vector product, not an element-wise product.
3005
3006
3007  Raises:
3008    ValueError: If transpose_a and adjoint_a are both set to True.
3009  """
3010  with ops.name_scope(name, "MatVec", [a, b]) as name:
3011    output = matmul(
3012        a,
3013        array_ops.expand_dims(b, axis=-1),
3014        transpose_a=transpose_a,
3015        adjoint_a=adjoint_a,
3016        a_is_sparse=a_is_sparse,
3017        b_is_sparse=b_is_sparse)
3018    return array_ops.squeeze(output, axis=-1)
3019
3020
3021_OverrideBinaryOperatorHelper(matmul, "matmul")
3022
3023sparse_matmul = deprecation.deprecated(None, "Use `tf.linalg.matmul` instead")(
3024    gen_math_ops.sparse_mat_mul)
3025tf_export(v1=["sparse_matmul"])(sparse_matmul)
3026
3027
3028@ops.RegisterStatistics("MatMul", "flops")
3029def _calc_mat_mul_flops(graph, node):
3030  """Calculates the compute resources needed for MatMul."""
3031  transpose_a = node.attr["transpose_a"].b
3032  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
3033  a_shape.assert_is_fully_defined()
3034  if transpose_a:
3035    k = int(a_shape[0])
3036  else:
3037    k = int(a_shape[1])
3038  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
3039  output_shape.assert_is_fully_defined()
3040  output_count = np.prod(output_shape.as_list())
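  # Each output element takes k multiply-add pairs, i.e. 2 * k flops.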
3041  return ops.OpStats("flops", (k * output_count * 2))
3042
3043
3044@ops.RegisterStatistics("BatchMatMul", "flops")
3045@ops.RegisterStatistics("BatchMatMulV2", "flops")
3046def _calc_batch_mat_mul_flops(graph, node):
3047  """Calculates the compute resources needed for BatchMatMul."""
3048  transpose_a = node.attr["transpose_a"].b
3049  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
3050  a_shape.assert_is_fully_defined()
3051  if transpose_a:
3052    k = int(a_shape[-2])
3053  else:
3054    k = int(a_shape[-1])
3055  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
3056  output_shape.assert_is_fully_defined()
3057  output_count = np.prod(output_shape.as_list())
3058  return ops.OpStats("flops", (k * output_count * 2))
3059
3060
3061def _as_indexed_slices(x, optimize=True):
3062  """Convert 'x' to IndexedSlices.
3063
3064  Convert a dense Tensor to a block-sparse IndexedSlices.
3065
3066  Args:
3067    x: Either a Tensor object, or an IndexedSlices object.
3068    optimize: if true, attempt to optimize the conversion of 'x'.
3069
3070  Returns:
3071    An IndexedSlices object.
3072
3073  Raises:
3074    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
3075  """
3076  # TODO(touts): op_scope
3077  if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
3078    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
3079  if isinstance(x, ops.IndexedSlices):
3080    return x
3081  x_shape = array_ops.shape_internal(x, optimize=optimize)
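  # Dense fallback: treat each row of 'x' as its own slice, so the indices
  # are [0, d0) over the first dimension.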
3082  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
3083
3084
3085def _as_indexed_slices_list(inputs, optimize=True):
3086  """Convert all elements of 'inputs' to IndexedSlices.
3087
3088  Additionally, homogenize the types of all the indices to
3089  either int32 or int64.
3090
3091  Args:
3092    inputs: List containing either Tensor or IndexedSlices objects.
3093    optimize: if true, attempt to optimize the conversion of each input.
3094
3095  Returns:
3096    A list of IndexedSlices objects.
3097
3098  Raises:
3099    TypeError: If 'inputs' is not a list or a tuple.
3100  """
3101  if not isinstance(inputs, (list, tuple)):
3102    raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
3103  outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
3104  with_int32_index = [
3105      o.indices for o in outputs if o.indices.dtype == dtypes.int32
3106  ]
3107  if not with_int32_index or len(with_int32_index) == len(outputs):
3108    return outputs
3109  casted_outputs = []
3110  for o in outputs:
3111    if o.indices.dtype == dtypes.int32:
3112      casted_outputs.append(
3113          ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
3114                            o.dense_shape))
3115    else:
3116      casted_outputs.append(o)
3117  return casted_outputs
3118
3119
3120@tf_export("math.add_n", "add_n")
3121@dispatch.add_dispatch_support
3122def add_n(inputs, name=None):
3123  """Adds all input tensors element-wise.
3124
3125  `tf.math.add_n` performs the same operation as `tf.math.accumulate_n`, but it
3126  waits for all of its inputs to be ready before beginning to sum.
3127  This buffering can result in higher memory consumption when inputs are ready
3128  at different times, since the minimum temporary storage required is
3129  proportional to the input size rather than the output size.
3130
3131  This op does not [broadcast](
3132  https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html)
3133  its inputs. If you need broadcasting, use `tf.math.add` (or the `+` operator)
3134  instead.
3135
3136  For example:
3137
3138  >>> a = tf.constant([[3, 5], [4, 8]])
3139  >>> b = tf.constant([[1, 6], [2, 9]])
3140  >>> tf.math.add_n([a, b, a])
3141  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
3142  array([[ 7, 16],
3143         [10, 25]], dtype=int32)>
3144
3145  Args:
3146    inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with the
3147      same shape and type. `tf.IndexedSlices` objects will be converted into
3148      dense tensors prior to adding.
3149    name: A name for the operation (optional).
3150
3151  Returns:
3152    A `tf.Tensor` of the same shape and type as the elements of `inputs`.
3153
3154  Raises:
3155    ValueError: If `inputs` don't all have same shape and dtype or the shape
3156    cannot be inferred.
3157  """
3158  if not inputs or not isinstance(inputs, (list, tuple)):
3159    raise ValueError("inputs must be a list of at least one "
3160                     "Tensor/IndexedSlices with the same dtype and shape")
3161  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
3162  if not all(isinstance(x, (ops.Tensor, ops.IndexedSlices)) for x in inputs):
3163    raise ValueError("inputs must be a list of at least one "
3164                     "Tensor/IndexedSlices with the same dtype and shape")
3165
3166  if len(inputs) == 1:
3167    if isinstance(inputs[0], ops.IndexedSlices):
3168      values = ops.convert_to_tensor(inputs[0])
3169    else:
3170      values = inputs[0]
3171    if name:
3172      return array_ops.identity(values, name=name)
3173    return values
3174  return gen_math_ops.add_n(inputs, name=name)
3175
3176
3177@tf_export("math.accumulate_n", v1=["math.accumulate_n", "accumulate_n"])
3178@deprecation.deprecated_endpoints("accumulate_n")
3179def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
3180  """Returns the element-wise sum of a list of tensors.
3181
3182  Optionally, pass `shape` and `tensor_dtype` for shape and type checking;
3183  otherwise, these are inferred.
3184
3185  `accumulate_n` performs the same operation as `tf.math.add_n`.
3186
3187  For example:
3188
3189  ```python
3190  a = tf.constant([[1, 2], [3, 4]])
3191  b = tf.constant([[5, 0], [0, 6]])
3192  tf.math.accumulate_n([a, b, a])  # [[7, 4], [6, 14]]
3193
3194  # Explicitly pass shape and type
3195  tf.math.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
3196                                                                 # [[7,  4],
3197                                                                 #  [6, 14]]
3198  ```
3199
3200  Args:
3201    inputs: A list of `Tensor` objects, each with same shape and type.
3202    shape: Expected shape of elements of `inputs` (optional). Also controls the
3203      output shape of this op, which may affect type inference in other ops. A
3204      value of `None` means "infer the input shape from the shapes in `inputs`".
3205    tensor_dtype: Expected data type of `inputs` (optional). A value of `None`
3206      means "infer the input dtype from `inputs[0]`".
3207    name: A name for the operation (optional).
3208
3209  Returns:
3210    A `Tensor` of same shape and type as the elements of `inputs`.
3211
3212  Raises:
3213    ValueError: If `inputs` don't all have same shape and dtype or the shape
3214    cannot be inferred.
3215  """
3216
3217  def _input_error():
3218    return ValueError("inputs must be a list of at least one Tensor with the "
3219                      "same dtype and shape")
3220
3221  if not inputs or not isinstance(inputs, (list, tuple)):
3222    raise _input_error()
3223  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
3224  if not all(isinstance(x, ops.Tensor) for x in inputs):
3225    raise _input_error()
3226  if not all(x.dtype == inputs[0].dtype for x in inputs):
3227    raise _input_error()
3228  if shape is not None:
3229    shape = tensor_shape.as_shape(shape)
3230  else:
3231    shape = tensor_shape.unknown_shape()
3232  for input_tensor in inputs:
3233    if isinstance(input_tensor, ops.Tensor):
3234      shape = shape.merge_with(input_tensor.get_shape())
3235
3236  # tensor_dtype is for safety only; operator's output type computed in C++
3237  if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:
3238    raise TypeError("tensor_dtype is {}, but input is of type {}".format(
3239        tensor_dtype, inputs[0].dtype))
3240
3241  if len(inputs) == 1 and name is None:
3242    return inputs[0]
3243  elif len(inputs) == 1 and name is not None:
3244    return array_ops.identity(inputs[0], name=name)
3245  return add_n(inputs, name=name)
3246
3247
3248@ops.RegisterGradient("AccumulateNV2")
3249def _accumulate_n_grad(op, grad):
3250  """Same as gradient for AddN. Copies the gradient to all inputs."""
3251  # Not broadcasting.
3252  return [grad] * len(op.inputs)
3253
3254
3255@tf_export("math.sigmoid", "nn.sigmoid", "sigmoid")
3256def sigmoid(x, name=None):
3257  r"""Computes sigmoid of `x` element-wise.
3258
3259  Formula for calculating sigmoid(x): `y = 1 / (1 + exp(-x))`.
3260
3261  For \\(x \in (-\infty, \infty)\\), \\(\mathrm{sigmoid}(x) \in (0, 1)\\).
3262
3263  Example Usage:
3264
3265  If a positive number is large, then its sigmoid will approach 1, since
3266  `exp(-x)` approaches 0 and the formula reduces to `y = 1 / (1 + 0)`.
3267
3268  >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
3269  >>> tf.math.sigmoid(x)
3270  <tf.Tensor: shape=(4,), dtype=float32,
3271  numpy=array([0.5      , 0.7310586, 1.       , 1.       ], dtype=float32)>
3272
3273  If a negative number is large in magnitude, its sigmoid will approach 0,
3274  since `exp(-x)` becomes very large: `y = 1 / (1 + <large_num>)`.
3275
3276  >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
3277  >>> tf.math.sigmoid(x)
3278  <tf.Tensor: shape=(4,), dtype=float32, numpy=
3279  array([0.0000000e+00, 1.9287499e-22, 2.6894143e-01, 5.0000000e-01],
3280        dtype=float32)>
3281
3282  Args:
3283    x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or
3284      `complex128`.
3285    name: A name for the operation (optional).
3286
3287  Returns:
3288    A Tensor with the same type as `x`.
3289
3290  Usage Example:
3291
3292  >>> x = tf.constant([-128.0, 0.0, 128.0], dtype=tf.float32)
3293  >>> tf.sigmoid(x)
3294  <tf.Tensor: shape=(3,), dtype=float32,
3295  numpy=array([0. , 0.5, 1. ], dtype=float32)>
3296
3297  @compatibility(scipy)
3298  Equivalent to scipy.special.expit
3299  @end_compatibility
3300  """
3301  with ops.name_scope(name, "Sigmoid", [x]) as name:
3302    x = ops.convert_to_tensor(x, name="x")
3303    return gen_math_ops.sigmoid(x, name=name)
3304
3305
3306@tf_export("math.log_sigmoid", v1=["math.log_sigmoid", "log_sigmoid"])
3307@dispatch.add_dispatch_support
3308@deprecation.deprecated_endpoints("log_sigmoid")
3309def log_sigmoid(x, name=None):
3310  """Computes log sigmoid of `x` element-wise.
3311
3312  Specifically, `y = log(1 / (1 + exp(-x)))`.  For numerical stability,
3313  we use `y = -tf.nn.softplus(-x)`.
3314
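  For example, a quick numerical check (an illustrative sketch; outputs
  rounded):

  ```python
  x = tf.constant([-1.0, 0.0, 1.0])
  tf.math.log_sigmoid(x)  # => approximately [-1.3132617, -0.6931472, -0.3132617]
  ```
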
3315  Args:
3316    x: A Tensor with type `float32` or `float64`.
3317    name: A name for the operation (optional).
3318
3319  Returns:
3320    A Tensor with the same type as `x`.
3321  """
3322  with ops.name_scope(name, "LogSigmoid", [x]) as name:
3323    x = ops.convert_to_tensor(x, name="x")
3324    return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name)
3325
3326
3327@tf_export("math.bincount", v1=[])
3328def bincount(arr,
3329             weights=None,
3330             minlength=None,
3331             maxlength=None,
3332             dtype=dtypes.int32,
3333             name=None):
3334  """Counts the number of occurrences of each value in an integer array.
3335
3336  If `minlength` and `maxlength` are not given, returns a vector with length
3337  `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise.
3338  If `weights` are non-None, then index `i` of the output stores the sum of the
3339  value in `weights` at each index where the corresponding value in `arr` is
3340  `i`.
3341
3342  ```python
3343  values = tf.constant([1,1,2,3,2,4,4,5])
3344  tf.math.bincount(values)  # [0 2 2 1 2 1]
3345  ```
3346  The output vector length is the maximum element in `values` (here 5) plus 1,
3347  so the vector has length 6.
3348
3349  Each bin value in the output indicates the number of occurrences of its
3350  index. Here, index 1 of the output has the value 2, indicating that 1
3351  occurs twice in `values`.
3352
3353  ```python
3354  values = tf.constant([1,1,2,3,2,4,4,5])
3355  weights = tf.constant([1,5,0,1,0,5,4,5])
3356  tf.math.bincount(values, weights=weights)  # [0 6 0 1 9 5]
3357  ```
3358  Each bin is incremented by the corresponding weight instead of 1.
3359  Here, index 1 of the output has the value 6, the sum of the weights at the
3360  positions where `values` equals 1.
3361
3362  Args:
3363    arr: An int32 tensor of non-negative values.
3364    weights: If non-None, must be the same shape as arr. For each value in
3365      `arr`, the bin will be incremented by the corresponding weight instead of
3366      1.
3367    minlength: If given, ensures the output has length at least `minlength`,
3368      padding with zeros at the end if necessary.
3369    maxlength: If given, skips values in `arr` that are equal or greater than
3370      `maxlength`, ensuring that the output has length at most `maxlength`.
3371    dtype: If `weights` is None, determines the type of the output bins.
3372    name: A name scope for the associated operations (optional).
3373
3374  Returns:
3375    A vector with the same dtype as `weights` or the given `dtype`. The bin
3376    values.
3377
3378  Raises:
3379    `InvalidArgumentError` if negative values are provided as an input.
3380
3381  """
3382  name = "bincount" if name is None else name
3383  with ops.name_scope(name):
3384    arr = ops.convert_to_tensor(arr, name="arr", dtype=dtypes.int32)
3385    array_is_nonempty = reduce_prod(array_ops.shape(arr)) > 0
3386    output_size = cast(array_is_nonempty, dtypes.int32) * (reduce_max(arr) + 1)
3387    if minlength is not None:
3388      minlength = ops.convert_to_tensor(
3389          minlength, name="minlength", dtype=dtypes.int32)
3390      output_size = gen_math_ops.maximum(minlength, output_size)
3391    if maxlength is not None:
3392      maxlength = ops.convert_to_tensor(
3393          maxlength, name="maxlength", dtype=dtypes.int32)
3394      output_size = gen_math_ops.minimum(maxlength, output_size)
3395    if weights is not None:
3396      weights = ops.convert_to_tensor(weights, name="weights")
3397      return gen_math_ops.unsorted_segment_sum(weights, arr, output_size)
3398    weights = constant_op.constant([], dtype)
3399    return gen_math_ops.bincount(arr, output_size, weights)
3400
3401
3402@tf_export(v1=["math.bincount", "bincount"])
3403@deprecation.deprecated_endpoints("bincount")
3404def bincount_v1(arr,
3405                weights=None,
3406                minlength=None,
3407                maxlength=None,
3408                dtype=dtypes.int32):
3409  """Counts the number of occurrences of each value in an integer array.
3410
3411  If `minlength` and `maxlength` are not given, returns a vector with length
3412  `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise.
3413  If `weights` are non-None, then index `i` of the output stores the sum of the
3414  value in `weights` at each index where the corresponding value in `arr` is
3415  `i`.
3416
3417  Args:
3418    arr: An int32 tensor of non-negative values.
3419    weights: If non-None, must be the same shape as arr. For each value in
3420      `arr`, the bin will be incremented by the corresponding weight instead of
3421      1.
3422    minlength: If given, ensures the output has length at least `minlength`,
3423      padding with zeros at the end if necessary.
3424    maxlength: If given, skips values in `arr` that are equal or greater than
3425      `maxlength`, ensuring that the output has length at most `maxlength`.
3426    dtype: If `weights` is None, determines the type of the output bins.
3427
3428  Returns:
3429    A vector with the same dtype as `weights` or the given `dtype`. The bin
3430    values.
3431  """
3432  return bincount(arr, weights, minlength, maxlength, dtype)
3433
3434
3435@tf_export("math.cumsum", "cumsum")
3436def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
3437  """Compute the cumulative sum of the tensor `x` along `axis`.
3438
3439  By default, this op performs an inclusive cumsum, which means that the first
3440  element of the input is identical to the first element of the output:
3441  For example:
3442
3443  >>> # tf.cumsum([a, b, c])   # [a, a + b, a + b + c]
3444  >>> x = tf.constant([2, 4, 6, 8])
3445  >>> tf.cumsum(x)
3446  <tf.Tensor: shape=(4,), dtype=int32,
3447  numpy=array([ 2,  6, 12, 20], dtype=int32)>
3448
3449  >>> # using varying `axis` values
3450  >>> y = tf.constant([[2, 4, 6, 8], [1,3,5,7]])
3451  >>> tf.cumsum(y, axis=0)
3452  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
3453  array([[ 2,  4,  6,  8],
3454         [ 3,  7, 11, 15]], dtype=int32)>
3455  >>> tf.cumsum(y, axis=1)
3456  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
3457  array([[ 2,  6, 12, 20],
3458         [ 1,  4,  9, 16]], dtype=int32)>
3459
3460  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
3461  instead:
3462
3463  >>> # tf.cumsum([a, b, c], exclusive=True)  # [0, a, a + b]
3464  >>> x = tf.constant([2, 4, 6, 8])
3465  >>> tf.cumsum(x, exclusive=True)
3466  <tf.Tensor: shape=(4,), dtype=int32,
3467  numpy=array([ 0,  2,  6, 12], dtype=int32)>
3468
3469  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
3470  opposite direction:
3471
3472  >>> # tf.cumsum([a, b, c], reverse=True)  # [a + b + c, b + c, c]
3473  >>> x = tf.constant([2, 4, 6, 8])
3474  >>> tf.cumsum(x, reverse=True)
3475  <tf.Tensor: shape=(4,), dtype=int32,
3476  numpy=array([20, 18, 14,  8], dtype=int32)>
3477
3478  This is more efficient than using separate `tf.reverse` ops.
3479  The `reverse` and `exclusive` kwargs can also be combined:
3480
3481  >>> # tf.cumsum([a, b, c], exclusive=True, reverse=True)  # [b + c, c, 0]
3482  >>> x = tf.constant([2, 4, 6, 8])
3483  >>> tf.cumsum(x, exclusive=True, reverse=True)
3484  <tf.Tensor: shape=(4,), dtype=int32,
3485  numpy=array([18, 14,  8,  0], dtype=int32)>
3486
3487  Args:
3488    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
3489      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
3490      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
3491    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
3492      `[-rank(x), rank(x))`.
3493    exclusive: If `True`, perform exclusive cumsum.
3494    reverse: A `bool` (default: False).
3495    name: A name for the operation (optional).
3496
3497  Returns:
3498    A `Tensor`. Has the same type as `x`.
3499  """
3500  with ops.name_scope(name, "Cumsum", [x]) as name:
3501    x = ops.convert_to_tensor(x, name="x")
3502    return gen_math_ops.cumsum(
3503        x, axis, exclusive=exclusive, reverse=reverse, name=name)
3504
3505
3506@tf_export("math.cumprod", v1=["math.cumprod", "cumprod"])
3507@deprecation.deprecated_endpoints("cumprod")
3508def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
3509  """Compute the cumulative product of the tensor `x` along `axis`.
3510
3511  By default, this op performs an inclusive cumprod, which means that the
3512  first element of the input is identical to the first element of the output:
3513
3514  ```python
3515  tf.math.cumprod([a, b, c])  # [a, a * b, a * b * c]
3516  ```
3517
3518  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
3519  performed
3520  instead:
3521
3522  ```python
3523  tf.math.cumprod([a, b, c], exclusive=True)  # [1, a, a * b]
3524  ```
3525
3526  By setting the `reverse` kwarg to `True`, the cumprod is performed in the
3527  opposite direction:
3528
3529  ```python
3530  tf.math.cumprod([a, b, c], reverse=True)  # [a * b * c, b * c, c]
3531  ```
3532
3533  This is more efficient than using separate `tf.reverse` ops.
3534  The `reverse` and `exclusive` kwargs can also be combined:
3535
3536  ```python
3537  tf.math.cumprod([a, b, c], exclusive=True, reverse=True)  # [b * c, c, 1]
3538  ```
3539
3540  Args:
3541    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
3542      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
3543      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
3544    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
3545      `[-rank(x), rank(x))`.
3546    exclusive: If `True`, perform exclusive cumprod.
3547    reverse: A `bool` (default: False).
3548    name: A name for the operation (optional).
3549
3550  Returns:
3551    A `Tensor`. Has the same type as `x`.
3552  """
3553  with ops.name_scope(name, "Cumprod", [x]) as name:
3554    x = ops.convert_to_tensor(x, name="x")
3555    return gen_math_ops.cumprod(
3556        x, axis, exclusive=exclusive, reverse=reverse, name=name)
3557
3558
3559@tf_export("math.cumulative_logsumexp", v1=["math.cumulative_logsumexp"])
3560def cumulative_logsumexp(x, axis=0, exclusive=False, reverse=False, name=None):
3561  """Compute the cumulative log-sum-exp of the tensor `x` along `axis`.
3562
3563  By default, this op performs an inclusive cumulative log-sum-exp, which means
3564  that the first element of the input is identical to the first element of
3565  the output.
3566
3567  This operation is significantly more numerically stable than the equivalent
3568  tensorflow operation `tf.math.log(tf.math.cumsum(tf.math.exp(x)))`, although
3569  it computes the same result given infinite numerical precision. However, note
3570  that in some cases, it may be less stable than `tf.math.reduce_logsumexp`
3571  for a given element, as it applies the "log-sum-exp trick" in a different
3572  way.
3573
3574  More precisely, where `tf.math.reduce_logsumexp` uses the following trick:
3575
3576  ```
3577  log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x)
3578  ```
3579
3580  it cannot be directly used here as there is no fast way of applying it
3581  to each prefix `x[:i]`. Instead, this function implements a prefix
3582  scan using pairwise log-add-exp, which is a commutative and associative
3583  (up to floating point precision) operator:
3584
3585  ```
3586  log_add_exp(x, y) = log(exp(x) + exp(y))
3587                    = log(1 + exp(min(x, y) - max(x, y))) + max(x, y)
3588  ```
3589
3590  However, reducing using the above operator leads to a different computation
3591  tree (logs are taken repeatedly instead of only at the end), and the maximum
3592  is only computed pairwise instead of over the entire prefix. In general, this
3593  leads to a different and slightly less precise computation.
3594
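  For example, a small illustrative check (outputs rounded):

  ```python
  x = tf.constant([0.0, 1.0, 2.0])
  tf.math.cumulative_logsumexp(x)
  # => approximately [0.0, 1.3132617, 2.407606]
  # Matches tf.math.log(tf.math.cumsum(tf.math.exp(x))) here, but remains
  # stable for inputs of much larger magnitude.
  ```
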
3595  Args:
3596    x: A `Tensor`. Must be one of the following types: `float16`, `float32`,
3597      `float64`.
3598    axis: A `Tensor` of type `int32` or `int64` (default: 0). Must be in the
3599      range `[-rank(x), rank(x))`.
3600    exclusive: If `True`, perform exclusive cumulative log-sum-exp.
3601    reverse: If `True`, performs the cumulative log-sum-exp in the reverse
3602      direction.
3603    name: A name for the operation (optional).
3604
3605  Returns:
3606    A `Tensor`. Has the same shape and type as `x`.
3607  """
3608  with ops.name_scope(name, "CumulativeLogsumexp", [x]) as name:
3609    x = ops.convert_to_tensor(x, name="x")
3610    return gen_math_ops.cumulative_logsumexp(
3611        x, axis, exclusive=exclusive, reverse=reverse, name=name)
3612
3613
3614@tf_export("math.conj", v1=["math.conj", "conj"])
3615@dispatch.add_dispatch_support
3616@deprecation.deprecated_endpoints("conj")
3617def conj(x, name=None):
3618  r"""Returns the complex conjugate of a complex number.
3619
3620  Given a tensor `input` of complex numbers, this operation returns a tensor of
3621  complex numbers that are the complex conjugate of each element in `input`. The
3622  complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
3623  real part and *b* is the imaginary part.
3624
3625  The complex conjugate returned by this operation is of the form \\(a - bj\\).
3626
3627  For example:
3628
3629      # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
3630      tf.math.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
3631
3632  If `x` is real, it is returned unchanged.
3633
3634  Args:
3635    x: `Tensor` to conjugate.  Must have numeric or variant type.
3636    name: A name for the operation (optional).
3637
3638  Returns:
3639    A `Tensor` that is the conjugate of `x` (with the same type).
3640
3641  Raises:
3642    TypeError: If `x` is not a numeric tensor.
3643  """
3644  if isinstance(x, ops.Tensor):
3645    dt = x.dtype
3646    if dt.is_floating or dt.is_integer:
3647      return x
3648  with ops.name_scope(name, "Conj", [x]) as name:
3649    x = ops.convert_to_tensor(x, name="x")
3650    if x.dtype.is_complex or x.dtype == dtypes.variant:
3651      return gen_math_ops.conj(x, name=name)
3652    elif x.dtype.is_floating or x.dtype.is_integer:
3653      return x
3654    else:
3655      raise TypeError("Expected numeric or variant tensor, got dtype %r" %
3656                      x.dtype)
3657
3658
3659def reduced_shape(input_shape, axes):
3660  """Helper function for reduction ops.
3661
3662  Args:
3663    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
3664    axes: 1-D Tensor, the reduction axes.
3665
3666  Returns:
3667    A 1-D Tensor, the output shape as if keepdims were set to True.
3668  """
3669  if context.executing_eagerly():
3670    input_shape = input_shape.numpy()
3671    axes = axes.numpy()
3672    input_shape[axes] = 1
3673    return input_shape
3674
3675  # Example:
3676  # cast needed for SparseTensor reductions
3677  input_shape = cast(input_shape, dtypes.int32)  # [2, 3, 5, 7]
3678  axes = cast(axes, dtypes.int32)  # [1, 2]
3679
3680  input_rank = array_ops.size(input_shape)  # 4
3681  axes = (axes + input_rank) % input_rank
3682  axes_shape = array_ops.shape(axes)  # [2]
3683  return gen_data_flow_ops.dynamic_stitch(  # [2, 1, 1, 7]
3684      [
3685          range(input_rank),  # [0, 1, 2, 3]
3686          axes
3687      ],  # [1, 2]
3688      [
3689          input_shape,  # [2, 3, 5, 7]
3690          array_ops.fill(axes_shape, 1)
3691      ])  # [1, 1]
3692
3693
3694def _unsorted_segment_N(data, segment_ids, num_segments):
3695  """ Helper function for unsorted_segment_mean/_sqrtN.
3696
3697  Computes the number
3698      of segment entries with 0-entries set to 1 to allow division by N.
3699  """
3700  num_segments = ops.convert_to_tensor(num_segments)
3701  # bincount doesn't support negative indices so we use unsorted_segment_sum
3702  segment_ids_shape = array_ops.shape_internal(segment_ids)
3703  ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)
3704  n = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)
3705  # add dimensions for all non-reduced axes
3706  broadcastable_shape = array_ops.concat(
3707      [num_segments[array_ops.newaxis],
3708       array_ops.ones([array_ops.rank(data)
3709                       - array_ops.rank(segment_ids)],
3710                      dtype=num_segments.dtype)],
3711      axis=0)
3712  n = array_ops.reshape(n, broadcastable_shape)
3713  return gen_math_ops.maximum(n, 1)
3714
3715
3716@tf_export(
3717    "math.unsorted_segment_mean",
3718    v1=["math.unsorted_segment_mean", "unsorted_segment_mean"])
3719@deprecation.deprecated_endpoints("unsorted_segment_mean")
3720@dispatch.add_dispatch_support
3721def unsorted_segment_mean(data, segment_ids, num_segments, name=None):
3722  r"""Computes the mean along segments of a tensor.
3723
3724  Read [the section on
3725  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
3726  for an explanation of segments.
3727
3728  This operator is similar to the unsorted segment sum operator found
3729  [here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
3730  Instead of computing the sum over segments, it computes the mean of all
3731  entries belonging to a segment such that:
3732
3733  \\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples
3734  `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the number of
3735  occurrences of id \\(i\\).
3736
3737  If there is no entry for a given segment ID `i`, it outputs 0.
3738
3739  If the given segment ID `i` is negative, the value is dropped and will not
3740  be added to the sum of the segment.
3741
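  For example, an illustrative sketch (segment 0 averages rows 0 and 2 of
  `c`; segment 1 contains only row 1):

  ```python
  c = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
  tf.math.unsorted_segment_mean(c, tf.constant([0, 1, 0]), num_segments=2)
  # => [[3.0, 4.0],
  #     [3.0, 4.0]]
  ```
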
3742  Args:
3743    data: A `Tensor` with floating point or complex dtype.
3744    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
3745    num_segments: An integer scalar `Tensor`.  The number of distinct segment
3746      IDs.
3747    name: A name for the operation (optional).
3748
3749  Returns:
3750    A `Tensor` with the same shape as `data`, except for the first
3751    `segment_ids.rank` dimensions, which are replaced with a single dimension
3752    of size `num_segments`.
3753  """
3754  with ops.name_scope(name, "UnsortedSegmentMean"):
3755    data = ops.convert_to_tensor(data)
3756    segment_ids = ops.convert_to_tensor(segment_ids)
3757    N = _unsorted_segment_N(data, segment_ids, num_segments)
3758    summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
3759    return summed / N
3760
3761
3762@tf_export(
3763    "math.unsorted_segment_sqrt_n",
3764    v1=["math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n"])
3765@deprecation.deprecated_endpoints("unsorted_segment_sqrt_n")
3766@dispatch.add_dispatch_support
3767def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):
3768  r"""Computes the sum along segments of a tensor divided by the sqrt(N).
3769
3770  Read [the section on
3771  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
3772  for an explanation of segments.
3773
3774  This operator is similar to the unsorted segment sum operator found
3775  [here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
3776  In addition to computing the sum over segments, it divides the results by
3777  sqrt(N).
3778
3779  \\(output_i = 1/\sqrt{N_i} \sum_{j...} data[j...]\\) where the sum is over
3780  tuples `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the
3781  number of occurrences of id \\(i\\).
3782
3783  If there is no entry for a given segment ID `i`, it outputs 0.
3784
3785  Note that this op only supports floating point and complex dtypes,
3786  due to tf.sqrt only supporting these types.
3787
3788  If the given segment ID `i` is negative, the value is dropped and will not
3789  be added to the sum of the segment.
3790
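  For example, an illustrative sketch (outputs rounded):

  ```python
  c = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
  tf.math.unsorted_segment_sqrt_n(c, tf.constant([0, 1, 0]), num_segments=2)
  # => approximately [[4.2426407, 5.6568542],  # (rows 0 and 2) / sqrt(2)
  #                   [3.0, 4.0]]              # row 1 / sqrt(1)
  ```
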
3791  Args:
3792    data: A `Tensor` with floating point or complex dtype.
3793    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
3794    num_segments: An integer scalar `Tensor`.  The number of distinct segment
3795      IDs.
3796    name: A name for the operation (optional).
3797
3798  Returns:
3799    A `Tensor` with the same shape as `data`, except for the first
3800    `segment_ids.rank` dimensions, which are replaced with a single dimension
3801    of size `num_segments`.
3802  """
3803  with ops.name_scope(name, "UnsortedSegmentSqrtN"):
3804    data = ops.convert_to_tensor(data)
3805    segment_ids = ops.convert_to_tensor(segment_ids)
3806    N = _unsorted_segment_N(data, segment_ids, num_segments)
3807    summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
3808    return summed / gen_math_ops.sqrt(N)
3809
3810
3811@tf_export(v1=["sparse.segment_sum", "sparse_segment_sum"])
3812@deprecation.deprecated_endpoints("sparse_segment_sum")
3813def sparse_segment_sum(data,
3814                       indices,
3815                       segment_ids,
3816                       name=None,
3817                       num_segments=None):
3818  r"""Computes the sum along sparse segments of a tensor.
3819
3820  Read [the section on
3821  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
3822  for an explanation of segments.
3823
3824  Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s
3825  first dimension, selecting a subset of dimension 0, specified by `indices`.
3826  `segment_ids` is allowed to have missing ids, in which case the output will
3827  be zeros at those indices. In those cases `num_segments` is used to determine
3828  the size of the output.
3829
3830  For example:
3831
3832  ```python
3833  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
3834
3835  # Select two rows, one segment.
3836  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
3837  # => [[0 0 0 0]]
3838
3839  # Select two rows, two segments.
3840  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
3841  # => [[ 1  2  3  4]
3842  #     [-1 -2 -3 -4]]
3843
3844  # With missing segment ids.
3845  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
3846                        num_segments=4)
3847  # => [[ 1  2  3  4]
3848  #     [ 0  0  0  0]
3849  #     [-1 -2 -3 -4]
3850  #     [ 0  0  0  0]]
3851
3852  # Select all rows, two segments.
3853  tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
3854  # => [[0 0 0 0]
3855  #     [5 6 7 8]]
3856
3857  # Which is equivalent to:
3858  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
3859  ```
3860
3861  Args:
3862    data: A `Tensor` with data that will be assembled in the output.
3863    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
3864      `segment_ids`.
3865    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
3866      should be sorted and can be repeated.
3867    name: A name for the operation (optional).
3868    num_segments: An optional int32 scalar. Indicates the size of the output
3869      `Tensor`.
3870
3871  Returns:
3872    A `Tensor` with the same shape as `data`, except for dimension 0, which
3873    has size `k`, the number of segments specified via `num_segments` or
3874    inferred from the last element in `segment_ids`.
3875  """
3876  if num_segments is not None:
3877    return gen_math_ops.sparse_segment_sum_with_num_segments(
3878        data=data,
3879        indices=indices,
3880        segment_ids=segment_ids,
3881        num_segments=num_segments,
3882        name=name)
3883  else:
3884    return gen_math_ops.sparse_segment_sum(
3885        data=data, indices=indices, segment_ids=segment_ids, name=name)
3886
3887
3888@tf_export("sparse.segment_sum", v1=[])
3889def sparse_segment_sum_v2(data,
3890                          indices,
3891                          segment_ids,
3892                          num_segments=None,
3893                          name=None):
3894  r"""Computes the sum along sparse segments of a tensor.
3895
3896  Read [the section on
3897  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
3898  for an explanation of segments.
3899
3900  Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s
3901  first dimension, selecting a subset of dimension 0, specified by `indices`.
3902  `segment_ids` is allowed to have missing ids, in which case the output will
3903  be zeros at those indices. In those cases `num_segments` is used to determine
3904  the size of the output.
3905
3906  For example:
3907
3908  ```python
3909  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
3910
3911  # Select two rows, one segment.
3912  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
3913  # => [[0 0 0 0]]
3914
3915  # Select two rows, two segments.
3916  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
3917  # => [[ 1  2  3  4]
3918  #     [-1 -2 -3 -4]]
3919
3920  # With missing segment ids.
3921  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
3922                        num_segments=4)
3923  # => [[ 1  2  3  4]
3924  #     [ 0  0  0  0]
3925  #     [-1 -2 -3 -4]
3926  #     [ 0  0  0  0]]
3927
3928  # Select all rows, two segments.
3929  tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
3930  # => [[0 0 0 0]
3931  #     [5 6 7 8]]
3932
3933  # Which is equivalent to:
3934  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
3935  ```
3936
3937  Args:
3938    data: A `Tensor` with data that will be assembled in the output.
3939    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
3940      `segment_ids`.
3941    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
3942      should be sorted and can be repeated.
3943    num_segments: An optional int32 scalar. Indicates the size of the output
3944      `Tensor`.
3945    name: A name for the operation (optional).
3946
3947  Returns:
3948    A `Tensor` with the same shape as `data`, except for dimension 0, which
3949    has size `k`, the number of segments specified via `num_segments` or
3950    inferred from the last element in `segment_ids`.
3951  """
3952  return sparse_segment_sum(
3953      data, indices, segment_ids, name=name, num_segments=num_segments)
3954
3955
3956@tf_export(v1=["sparse.segment_mean", "sparse_segment_mean"])
3957@deprecation.deprecated_endpoints("sparse_segment_mean")
3958def sparse_segment_mean(data,
3959                        indices,
3960                        segment_ids,
3961                        name=None,
3962                        num_segments=None):
3963  r"""Computes the mean along sparse segments of a tensor.
3964
3965  Read [the section on
3966  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
3967  for an explanation of segments.
3968
3969  Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
3970  `data`'s first dimension, selecting a subset of dimension 0, specified by
3971  `indices`.
3972  `segment_ids` is allowed to have missing ids, in which case the output will
3973  be zeros at those indices. In those cases `num_segments` is used to determine
3974  the size of the output.
3975
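  For example, an illustrative sketch:

  ```python
  c = tf.constant([[1.0, 2.0, 3.0, 4.0],
                   [-1.0, -2.0, -3.0, -4.0],
                   [5.0, 6.0, 7.0, 8.0]])

  # Rows 0 and 1 are averaged into segment 0; row 2 forms segment 1.
  tf.sparse.segment_mean(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0. 0. 0. 0.]
  #     [5. 6. 7. 8.]]
  ```
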
3976  Args:
3977    data: A `Tensor` with data that will be assembled in the output.
3978    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
3979      `segment_ids`.
3980    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
3981      should be sorted and can be repeated.
3982    name: A name for the operation (optional).
3983    num_segments: An optional int32 scalar. Indicates the size of the output
3984      `Tensor`.
3985
3986  Returns:
3987    A `Tensor` with the same shape as `data`, except for dimension 0, which
3988    has size `k`, the number of segments specified via `num_segments` or
3989    inferred from the last element in `segment_ids`.
3990  """
3991  if num_segments is not None:
3992    return gen_math_ops.sparse_segment_mean_with_num_segments(
3993        data=data,
3994        indices=indices,
3995        segment_ids=segment_ids,
3996        num_segments=num_segments,
3997        name=name)
3998  else:
3999    return gen_math_ops.sparse_segment_mean(
4000        data=data, indices=indices, segment_ids=segment_ids, name=name)
4001
4002
4003@tf_export("sparse.segment_mean", v1=[])
4004def sparse_segment_mean_v2(data,
4005                           indices,
4006                           segment_ids,
4007                           num_segments=None,
4008                           name=None):
4009  r"""Computes the mean along sparse segments of a tensor.
4010
4011  Read [the section on
4012  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
4013  for an explanation of segments.
4014
4015  Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
4016  `data`'s first dimension, selecting a subset of dimension 0, specified by
4017  `indices`.
4018  `segment_ids` is allowed to have missing ids, in which case the output will
4019  be zeros at those indices. In those cases `num_segments` is used to determine
4020  the size of the output.
4021
4022  Args:
4023    data: A `Tensor` with data that will be assembled in the output.
4024    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
4025      `segment_ids`.
4026    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
4027      should be sorted and can be repeated.
4028    num_segments: An optional int32 scalar. Indicates the size of the output
4029      `Tensor`.
4030    name: A name for the operation (optional).
4031
4032  Returns:
4033    A `Tensor` with the same shape as `data`, except for dimension 0, which
4034    has size `k`, the number of segments specified via `num_segments` or
4035    inferred from the last element in `segment_ids`.
4036  """
4037  return sparse_segment_mean(
4038      data, indices, segment_ids, name=name, num_segments=num_segments)
4039
4040
4041@tf_export(v1=["sparse.segment_sqrt_n", "sparse_segment_sqrt_n"])
4042@deprecation.deprecated_endpoints("sparse_segment_sqrt_n")
4043def sparse_segment_sqrt_n(data,
4044                          indices,
4045                          segment_ids,
4046                          name=None,
4047                          num_segments=None):
4048  r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).
4049
4050  `N` is the size of the segment being reduced.
4051
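  For example, an illustrative sketch (outputs rounded):

  ```python
  c = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])

  # Rows 0 and 2 are summed into segment 0, then divided by sqrt(2).
  tf.sparse.segment_sqrt_n(c, tf.constant([0, 2]), tf.constant([0, 0]))
  # => approximately [[4.2426407, 5.6568542]]
  ```
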
4052  Args:
4053    data: A `Tensor` with data that will be assembled in the output.
4054    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
4055      `segment_ids`.
4056    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
4057      should be sorted and can be repeated.
4058    name: A name for the operation (optional).
4059    num_segments: An optional int32 scalar. Indicates the size of the output
4060      `Tensor`.
4061
4062  Returns:
4063    A `Tensor` with the same shape as `data`, except for dimension 0, which
4064    has size `k`, the number of segments specified via `num_segments` or
4065    inferred from the last element in `segment_ids`.
4066  """
4067  if num_segments is not None:
4068    return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(
4069        data=data,
4070        indices=indices,
4071        segment_ids=segment_ids,
4072        num_segments=num_segments,
4073        name=name)
4074  else:
4075    return gen_math_ops.sparse_segment_sqrt_n(
4076        data=data, indices=indices, segment_ids=segment_ids, name=name)
4077
4078
4079@tf_export("sparse.segment_sqrt_n", v1=[])
4080def sparse_segment_sqrt_n_v2(data,
4081                             indices,
4082                             segment_ids,
4083                             num_segments=None,
4084                             name=None):
4085  r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).
4086
4087  Read [the section on
4088  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
4089  for an explanation of segments.
4090
4091  Like `tf.sparse.segment_mean`, but instead of dividing by the size of the
4092  segment, `N`, divide by `sqrt(N)` instead.
4093
4094  Args:
4095    data: A `Tensor` with data that will be assembled in the output.
4096    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
4097      `segment_ids`.
4098    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
4099      should be sorted and can be repeated.
4100    num_segments: An optional int32 scalar. Indicates the size of the output
4101      `Tensor`.
4102    name: A name for the operation (optional).
4103
4104  Returns:
4105    A `Tensor` with the same shape as `data`, except for dimension 0, which
4106    has size `k`, the number of segments specified via `num_segments` or
4107    inferred from the last element in `segment_ids`.
4108  """
4109  return sparse_segment_sqrt_n(
4110      data, indices, segment_ids, name=name, num_segments=num_segments)
4111
4112
4113@tf_export("tensordot", "linalg.tensordot")
4114def tensordot(a, b, axes, name=None):
4115  r"""Tensor contraction of a and b along specified axes and outer product.
4116
4117  Tensordot (also known as tensor contraction) sums the product of elements
4118  from `a` and `b` over the indices specified by `a_axes` and `b_axes`.
4119  The lists `a_axes` and `b_axes` specify those pairs of axes along which to
4120  contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension
4121  as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists
4122  `a_axes` and `b_axes` must have identical length and consist of unique
4123  integers that specify valid axes for each of the tensors. Additionally,
4124  the outer product is supported by passing `axes=0`.
4125
4126  This operation corresponds to `numpy.tensordot(a, b, axes)`.
4127
4128  Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`
4129  is equivalent to matrix multiplication.
4130
4131  Example 2: When `a` and `b` are matrices (order 2), the case
4132  `axes = [[1], [0]]` is equivalent to matrix multiplication.
4133
4134  Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives
4135  the outer product, a tensor of order 4.
4136
4137  Example 4: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
4138  tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor
4139  \\(c_{jklm}\\) whose entry
4140  corresponding to the indices \\((j,k,l,m)\\) is given by:
4141
4142  \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).
4143
4144  In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.
4145
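  For instance, a minimal sketch of the cases above (the shapes are the point
  here; the values from `tf.range` are arbitrary):

  ```python
  a = tf.reshape(tf.range(6.0), [2, 3])
  b = tf.reshape(tf.range(6.0), [3, 2])
  tf.tensordot(a, b, axes=1)           # matrix multiplication, shape [2, 2]
  tf.tensordot(a, b, axes=[[1], [0]])  # the same contraction, shape [2, 2]
  tf.tensordot(a, b, axes=0)           # outer product, shape [2, 3, 3, 2]
  ```
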
4146  Args:
4147    a: `Tensor` of type `float32` or `float64`.
4148    b: `Tensor` with the same type as `a`.
4149    axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
4150      If axes is a scalar, sum over the last N axes of a and the first N axes of
4151      b in order. If axes is a list or `Tensor` the first and second row contain
4152      the set of unique integers specifying axes along which the contraction is
4153      computed, for `a` and `b`, respectively. The number of axes for `a` and
4154      `b` must be equal. If `axes=0`, computes the outer product between `a` and
4155      `b`.
4156    name: A name for the operation (optional).
4157
4158  Returns:
4159    A `Tensor` with the same type as `a`.
4160
4161  Raises:
4162    ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
4163    IndexError: If the values in axes exceed the rank of the corresponding
4164      tensor.
4165  """
4166
4167  def _tensordot_reshape(a, axes, flipped=False):
4168    """Helper method to perform transpose and reshape for contraction op.
4169
4170    This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
4171    using `array_ops.transpose` and `array_ops.reshape`. The method takes a
4172    tensor and performs the correct transpose and reshape operation for a given
4173    set of indices. It returns the reshaped tensor as well as a list of indices
4174    necessary to reshape the tensor again after matrix multiplication.
4175
4176    Args:
4177      a: `Tensor`.
4178      axes: List or `int32` `Tensor` of unique indices specifying valid axes of
4179        `a`.
4180      flipped: An optional `bool`. Defaults to `False`. If `True`, the method
4181        assumes that `a` is the second argument in the contraction operation.
4182
4183    Returns:
4184      A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
4185      the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
4186      either a list of integers or an `int32` `Tensor`, depending on whether
4187      the shape of a is fully specified, and free_dims_static is either a list
4188      of integers and None values, or None, representing the inferred
4189      static shape of the free dimensions
4190    """
4191    if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
4192      shape_a = a.get_shape().as_list()
4193      axes = [i if i >= 0 else i + len(shape_a) for i in axes]
4194      free = [i for i in xrange(len(shape_a)) if i not in axes]
4195      free_dims = [shape_a[i] for i in free]
4196      prod_free = int(np.prod([shape_a[i] for i in free]))
4197      prod_axes = int(np.prod([shape_a[i] for i in axes]))
4198      perm = list(axes) + free if flipped else free + list(axes)
4199      new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
4200      if (perm != np.arange(len(shape_a))).any():
4201        a_trans = array_ops.transpose(a, perm)
4202      else:
4203        a_trans = a
4204      if a_trans.get_shape().as_list() != new_shape:
4205        reshaped_a = array_ops.reshape(a_trans, new_shape)
4206      else:
4207        reshaped_a = a_trans
4208      return reshaped_a, free_dims, free_dims
4209    else:
4210      if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
4211        shape_a = a.get_shape().as_list()
4212        axes = [i if i >= 0 else i + len(shape_a) for i in axes]
4213        free = [i for i in xrange(len(shape_a)) if i not in axes]
4214        axes_dims = [shape_a[i] for i in axes]
4215        free_dims = [shape_a[i] for i in free]
4216        free_dims_static = free_dims
4217        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
4218        free = ops.convert_to_tensor(free, dtype=dtypes.int32, name="free")
4219        shape_a = array_ops.shape(a)
4220      else:
4221        free_dims_static = None
4222        shape_a = array_ops.shape(a)
4223        rank_a = array_ops.rank(a)
4224        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
4225        axes = array_ops.where(axes >= 0, axes, axes + rank_a)
4226        free, _ = array_ops.setdiff1d(range(rank_a), axes)
4227      free_dims = array_ops.gather(shape_a, free)
4228      axes_dims = array_ops.gather(shape_a, axes)
4229      prod_free_dims = reduce_prod(free_dims)
4230      prod_axes_dims = reduce_prod(axes_dims)
4231      if flipped:
4232        perm = array_ops.concat([axes, free], 0)
4233        new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
4234      else:
4235        perm = array_ops.concat([free, axes], 0)
4236        new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
4237      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
4238      return reshaped_a, free_dims, free_dims_static
4239
4240  def _tensordot_axes(a, axes):
4241    """Generates two sets of contraction axes for the two tensor arguments."""
4242    a_shape = a.get_shape()
4243    if isinstance(axes, compat.integral_types):
4244      if axes < 0:
4245        raise ValueError("'axes' must be at least 0.")
4246      if a_shape.ndims is not None:
4247        if axes > a_shape.ndims:
4248          raise ValueError("'axes' must not be larger than the number of "
4249                           "dimensions of tensor %s." % a)
4250        return (list(xrange(a_shape.ndims - axes,
4251                            a_shape.ndims)), list(xrange(axes)))
4252      else:
4253        rank = array_ops.rank(a)
4254        return (range(rank - axes, rank,
4255                      dtype=dtypes.int32), range(axes, dtype=dtypes.int32))
4256    elif isinstance(axes, (list, tuple)):
4257      if len(axes) != 2:
4258        raise ValueError("'axes' must be an integer or have length 2.")
4259      a_axes = axes[0]
4260      b_axes = axes[1]
4261      if isinstance(a_axes, compat.integral_types) and \
4262          isinstance(b_axes, compat.integral_types):
4263        a_axes = [a_axes]
4264        b_axes = [b_axes]
4265      if len(a_axes) != len(b_axes):
4266        raise ValueError(
4267            "Different number of contraction axes 'a' and 'b', %s != %s." %
4268            (len(a_axes), len(b_axes)))
4269      return a_axes, b_axes
4270    else:
4271      axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
4272      return axes[0], axes[1]
4273
4274  with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
4275    a = ops.convert_to_tensor(a, name="a")
4276    b = ops.convert_to_tensor(b, name="b")
4277    a_axes, b_axes = _tensordot_axes(a, axes)
4278    a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
4279    b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(
4280        b, b_axes, True)
4281    ab_matmul = matmul(a_reshape, b_reshape)
4282    if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
4283      if (ab_matmul.get_shape().is_fully_defined() and
4284          ab_matmul.get_shape().as_list() == a_free_dims + b_free_dims):
4285        return ab_matmul
4286      else:
4287        return array_ops.reshape(
4288            ab_matmul, a_free_dims + b_free_dims, name=name)
4289    else:
4290      a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
4291      b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
4292      product = array_ops.reshape(
4293          ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
4294      if a_free_dims_static is not None and b_free_dims_static is not None:
4295        product.set_shape(a_free_dims_static + b_free_dims_static)
4296      return product
4297
4298
@tf_export("math.polyval")
def polyval(coeffs, x, name=None):
  r"""Computes the elementwise value of a polynomial.

  If `x` is a tensor and `coeffs` is a list of n + 1 tensors,
  this function returns the value of the n-th order polynomial

     p(x) = coeffs[n] + coeffs[n-1] * x + ... + coeffs[0] * x**n

  evaluated using Horner's method, i.e.

     p(x) = coeffs[n] + x * (coeffs[n-1] + ... + x * (coeffs[1] +
            x * coeffs[0]))

  Usage Example:

  >>> tf.math.polyval([2, 1, 0], 3) # evaluates 2 * (3**2) + 1 * (3**1) + 0 * (3**0)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>

  `tf.math.polyval` can also be used in polynomial regression. Taking
  advantage of this function can facilitate writing a polynomial equation
  as compared to explicitly writing it out, especially for higher degree
  polynomials.

  >>> x = tf.constant(3)
  >>> theta1 = tf.Variable(2)
  >>> theta2 = tf.Variable(1)
  >>> theta3 = tf.Variable(0)
  >>> tf.math.polyval([theta1, theta2, theta3], x)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>

  Args:
    coeffs: A list of `Tensor` representing the coefficients of the polynomial.
    x: A `Tensor` representing the variable of the polynomial.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same shape as the expression p(x), with the usual
    broadcasting rules for element-wise addition and multiplication applied.

  @compatibility(numpy)
  Equivalent to numpy.polyval.
  @end_compatibility
  """

  with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if len(coeffs) < 1:
      return array_ops.zeros_like(x, name=name)
    coeffs = [
        ops.convert_to_tensor(coeff, name=("coeff_%d" % index))
        for index, coeff in enumerate(coeffs)
    ]
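    # Evaluate with Horner's method: fold in coefficients from highest to
    # lowest order, so p ends up as coeffs[n] + x * (coeffs[n-1] + ... ).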
    p = coeffs[0]
    for c in coeffs[1:]:
      p = c + p * x
    return p


@tf_export("math.reciprocal_no_nan")
def reciprocal_no_nan(x, name=None):
  """Performs a safe reciprocal operation, element-wise.

  If a particular element is zero, the reciprocal for that element is
  also set to zero.

  For example:
  ```python
  x = tf.constant([2.0, 0.5, 0, 1], dtype=tf.float32)
  tf.math.reciprocal_no_nan(x)  # [0.5, 2.0, 0.0, 1.0]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `complex64` or
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape and type as `x`.

  Raises:
    TypeError: x must be of a valid dtype.

  """

  with ops.name_scope(name, "reciprocal_no_nan", [x]) as scope:
    x = ops.convert_to_tensor(x, name="x")
    one = constant_op.constant(1, dtype=x.dtype.base_dtype, name="one")
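    # div_no_nan returns 0 wherever the denominator is 0, which gives the
    # "safe" reciprocal described above.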
    return gen_math_ops.div_no_nan(one, x, name=scope)


@tf_export("math.xlog1py")
@dispatch.add_dispatch_support
def xlog1py(x, y, name=None):
  r"""Compute x * log1p(y).

  Given `x` and `y`, compute `x * log1p(y)`. This function safely returns
  zero when `x = 0`, no matter what the value of `y` is.

  Example:

  >>> tf.math.xlog1py(0., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.>
  >>> tf.math.xlog1py(1., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.6931472>
  >>> tf.math.xlog1py(2., 2.)
  <tf.Tensor: shape=(), dtype=float32, numpy=2.1972246>
  >>> tf.math.xlog1py(0., -1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.>

  Args:
    x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    y: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    name: A name for the operation (optional).

  Returns:
    `x * log1p(y)`.

  @compatibility(scipy)
  Equivalent to scipy.special.xlog1py
  @end_compatibility
  """
  with ops.name_scope(name, "xlog1py", [x, y]):
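    # The kernel returns 0 whenever x == 0, even where log1p(y) alone would
    # be -inf or nan (e.g. y == -1), matching the doctest above.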
    return gen_math_ops.xlog1py(x, y)


@tf_export("math.erfinv")
@dispatch.add_dispatch_support
def erfinv(x, name=None):
  """Compute inverse error function.

  Given `x`, compute the inverse error function of `x`. This function
  is the inverse of `tf.math.erf`.

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    Inverse error function of `x`.
  """
  with ops.name_scope(name, "erfinv", [x]):
    return gen_math_ops.erfinv(x)


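# Note (illustrative): erfinv is finite only on the open interval (-1, 1);
# within it, tf.math.erf(tf.math.erfinv(x)) recovers x, and erfinv(0.) == 0.
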
@tf_export("math.ndtri")
@dispatch.add_dispatch_support
def ndtri(x, name=None):
  """Compute quantile of Standard Normal.

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).

  Returns:
    Quantile of the standard normal distribution evaluated at `x`, i.e. the
    inverse of its cumulative distribution function.
  """
  with ops.name_scope(name, "ndtri", [x]):
    return gen_math_ops.ndtri(x)


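# Note (illustrative): ndtri inverts the standard normal CDF, so
# tf.math.ndtri(0.5) evaluates to 0.0 (the median); inputs outside [0, 1]
# yield nan.
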
@tf_export("math.ceil", v1=["math.ceil", "ceil"])
@deprecation.deprecated_endpoints("ceil")
@dispatch.add_dispatch_support
def ceil(x, name=None):
  """Return the ceiling of the input, element-wise.

  For example:

  >>> tf.math.ceil([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
  <tf.Tensor: shape=(7,), dtype=float32,
  numpy=array([-1., -1., -0.,  1.,  2.,  2.,  2.], dtype=float32)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to np.ceil
  @end_compatibility
  """
  return gen_math_ops.ceil(x, name)


@tf_export("math.sqrt", "sqrt")
@dispatch.add_dispatch_support
def sqrt(x, name=None):  # pylint: disable=redefined-builtin
  r"""Computes element-wise square root of the input tensor.

  Note: This operation does not support integer types.

  >>> x = tf.constant([[4.0], [16.0]])
  >>> tf.sqrt(x)
  <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
    array([[2.],
           [4.]], dtype=float32)>
  >>> y = tf.constant([[-4.0], [16.0]])
  >>> tf.sqrt(y)
  <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
    array([[nan],
           [ 4.]], dtype=float32)>
  >>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128)
  >>> tf.sqrt(z)
  <tf.Tensor: shape=(2, 1), dtype=complex128, numpy=
    array([[0.0+1.j],
           [4.0+0.j]])>

  Note: In order to support complex numbers, provide an input tensor
  of `complex64` or `complex128`.

  Args:
    x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of the same size, type and sparsity as `x`.
4520  """
4521  return gen_math_ops.sqrt(x, name)
4522
4523
# pylint: disable=g-docstring-has-escape
@tf_export("math.exp", "exp")
@dispatch.add_dispatch_support
def exp(x, name=None):
  """Computes exponential of x element-wise.  \\(y = e^x\\).

  This function computes the exponential of the input tensor element-wise,
  i.e. `math.exp(x)` or \\(e^x\\), where `x` is the input tensor.
  \\(e\\) denotes Euler's number and is approximately equal to 2.718281.
  Output is positive for any real input.

  >>> x = tf.constant(2.0)
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=7.389056>

  >>> x = tf.constant([2.0, 8.0])
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([   7.389056, 2980.958   ], dtype=float32)>

  For complex numbers, the exponential value is calculated as
  \\(e^{x+iy} = {e^x}{e^{iy}} = {e^x}(\\cos(y) + i\\sin(y))\\)

  For `1+1j` the value would be computed as:
  \\(e^1(\\cos(1) + i\\sin(1)) = 2.7182817 \\times (0.5403023+0.84147096j)\\)

  >>> x = tf.constant(1 + 1j)
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(), dtype=complex128,
  numpy=(1.4686939399158851+2.2873552871788423j)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to np.exp
  @end_compatibility
  """
  return gen_math_ops.exp(x, name)


# pylint: enable=g-docstring-has-escape


@tf_export("math.sobol_sample")
def sobol_sample(dim, num_results, skip=0, dtype=dtypes.float32, name=None):
  """Generates points from the Sobol sequence.

  Creates a Sobol sequence with `num_results` samples. Each sample has
  dimension `dim`. Skips the first `skip` samples.

  Args:
    dim: Positive scalar `Tensor` representing each sample's dimension.
    num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol
        points to return in the output.
    skip: (Optional) Positive scalar `Tensor` of dtype int32. The number of
        initial points of the Sobol sequence to skip. Default value is 0.
    dtype: (Optional) The `tf.DType` of the sample. One of: `tf.float32` or
        `tf.float64`. Defaults to `tf.float32`.
    name: (Optional) Python `str` name prefixed to ops created by this
        function.

  Returns:
    `Tensor` of samples from the Sobol sequence with `shape`
    [num_results, dim].
  """
  with ops.name_scope(name, "sobol", [dim, num_results, skip]):
    return gen_math_ops.sobol_sample(dim, num_results, skip, dtype=dtype)

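# Example (illustrative): draw three 2-D quasi-random points.
#
#   points = tf.math.sobol_sample(dim=2, num_results=3)
#   # `points` is a float32 tensor of shape [3, 2] with entries in the
#   # unit hypercube [0, 1).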