# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# pylint: disable=g-short-docstring-punctuation
"""Sparse Tensor Representation.

See also `tf.sparse.SparseTensor`.
"""

import numbers

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_sparse_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import get_canonical_name_for_symbol
from tensorflow.python.util.tf_export import tf_export


def _convert_to_sparse_tensor(sp_input):
  """Convert `sp_input` to `SparseTensor` and return it.

  Args:
    sp_input: `SparseTensor` or `SparseTensorValue`.

  Returns:
    `sp_input` converted to `SparseTensor`.

  Raises:
    TypeError: if `sp_input` is neither `SparseTensor` nor `SparseTensorValue`.
  """
  if isinstance(sp_input, sparse_tensor.SparseTensorValue):
    return sparse_tensor.SparseTensor.from_value(sp_input)
  if not isinstance(sp_input, sparse_tensor.SparseTensor):
    raise TypeError("Input must be a SparseTensor.")
  return sp_input


def _convert_to_sparse_tensors(sp_inputs):
  """Convert `sp_inputs` to `SparseTensor` objects and return them.

  Args:
    sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`
      objects.

  Returns:
    `sp_inputs` converted to `SparseTensor` objects.

  Raises:
    TypeError: if any item in `sp_inputs` is neither `SparseTensor` nor
      `SparseTensorValue`, or if `sp_inputs` is not a list or tuple.
  """
  if isinstance(sp_inputs, list):
    return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]
  if isinstance(sp_inputs, tuple):
    return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)
  raise TypeError("Inputs must be a list or tuple.")


def _make_int64_tensor(value, name):
  if isinstance(value, compat.integral_types):
    return ops.convert_to_tensor(value, name=name, dtype=dtypes.int64)
  if not isinstance(value, ops.Tensor):
    raise TypeError("{} must be an integer value".format(name))
  if value.dtype == dtypes.int64:
    return value
  return math_ops.cast(value, dtypes.int64)


@tf_export("sparse.from_dense")
def from_dense(tensor, name=None):
  """Converts a dense tensor into a sparse tensor.

  Only elements not equal to zero will be present in the result. The resulting
  `SparseTensor` has the same dtype and shape as the input.

  >>> sp = tf.sparse.from_dense([0, 0, 3, 0, 1])
  >>> sp.shape.as_list()
  [5]
  >>> sp.values.numpy()
  array([3, 1], dtype=int32)
  >>> sp.indices.numpy()
  array([[2],
         [4]])

  Args:
    tensor: A dense `Tensor` to be converted to a `SparseTensor`.
    name: Optional name for the op.

  Returns:
    The `SparseTensor`.
  """
  with ops.name_scope(name, "dense_to_sparse"):
    tensor = ops.convert_to_tensor(tensor)
    indices = array_ops.where_v2(
        math_ops.not_equal(tensor, array_ops.zeros_like(tensor)))
    values = array_ops.gather_nd(tensor, indices)
    shape = array_ops.shape(tensor, out_type=dtypes.int64)
    return sparse_tensor.SparseTensor(indices, values, shape)


@tf_export("sparse.expand_dims")
def sparse_expand_dims(sp_input, axis=None, name=None):
  """Returns a tensor with a length-1 axis inserted at index `axis`.

  Given a tensor `input`, this operation inserts a dimension of length 1 at the
  dimension index `axis` of `input`'s shape. The dimension index follows python
  indexing rules: it's zero-based, and a negative index is counted backward
  from the end.

  This operation is useful to:

  * Add an outer "batch" dimension to a single element.
  * Align axes for broadcasting.
  * Add an inner vector-length axis to a tensor of scalars.

  For example:

  If you have a sparse tensor with shape `[height, width, depth]`:

  >>> sp = tf.sparse.SparseTensor(indices=[[3,4,1]], values=[7,],
  ...                             dense_shape=[10,10,3])

  You can add an outer `batch` axis by passing `axis=0`:

  >>> tf.sparse.expand_dims(sp, axis=0).shape.as_list()
  [1, 10, 10, 3]

  The new axis location matches Python `list.insert(axis, 1)`:

  >>> tf.sparse.expand_dims(sp, axis=1).shape.as_list()
  [10, 1, 10, 3]

  Following standard python indexing rules, a negative `axis` counts from the
  end so `axis=-1` adds an innermost dimension:

  >>> tf.sparse.expand_dims(sp, axis=-1).shape.as_list()
  [10, 10, 3, 1]

  Note: Unlike `tf.expand_dims` this function includes a default value for the
  `axis`: `-1`. So if `axis` is not specified, an inner dimension is added.

  >>> sp.shape.as_list()
  [10, 10, 3]
  >>> tf.sparse.expand_dims(sp).shape.as_list()
  [10, 10, 3, 1]

  This operation requires that `axis` is a valid index for `input.shape`,
  following python indexing rules:

  ```
  -1-tf.rank(input) <= axis <= tf.rank(input)
  ```

  This operation is related to:

  * `tf.expand_dims`, which provides this functionality for dense tensors.
  * `tf.squeeze`, which removes dimensions of size 1 from dense tensors.
  * `tf.sparse.reshape`, which provides more flexible reshaping capability.

  Args:
    sp_input: A `SparseTensor`.
    axis: 0-D (scalar). Specifies the dimension index at which to expand the
      shape of `input`. Must be in the range `[-rank(sp_input) - 1,
      rank(sp_input)]`. Defaults to `-1`.
    name: The name of the output `SparseTensor`.

  Returns:
    A `SparseTensor` with the same data as `sp_input`, but its shape has an
    additional dimension of size 1 added.
  """
  rank = sp_input.dense_shape.get_shape()[0]
  if rank is None:
    rank = array_ops.shape(sp_input.dense_shape)[0]
  axis = -1 if axis is None else axis

  with ops.name_scope(name, default_name="expand_dims", values=[sp_input]):
    if isinstance(axis, compat.integral_types):
      axis = ops.convert_to_tensor(axis, name="axis", dtype=dtypes.int32)
    elif not isinstance(axis, ops.Tensor):
      raise TypeError("axis must be an integer value in range [-rank(sp_input)"
                      " - 1, rank(sp_input)]")

    # Convert axis to a positive value if it is negative.
    axis = array_ops.where_v2(axis >= 0, axis, axis + rank + 1)

    # Create the new column of indices for the sparse tensor by slicing
    # the indices and inserting a new column of indices for the new dimension.
    column_size = array_ops.shape(sp_input.indices)[0]
    new_index = array_ops.zeros([column_size, 1], dtype=dtypes.int64)
    indices_before = array_ops.slice(sp_input.indices, [0, 0], [-1, axis])
    indices_after = array_ops.slice(sp_input.indices, [0, axis], [-1, -1])
    indices = array_ops.concat(
        [indices_before, new_index, indices_after], axis=1)

    # Create the new dense shape by splicing the tensor [1] in the correct
    # dimension of the existing shape.
    shape_before = array_ops.slice(sp_input.dense_shape, [0], [axis])
    shape_after = array_ops.slice(sp_input.dense_shape, [axis], [-1])
    new_shape = ops.convert_to_tensor([1], name="new_shape", dtype=dtypes.int64)
    shape = array_ops.concat([shape_before, new_shape, shape_after], axis=0)

    # Create the output sparse tensor.
    return sparse_tensor.SparseTensor(
        indices=indices, values=sp_input.values, dense_shape=shape)


@tf_export("sparse.eye")
def sparse_eye(num_rows,
               num_columns=None,
               dtype=dtypes.float32,
               name=None):
  """Creates a two-dimensional sparse tensor with ones along the diagonal.

  Args:
    num_rows: Non-negative integer or `int32` scalar `tensor` giving the number
      of rows in the resulting matrix.
    num_columns: Optional non-negative integer or `int32` scalar `tensor` giving
      the number of columns in the resulting matrix. Defaults to `num_rows`.
    dtype: The type of element in the resulting `Tensor`.
    name: A name for this `Op`. Defaults to "eye".

  Returns:
    A `SparseTensor` of shape [num_rows, num_columns] with ones along the
    diagonal.
  """
  with ops.name_scope(name, default_name="eye", values=[num_rows, num_columns]):
    num_rows = _make_int64_tensor(num_rows, "num_rows")
    num_columns = num_rows if num_columns is None else _make_int64_tensor(
        num_columns, "num_columns")

    # Create the sparse tensor.
    diag_size = math_ops.minimum(num_rows, num_columns)
    diag_range = math_ops.range(diag_size, dtype=dtypes.int64)

    return sparse_tensor.SparseTensor(
        indices=array_ops.stack([diag_range, diag_range], axis=1),
        values=array_ops.ones(diag_size, dtype=dtype),
        dense_shape=[num_rows, num_columns])

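# A minimal usage sketch for `sparse_eye` (hedged; assumes eager execution and
# the public `tf.sparse.eye` endpoint exported above). With `num_rows=3` and
# `num_columns=4`, the diagonal has min(3, 4) = 3 entries:
#
#   import tensorflow as tf
#   eye = tf.sparse.eye(3, num_columns=4)
#   # eye.indices     -> [[0, 0], [1, 1], [2, 2]]
#   # eye.values      -> [1., 1., 1.]  (float32 by default)
#   # eye.dense_shape -> [3, 4]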

# pylint: disable=protected-access
@tf_export(v1=["sparse.concat", "sparse_concat"])
@deprecation.deprecated_endpoints("sparse_concat")
@deprecation.deprecated_args(
    None, "concat_dim is deprecated, use axis instead", "concat_dim")
def sparse_concat(axis,
                  sp_inputs,
                  name=None,
                  expand_nonconcat_dim=False,
                  concat_dim=None,
                  expand_nonconcat_dims=None):
  """Concatenates a list of `SparseTensor`s along the specified dimension.

  Concatenation is with respect to the dense versions of each sparse input.
  It is assumed that each input is a `SparseTensor` whose elements are ordered
  along increasing dimension number.

  If `expand_nonconcat_dim` is False, all inputs' shapes must match, except for
  the concat dimension. If `expand_nonconcat_dim` is True, then inputs' shapes
  are allowed to vary among all inputs.

  The `indices`, `values`, and `shapes` lists must have the same length.

  If `expand_nonconcat_dim` is False, then the output shape is identical to the
  inputs', except along the concat dimension, where it is the sum of the inputs'
  sizes along that dimension.

  If `expand_nonconcat_dim` is True, then the output shape along the non-concat
  dimensions will be expanded to the largest among all inputs, and it is the
  sum of the inputs' sizes along the concat dimension.

  The output elements will be resorted to preserve the sort order along
  increasing dimension number.

  This op runs in `O(M log M)` time, where `M` is the total number of non-empty
  values across all inputs. This is due to the need for an internal sort in
  order to concatenate efficiently across an arbitrary dimension.

  For example, if `axis = 1` and the inputs are

      sp_inputs[0]: shape = [2, 3]
      [0, 2]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      sp_inputs[1]: shape = [2, 4]
      [0, 1]: "d"
      [0, 2]: "e"

  then the output will be

      shape = [2, 7]
      [0, 2]: "a"
      [0, 4]: "d"
      [0, 5]: "e"
      [1, 0]: "b"
      [1, 1]: "c"

  Graphically this is equivalent to doing

      [    a] concat [  d e  ] = [    a   d e  ]
      [b c  ]        [       ]   [b c          ]

  Another example, if `axis = 1` and the inputs are

      sp_inputs[0]: shape = [3, 3]
      [0, 2]: "a"
      [1, 0]: "b"
      [2, 1]: "c"

      sp_inputs[1]: shape = [2, 4]
      [0, 1]: "d"
      [0, 2]: "e"

  if `expand_nonconcat_dim = False`, this will result in an error. But if
  `expand_nonconcat_dim = True`, this will result in:

      shape = [3, 7]
      [0, 2]: "a"
      [0, 4]: "d"
      [0, 5]: "e"
      [1, 0]: "b"
      [2, 1]: "c"

  Graphically this is equivalent to doing

      [    a] concat [  d e  ] = [    a   d e  ]
      [b    ]        [       ]   [b            ]
      [  c  ]                    [  c          ]


  Args:
    axis: Dimension to concatenate along. Must be in range [-rank, rank),
      where rank is the number of dimensions in each input `SparseTensor`.
    sp_inputs: List of `SparseTensor` to concatenate.
    name: A name prefix for the returned tensors (optional).
    expand_nonconcat_dim: Whether to allow the expansion in the non-concat
      dimensions. Defaults to False.
    concat_dim: The old (deprecated) name for axis.
    expand_nonconcat_dims: alias for expand_nonconcat_dim

  Returns:
    A `SparseTensor` with the concatenated output.

  Raises:
    TypeError: If `sp_inputs` is not a list of `SparseTensor`.
  """
  expand_nonconcat_dim = deprecation.deprecated_argument_lookup(
      "expand_nonconcat_dims", expand_nonconcat_dims,
      "expand_nonconcat_dim", expand_nonconcat_dim)
  if expand_nonconcat_dims is not None:
    expand_nonconcat_dim = expand_nonconcat_dims
  axis = deprecation.deprecated_argument_lookup("axis", axis, "concat_dim",
                                                concat_dim)
  return sparse_concat_v2(axis, sp_inputs, expand_nonconcat_dim, name)


@tf_export("sparse.concat", v1=[])
def sparse_concat_v2(axis, sp_inputs, expand_nonconcat_dims=False, name=None):  # pylint: disable=missing-docstring
  sp_inputs = _convert_to_sparse_tensors(sp_inputs)

  if len(sp_inputs) == 1:  # Degenerate case of one tensor.
    return sp_inputs[0]

  inds = [sp_input.indices for sp_input in sp_inputs]
  vals = [sp_input.values for sp_input in sp_inputs]
  shapes = [sp_input.dense_shape for sp_input in sp_inputs]

  if expand_nonconcat_dims:
    max_shape = math_ops.reduce_max(
        array_ops.concat(
            [array_ops.reshape(shape, [1, -1]) for shape in shapes], 0), 0)
    shapes = [
        array_ops.concat([
            max_shape[:axis], shape[-1:]
            if axis == -1 else shape[axis:axis + 1], []
            if axis == -1 else max_shape[axis + 1:]
        ], 0) for shape in shapes
    ]

  output_ind, output_val, output_shape = (
      gen_sparse_ops.sparse_concat(inds, vals, shapes, axis, name=name))

  input_shapes = [inp.shape for inp in sp_inputs]
  if all(shape.rank is not None for shape in input_shapes):
    if expand_nonconcat_dims:
      static_output_shape = []
      for dim in range(input_shapes[0].rank):
        static_output_shape.append(
            max(tensor_shape.dimension_at_index(shape, dim)
                for shape in input_shapes))
    else:
      static_output_shape = input_shapes[0].as_list()
    static_output_shape[axis] = sum(
        tensor_shape.dimension_at_index(shape, axis)
        for shape in input_shapes)
  else:
    static_output_shape = tensor_shape.unknown_shape()
  if all(shape.is_fully_defined() for shape in input_shapes):
    output_shape = ops.convert_to_tensor(static_output_shape,
                                         dtype=dtypes.int64)
    return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
  else:
    # If some shapes are only partially defined, we cannot update the
    # output_shape tensor value. Instead we update output._dense_shape_default,
    # which populates output.shape on a best-effort basis.
    output = sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
    output._dense_shape_default = tensor_shape.TensorShape(static_output_shape)
    return output


sparse_concat_v2.__doc__ = sparse_concat.__doc__.replace(
    "    concat_dim: The old (deprecated) name for axis.\n",
    "").replace("    expand_nonconcat_dims: alias for expand_nonconcat_dim\n",
                "")

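# A minimal usage sketch for `tf.sparse.concat` mirroring the docstring example
# above (hedged; assumes eager execution):
#
#   import tensorflow as tf
#   a = tf.sparse.SparseTensor(indices=[[0, 2], [1, 0], [1, 1]],
#                              values=['a', 'b', 'c'], dense_shape=[2, 3])
#   b = tf.sparse.SparseTensor(indices=[[0, 1], [0, 2]],
#                              values=['d', 'e'], dense_shape=[2, 4])
#   c = tf.sparse.concat(axis=1, sp_inputs=[a, b])
#   # c.dense_shape -> [2, 7]; entries at [0, 2]: 'a', [0, 4]: 'd',
#   # [0, 5]: 'e', [1, 0]: 'b', [1, 1]: 'c'.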

@tf_export(v1=["sparse.add", "sparse_add"])
@deprecation.deprecated_endpoints("sparse_add")
@deprecation.deprecated_args(
    None, "thresh is deprecated, use threshold instead", "thresh")
def sparse_add(a, b, threshold=None, thresh=None):
  """Adds two tensors, at least one of which is a `SparseTensor`.

  If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`.  If
  both arguments are `SparseTensor`s, this returns a `SparseTensor`.  The order
  of arguments does not matter.  Use vanilla `tf.add()` for adding two dense
  `Tensor`s.

  The shapes of the two operands must match: broadcasting is not supported.

  The indices of any input `SparseTensor` are assumed ordered in standard
  lexicographic order.  If this is not the case, before this step run
  `SparseReorder` to restore index ordering.

  If both arguments are sparse, we perform "clipping" as follows.  By default,
  if two values sum to zero at some index, the output `SparseTensor` would still
  include that particular location in its index, storing a zero in the
  corresponding value slot.  To override this, callers can specify `thresh`,
  indicating that if the sum has a magnitude strictly smaller than `thresh`, its
  corresponding value and index would then not be included.  In particular,
  `thresh == 0.0` (default) means everything is kept and actual thresholding
  happens only for a positive value.

  For example, suppose the logical sum of two sparse operands is (densified):

      [       2]
      [.1     0]
      [ 6   -.2]

  Then,

  * `thresh == 0` (the default): all 5 index/value pairs will be returned.
  * `thresh == 0.11`: only .1 and 0 will vanish, and the remaining three
      index/value pairs will be returned.
  * `thresh == 0.21`: .1, 0, and -.2 will vanish.

  Args:
    a: The first operand; `SparseTensor` or `Tensor`.
    b: The second operand; `SparseTensor` or `Tensor`. At least one operand
      must be sparse.
    threshold: An optional 0-D `Tensor` (defaults to `0`). The magnitude
      threshold that determines if an output value/index pair takes space. Its
      dtype should match that of the values if they are real; if the latter are
      complex64/complex128, then the dtype should be float32/float64,
      correspondingly.
    thresh: Deprecated alias for `threshold`.

  Returns:
    A `SparseTensor` or a `Tensor`, representing the sum.

  Raises:
    TypeError: If both `a` and `b` are `Tensor`s.  Use `tf.add()` instead.
  """
  threshold = deprecation.deprecated_argument_lookup("threshold", threshold,
                                                     "thresh", thresh)
  if threshold is None:
    threshold = 0
  return sparse_add_v2(a, b, threshold)


@tf_export("sparse.add", v1=[])
def sparse_add_v2(a, b, threshold=0):
  """Adds two tensors, at least one of which is a `SparseTensor`.

  If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`.  If
  both arguments are `SparseTensor`s, this returns a `SparseTensor`.  The order
  of arguments does not matter.  Use vanilla `tf.add()` for adding two dense
  `Tensor`s.

  The shapes of the two operands must match: broadcasting is not supported.

  The indices of any input `SparseTensor` are assumed ordered in standard
  lexicographic order.  If this is not the case, before this step run
  `SparseReorder` to restore index ordering.

  If both arguments are sparse, we perform "clipping" as follows.  By default,
  if two values sum to zero at some index, the output `SparseTensor` would still
  include that particular location in its index, storing a zero in the
  corresponding value slot.  To override this, callers can specify `threshold`,
  indicating that if the sum has a magnitude strictly smaller than `threshold`,
  its corresponding value and index would then not be included.  In particular,
  `threshold == 0.0` (default) means everything is kept and actual thresholding
  happens only for a positive value.

  For example, suppose the logical sum of two sparse operands is (densified):

      [       2]
      [.1     0]
      [ 6   -.2]

  Then,

  * `threshold == 0` (the default): all 5 index/value pairs will be
      returned.
  * `threshold == 0.11`: only .1 and 0 will vanish, and the remaining three
      index/value pairs will be returned.
  * `threshold == 0.21`: .1, 0, and -.2 will vanish.

  Args:
    a: The first operand; `SparseTensor` or `Tensor`.
    b: The second operand; `SparseTensor` or `Tensor`. At least one operand
      must be sparse.
    threshold: A 0-D `Tensor`. The magnitude threshold that determines if an
      output value/index pair takes space. Its dtype should match that of the
      values if they are real; if the latter are complex64/complex128, then the
      dtype should be float32/float64, correspondingly.

  Returns:
    A `SparseTensor` or a `Tensor`, representing the sum.

  Raises:
    TypeError: If both `a` and `b` are `Tensor`s.  Use `tf.add()` instead.
  """
  sparse_classes = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)
  if not any(isinstance(inp, sparse_classes) for inp in [a, b]):
    raise TypeError("At least one input should be SparseTensor; do you mean to"
                    " use tf.add()?")

  if all(isinstance(inp, sparse_classes) for inp in [a, b]):
    a = _convert_to_sparse_tensor(a)
    b = _convert_to_sparse_tensor(b)
    threshold = ops.convert_to_tensor(
        threshold, dtype=a.values.dtype.real_dtype.base_dtype, name="threshold")
    output_ind, output_val, output_shape = (
        gen_sparse_ops.sparse_add(a.indices, a.values, a.dense_shape,
                                  b.indices, b.values, b.dense_shape,
                                  threshold))

    # Attempt to get output_shape statically.
    a.get_shape().assert_is_compatible_with(b.get_shape())
    static_shape = array_ops.broadcast_static_shape(a.get_shape(),
                                                    b.get_shape())
    if static_shape.is_fully_defined():
      output_shape = static_shape.as_list()

    return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
  else:
    # Swap to make `a` the SparseTensor.
    if isinstance(b, sparse_classes):
      a, b = b, a
    return gen_sparse_ops.sparse_tensor_dense_add(a.indices, a.values,
                                                  a.dense_shape, b)

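# A minimal usage sketch for `tf.sparse.add` with a threshold (hedged; assumes
# eager execution). The [0, 0] entries sum to exactly 0, so they are kept with
# the default threshold but dropped once the threshold exceeds 0:
#
#   import tensorflow as tf
#   a = tf.sparse.SparseTensor([[0, 0], [1, 1]], [1.0, 2.0], [2, 2])
#   b = tf.sparse.SparseTensor([[0, 0], [1, 0]], [-1.0, 3.0], [2, 2])
#   tf.sparse.add(a, b)                 # keeps [0, 0]: 0.0, [1, 0]: 3.0,
#                                       # [1, 1]: 2.0
#   tf.sparse.add(a, b, threshold=0.5)  # drops the zero at [0, 0]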

@tf_export("sparse.cross")
def sparse_cross(inputs, name=None, separator=None):
  """Generates sparse cross from a list of sparse and dense tensors.

  For example, if the inputs are

      * inputs[0]: SparseTensor with shape = [2, 2]
        [0, 0]: "a"
        [1, 0]: "b"
        [1, 1]: "c"
      * inputs[1]: SparseTensor with shape = [2, 1]
        [0, 0]: "d"
        [1, 0]: "e"
      * inputs[2]: Tensor [["f"], ["g"]]

  then the output will be:

      shape = [2, 2]
      [0, 0]: "a_X_d_X_f"
      [1, 0]: "b_X_e_X_g"
      [1, 1]: "c_X_e_X_g"

  Customized separator "_Y_":

  >>> inp_0 = tf.constant([['a'], ['b']])
  >>> inp_1 = tf.constant([['c'], ['d']])
  >>> output = tf.sparse.cross([inp_0, inp_1], separator='_Y_')
  >>> output.values
  <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'a_Y_c', b'b_Y_d'],
    dtype=object)>


  Args:
    inputs: An iterable of `Tensor` or `SparseTensor`.
    name: Optional name for the op.
    separator: A string added between each string being joined. Defaults to
      '_X_'.

  Returns:
    A `SparseTensor` of type `string`.
  """
  if separator is None:
    separator = "_X_"
  separator = ops.convert_to_tensor(separator, dtypes.string)
  indices, values, shapes, dense_inputs = _sparse_cross_internal_v2(inputs)
  indices_out, values_out, shape_out = gen_sparse_ops.sparse_cross_v2(
      indices=indices,
      values=values,
      shapes=shapes,
      dense_inputs=dense_inputs,
      sep=separator,
      name=name)
  return sparse_tensor.SparseTensor(indices_out, values_out, shape_out)


_sparse_cross = sparse_cross


@tf_export("sparse.cross_hashed")
def sparse_cross_hashed(inputs, num_buckets=0, hash_key=None, name=None):
  """Generates hashed sparse cross from a list of sparse and dense tensors.

  For example, if the inputs are

      * inputs[0]: SparseTensor with shape = [2, 2]
        [0, 0]: "a"
        [1, 0]: "b"
        [1, 1]: "c"
      * inputs[1]: SparseTensor with shape = [2, 1]
        [0, 0]: "d"
        [1, 0]: "e"
      * inputs[2]: Tensor [["f"], ["g"]]

  then the output will be:

      shape = [2, 2]
      [0, 0]: FingerprintCat64(
                  Fingerprint64("f"), FingerprintCat64(
                      Fingerprint64("d"), Fingerprint64("a")))
      [1, 0]: FingerprintCat64(
                  Fingerprint64("g"), FingerprintCat64(
                      Fingerprint64("e"), Fingerprint64("b")))
      [1, 1]: FingerprintCat64(
                  Fingerprint64("g"), FingerprintCat64(
                      Fingerprint64("e"), Fingerprint64("c")))

  Args:
    inputs: An iterable of `Tensor` or `SparseTensor`.
    num_buckets: An `int` that is `>= 0`.
      output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
    hash_key: Integer hash_key that will be used by the `FingerprintCat64`
      function. If not given, will use a default key.
    name: Optional name for the op.

  Returns:
    A `SparseTensor` of type `int64`.
  """
  return _sparse_cross_internal(
      inputs=inputs,
      hashed_output=True,
      num_buckets=num_buckets,
      hash_key=hash_key,
      name=name)

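# A minimal usage sketch for `tf.sparse.cross_hashed` (hedged; assumes eager
# execution). The exact hashed values depend on `FingerprintCat64` and are not
# reproduced here:
#
#   import tensorflow as tf
#   inp_0 = tf.constant([['a'], ['b']])
#   inp_1 = tf.constant([['c'], ['d']])
#   out = tf.sparse.cross_hashed([inp_0, inp_1], num_buckets=100)
#   # out is a SparseTensor of dtype int64 with dense_shape [2, 1]; each value
#   # lies in [0, 100) because num_buckets > 0.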

_sparse_cross_hashed = sparse_cross_hashed

_DEFAULT_HASH_KEY = 0xDECAFCAFFE


def _sparse_cross_internal_v2(inputs):
  """See gen_sparse_ops.sparse_cross_v2."""
  if not isinstance(inputs, (tuple, list)):
    raise TypeError("Inputs must be a list or tuple.")
  if not all(
      isinstance(i, (sparse_tensor.SparseTensor, ops.Tensor))
      for i in inputs):
    raise TypeError("All inputs must be Tensor or SparseTensor.")
  sparse_inputs = [
      i for i in inputs if isinstance(i, sparse_tensor.SparseTensor)
  ]
  dense_inputs = [
      i for i in inputs if not isinstance(i, sparse_tensor.SparseTensor)
  ]
  indices = [sp_input.indices for sp_input in sparse_inputs]
  values = [sp_input.values for sp_input in sparse_inputs]
  shapes = [sp_input.dense_shape for sp_input in sparse_inputs]
  for i in range(len(values)):
    if values[i].dtype != dtypes.string:
      values[i] = math_ops.cast(values[i], dtypes.int64)
  for i in range(len(dense_inputs)):
    if dense_inputs[i].dtype != dtypes.string:
      dense_inputs[i] = math_ops.cast(dense_inputs[i], dtypes.int64)
  return indices, values, shapes, dense_inputs


def _sparse_cross_internal(inputs,
                           hashed_output=False,
                           num_buckets=0,
                           hash_key=None,
                           name=None):
  """See gen_sparse_ops.sparse_cross."""
  if not isinstance(inputs, (tuple, list)):
    raise TypeError("Inputs must be a list or tuple.")
  if not all(
      isinstance(i, (sparse_tensor.SparseTensor, ops.Tensor))
      for i in inputs):
    raise TypeError("All inputs must be Tensor or SparseTensor.")

  sparse_inputs = [
      i for i in inputs if isinstance(i, sparse_tensor.SparseTensor)
  ]
  dense_inputs = [
      i for i in inputs if not isinstance(i, sparse_tensor.SparseTensor)
  ]

  indices = [sp_input.indices for sp_input in sparse_inputs]
  values = [sp_input.values for sp_input in sparse_inputs]
  shapes = [sp_input.dense_shape for sp_input in sparse_inputs]
  out_type = dtypes.int64 if hashed_output else dtypes.string

  internal_type = dtypes.string
  for i in range(len(values)):
    if values[i].dtype != dtypes.string:
      values[i] = math_ops.cast(values[i], dtypes.int64)
      internal_type = dtypes.int64
  for i in range(len(dense_inputs)):
    if dense_inputs[i].dtype != dtypes.string:
      dense_inputs[i] = math_ops.cast(dense_inputs[i], dtypes.int64)
      internal_type = dtypes.int64

  indices_out, values_out, shape_out = gen_sparse_ops.sparse_cross(
      indices=indices,
      values=values,
      shapes=shapes,
      dense_inputs=dense_inputs,
      hashed_output=hashed_output,
      num_buckets=num_buckets,
      hash_key=hash_key or _DEFAULT_HASH_KEY,
      out_type=out_type,
      internal_type=internal_type,
      name=name)

  return sparse_tensor.SparseTensor(indices_out, values_out, shape_out)


def sparse_dense_cwise_add(sp_t, dense_t):
  """Adds up a SparseTensor and a dense Tensor, using these special rules:

  (1) Broadcasts the dense side to have the same shape as the sparse side, if
      eligible;
  (2) Then, only the dense values pointed to by the indices of the SparseTensor
      participate in the cwise addition.

  By these rules, the result is a logical SparseTensor with exactly the same
  indices and shape, but possibly with different non-zero values.  The output of
  this Op is the resultant non-zero values.

  Args:
    sp_t: the SparseTensor operand.
    dense_t: the dense Tensor operand; must have the same dtype and a
      broadcast-compatible shape as `sp_t`.

  Returns:
    output: the SparseTensor output.
  """
  result = gen_sparse_ops.sparse_dense_cwise_add(sp_t.indices, sp_t.values,
                                                 sp_t.dense_shape, dense_t)
  return sparse_tensor.SparseTensor(sp_t.indices, result, sp_t.dense_shape)

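# A minimal usage sketch for `sparse_dense_cwise_add` (hedged; this helper is
# module-level rather than exported under `tf.sparse`, and the example assumes
# eager execution):
#
#   import tensorflow as tf
#   sp = tf.sparse.SparseTensor([[0, 0], [1, 1]], [1.0, 2.0], [2, 2])
#   dense = tf.ones([2, 2])
#   out = sparse_dense_cwise_add(sp, dense)
#   # out has the same indices and shape as `sp`; its values are [2.0, 3.0],
#   # since only the dense entries at sp's indices participate.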

@tf_export("sparse.reorder", v1=["sparse.reorder", "sparse_reorder"])
@deprecation.deprecated_endpoints("sparse_reorder")
def sparse_reorder(sp_input, name=None):
  """Reorders a `SparseTensor` into the canonical, row-major ordering.

  Note that by convention, all sparse ops preserve the canonical ordering
  along increasing dimension number. The only time ordering can be violated
  is during manual manipulation of the indices and values to add entries.

  Reordering does not affect the shape of the `SparseTensor`.

  For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:

      [0, 3]: b
      [0, 1]: a
      [3, 1]: d
      [2, 0]: c

  then the output will be a `SparseTensor` of shape `[4, 5]` and
  `indices` / `values`:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c
      [3, 1]: d

  Args:
    sp_input: The input `SparseTensor`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` with the same shape and non-empty values, but in
    canonical ordering.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  reordered_ind, reordered_val = (
      gen_sparse_ops.sparse_reorder(
          sp_input.indices, sp_input.values, sp_input.dense_shape, name=name))

  if sp_input.get_shape().is_fully_defined():
    dense_shape = sp_input.get_shape().as_list()
  else:
    dense_shape = array_ops.identity(sp_input.dense_shape)

  return sparse_tensor.SparseTensor(reordered_ind, reordered_val, dense_shape)

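# A minimal usage sketch for `tf.sparse.reorder` mirroring the docstring
# example above (hedged; assumes eager execution):
#
#   import tensorflow as tf
#   sp = tf.sparse.SparseTensor(indices=[[0, 3], [0, 1], [3, 1], [2, 0]],
#                               values=['b', 'a', 'd', 'c'],
#                               dense_shape=[4, 5])
#   reordered = tf.sparse.reorder(sp)
#   # reordered.indices -> [[0, 1], [0, 3], [2, 0], [3, 1]]
#   # reordered.values  -> ['a', 'b', 'c', 'd']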

@tf_export("sparse.reshape", v1=["sparse.reshape", "sparse_reshape"])
@deprecation.deprecated_endpoints("sparse_reshape")
def sparse_reshape(sp_input, shape, name=None):
  """Reshapes a `SparseTensor` to represent values in a new dense shape.

  This operation has the same semantics as `reshape` on the represented dense
  tensor.  The indices of non-empty values in `sp_input` are recomputed based
  on the new dense shape, and a new `SparseTensor` is returned containing the
  new indices and new shape.  The order of non-empty values in `sp_input` is
  unchanged.

  If one component of `shape` is the special value -1, the size of that
  dimension is computed so that the total dense size remains constant.  At
  most one component of `shape` can be -1.  The number of dense elements
  implied by `shape` must be the same as the number of dense elements
  originally represented by `sp_input`.

  For example, if `sp_input` has shape `[2, 3, 6]` and `indices` / `values`:

      [0, 0, 0]: a
      [0, 0, 1]: b
      [0, 1, 0]: c
      [1, 0, 0]: d
      [1, 2, 3]: e

  and `shape` is `[9, -1]`, then the output will be a `SparseTensor` of
  shape `[9, 4]` and `indices` / `values`:

      [0, 0]: a
      [0, 1]: b
      [1, 2]: c
      [4, 2]: d
      [8, 1]: e

  Args:
    sp_input: The input `SparseTensor`.
    shape: A 1-D (vector) int64 `Tensor` specifying the new dense shape of the
      represented `SparseTensor`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` with the same non-empty values but with indices calculated
    by the new dense shape.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
    ValueError:  If argument `shape` requests a `SparseTensor` with a different
      number of elements than `sp_input`.
    ValueError:  If `shape` has more than one inferred (== -1) dimension.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  shape = math_ops.cast(shape, dtype=dtypes.int64)

  with ops.name_scope(name, "SparseReshape", [sp_input]) as name:
    reshaped_ind, reshaped_shape = gen_sparse_ops.sparse_reshape(
        sp_input.indices, sp_input.dense_shape, shape, name=name)

    reshaped_shape_const = tensor_util.constant_value_as_shape(shape)
    reshaped_shape_const = (
        reshaped_shape_const.as_list() if reshaped_shape_const.ndims is not None
        else None)

    if (reshaped_shape_const is not None
        and sp_input.shape.is_fully_defined()):
      # constant_value_as_shape tends to get more information about the partial
      # shape values, but here we specifically need to know if the *user* passed
      # a shape with 2+ unknown dimensions; and for that constant_value
      # provides either the user's direct value or None if only partial elements
      # are known via the python shape inference code.
      shape_const_by_user = tensor_util.constant_value(shape)
      if shape_const_by_user is not None:
        num_implied_by_user = sum(d == -1 for d in shape_const_by_user)
        if num_implied_by_user > 1:
          raise ValueError(
              "At most one dimension can be inferred (-1). Found: %s"
              % shape_const_by_user)
      original_reshaped_shape = list(reshaped_shape_const)  # A copy
      in_shape_size = np.prod(sp_input.shape.as_list())
      num_implied = sum(dim is None for dim in reshaped_shape_const)

      # If there is a 0 dim in the user-provided shape, we cannot infer the
      # unknown dim reliably. This is why we skip the `if` branch below when
      # a 0 is present in `reshaped_shape_const`. Same below.
      if num_implied == 1 and 0 not in reshaped_shape_const:
        implied_idx = original_reshaped_shape.index(None)
        non_implied_idx = (
            original_reshaped_shape[:implied_idx] +
            original_reshaped_shape[implied_idx + 1:])
        reshaped_shape_const[implied_idx] = int(
            in_shape_size // np.prod(non_implied_idx))
      if num_implied == 0 or (num_implied == 1 and
                              0 not in reshaped_shape_const):
        reshaped_size = np.prod(reshaped_shape_const)
        if reshaped_size != in_shape_size:
          raise ValueError(
              "Cannot reshape a tensor with %d elements to shape %s "
              "(%d elements)." %
              (in_shape_size, original_reshaped_shape, reshaped_size))
        reshaped_shape = constant_op.constant(
            reshaped_shape_const, dtype=dtypes.int64)

    return sparse_tensor.SparseTensor(reshaped_ind,
                                      array_ops.identity(sp_input.values),
                                      reshaped_shape)

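# A minimal usage sketch for `tf.sparse.reshape` mirroring the docstring
# example above (hedged; assumes eager execution):
#
#   import tensorflow as tf
#   sp = tf.sparse.SparseTensor(
#       indices=[[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]],
#       values=['a', 'b', 'c', 'd', 'e'], dense_shape=[2, 3, 6])
#   reshaped = tf.sparse.reshape(sp, [9, -1])
#   # reshaped.dense_shape -> [9, 4]
#   # reshaped.indices -> [[0, 0], [0, 1], [1, 2], [4, 2], [8, 1]]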

# TODO(aselle): Remove keyword_required once 1.0 is final.
class KeywordRequired:

  def __repr__(self):
    # This is needed to make documentation without fully qualified module paths
    return "KeywordRequired()"


@tf_export(v1=["sparse.split", "sparse_split"])
@deprecation.deprecated_endpoints("sparse_split")
@deprecation.deprecated_args(
    None, "split_dim is deprecated, use axis instead", "split_dim")
def sparse_split(keyword_required=KeywordRequired(),
                 sp_input=None,
                 num_split=None,
                 axis=None,
                 name=None,
                 split_dim=None):
  """Split a `SparseTensor` into `num_split` tensors along `axis`.

  If `sp_input.dense_shape[axis]` is not an integer multiple of `num_split`,
  each of the first `shape[axis] % num_split` slices gets one extra element
  along `axis`. For example, if `axis = 1` and `num_split = 2` and the
  input is:

      input_tensor = shape = [2, 7]
      [    a   d e  ]
      [b c          ]

  Graphically the output tensors are:

      output_tensor[0] =
      [    a   ]
      [b c     ]

      output_tensor[1] =
      [ d e  ]
      [      ]

  Args:
    keyword_required: Python 2 stand-in for * (temporary for argument reorder)
    sp_input: The `SparseTensor` to split.
    num_split: A Python integer. The number of ways to split.
    axis: A 0-D `int32` `Tensor`. The dimension along which to split. Must be in
      range [-rank, rank), where rank is the number of dimensions in the input
      `SparseTensor`.
    name: A name for the operation (optional).
    split_dim: Deprecated old name for axis.

  Returns:
    `num_split` `SparseTensor` objects resulting from splitting `sp_input`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
    ValueError: If the deprecated `split_dim` and `axis` are both not None.
  """
  if not isinstance(keyword_required, KeywordRequired):
    raise ValueError("Keyword arguments are required for this function.")
  if sp_input is None:
    raise ValueError("sp_input is required")
  if num_split is None:
    raise ValueError("num_split is required")
  if axis is None:
    raise ValueError("axis is required")
  axis = deprecation.deprecated_argument_lookup("axis", axis, "split_dim",
                                                split_dim)
  sp_input = _convert_to_sparse_tensor(sp_input)

  output_inds, output_vals, output_shapes = (
      gen_sparse_ops.sparse_split(
          axis,
          sp_input.indices,
          sp_input.values,
          sp_input.dense_shape,
          num_split,
          name=name))
  sparse_tensors = []
  for i in range(num_split):
    sparse_tensors.append(
        sparse_tensor.SparseTensor(output_inds[i], output_vals[i],
                                   output_shapes[i]))
  return sparse_tensors


@tf_export("sparse.split", v1=[])
def sparse_split_v2(sp_input=None,
                    num_split=None,
                    axis=None,
                    name=None):
  """Split a `SparseTensor` into `num_split` tensors along `axis`.

  If `sp_input.dense_shape[axis]` is not an integer multiple of `num_split`,
  each of the first `shape[axis] % num_split` slices gets one extra element
  along `axis`. For example:

  >>> indices = [[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]]
  >>> values = [1, 2, 3, 4, 5]
  >>> t = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=[2, 7])
  >>> tf.sparse.to_dense(t)
  <tf.Tensor: shape=(2, 7), dtype=int32, numpy=
  array([[0, 0, 1, 0, 2, 3, 0],
         [4, 5, 0, 0, 0, 0, 0]], dtype=int32)>

  >>> output = tf.sparse.split(sp_input=t, num_split=2, axis=1)
  >>> tf.sparse.to_dense(output[0])
  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
  array([[0, 0, 1, 0],
         [4, 5, 0, 0]], dtype=int32)>
  >>> tf.sparse.to_dense(output[1])
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[2, 3, 0],
         [0, 0, 0]], dtype=int32)>

  >>> output = tf.sparse.split(sp_input=t, num_split=2, axis=0)
  >>> tf.sparse.to_dense(output[0])
  <tf.Tensor: shape=(1, 7), dtype=int32, numpy=array([[0, 0, 1, 0, 2, 3, 0]],
  dtype=int32)>
  >>> tf.sparse.to_dense(output[1])
  <tf.Tensor: shape=(1, 7), dtype=int32, numpy=array([[4, 5, 0, 0, 0, 0, 0]],
  dtype=int32)>

  >>> output = tf.sparse.split(sp_input=t, num_split=2, axis=-1)
  >>> tf.sparse.to_dense(output[0])
  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
  array([[0, 0, 1, 0],
         [4, 5, 0, 0]], dtype=int32)>
  >>> tf.sparse.to_dense(output[1])
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[2, 3, 0],
         [0, 0, 0]], dtype=int32)>

  Args:
    sp_input: The `SparseTensor` to split.
    num_split: A Python integer. The number of ways to split.
    axis: A 0-D `int32` `Tensor`. The dimension along which to split. Must be in
      range [-rank, rank), where rank is the number of dimensions in the input
      `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    `num_split` `SparseTensor` objects resulting from splitting `sp_input`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  return sparse_split(sp_input=sp_input,
                      num_split=num_split,
                      axis=axis,
                      name=name,
                      split_dim=None)


@tf_export("sparse.slice", v1=["sparse.slice", "sparse_slice"])
@deprecation.deprecated_endpoints("sparse_slice")
def sparse_slice(sp_input, start, size, name=None):
  """Slice a `SparseTensor` based on the `start` and `size`.

  For example, if the input is

      input_tensor = shape = [2, 7]
      [    a   d e  ]
      [b c          ]

  Graphically the output tensors are:

      sparse.slice([0, 0], [2, 4]) = shape = [2, 4]
      [    a  ]
      [b c    ]

      sparse.slice([0, 4], [2, 3]) = shape = [2, 3]
      [ d e  ]
      [      ]

  Args:
    sp_input: The `SparseTensor` to slice.
    start: 1-D `Tensor` representing the start of the slice.
    size: 1-D `Tensor` representing the size of the slice.
    name: A name for the operation (optional).

  Returns:
    A `SparseTensor` object resulting from slicing.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  start = ops.convert_to_tensor(start, dtypes.int64)
  size = ops.convert_to_tensor(size, dtypes.int64)

  with ops.name_scope(name, "SparseSlice", [sp_input]) as name:
    output_indices, output_values, output_shape = gen_sparse_ops.sparse_slice(
        sp_input.indices,
        sp_input.values,
        sp_input.dense_shape,
        start,
        size,
        name=name)

    return sparse_tensor.SparseTensor(output_indices, output_values,
                                      output_shape)

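# A minimal usage sketch for `tf.sparse.slice` mirroring the docstring example
# above (hedged; assumes eager execution):
#
#   import tensorflow as tf
#   sp = tf.sparse.SparseTensor(
#       indices=[[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]],
#       values=['a', 'd', 'e', 'b', 'c'], dense_shape=[2, 7])
#   left = tf.sparse.slice(sp, start=[0, 0], size=[2, 4])   # shape [2, 4]
#   right = tf.sparse.slice(sp, start=[0, 4], size=[2, 3])  # shape [2, 3]
#   # left holds 'a', 'b', 'c'; right holds 'd', 'e'.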

@tf_export(v1=["sparse_to_dense"])
@dispatch.add_dispatch_support
@deprecation.deprecated(
    None,
    "Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.")
def sparse_to_dense(sparse_indices,
                    output_shape,
                    sparse_values,
                    default_value=0,
                    validate_indices=True,
                    name=None):
  """Converts a sparse representation into a dense tensor.

  Builds an array `dense` with shape `output_shape` such that

  ```python
  # If sparse_indices is scalar
  dense[i] = (i == sparse_indices ? sparse_values : default_value)

  # If sparse_indices is a vector, then for each i
  dense[sparse_indices[i]] = sparse_values[i]

  # If sparse_indices is an n by d matrix, then for each i in [0, n)
  dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
  ```

  All other values in `dense` are set to `default_value`.  If `sparse_values`
  is a scalar, all sparse indices are set to this single value.

  Indices should be sorted in lexicographic order, and indices must not
  contain any repeats. If `validate_indices` is True, these properties
  are checked during execution.

  Args:
    sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`.
      `sparse_indices[i]` contains the complete index where `sparse_values[i]`
      will be placed.
    output_shape: A 1-D `Tensor` of the same type as `sparse_indices`.  Shape
      of the dense output tensor.
    sparse_values: A 0-D or 1-D `Tensor`.  Values corresponding to each row of
      `sparse_indices`, or a scalar value to be used for all sparse indices.
    default_value: A 0-D `Tensor` of the same type as `sparse_values`.  Value
      to set for indices not specified in `sparse_indices`.  Defaults to zero.
    validate_indices: A boolean value.  If True, indices are checked to make
      sure they are sorted in lexicographic order and that there are no repeats.
    name: A name for the operation (optional).

  Returns:
    Dense `Tensor` of shape `output_shape`.  Has the same type as
    `sparse_values`.
  """
  return gen_sparse_ops.sparse_to_dense(
      sparse_indices,
      output_shape,
      sparse_values,
      default_value=default_value,
      validate_indices=validate_indices,
      name=name)

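# A minimal migration sketch for the deprecated `sparse_to_dense` (hedged;
# assumes eager execution), using the replacement suggested in the deprecation
# notice above:
#
#   import tensorflow as tf
#   st = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2],
#                               dense_shape=[3, 4])
#   dense = tf.sparse.to_dense(st, default_value=0)
#   # dense -> [[1, 0, 0, 0],
#   #           [0, 0, 2, 0],
#   #           [0, 0, 0, 0]]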

@tf_export("sparse.reduce_max", v1=[])
def sparse_reduce_max_v2(
    sp_input, axis=None, keepdims=None, output_is_sparse=False, name=None):
  """Computes `tf.sparse.maximum` of elements across dimensions of a SparseTensor.

  This is the reduction operation for the elementwise `tf.sparse.maximum` op.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_max()`.  In particular, this Op also returns a dense `Tensor`
  if `output_is_sparse` is `False`, or a `SparseTensor` if `output_is_sparse`
  is `True`.

  Note: A gradient is not defined for this function, so it can't be used
  in training models that need gradient descent.

  Reduces `sp_input` along the dimensions given in `axis`.  Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in
  `axis`. If `keepdims` is true, the reduced dimensions are retained
  with length 1.

  If `axis` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned.  Additionally, the axes can be negative,
  similar to the indexing rules in Python.

  The values not defined in `sp_input` don't participate in the reduce max,
  as opposed to being implicitly assumed to be 0 -- hence it can return
  negative values for sparse `axis`. But if there are no defined values along
  `axis`, it will reduce to 0. See the second example below.

  For example:

    # 'x' represents [[1, ?, 2]
    #                 [?, 3, ?]]
    # where ? is implicitly-zero.

    >>> x = tf.sparse.SparseTensor([[0, 0], [0, 2], [1, 1]], [1, 2, 3], [2, 3])
    >>> tf.sparse.reduce_max(x)
    <tf.Tensor: shape=(), dtype=int32, numpy=3>
    >>> tf.sparse.reduce_max(x, 0)
    <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 3, 2], dtype=int32)>
    >>> tf.sparse.reduce_max(x, 1)
    <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 3], dtype=int32)>
    >>> tf.sparse.reduce_max(x, 1, keepdims=True)
    <tf.Tensor: shape=(2, 1), dtype=int32, numpy=
    array([[2],
           [3]], dtype=int32)>
    >>> tf.sparse.reduce_max(x, [0, 1])
    <tf.Tensor: shape=(), dtype=int32, numpy=3>

    # 'y' represents [[-7, ?]
    #                 [ 4, 3]
    #                 [ ?, ?]]

    >>> y = tf.sparse.SparseTensor([[0, 0,], [1, 0], [1, 1]], [-7, 4, 3],
    ... [3, 2])
    >>> tf.sparse.reduce_max(y, 1)
    <tf.Tensor: shape=(3,), dtype=int32, numpy=array([-7,  4,  0], dtype=int32)>

  Args:
    sp_input: The SparseTensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; list or scalar. If `None` (the
      default), reduces all dimensions.
    keepdims: If true, retain reduced dimensions with length 1.
    output_is_sparse: If true, returns a `SparseTensor` instead of a dense
      `Tensor` (the default).
    name: A name for the operation (optional).

  Returns:
    The reduced Tensor or the reduced SparseTensor if `output_is_sparse` is
    True.
  """
  if keepdims is None:
    keepdims = False

  if output_is_sparse:
    output_ind, output_val, output_shape = (
        gen_sparse_ops.sparse_reduce_max_sparse(
            sp_input.indices,
            sp_input.values,
            sp_input.dense_shape,
            math_ops._ReductionDims(sp_input, axis),
            keepdims,
            name=name))

    return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)

  return gen_sparse_ops.sparse_reduce_max(
      sp_input.indices,
      sp_input.values,
      sp_input.dense_shape,
      math_ops._ReductionDims(sp_input, axis),
      keepdims,
      name=name)


@tf_export(v1=["sparse.reduce_max", "sparse_reduce_max"])
@deprecation.deprecated_endpoints("sparse_reduce_max")
@deprecation.deprecated_args(
    None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
@deprecation.deprecated_args(
    None, "reduction_axes is deprecated, use axis instead",
    "reduction_axes")
def sparse_reduce_max(sp_input, axis=None, keepdims=None,
                      reduction_axes=None, keep_dims=None):
  """Computes `tf.sparse.maximum` of elements across dimensions of a SparseTensor.

  This is the reduction operation for the elementwise `tf.sparse.maximum` op.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_max()`.  In particular, this Op also returns a dense `Tensor`
  instead of a sparse one.

  Note: A gradient is not defined for this function, so it can't be used
  in training models that need gradient descent.

  Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in
  `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained
  with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned.  Additionally, the axes can be negative,
  similar to the indexing rules in Python.

  The values not defined in `sp_input` don't participate in the reduce max,
  as opposed to being implicitly assumed to be 0 -- hence it can return
  negative values for sparse `reduction_axes`. But if there are no defined
  values along `reduction_axes`, it will reduce to 0. See the second example
  below.

  For example:

    # 'x' represents [[1, ?, 2]
    #                 [?, 3, ?]]
    # where ? is implicitly-zero.

    >>> x = tf.sparse.SparseTensor([[0, 0], [0, 2], [1, 1]], [1, 2, 3], [2, 3])
    >>> tf.sparse.reduce_max(x)
    <tf.Tensor: shape=(), dtype=int32, numpy=3>
    >>> tf.sparse.reduce_max(x, 0)
    <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 3, 2], dtype=int32)>
    >>> tf.sparse.reduce_max(x, 1)
    <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 3], dtype=int32)>
    >>> tf.sparse.reduce_max(x, 1, keepdims=True)
    <tf.Tensor: shape=(2, 1), dtype=int32, numpy=
    array([[2],
           [3]], dtype=int32)>
    >>> tf.sparse.reduce_max(x, [0, 1])
    <tf.Tensor: shape=(), dtype=int32, numpy=3>

    # 'y' represents [[-7, ?]
    #                 [ 4, 3]
    #                 [ ?, ?]]

    >>> y = tf.sparse.SparseTensor([[0, 0,], [1, 0], [1, 1]], [-7, 4, 3],
    ... [3, 2])
    >>> tf.sparse.reduce_max(y, 1)
    <tf.Tensor: shape=(3,), dtype=int32, numpy=array([-7,  4,  0], dtype=int32)>

  Args:
    sp_input: The SparseTensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; list or scalar. If `None` (the
      default), reduces all dimensions.
    keepdims: If true, retain reduced dimensions with length 1.
    reduction_axes: Deprecated name of `axis`.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced Tensor.
  """
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_axes",
                                                reduction_axes)
  if keepdims is None:
    keepdims = False

  return gen_sparse_ops.sparse_reduce_max(
      sp_input.indices, sp_input.values, sp_input.dense_shape,
      math_ops._ReductionDims(sp_input, axis), keepdims)


1410@tf_export(v1=["sparse.reduce_max_sparse", "sparse_reduce_max_sparse"])
1411@deprecation.deprecated_endpoints("sparse_reduce_max_sparse")
1412@deprecation.deprecated_args(
1413    None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
1414def sparse_reduce_max_sparse(sp_input,
1415                             axis=None,
1416                             keepdims=None,
1417                             reduction_axes=None,
1418                             keep_dims=None):
1419  """Computes the max of elements across dimensions of a SparseTensor.
1420
1421  This Op takes a SparseTensor and is the sparse counterpart to
1422  `tf.reduce_max()`.  In contrast to SparseReduceSum, this Op returns a
1423  SparseTensor.
1424
1425  Note: A gradient is not defined for this function, so it can't be used
1426  in training models that need gradient descent.
1427
1428  Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
1429  `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in
1430  `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained
1431  with length 1.
1432
1433  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
1434  with a single element is returned.  Additionally, the axes can be negative,
1435  which are interpreted according to the indexing rules in Python.
1436
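  For example, a minimal sketch (assuming eager TF2, where this v1-only
  endpoint is reachable through `tf.compat.v1`; the tensor `x` below is
  illustrative only):

  ```python
    x = tf.sparse.SparseTensor([[0, 0], [0, 2], [1, 1]], [1, 2, 3], [2, 3])
    sp_max = tf.compat.v1.sparse.reduce_max_sparse(x, axis=1)
    # sp_max is a SparseTensor with values [2, 3] and dense_shape [2].
  ```
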
  Args:
    sp_input: The SparseTensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; list or scalar. If `None` (the
      default), reduces all dimensions.
    keepdims: If true, retain reduced dimensions with length 1.
    reduction_axes: Deprecated name of `axis`.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced SparseTensor.
  """
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_axes",
                                                reduction_axes)
  if keepdims is None:
    keepdims = False

  output_ind, output_val, output_shape = (
      gen_sparse_ops.sparse_reduce_max_sparse(
          sp_input.indices, sp_input.values, sp_input.dense_shape,
          math_ops._ReductionDims(sp_input, axis), keepdims))

  return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)


@tf_export("sparse.reduce_sum", v1=[])
def sparse_reduce_sum_v2(
    sp_input, axis=None, keepdims=None, output_is_sparse=False, name=None):
  """Computes `tf.sparse.add` of elements across dimensions of a SparseTensor.

  This is the reduction operation for the elementwise `tf.sparse.add` op.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_sum()`.  In particular, this Op also returns a dense `Tensor`
  if `output_is_sparse` is `False`, or a `SparseTensor` if `output_is_sparse`
  is `True`.

  Note: if `output_is_sparse` is `True`, a gradient is not defined for this
  function, so it can't be used to train models that rely on gradient descent.

  Reduces `sp_input` along the dimensions given in `axis`.  Unless `keepdims` is
  true, the rank of the tensor is reduced by 1 for each entry in `axis`. If
  `keepdims` is true, the reduced dimensions are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned.  Additionally, the axes can be negative,
  similar to the indexing rules in Python.

  For example:

    # 'x' represents [[1, ?, 1]
    #                 [?, 1, ?]]
    # where ? is implicitly-zero.

    >>> x = tf.sparse.SparseTensor([[0, 0], [0, 2], [1, 1]], [1, 1, 1], [2, 3])
    >>> tf.sparse.reduce_sum(x)
    <tf.Tensor: shape=(), dtype=int32, numpy=3>
    >>> tf.sparse.reduce_sum(x, 0)
    <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 1, 1], dtype=int32)>
    >>> tf.sparse.reduce_sum(x, 1)  # Can also use -1 as the axis
    <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 1], dtype=int32)>
    >>> tf.sparse.reduce_sum(x, 1, keepdims=True)
    <tf.Tensor: shape=(2, 1), dtype=int32, numpy=
    array([[2],
           [1]], dtype=int32)>
    >>> tf.sparse.reduce_sum(x, [0, 1])
    <tf.Tensor: shape=(), dtype=int32, numpy=3>

  Args:
    sp_input: The SparseTensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; list or scalar. If `None` (the
      default), reduces all dimensions.
    keepdims: If true, retain reduced dimensions with length 1.
    output_is_sparse: If true, returns a `SparseTensor` instead of a dense
      `Tensor` (the default).
    name: A name for the operation (optional).

  Returns:
    The reduced Tensor or the reduced SparseTensor if `output_is_sparse` is
    True.
  """
  if keepdims is None:
    keepdims = False

  if output_is_sparse:
    output_ind, output_val, output_shape = (
        gen_sparse_ops.sparse_reduce_sum_sparse(
            sp_input.indices,
            sp_input.values,
            sp_input.dense_shape,
            math_ops._ReductionDims(sp_input, axis),
            keepdims,
            name=name))
    return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)

  return gen_sparse_ops.sparse_reduce_sum(
      sp_input.indices,
      sp_input.values,
      sp_input.dense_shape,
      math_ops._ReductionDims(sp_input, axis),
      keepdims,
      name=name)


@tf_export(v1=["sparse.reduce_sum", "sparse_reduce_sum"])
@deprecation.deprecated_endpoints("sparse_reduce_sum")
@deprecation.deprecated_args(
    None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
@deprecation.deprecated_args(
    None, "reduction_axes is deprecated, use axis instead",
    "reduction_axes")
def sparse_reduce_sum(sp_input, axis=None, keepdims=None,
                      reduction_axes=None, keep_dims=None):
  """Computes `tf.sparse.add` of elements across dimensions of a SparseTensor.

  This is the reduction operation for the elementwise `tf.sparse.add` op.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_sum()`.  In particular, this Op also returns a dense `Tensor`
  instead of a sparse one.

  Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in
  `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained
  with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned.  Additionally, the axes can be negative,
  similar to the indexing rules in Python.

  For example:

    # 'x' represents [[1, ?, 1]
    #                 [?, 1, ?]]
    # where ? is implicitly-zero.

    >>> x = tf.sparse.SparseTensor([[0, 0], [0, 2], [1, 1]], [1, 1, 1], [2, 3])
    >>> tf.sparse.reduce_sum(x)
    <tf.Tensor: shape=(), dtype=int32, numpy=3>
    >>> tf.sparse.reduce_sum(x, 0)
    <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 1, 1], dtype=int32)>
    >>> tf.sparse.reduce_sum(x, 1)  # Can also use -1 as the axis
    <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 1], dtype=int32)>
    >>> tf.sparse.reduce_sum(x, 1, keepdims=True)
    <tf.Tensor: shape=(2, 1), dtype=int32, numpy=
    array([[2],
           [1]], dtype=int32)>
    >>> tf.sparse.reduce_sum(x, [0, 1])
    <tf.Tensor: shape=(), dtype=int32, numpy=3>

  Args:
    sp_input: The SparseTensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; list or scalar. If `None` (the
      default), reduces all dimensions.
    keepdims: If true, retain reduced dimensions with length 1.
    reduction_axes: Deprecated name of `axis`.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced Tensor.
  """
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_axes",
                                                reduction_axes)
  if keepdims is None:
    keepdims = False

  return gen_sparse_ops.sparse_reduce_sum(
      sp_input.indices, sp_input.values, sp_input.dense_shape,
      math_ops._ReductionDims(sp_input, axis), keepdims)


@tf_export(v1=["sparse.reduce_sum_sparse", "sparse_reduce_sum_sparse"])
@deprecation.deprecated_endpoints("sparse_reduce_sum_sparse")
@deprecation.deprecated_args(
    None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def sparse_reduce_sum_sparse(sp_input,
                             axis=None,
                             keepdims=None,
                             reduction_axes=None,
                             keep_dims=None):
  """Computes the sum of elements across dimensions of a SparseTensor.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_sum()`.  In contrast to SparseReduceSum, this Op returns a
  SparseTensor.

  Note: A gradient is not defined for this function, so it can't be used
  to train models that rely on gradient descent.

  Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in
  `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained
  with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned.  Additionally, the axes can be negative,
  in which case they are interpreted according to the indexing rules in Python.

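  For example, a minimal sketch (assuming eager TF2, where this v1-only
  endpoint is reachable through `tf.compat.v1`; the tensor `x` below is
  illustrative only):

  ```python
    x = tf.sparse.SparseTensor([[0, 0], [0, 2], [1, 1]], [1, 1, 1], [2, 3])
    sp_sum = tf.compat.v1.sparse.reduce_sum_sparse(x, axis=1)
    # sp_sum is a SparseTensor with values [2, 1] and dense_shape [2].
  ```
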
  Args:
    sp_input: The SparseTensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; list or scalar. If `None` (the
      default), reduces all dimensions.
    keepdims: If true, retain reduced dimensions with length 1.
    reduction_axes: Deprecated name of `axis`.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced SparseTensor.
  """
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_axes",
                                                reduction_axes)
  if keepdims is None:
    keepdims = False

  output_ind, output_val, output_shape = (
      gen_sparse_ops.sparse_reduce_sum_sparse(
          sp_input.indices, sp_input.values, sp_input.dense_shape,
          math_ops._ReductionDims(sp_input, axis), keepdims))

  return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)


@tf_export("sparse.to_dense", v1=["sparse.to_dense", "sparse_tensor_to_dense"])
@deprecation.deprecated_endpoints("sparse_tensor_to_dense")
def sparse_tensor_to_dense(sp_input,
                           default_value=None,
                           validate_indices=True,
                           name=None):
  """Converts a `SparseTensor` into a dense tensor.

  For this sparse tensor with three non-empty values:

  >>> sp_input = tf.sparse.SparseTensor(
  ...   dense_shape=[3, 5],
  ...   values=[7, 8, 9],
  ...   indices=[[0, 1],
  ...            [0, 3],
  ...            [2, 0]])

  The output will be a dense `[3, 5]` tensor with values:

  >>> tf.sparse.to_dense(sp_input).numpy()
  array([[0, 7, 0, 8, 0],
         [0, 0, 0, 0, 0],
         [9, 0, 0, 0, 0]], dtype=int32)

  Note: Indices must not contain duplicates. This is checked only if
  `validate_indices` is `True`.

  Args:
    sp_input: The input `SparseTensor`.
    default_value: Scalar value to set for indices not specified in
      `sp_input`.  Defaults to zero.
    validate_indices: A boolean value.  If `True`, indices are checked to make
      sure they are sorted in lexicographic order and that there are no repeats.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A dense tensor with shape `sp_input.dense_shape` and values specified by
    the non-empty values in `sp_input`. Indices not in `sp_input` are assigned
    `default_value`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  if default_value is None:
    default_value = array_ops.zeros([], dtype=sp_input.dtype)

  return gen_sparse_ops.sparse_to_dense(
      sp_input.indices,
      sp_input.dense_shape,
      sp_input.values,
      default_value=default_value,
      validate_indices=validate_indices,
      name=name)


@tf_export(
    "sparse.to_indicator", v1=["sparse.to_indicator", "sparse_to_indicator"])
@deprecation.deprecated_endpoints("sparse_to_indicator")
def sparse_to_indicator(sp_input, vocab_size, name=None):
  """Converts a `SparseTensor` of ids into a dense bool indicator tensor.

  The last dimension of `sp_input.indices` is discarded and replaced with
  the values of `sp_input`.  If `sp_input.dense_shape = [D0, D1, ..., Dn, K]`,
  then `output.shape = [D0, D1, ..., Dn, vocab_size]`, where

      output[d_0, d_1, ..., d_n, sp_input[d_0, d_1, ..., d_n, k]] = True

  and False elsewhere in `output`.

  For example, if `sp_input.dense_shape = [2, 3, 4]` with non-empty values:

      [0, 0, 0]: 0
      [0, 1, 0]: 10
      [1, 0, 3]: 103
      [1, 1, 1]: 150
      [1, 1, 2]: 149
      [1, 1, 3]: 150
      [1, 2, 1]: 121

  and `vocab_size = 200`, then the output will be a `[2, 3, 200]` dense bool
  tensor with False everywhere except at positions

      (0, 0, 0), (0, 1, 10), (1, 0, 103), (1, 1, 149), (1, 1, 150),
      (1, 2, 121).

  Note that repeats are allowed in the input SparseTensor.
  This op is useful for converting `SparseTensor`s into dense formats for
  compatibility with ops that expect dense tensors.

  The input `SparseTensor` must be in row-major order.

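  As a runnable sketch of the example above (assuming eager TF2):

  ```python
    sp_input = tf.sparse.SparseTensor(
        indices=[[0, 0, 0], [0, 1, 0], [1, 0, 3], [1, 1, 1],
                 [1, 1, 2], [1, 1, 3], [1, 2, 1]],
        values=[0, 10, 103, 150, 149, 150, 121],
        dense_shape=[2, 3, 4])
    indicator = tf.sparse.to_indicator(sp_input, vocab_size=200)
    # indicator is a [2, 3, 200] bool Tensor, True only at the positions
    # listed above.
  ```
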
  Args:
    sp_input: A `SparseTensor` with `values` property of type `int32` or
      `int64`.
    vocab_size: A scalar int64 Tensor (or Python int) containing the new size
      of the last dimension, `all(0 <= sp_input.values < vocab_size)`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A dense bool indicator tensor representing the indices with specified value.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  with ops.name_scope(name, "SparseToIndicator", [sp_input]) as name:
    num_entries = array_ops.shape(sp_input.indices)[0]
    new_values = array_ops.fill(array_ops.expand_dims(num_entries, 0), True)
    sp_values = sparse_tensor.SparseTensor(sp_input.indices, new_values,
                                           sp_input.dense_shape)

    sp_new = sparse_merge_impl(sp_input, sp_values, vocab_size, name)

    # validate_indices may be False because we allow duplicates in new_indices:
    # repeated indices are allowed when creating an indicator matrix.
    return sparse_tensor_to_dense(
        sp_new, default_value=False, validate_indices=False, name=name)


@tf_export(v1=["sparse.merge", "sparse_merge"])
@deprecation.deprecated(None, "No similar op available at this time.")
def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
                 already_sorted=False):
  """Combines a batch of feature ids and values into a single `SparseTensor`.

  The most common use case for this function occurs when feature ids and
  their corresponding values are stored in `Example` protos on disk.
  `parse_example` will return a batch of ids and a batch of values, and this
  function joins them into a single logical `SparseTensor` for use in
  functions such as `sparse_tensor_dense_matmul`, `sparse_to_dense`, etc.

  The `SparseTensor` returned by this function has the following properties:

    - `indices` is equivalent to `sp_ids.indices` with the last
      dimension discarded and replaced with `sp_ids.values`.
    - `values` is simply `sp_values.values`.
    - If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
      `output.shape = [D0, D1, ..., Dn, vocab_size]`.

  For example, consider the following feature vectors:

  ```python
    vector1 = [-3, 0, 0, 0, 0, 0]
    vector2 = [ 0, 1, 0, 4, 1, 0]
    vector3 = [ 5, 0, 0, 9, 0, 0]
  ```

  These might be stored sparsely in the following Example protos by storing
  only the feature ids (column number if the vectors are treated as a matrix)
  of the non-zero elements and the corresponding values:

  ```python
    examples = [Example(features={
                    "ids": Feature(int64_list=Int64List(value=[0])),
                    "values": Feature(float_list=FloatList(value=[-3]))}),
                Example(features={
                    "ids": Feature(int64_list=Int64List(value=[1, 4, 3])),
                    "values": Feature(float_list=FloatList(value=[1, 1, 4]))}),
                Example(features={
                    "ids": Feature(int64_list=Int64List(value=[0, 3])),
                    "values": Feature(float_list=FloatList(value=[5, 9]))})]
  ```

  The result of calling parse_example on these examples will produce a
  dictionary with entries for "ids" and "values". Passing those two objects
  to this function along with `vocab_size=6` will produce a `SparseTensor`
  that sparsely represents all three instances. Namely, the `indices` property
  will contain the coordinates of the non-zero entries in the feature matrix
  (the first dimension is the row number in the matrix, i.e., the index within
  the batch, and the second dimension is the column number, i.e., the feature
  id); `values` will contain the actual values. `shape` will be the shape of
  the original matrix, i.e., (3, 6). For our example above, the output will be
  equal to:

  ```python
    SparseTensor(indices=[[0, 0], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3]],
                 values=[-3, 1, 4, 1, 5, 9],
                 dense_shape=[3, 6])
  ```

  This method generalizes to higher dimensions by providing a list for both
  `sp_ids` and `vocab_size`.
  In this case the resulting `SparseTensor` has the following properties:
    - `indices` is equivalent to `sp_ids[0].indices` with the last
      dimension discarded and concatenated with
      `sp_ids[0].values, sp_ids[1].values, ...`.
    - `values` is simply `sp_values.values`.
    - If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
      `output.shape = [D0, D1, ..., Dn] + vocab_size`.

  Args:
    sp_ids: A single `SparseTensor` with `values` property of type `int32`
      or `int64`, or a Python list of such `SparseTensor`s.
    sp_values: A `SparseTensor` of any type.
    vocab_size: A scalar `int64` Tensor (or Python int) containing the new size
      of the last dimension, `all(0 <= sp_ids.values < vocab_size)`.
      Or a list thereof with `all(0 <= sp_ids[i].values < vocab_size[i])` for
      all `i`.
    name: A name prefix for the returned tensors (optional)
    already_sorted: A boolean to specify whether the per-batch values in
      `sp_values` are already sorted. If so, sorting is skipped; defaults to
      False (optional).

  Returns:
    A `SparseTensor` compactly representing a batch of feature ids and values,
    useful for passing to functions that expect such a `SparseTensor`.

  Raises:
    TypeError: If `sp_values` is not a `SparseTensor`. Or if `sp_ids` is
      neither a `SparseTensor` nor a list thereof. Or if `vocab_size` is not a
      `Tensor` or a Python int and `sp_ids` is a `SparseTensor`. Or if
      `vocab_size` is not a `Tensor`, a Python int, or a list thereof and
      `sp_ids` is a list.
    ValueError: If `sp_ids` and `vocab_size` are lists of different lengths.
  """
  return sparse_merge_impl(sp_ids, sp_values, vocab_size, name, already_sorted)


def sparse_merge_impl(sp_ids,
                      sp_values,
                      vocab_size,
                      name=None,
                      already_sorted=False):
  """Internal implementation for sparse_merge to avoid deprecation warnings."""
  if isinstance(
      sp_ids, (sparse_tensor.SparseTensorValue, sparse_tensor.SparseTensor)):
    sp_ids = [sp_ids]
    if not isinstance(vocab_size, (ops.Tensor, numbers.Integral)):
      raise TypeError("vocab_size has to be a Tensor or Python int. Found %s" %
                      type(vocab_size))
    vocab_size = [vocab_size]
  else:
    if not isinstance(sp_ids, collections_abc.Iterable):
      raise TypeError("sp_ids has to be a SparseTensor or list thereof. "
                      "Found %s" % type(sp_ids))
    if not isinstance(vocab_size, collections_abc.Iterable):
      raise TypeError("vocab_size has to be a list of Tensors or Python ints. "
                      "Found %s" % type(vocab_size))
    for dim in vocab_size:
      if not isinstance(dim, (ops.Tensor, numbers.Integral)):
        raise TypeError(
            "vocab_size has to be a list of Tensors or Python ints. Found %s" %
            type(dim))
  if len(sp_ids) != len(vocab_size):
    raise ValueError("sp_ids and vocab_size have to have equal lengths.")

  with ops.name_scope(name, "SparseMerge", [sp_ids, sp_values]):
    sp_ids = [_convert_to_sparse_tensor(sp_ids_dim) for sp_ids_dim in sp_ids]
    sp_values = _convert_to_sparse_tensor(sp_values)
    ids = []
    for sp_ids_dim in sp_ids:
      ids_dim = sp_ids_dim.values
      if sp_ids_dim.dtype != dtypes.int64:
        ids_dim = math_ops.cast(ids_dim, dtypes.int64)
      ids += [array_ops.expand_dims(ids_dim, axis=1)]

    vocab_size = [math_ops.cast(x, dtypes.int64) for x in vocab_size]

    # Slice off the last dimension of indices, then tack on the ids
    indices_columns_to_preserve = sp_ids[0].indices[:, :-1]
    new_indices = array_ops.concat([indices_columns_to_preserve] + ids, 1)

    new_values = sp_values.values
    new_shape = array_ops.concat([sp_ids[0].dense_shape[:-1], vocab_size], 0)

    result = sparse_tensor.SparseTensor(new_indices, new_values, new_shape)
    if already_sorted:
      return result
    sorted_result = sparse_reorder(result)
    return sparse_tensor.SparseTensor(
        sorted_result.indices, sorted_result.values, new_shape)


@tf_export("sparse.retain", v1=["sparse.retain", "sparse_retain"])
@deprecation.deprecated_endpoints("sparse_retain")
def sparse_retain(sp_input, to_retain):
  """Retains specified non-empty values within a `SparseTensor`.

  For example, if `sp_input` has shape `[4, 5]` and 4 non-empty string values:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c
      [3, 1]: d

  and `to_retain = [True, False, False, True]`, then the output will
  be a `SparseTensor` of shape `[4, 5]` with 2 non-empty values:

      [0, 1]: a
      [3, 1]: d

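  As a runnable sketch of the example above (assuming eager TF2):

  ```python
    sp_input = tf.sparse.SparseTensor(
        indices=[[0, 1], [0, 3], [2, 0], [3, 1]],
        values=["a", "b", "c", "d"],
        dense_shape=[4, 5])
    retained = tf.sparse.retain(sp_input, [True, False, False, True])
    # retained.indices == [[0, 1], [3, 1]]; retained.values == [b"a", b"d"]
  ```
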
  Args:
    sp_input: The input `SparseTensor` with `N` non-empty elements.
    to_retain: A bool vector of length `N` with `M` true values.

  Returns:
    A `SparseTensor` with the same shape as the input and `M` non-empty
    elements corresponding to the true positions in `to_retain`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  to_retain = ops.convert_to_tensor(to_retain)

  # Shape checking, if shape is known at graph construction time
  retain_shape = to_retain.get_shape()
  retain_shape.assert_has_rank(1)
  if sp_input.values.get_shape().dims is not None:
    sp_input.values.get_shape().dims[0].assert_is_compatible_with(
        tensor_shape.dimension_at_index(retain_shape, 0))

  where_true = array_ops.reshape(array_ops.where_v2(to_retain), [-1])
  new_indices = array_ops.gather(sp_input.indices, where_true)
  new_values = array_ops.gather(sp_input.values, where_true)
  return sparse_tensor.SparseTensor(new_indices, new_values,
                                    array_ops.identity(sp_input.dense_shape))


@tf_export(
    "sparse.reset_shape", v1=["sparse.reset_shape", "sparse_reset_shape"])
@deprecation.deprecated_endpoints("sparse_reset_shape")
def sparse_reset_shape(sp_input, new_shape=None):
  """Resets the shape of a `SparseTensor` with indices and values unchanged.

  If `new_shape` is None, returns a copy of `sp_input` with its shape reset
  to the tight bounding box of `sp_input`. This will be a shape consisting of
  all zeros if `sp_input` has no values.

  If `new_shape` is provided, then it must be larger or equal in all dimensions
  compared to the shape of `sp_input`. When this condition is met, the returned
  SparseTensor will have its shape reset to `new_shape` and its indices and
  values unchanged from those of `sp_input`.

  For example:

    Consider a `sp_input` with shape [2, 3, 5]:

      [0, 0, 1]: a
      [0, 1, 0]: b
      [0, 2, 2]: c
      [1, 0, 3]: d

    - It is an error to set `new_shape` as [3, 7] since this represents a
      rank-2 tensor while `sp_input` is rank-3. This is either a ValueError
      during graph construction (if both shapes are known) or an OpError during
      run time.

    - Setting `new_shape` as [2, 3, 6] will be fine as this shape is larger or
      equal in every dimension compared to the original shape [2, 3, 5].

    - On the other hand, setting `new_shape` as [2, 3, 4] is also an error: the
      third dimension is smaller than the original shape [2, 3, 5] (and an
      `InvalidArgumentError` will be raised).

    - If `new_shape` is None, the returned SparseTensor will have a shape
      [2, 3, 4], which is the tight bounding box of `sp_input` (see the
      sketch after this list).

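  As a runnable sketch of the bullets above (assuming eager TF2):

  ```python
    sp_input = tf.sparse.SparseTensor(
        indices=[[0, 0, 1], [0, 1, 0], [0, 2, 2], [1, 0, 3]],
        values=["a", "b", "c", "d"],
        dense_shape=[2, 3, 5])
    tight = tf.sparse.reset_shape(sp_input)  # dense_shape becomes [2, 3, 4]
    grown = tf.sparse.reset_shape(sp_input, [2, 3, 6])  # becomes [2, 3, 6]
  ```
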
  Args:
    sp_input: The input `SparseTensor`.
    new_shape: None or a vector representing the new shape for the returned
      `SparseTensor`.

  Returns:
    A `SparseTensor` with indices and values unchanged from `sp_input`. Its
    shape is `new_shape` if that is set. Otherwise it is the tight bounding
    box of `sp_input`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
    ValueError: If `new_shape` represents a tensor with a different rank from
      that of `sp_input` (if shapes are known when graph is constructed).
    ValueError: If `new_shape` is determined during graph build to have
      dimension sizes that are too small.
    OpError:
      - If `new_shape` has dimension sizes that are too small.
      - If shapes are not known during graph construction time, and during run
        time it is found out that the ranks do not match.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  in_indices = array_ops.identity(sp_input.indices)
  in_values = array_ops.identity(sp_input.values)
  in_shape = array_ops.identity(sp_input.dense_shape)

  if new_shape is None:
    dim_low_bound = math_ops.reduce_max(in_indices, axis=0)
    output_shape_tensor = math_ops.maximum(
        array_ops.constant(0, dtype=dtypes.int64),
        math_ops.add(dim_low_bound, array_ops.ones_like(in_shape)))
  else:
    output_shape_tensor = ops.convert_to_tensor(new_shape)
    output_shape_tensor.get_shape().assert_has_rank(1)
    output_shape_tensor = math_ops.cast(output_shape_tensor, dtypes.int64)
    # For cases when shape is known during graph construction, this catches the
    # error before the sparse_tensor.SparseTensor catches it.
    if output_shape_tensor.get_shape().rank is not None:
      output_shape_tensor.get_shape().dims[0].assert_is_compatible_with(
          in_shape.get_shape().dims[0])

    output_shape_tensor_const = tensor_util.constant_value(output_shape_tensor)
    # For cases where all shapes are known during graph construction
    if (output_shape_tensor_const is not None and
        sp_input.get_shape().is_fully_defined()):
      in_shape_const = np.array(sp_input.get_shape().as_list())
      if not np.all(in_shape_const <= output_shape_tensor_const):
        raise ValueError(
            "Requested new_shape should have dimension sizes >= sp_input.shape."
            "  Found new_shape (%s), sp_input.shape (%s)." %
            (output_shape_tensor_const, in_shape_const))
      output_shape_tensor = output_shape_tensor_const
    else:
      # For cases where shape is not known during graph construction.
      output_shape_tensor = control_flow_ops.with_dependencies([
          check_ops.assert_equal(
              array_ops.shape(in_shape), array_ops.shape(output_shape_tensor))
      ], output_shape_tensor)
      output_shape_tensor = control_flow_ops.with_dependencies(
          [check_ops.assert_less_equal(in_shape, output_shape_tensor)],
          output_shape_tensor)

  return sparse_tensor.SparseTensor(in_indices, in_values, output_shape_tensor)


@tf_export(
    "sparse.fill_empty_rows",
    v1=["sparse.fill_empty_rows", "sparse_fill_empty_rows"])
@deprecation.deprecated_endpoints("sparse_fill_empty_rows")
def sparse_fill_empty_rows(sp_input, default_value, name=None):
  """Fills empty rows in the input 2-D `SparseTensor` with a default value.

  This op adds entries with the specified `default_value` at index
  `[row, 0]` for any row in the input that does not already have a value.

  For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c
      [3, 1]: d

  Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:

      [0, 1]: a
      [0, 3]: b
      [1, 0]: default_value
      [2, 0]: c
      [3, 1]: d
      [4, 0]: default_value

  Note that the input may have empty columns at the end, with no effect on
  this op.

  The output `SparseTensor` will be in row-major order and will have the
  same shape as the input.

  This op also returns an indicator vector such that

      empty_row_indicator[i] = True iff row i was an empty row.

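  As a runnable sketch of the example above (assuming eager TF2):

  ```python
    sp_input = tf.sparse.SparseTensor(
        indices=[[0, 1], [0, 3], [2, 0], [3, 1]],
        values=["a", "b", "c", "d"],
        dense_shape=[5, 6])
    filled, empty_rows = tf.sparse.fill_empty_rows(sp_input, "default_value")
    # empty_rows == [False, True, False, False, True]
  ```
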
  Args:
    sp_input: A `SparseTensor` with shape `[N, M]`.
    default_value: The value to fill for empty rows, with the same type as
      `sp_input`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    sp_ordered_output: A `SparseTensor` with shape `[N, M]`, and with all empty
      rows filled in with `default_value`.
    empty_row_indicator: A bool vector of length `N` indicating whether each
      input row was empty.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  with ops.name_scope(name, "SparseFillEmptyRows", [sp_input]):
    default_value = ops.convert_to_tensor(
        default_value, dtype=sp_input.values.dtype)
    (output_indices, output_values, empty_row_indicator,
     unused_reverse_index_map) = gen_sparse_ops.sparse_fill_empty_rows(
         indices=sp_input.indices,
         values=sp_input.values,
         dense_shape=sp_input.dense_shape,
         default_value=default_value)
    return (sparse_tensor.SparseTensor(
        indices=output_indices,
        values=output_values,
        dense_shape=sp_input.dense_shape), empty_row_indicator)


@tf_export(v1=["io.serialize_sparse", "serialize_sparse"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("serialize_sparse")
def serialize_sparse(sp_input, name=None, out_type=dtypes.string):
  """Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object.

  Args:
    sp_input: The input `SparseTensor`.
    name: A name prefix for the returned tensors (optional).
    out_type: The `dtype` to use for serialization.

  Returns:
    A 3-vector (1-D `Tensor`), with each element representing the serialized
    `SparseTensor`'s indices, values, and shape (respectively).

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  return serialize_sparse_v2(sp_input, out_type, name)


@tf_export("io.serialize_sparse", v1=[])
@dispatch.add_dispatch_support
def serialize_sparse_v2(sp_input, out_type=dtypes.string, name=None):
  """Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object.

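  For example, a minimal sketch (assuming eager TF2; the tensor below is
  illustrative only):

  ```python
    st = tf.sparse.SparseTensor(
        indices=[[0, 0]], values=[1], dense_shape=[2, 2])
    serialized = tf.io.serialize_sparse(st)
    # serialized is a 1-D string Tensor of shape [3].
  ```
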
  Args:
    sp_input: The input `SparseTensor`.
    out_type: The `dtype` to use for serialization.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A 3-vector (1-D `Tensor`), with each element representing the serialized
    `SparseTensor`'s indices, values, and shape (respectively).

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  return gen_sparse_ops.serialize_sparse(
      sp_input.indices,
      sp_input.values,
      sp_input.dense_shape,
      name=name,
      out_type=out_type)


@tf_export(v1=["io.serialize_many_sparse", "serialize_many_sparse"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("serialize_many_sparse")
def serialize_many_sparse(sp_input, name=None, out_type=dtypes.string):
  """Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`.

  The `SparseTensor` must have rank `R` greater than 1, and the first dimension
  is treated as the minibatch dimension.  Elements of the `SparseTensor`
  must be sorted in increasing order of this first dimension.  The serialized
  `SparseTensor` objects going into each row of the output `Tensor` will have
  rank `R-1`.

  The minibatch size `N` is extracted from `sparse_shape[0]`.

  Args:
    sp_input: The input rank `R` `SparseTensor`.
    name: A name prefix for the returned tensors (optional).
    out_type: The `dtype` to use for serialization.

  Returns:
    A matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column
    represents serialized `SparseTensor`'s indices, values, and shape
    (respectively).

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  return serialize_many_sparse_v2(sp_input, out_type, name)


@tf_export("io.serialize_many_sparse", v1=[])
@dispatch.add_dispatch_support
def serialize_many_sparse_v2(sp_input, out_type=dtypes.string, name=None):
  """Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`.

  The `SparseTensor` must have rank `R` greater than 1, and the first dimension
  is treated as the minibatch dimension.  Elements of the `SparseTensor`
  must be sorted in increasing order of this first dimension.  The serialized
  `SparseTensor` objects going into each row of the output `Tensor` will have
  rank `R-1`.

  The minibatch size `N` is extracted from `sparse_shape[0]`.

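  For example, a minimal sketch (assuming eager TF2; the two-row minibatch
  below is illustrative only):

  ```python
    st = tf.sparse.SparseTensor(
        indices=[[0, 0], [1, 1]], values=[1, 2], dense_shape=[2, 3])
    serialized = tf.io.serialize_many_sparse(st)
    # serialized is a [2, 3] string Tensor; each row encodes a rank-1
    # SparseTensor with dense_shape [3].
  ```
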
  Args:
    sp_input: The input rank `R` `SparseTensor`.
    out_type: The `dtype` to use for serialization.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column
    represents serialized `SparseTensor`'s indices, values, and shape
    (respectively).

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  return gen_sparse_ops.serialize_many_sparse(
      sp_input.indices,
      sp_input.values,
      sp_input.dense_shape,
      name=name,
      out_type=out_type)


def deserialize_sparse(serialized_sparse, dtype, rank=None, name=None):
  """Deserialize `SparseTensor` objects.

  The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
  the last dimension stores serialized `SparseTensor` objects and the other N
  dimensions (N >= 0) correspond to a batch. The ranks of the original
  `SparseTensor` objects must all match. When the final `SparseTensor` is
  created, its rank is the rank of the incoming `SparseTensor` objects plus N;
  the sparse tensors have been concatenated along new dimensions, one for each
  batch.

  The output `SparseTensor` object's shape values for the original dimensions
  are the max across the input `SparseTensor` objects' shape values for the
  corresponding dimensions. The new dimensions match the size of the batch.

  The input `SparseTensor` objects' indices are assumed ordered in
  standard lexicographic order.  If this is not the case, after this
  step run `SparseReorder` to restore index ordering.

  For example, if the serialized input is a `[2 x 3]` matrix representing two
  original `SparseTensor` objects:

      index = [ 0]
              [10]
              [20]
      values = [1, 2, 3]
      shape = [50]

  and

      index = [ 2]
              [10]
      values = [4, 5]
      shape = [30]

  then the final deserialized `SparseTensor` will be:

      index = [0  0]
              [0 10]
              [0 20]
              [1  2]
              [1 10]
      values = [1, 2, 3, 4, 5]
      shape = [2 50]

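  As a minimal round-trip sketch (assuming eager TF2; `deserialize_sparse`
  here is this module-level helper, which carries no public `tf_export`
  endpoint of its own):

  ```python
    st = tf.sparse.SparseTensor(indices=[[0], [10]], values=[1, 2],
                                dense_shape=[50])
    serialized = tf.io.serialize_sparse(st)  # shape [3], no batch dimensions
    round_trip = deserialize_sparse(serialized, dtype=tf.int32, rank=1)
    # round_trip has the same indices, values, and dense_shape as `st`.
  ```
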
  Args:
    serialized_sparse: The serialized `SparseTensor` objects.
      The last dimension must have 3 columns.
    dtype: The `dtype` of the serialized `SparseTensor` objects.
    rank: (optional) Python int, the rank of the `SparseTensor` objects.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `SparseTensor` representing the deserialized `SparseTensor` objects.

  """
  output_indices, output_values, output_shape = (
      gen_sparse_ops.deserialize_sparse(serialized_sparse, dtype, name=name))

  # Feed rank data back in, if available
  output_indices.set_shape([None, rank])
  output_shape.set_shape([rank])

  return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)


@tf_export(
    "io.deserialize_many_sparse",
    v1=["io.deserialize_many_sparse", "deserialize_many_sparse"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("deserialize_many_sparse")
def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
  """Deserialize and concatenate `SparseTensors` from a serialized minibatch.

  The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
  `N` is the minibatch size and the rows correspond to packed outputs of
  `serialize_sparse`.  The ranks of the original `SparseTensor` objects
  must all match.  When the final `SparseTensor` is created, it has rank one
  higher than the ranks of the incoming `SparseTensor` objects (they have been
  concatenated along a new row dimension).

  The output `SparseTensor` object's shape values for all dimensions but the
  first are the max across the input `SparseTensor` objects' shape values
  for the corresponding dimensions.  Its first shape value is `N`, the minibatch
  size.

  The input `SparseTensor` objects' indices are assumed ordered in
  standard lexicographic order.  If this is not the case, after this
  step run `sparse.reorder` to restore index ordering.

  For example, if the serialized input is a `[2, 3]` matrix representing two
  original `SparseTensor` objects:

      index = [ 0]
              [10]
              [20]
      values = [1, 2, 3]
      shape = [50]

  and

      index = [ 2]
              [10]
      values = [4, 5]
      shape = [30]

  then the final deserialized `SparseTensor` will be:

      index = [0  0]
              [0 10]
              [0 20]
              [1  2]
              [1 10]
      values = [1, 2, 3, 4, 5]
      shape = [2 50]

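  As a minimal batched round-trip sketch (assuming eager TF2; the tensors are
  illustrative only):

  ```python
    st = tf.sparse.SparseTensor(
        indices=[[0, 0], [1, 1]], values=[1, 2], dense_shape=[2, 3])
    serialized = tf.io.serialize_many_sparse(st)  # shape [2, 3]
    round_trip = tf.io.deserialize_many_sparse(
        serialized, dtype=tf.int32, rank=2)
    # round_trip has dense_shape [2, 3] again.
  ```
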
  Args:
    serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`.
      The serialized and packed `SparseTensor` objects.
    dtype: The `dtype` of the serialized `SparseTensor` objects.
    rank: (optional) Python int, the rank of the `SparseTensor` objects.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` representing the deserialized `SparseTensor`s,
    concatenated along the `SparseTensor`s' first dimension.

    All of the serialized `SparseTensor`s must have had the same rank and type.
  """
  output_indices, output_values, output_shape = (
      gen_sparse_ops.deserialize_many_sparse(
          serialized_sparse, dtype, name=name))

  # Feed rank data back in, if available
  output_indices.set_shape([None, rank])
  output_shape.set_shape([rank])

  return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)


@tf_export("sparse.sparse_dense_matmul",
           v1=["sparse.sparse_dense_matmul", "sparse.matmul",
               "sparse_tensor_dense_matmul"])
@deprecation.deprecated_endpoints("sparse_tensor_dense_matmul")
def sparse_tensor_dense_matmul(sp_a,
                               b,
                               adjoint_a=False,
                               adjoint_b=False,
                               name=None):
  # pylint: disable=line-too-long
  """Multiplies a rank-2 `SparseTensor` "A" by a dense matrix "B", or vice versa.

  One and only one of the two inputs MUST be a `SparseTensor`; the other MUST
  be a dense matrix.

  The following input format is recommended (but not required) for optimal
  performance:

  * If `adjoint_a == False`: `A` should be sorted in lexicographically
    increasing order.  Use `sparse.reorder` if you're not sure.
  * If `adjoint_a == True`: `A` should be sorted in order of increasing
    dimension 1 (i.e., "column major" order instead of "row major" order).

  Args:
    sp_a: SparseTensor (or dense Matrix) A, of rank 2.
    b: dense Matrix (or SparseTensor) B, with the same dtype as sp_a.
    adjoint_a: Use the adjoint of A in the matrix multiply.  If A is complex,
      this is transpose(conj(A)).  Otherwise it's transpose(A).
    adjoint_b: Use the adjoint of B in the matrix multiply.  If B is complex,
      this is transpose(conj(B)).  Otherwise it's transpose(B).
    name: A name prefix for the returned tensors (optional)

  Returns:
    A dense matrix (pseudo-code in dense np.matrix notation):
      `A = A.H if adjoint_a else A`
      `B = B.H if adjoint_b else B`
      `return A*B`

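  For example, a minimal usage sketch (assuming eager TF2; `sp_a` and `b`
  below are illustrative only):

  ```python
    sp_a = tf.sparse.SparseTensor(
        indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=[2, 3])
    b = tf.ones([3, 4])
    c = tf.sparse.sparse_dense_matmul(sp_a, b)  # dense Tensor of shape [2, 4]
  ```
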
  Notes:

  Using `tf.nn.embedding_lookup_sparse` for sparse multiplication:

  It's not obvious, but you can think of `embedding_lookup_sparse` as another
  form of sparse-dense multiplication. In some situations, you may prefer to
  use `embedding_lookup_sparse` even though you're not dealing with embeddings.

  There are two questions to ask in the decision process: do you need
  gradients computed as sparse too? Is your sparse data represented as two
  `SparseTensor`s, ids and values? There is more explanation about the data
  formats below. If you answer yes to either of these questions, consider
  using `tf.nn.embedding_lookup_sparse`.

  The following explains the differences between the expected `SparseTensor`
  formats. For example, if the dense form of your sparse data has shape
  `[3, 5]` and values:

      [[  a      ]
       [b       c]
       [    d    ]]


  `SparseTensor` format expected by `sparse_tensor_dense_matmul`:
   `sp_a` (indices, values):

      [0, 1]: a
      [1, 0]: b
      [1, 4]: c
      [2, 2]: d

  `SparseTensor` format expected by `embedding_lookup_sparse`:
   `sp_ids`                 `sp_weights`

      [0, 0]: 1                [0, 0]: a
      [1, 0]: 0                [1, 0]: b
      [1, 1]: 4                [1, 1]: c
      [2, 0]: 2                [2, 0]: d


  Deciding when to use `sparse_tensor_dense_matmul` vs.
  `matmul`(a_is_sparse=True):

  There are a number of questions to ask in the decision process, including:

  * Will the SparseTensor `A` fit in memory if densified?
  * Is the column count of the product large (>> 1)?
  * Is the density of `A` larger than approximately 15%?

  If the answer to several of these questions is yes, consider
  converting the `SparseTensor` to a dense one and using `tf.matmul` with
  `a_is_sparse=True`.

  This operation tends to perform well when `A` is more sparse, when the
  column count of the product is small (e.g., matrix-vector multiplication),
  and when `sp_a.dense_shape` takes on large values.

  Below is a rough speed comparison between `sparse_tensor_dense_matmul`,
  labeled 'sparse', and `matmul`(a_is_sparse=True), labeled 'dense'.  For
  purposes of the comparison, the time spent converting from a `SparseTensor`
  to a dense `Tensor` is not included, so it is overly conservative with
  respect to the time ratio.

  Benchmark system:
  CPU: Intel Ivybridge with HyperThreading (6 cores) dL1:32KB dL2:256KB dL3:12MB
  GPU: NVidia Tesla k40c

  Compiled with:
  `-c opt --config=cuda --copt=-mavx`

  ```
  tensorflow/python/sparse_tensor_dense_matmul_op_test --benchmarks
  A sparse [m, k] with % nonzero values between 1% and 80%
  B dense [k, n]

  % nnz  n   gpu   m     k     dt(dense)     dt(sparse)   dt(sparse)/dt(dense)
  0.01   1   True  100   100   0.000221166   0.00010154   0.459112
  0.01   1   True  100   1000  0.00033858    0.000109275  0.322745
  0.01   1   True  1000  100   0.000310557   9.85661e-05  0.317385
  0.01   1   True  1000  1000  0.0008721     0.000100875  0.115669
  0.01   1   False 100   100   0.000208085   0.000107603  0.51711
  0.01   1   False 100   1000  0.000327112   9.51118e-05  0.290762
  0.01   1   False 1000  100   0.000308222   0.00010345   0.335635
  0.01   1   False 1000  1000  0.000865721   0.000101397  0.117124
  0.01   10  True  100   100   0.000218522   0.000105537  0.482958
  0.01   10  True  100   1000  0.000340882   0.000111641  0.327506
  0.01   10  True  1000  100   0.000315472   0.000117376  0.372064
  0.01   10  True  1000  1000  0.000905493   0.000123263  0.136128
  0.01   10  False 100   100   0.000221529   9.82571e-05  0.44354
  0.01   10  False 100   1000  0.000330552   0.000112615  0.340687
  0.01   10  False 1000  100   0.000341277   0.000114097  0.334324
  0.01   10  False 1000  1000  0.000819944   0.000120982  0.147549
  0.01   25  True  100   100   0.000207806   0.000105977  0.509981
  0.01   25  True  100   1000  0.000322879   0.00012921   0.400181
  0.01   25  True  1000  100   0.00038262    0.00014158   0.370035
  0.01   25  True  1000  1000  0.000865438   0.000202083  0.233504
  0.01   25  False 100   100   0.000209401   0.000104696  0.499979
  0.01   25  False 100   1000  0.000321161   0.000130737  0.407076
  0.01   25  False 1000  100   0.000377012   0.000136801  0.362856
  0.01   25  False 1000  1000  0.000861125   0.00020272   0.235413
  0.2    1   True  100   100   0.000206952   9.69219e-05  0.46833
  0.2    1   True  100   1000  0.000348674   0.000147475  0.422959
  0.2    1   True  1000  100   0.000336908   0.00010122   0.300439
  0.2    1   True  1000  1000  0.001022      0.000203274  0.198898
  0.2    1   False 100   100   0.000207532   9.5412e-05   0.459746
  0.2    1   False 100   1000  0.000356127   0.000146824  0.41228
  0.2    1   False 1000  100   0.000322664   0.000100918  0.312764
  0.2    1   False 1000  1000  0.000998987   0.000203442  0.203648
  0.2    10  True  100   100   0.000211692   0.000109903  0.519165
  0.2    10  True  100   1000  0.000372819   0.000164321  0.440753
  0.2    10  True  1000  100   0.000338651   0.000144806  0.427596
  0.2    10  True  1000  1000  0.00108312    0.000758876  0.70064
  0.2    10  False 100   100   0.000215727   0.000110502  0.512231
  0.2    10  False 100   1000  0.000375419   0.0001613    0.429653
  0.2    10  False 1000  100   0.000336999   0.000145628  0.432132
  0.2    10  False 1000  1000  0.00110502    0.000762043  0.689618
  0.2    25  True  100   100   0.000218705   0.000129913  0.594009
  0.2    25  True  100   1000  0.000394794   0.00029428   0.745402
  0.2    25  True  1000  100   0.000404483   0.0002693    0.665788
  0.2    25  True  1000  1000  0.0012002     0.00194494   1.62052
  0.2    25  False 100   100   0.000221494   0.0001306    0.589632
  0.2    25  False 100   1000  0.000396436   0.000297204  0.74969
  0.2    25  False 1000  100   0.000409346   0.000270068  0.659754
  0.2    25  False 1000  1000  0.00121051    0.00193737   1.60046
  0.5    1   True  100   100   0.000214981   9.82111e-05  0.456836
  0.5    1   True  100   1000  0.000415328   0.000223073  0.537101
  0.5    1   True  1000  100   0.000358324   0.00011269   0.314492
  0.5    1   True  1000  1000  0.00137612    0.000437401  0.317851
  0.5    1   False 100   100   0.000224196   0.000101423  0.452386
  0.5    1   False 100   1000  0.000400987   0.000223286  0.556841
  0.5    1   False 1000  100   0.000368825   0.00011224   0.304318
  0.5    1   False 1000  1000  0.00136036    0.000429369  0.31563
  0.5    10  True  100   100   0.000222125   0.000112308  0.505608
  0.5    10  True  100   1000  0.000461088   0.00032357   0.701753
  0.5    10  True  1000  100   0.000394624   0.000225497  0.571422
  0.5    10  True  1000  1000  0.00158027    0.00190898   1.20801
  0.5    10  False 100   100   0.000232083   0.000114978  0.495418
  0.5    10  False 100   1000  0.000454574   0.000324632  0.714146
  0.5    10  False 1000  100   0.000379097   0.000227768  0.600817
  0.5    10  False 1000  1000  0.00160292    0.00190168   1.18638
  0.5    25  True  100   100   0.00023429    0.000151703  0.647501
  0.5    25  True  100   1000  0.000497462   0.000598873  1.20386
  0.5    25  True  1000  100   0.000460778   0.000557038  1.20891
  0.5    25  True  1000  1000  0.00170036    0.00467336   2.74845
  0.5    25  False 100   100   0.000228981   0.000155334  0.678371
  0.5    25  False 100   1000  0.000496139   0.000620789  1.25124
  0.5    25  False 1000  100   0.00045473    0.000551528  1.21287
  0.5    25  False 1000  1000  0.00171793    0.00467152   2.71927
  0.8    1   True  100   100   0.000222037   0.000105301  0.47425
  0.8    1   True  100   1000  0.000410804   0.000329327  0.801664
  0.8    1   True  1000  100   0.000349735   0.000131225  0.375212
  0.8    1   True  1000  1000  0.00139219    0.000677065  0.48633
  0.8    1   False 100   100   0.000214079   0.000107486  0.502085
  0.8    1   False 100   1000  0.000413746   0.000323244  0.781261
  0.8    1   False 1000  100   0.000348983   0.000131983  0.378193
  0.8    1   False 1000  1000  0.00136296    0.000685325  0.50282
  0.8    10  True  100   100   0.000229159   0.00011825   0.516017
  0.8    10  True  100   1000  0.000498845   0.000532618  1.0677
  0.8    10  True  1000  100   0.000383126   0.00029935   0.781336
  0.8    10  True  1000  1000  0.00162866    0.00307312   1.88689
  0.8    10  False 100   100   0.000230783   0.000124958  0.541452
  0.8    10  False 100   1000  0.000493393   0.000550654  1.11606
  0.8    10  False 1000  100   0.000377167   0.000298581  0.791642
  0.8    10  False 1000  1000  0.00165795    0.00305103   1.84024
  0.8    25  True  100   100   0.000233496   0.000175241  0.75051
  0.8    25  True  100   1000  0.00055654    0.00102658   1.84458
  0.8    25  True  1000  100   0.000463814   0.000783267  1.68875
  0.8    25  True  1000  1000  0.00186905    0.00755344   4.04132
  0.8    25  False 100   100   0.000240243   0.000175047  0.728625
  0.8    25  False 100   1000  0.000578102   0.00104499   1.80763
  0.8    25  False 1000  100   0.000485113   0.000776849  1.60138
  0.8    25  False 1000  1000  0.00211448    0.00752736   3.55992
  ```

  """
  # pylint: enable=line-too-long

  if isinstance(
      b, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
    # We can do C * D where C is sparse but if we want to do A * B when
    # B is sparse we have to transpose. But AB = (B'A')' so we have to feed in
    # the transpose of the arguments as well.
    if adjoint_a != adjoint_b:
      return array_ops.transpose(
          sparse_tensor_dense_matmul(b, sp_a, adjoint_a, adjoint_b))
    else:
      return array_ops.transpose(
          sparse_tensor_dense_matmul(
              b, sp_a, adjoint_a=not adjoint_a, adjoint_b=not adjoint_b))

  else:
    sp_a = _convert_to_sparse_tensor(sp_a)
    with ops.name_scope(name, "SparseTensorDenseMatMul",
                        [sp_a.indices, sp_a.values, b]) as name:
      b = ops.convert_to_tensor(b, name="b")
      return gen_sparse_ops.sparse_tensor_dense_mat_mul(
          a_indices=sp_a.indices,
          a_values=sp_a.values,
          a_shape=sp_a.dense_shape,
          b=b,
          adjoint_a=adjoint_a,
          adjoint_b=adjoint_b)


@tf_export("sparse.softmax", v1=["sparse.softmax", "sparse_softmax"])
@deprecation.deprecated_endpoints("sparse_softmax")
def sparse_softmax(sp_input, name=None):
  """Applies softmax to a batched N-D `SparseTensor`.

  The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
  (where `N >= 2`), and with indices sorted in the canonical lexicographic
  order.

  This op is equivalent to applying the normal `tf.nn.softmax()` to each
  innermost logical submatrix with shape `[B, C]`, but with the catch that *the
  implicitly zero elements do not participate*.  Specifically, the algorithm is
  equivalent to:

    (1) Applies `tf.nn.softmax()` to a densified view of each innermost
        submatrix with shape `[B, C]`, along the size-C dimension;
    (2) Masks out the original implicitly-zero locations;
    (3) Renormalizes the remaining elements.

  Hence, the `SparseTensor` result has exactly the same non-zero indices and
  shape.

  Example:

  ```python
  # First batch:
  # [?   e.]
  # [1.  ? ]
  # Second batch:
  # [e   ? ]
  # [e   e ]
  shape = [2, 2, 2]  # 3-D SparseTensor
  values = np.asarray([[[0., np.e], [1., 0.]], [[np.e, 0.], [np.e, np.e]]])
  indices = np.vstack(np.where(values)).astype(np.int64).T

  result = tf.sparse.softmax(tf.sparse.SparseTensor(indices, values, shape))
  # ...returning a 3-D SparseTensor, equivalent to:
  # [?   1.]     [1    ?]
  # [1.  ? ] and [.5  .5]
  # where ? means implicitly zero.
  ```

  Args:
    sp_input: N-D `SparseTensor`, where `N >= 2`.
    name: optional name of the operation.

  Returns:
    output: N-D `SparseTensor` representing the results.
  """
  with ops.name_scope(name, "SparseSoftmax",
                      [sp_input.indices, sp_input.values]) as name:
    out_vals = gen_sparse_ops.sparse_softmax(sp_input.indices, sp_input.values,
                                             sp_input.dense_shape)
    return sparse_tensor.SparseTensor(sp_input.indices, out_vals,
                                      sp_input.dense_shape)


@tf_export("sparse.maximum", v1=["sparse.maximum", "sparse_maximum"])
@deprecation.deprecated_endpoints("sparse_maximum")
2711def sparse_maximum(sp_a, sp_b, name=None):
2712  """Returns the element-wise max of two SparseTensors.
2713
2714  Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
2715
2716  Example:
2717
2718    >>> sp_zero = tf.sparse.SparseTensor([[0]], [0], [7])
2719    >>> sp_one = tf.sparse.SparseTensor([[1]], [1], [7])
2720    >>> res = tf.sparse.maximum(sp_zero, sp_one)
2721    >>> res.indices
2722    <tf.Tensor: shape=(2, 1), dtype=int64, numpy=
2723    array([[0],
2724           [1]])>
2725    >>> res.values
2726    <tf.Tensor: shape=(2,), dtype=int32, numpy=array([0, 1], dtype=int32)>
2727    >>> res.dense_shape
2728    <tf.Tensor: shape=(1,), dtype=int64, numpy=array([7])>
2729
  The reduction version of this elementwise operation is
  `tf.sparse.reduce_max`.

  Args:
    sp_a: a `SparseTensor` operand whose dtype is real, and indices
      lexicographically ordered.
    sp_b: the other `SparseTensor` operand with the same requirements (and the
      same shape).
    name: optional name of the operation.

  Returns:
    output: the output SparseTensor.
  """
  with ops.name_scope(
      name, "SparseSparseMaximum",
      [sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name:
    out_indices, out_values = gen_sparse_ops.sparse_sparse_maximum(
        sp_a.indices,
        sp_a.values,
        sp_a.dense_shape,
        sp_b.indices,
        sp_b.values,
        sp_b.dense_shape,
        name=name)
  return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)


@tf_export("sparse.minimum", v1=["sparse.minimum", "sparse_minimum"])
@deprecation.deprecated_endpoints("sparse_minimum")
def sparse_minimum(sp_a, sp_b, name=None):
  """Returns the element-wise min of two SparseTensors.

  Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

  Example:

    >>> sp_zero = tf.sparse.SparseTensor([[0]], [0], [7])
    >>> sp_one = tf.sparse.SparseTensor([[1]], [1], [7])
    >>> res = tf.sparse.minimum(sp_zero, sp_one)
    >>> res.indices
    <tf.Tensor: shape=(2, 1), dtype=int64, numpy=
    array([[0],
           [1]])>
    >>> res.values
    <tf.Tensor: shape=(2,), dtype=int32, numpy=array([0, 0], dtype=int32)>
    >>> res.dense_shape
    <tf.Tensor: shape=(1,), dtype=int64, numpy=array([7])>

  Args:
    sp_a: a `SparseTensor` operand whose dtype is real, and indices
      lexicographically ordered.
    sp_b: the other `SparseTensor` operand with the same requirements (and the
      same shape).
    name: optional name of the operation.

  Returns:
    output: the output SparseTensor.
  """
  with ops.name_scope(
      name, "SparseSparseMinimum",
      [sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name:
    out_indices, out_values = gen_sparse_ops.sparse_sparse_minimum(
        sp_a.indices,
        sp_a.values,
        sp_a.dense_shape,
        sp_b.indices,
        sp_b.values,
        sp_b.dense_shape,
        name=name)
  return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)


@tf_export("sparse.transpose", v1=["sparse.transpose", "sparse_transpose"])
@deprecation.deprecated_endpoints("sparse_transpose")
def sparse_transpose(sp_input, perm=None, name=None):
2802  """Transposes a `SparseTensor`
2803
2804  The returned tensor's dimension i will correspond to the input dimension
2805  `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
2806  the rank of the input tensor. Hence by default, this operation performs a
2807  regular matrix transpose on 2-D input Tensors.
2808
2809  For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:
2810
2811      [0, 3]: b
2812      [0, 1]: a
2813      [3, 1]: d
2814      [2, 0]: c
2815
2816  then the output will be a `SparseTensor` of shape `[5, 4]` and
2817  `indices` / `values`:
2818
2819      [0, 2]: c
2820      [1, 0]: a
2821      [1, 3]: d
2822      [3, 0]: b
2823
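  The same behavior through the public API, as a minimal sketch (the string
  values are purely for illustration):

  ```python
  x = tf.sparse.SparseTensor(indices=[[0, 1], [0, 3], [2, 0], [3, 1]],
                             values=["a", "b", "c", "d"],
                             dense_shape=[4, 5])
  y = tf.sparse.transpose(x)  # y.dense_shape == [5, 4]
  ```
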
  Args:
    sp_input: The input `SparseTensor`.
    perm: A permutation of the dimensions of `sp_input`.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A transposed `SparseTensor`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  with ops.name_scope(name, "SparseTranspose", [sp_input]) as name:
    if perm is None:
      if sp_input.shape.rank is not None:
        rank = sp_input.shape.rank
        perm = (rank - 1) - np.arange(0, rank, 1)
      else:
        rank = array_ops.rank(sp_input)
        perm = (rank - 1) - math_ops.range(0, rank, 1)
    indices = sp_input.indices
    transposed_indices = array_ops.transpose(
        array_ops.gather(array_ops.transpose(indices), perm))

    perm_ = tensor_util.constant_value(ops.convert_to_tensor(perm))
    if perm_ is not None and sp_input.get_shape().is_fully_defined():
      old_shape_ = sp_input.get_shape().as_list()
      transposed_dense_shape = list(old_shape_)  # Copy.
      for i, p in enumerate(perm_):
        transposed_dense_shape[i] = old_shape_[p]
    else:
      dense_shape = sp_input.dense_shape
      transposed_dense_shape = array_ops.gather(dense_shape, perm)
    transposed_st = sparse_tensor.SparseTensor(
        transposed_indices, sp_input.values, transposed_dense_shape)
    transposed_st = sparse_reorder(transposed_st)
    return transposed_st


@tf_export("sparse.map_values", v1=[])
@dispatch.add_dispatch_support
def map_values(op, *args, **kwargs):
  """Applies `op` to the `.values` tensor of one or more `SparseTensor`s.

  Replaces any `SparseTensor` in `args` or `kwargs` with its `values` tensor
  (which contains the non-default values for the SparseTensor), and then calls
  `op`.  Returns a `SparseTensor` that is constructed from the input
  `SparseTensor`s' `indices`, `dense_shape`, and the value returned by `op`.

  If the input arguments contain multiple `SparseTensor`s, then they must have
  equal `indices` and dense shapes.

  Examples:

  >>> s = tf.sparse.from_dense([[1, 2, 0],
  ...                           [0, 4, 0],
  ...                           [1, 0, 0]])
  >>> tf.sparse.to_dense(tf.sparse.map_values(tf.ones_like, s)).numpy()
  array([[1, 1, 0],
         [0, 1, 0],
         [1, 0, 0]], dtype=int32)

  >>> tf.sparse.to_dense(tf.sparse.map_values(tf.multiply, s, s)).numpy()
  array([[ 1,  4,  0],
         [ 0, 16,  0],
         [ 1,  0,  0]], dtype=int32)

  >>> tf.sparse.to_dense(tf.sparse.map_values(tf.add, s, 5)).numpy()
  array([[6, 7, 0],
         [0, 9, 0],
         [6, 0, 0]], dtype=int32)

  Note: even though `tf.add(0, 5) != 0`, implicit zeros will remain unchanged.
  However, if the sparse tensor contains any explicit zeros, these will be
  affected by the mapping!
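
  For example (an added doctest illustrating that caveat; the zero at `[0, 0]`
  below is explicit, so it participates in the mapping):

  >>> s0 = tf.sparse.SparseTensor([[0, 0], [0, 1]], [0, 1], [2, 2])
  >>> tf.sparse.to_dense(tf.sparse.map_values(tf.add, s0, 5)).numpy()
  array([[5, 6],
         [0, 0]], dtype=int32)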

  Args:
    op: The operation that should be applied to the SparseTensor `values`. `op`
      is typically an element-wise operation (such as `math_ops.add`), but any
      operation that preserves the shape can be used.
    *args: Arguments for `op`.
    **kwargs: Keyword arguments for `op`.

  Returns:
    A `SparseTensor` whose `indices` and `dense_shape` match the `indices`
    and `dense_shape` of all input `SparseTensor`s.

  Raises:
    ValueError: If `args` contains no `SparseTensor`, or if the `indices`
      or `dense_shape`s of the input `SparseTensor`s are not equal.
  """
  sparse_list = []
  inner_args = _replace_sparse_with_values(args, sparse_list)
  inner_kwargs = _replace_sparse_with_values(kwargs, sparse_list)
  if not sparse_list:
    raise ValueError("No SparseTensor in argument list of map_values")

  with ops.control_dependencies(_assert_sparse_compatible(sparse_list)):
    # Delegate to op, and then compose the result from the transformed values
    # and the known indices/dense shape. Since we ensure that indices and shape
    # are identical, we can just use the first one.
    return sparse_tensor.SparseTensor(sparse_list[0].indices,
                                      op(*inner_args, **inner_kwargs),
                                      sparse_list[0].dense_shape)


def _assert_sparse_compatible(sparse_tensors):
  """Check that all of `sparse_tensors` have same `indices` and `dense_shape`.

  Args:
    sparse_tensors: A list of sparse tensors.

  Returns:
    A list of ops to be used as control dependencies.
  """
  checks = []
  first = sparse_tensors[0]
  for t in sparse_tensors[1:]:
    checks.append(
        check_ops.assert_equal(
            first.dense_shape, t.dense_shape, message="Mismatched shapes!"))
    checks.append(
        check_ops.assert_equal(
            first.indices, t.indices, message="Mismatched indices!"))
  return checks


def _replace_sparse_with_values(value, sparse_list):
2950  """Replace `SparseTensor`s with their values in `value`
2951
2952  Each `SparseTensor` in `value` is replaced by its `values` tensor, and
2953  collects all `SparseTensor`s in `sparse_list`.
2954
2955  Args:
2956    value: A structure of `Tensor`s and `SparseTensor`s
2957    sparse_list: A list. Output parameter that collects all `SparseTensor`s in
2958      `value`.
2959
2960  Returns:
2961    `value` with each SparseTensor replaced by its `.value` attribute.
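
  For example (an illustrative sketch; `st` stands for a `SparseTensor` and
  `t` for a dense `Tensor`):

      sparse_list = []
      flat = _replace_sparse_with_values([st, t], sparse_list)
      # flat == [st.values, t]; sparse_list == [st]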
2962  """
2963  flat_vals = nest.flatten(value, expand_composites=False)
2964  new_vals = []
2965  for v in flat_vals:
2966    if isinstance(v, sparse_tensor.SparseTensor):
2967      sparse_list.append(v)
2968      new_vals.append(v.values)
2969    else:
2970      new_vals.append(v)
2971  return nest.pack_sequence_as(value, new_vals, expand_composites=False)
2972
2973
2974def _add_sparse_to_tensors_map(sp_input,
2975                               container=None,
2976                               shared_name=None,
2977                               name=None):
2978  """Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.
2979
2980  Args:
2981    sp_input: The input `SparseTensor`.
2982    container: The container for the underlying `SparseTensorsMap` (optional).
2983    shared_name: The shared name for the underlying `SparseTensorsMap`
2984      (optional, defaults to the name of the newly created op).
2985    name: A name prefix for the returned tensors (optional).
2986
2987  Returns:
    A string 1-vector (1-D `Tensor`), with the single element representing a
    unique handle to a `SparseTensor` stored by the `SparseTensorsMap`
    underlying this op.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  return gen_sparse_ops.add_sparse_to_tensors_map(
      sp_input.indices,
      sp_input.values,
      sp_input.dense_shape,
      container=container,
      shared_name=shared_name,
      name=name)


def _add_many_sparse_to_tensors_map(sp_input,
                                    container=None,
                                    shared_name=None,
                                    name=None):
  """Add a minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.

  The `SparseTensor` must have rank `R` greater than 1, and the first dimension
  is treated as the minibatch dimension.  Elements of the `SparseTensor`
  must be sorted in increasing order of this first dimension.  The serialized
  `SparseTensor` objects going into each row of the output `Tensor` will have
  rank `R-1`.

  The minibatch size `N` is extracted from `sparse_shape[0]`.

  Args:
    sp_input: The input rank `R` `SparseTensor`.
    container: The container for the underlying `SparseTensorsMap` (optional).
    shared_name: The shared name for the underlying `SparseTensorsMap`
      (optional, defaults to the name of the newly created op).
    name: A name prefix for the returned tensors (optional).

  Returns:
    A string matrix (2-D `Tensor`) with `N` rows and `1` column.
    Each row represents a unique handle to a `SparseTensor` stored by
    the `SparseTensorsMap` underlying this op.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  return gen_sparse_ops.add_many_sparse_to_tensors_map(
      sp_input.indices,
      sp_input.values,
      sp_input.dense_shape,
      container=container,
      shared_name=shared_name,
      name=name)


def _take_many_sparse_from_tensors_map(sparse_map_op,
                                       sparse_handles,
                                       rank=None,
                                       name=None):
  """Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.

  The input `sparse_handles` must be a string matrix of shape `[N, 1]` where
  `N` is the minibatch size and the rows correspond to packed outputs of
  `add_sparse_to_tensors_map`.  The ranks of the original `SparseTensor` objects
  must all match.  When the final `SparseTensor` is created, it has rank one
  higher than the ranks of the incoming `SparseTensor` objects (they have been
  concatenated along a new row dimension).

  The output `SparseTensor` object's shape values for all dimensions but the
  first are the max across the input `SparseTensor` objects' shape values
  for the corresponding dimensions.  Its first shape value is `N`, the minibatch
  size.

  The input `SparseTensor` objects' indices are assumed ordered in
  standard lexicographic order.  If this is not the case, after this
  step run `sparse.reorder` to restore index ordering.

  For example, if the serialized input is a `[2, 3]` matrix representing two
  original `SparseTensor` objects:

      index = [ 0]
              [10]
              [20]
      values = [1, 2, 3]
      shape = [50]

  and

      index = [ 2]
              [10]
      values = [4, 5]
      shape = [30]

  then the final deserialized `SparseTensor` will be:

      index = [0  0]
              [0 10]
              [0 20]
              [1  2]
              [1 10]
      values = [1, 2, 3, 4, 5]
      shape = [2 50]

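  A typical pairing with the writers above, as a sketch (here `sp_input`
  stands for any rank-`R` minibatch `SparseTensor`):

      handles = _add_many_sparse_to_tensors_map(sp_input)
      sp_roundtrip = _take_many_sparse_from_tensors_map(
          sparse_map_op=handles.op, sparse_handles=handles)
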
  Args:
    sparse_map_op: The `Operation` that created the original handles.
      Usually this is, e.g., `add_sparse_to_tensors_map(...).op`.
    sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`.
      The serialized and packed `SparseTensor` objects.
    rank: (optional) Python int, the rank of the `SparseTensor` objects.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `SparseTensor` representing the deserialized `SparseTensor`s,
    concatenated along the `SparseTensor`s' first dimension.

    All of the serialized `SparseTensor`s must have had the same rank and type.
  """
  if not isinstance(sparse_map_op, ops.Operation):
    raise TypeError("sparse_map_op must be an Operation")
  if sparse_map_op.type not in ("AddSparseToTensorsMap",
                                "AddManySparseToTensorsMap"):
    raise TypeError(
        "sparse_map_op must be one of AddSparseToTensorsMap or "
        "AddManySparseToTensorsMap. Instead, found `%s`." %
        sparse_map_op.type)
  with ops.colocate_with(sparse_map_op):
    shared_name = sparse_map_op.get_attr("shared_name") or sparse_map_op.name
    output_indices, output_values, output_shape = (
        gen_sparse_ops.take_many_sparse_from_tensors_map(
            sparse_handles,
            dtype=sparse_map_op.get_attr("T"),
            container=sparse_map_op.get_attr("container"),
            shared_name=shared_name,
            name=name))

  # Feed rank data back in, if available.
  output_indices.set_shape([None, rank])
  output_shape.set_shape([rank])

  return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)


class _UnaryMapValueDispatcher(dispatch.OpDispatcher):
  """OpDispatcher for unary ops; maps the base function across sparse values."""

  def __init__(self, original_func):
    self._original_func = original_func
    func_name = get_canonical_name_for_symbol(original_func)
    arg_names = tf_inspect.getfullargspec(original_func)[0]
    self._x = arg_names[0]
    original_func.__doc__ = (
        original_func.__doc__.rstrip() + "\n\n" +
        ("    If `{x}` is a `SparseTensor`, returns\n"
         "    `SparseTensor({x}.indices, tf.{func}({x}.values, ...), "
         "{x}.dense_shape)`").format(x=self._x, func=func_name))

  def handle(self, args, kwargs):
    if args:
      x, args = args[0], args[1:]
    else:
      kwargs = kwargs.copy()
      x = kwargs.pop(self._x, None)
    if isinstance(x, sparse_tensor.SparseTensor):
      return sparse_tensor.SparseTensor(
          indices=x.indices,
          values=self._original_func(x.values, *args, **kwargs),
          dense_shape=x.dense_shape)
    else:
      return self.NOT_SUPPORTED


_UNARY_OPS = [
    # TODO(b/120307967) Add dispatchers for additional TensorFlow ops.
    math_ops.abs,
    math_ops.negative,
    math_ops.sign,
    math_ops.square,
    math_ops.sqrt,
    math_ops.erf,
    math_ops.tanh,
    # TODO(b/157272291) Add dispatchers for rest of special functions.
    special_math_ops.bessel_i0e,
    special_math_ops.bessel_i1e,
]
for unary_op in _UNARY_OPS:
  _UnaryMapValueDispatcher(unary_op).register(unary_op)

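# With the dispatchers registered above, calling one of these unary ops on a
# `SparseTensor` applies the underlying function to `.values` while keeping
# `indices` and `dense_shape` intact. An illustrative sketch (added; not part
# of the original module):
#
#   st = sparse_tensor.SparseTensor(
#       indices=[[0, 0], [1, 1]], values=[-1.0, 2.0], dense_shape=[2, 2])
#   math_ops.abs(st)
#   # -> SparseTensor(indices=[[0, 0], [1, 1]], values=[1.0, 2.0],
#   #                 dense_shape=[2, 2])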