# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
15"""Miscellaneous utilities that don't fit anywhere else."""
16
17from __future__ import absolute_import
18from __future__ import division
19from __future__ import print_function
20
21from tensorflow.python.framework import ops
22from tensorflow.python.ops import array_ops
23from tensorflow.python.ops import gen_math_ops
24from tensorflow.python.ops import math_ops
25

def alias_tensors(*args):
  """Wraps any Tensor arguments with an identity op.

  Any other argument, including Variables, is returned unchanged.

  Args:
    *args: Any arguments. Must contain at least one element.

  Returns:
    Same as *args, with Tensor instances replaced as described; when more than
    one argument is given, the aliased values are returned as a generator.

  Raises:
    ValueError: If args contains no elements.
  """

  def alias_if_tensor(a):
    return array_ops.identity(a) if isinstance(a, ops.Tensor) else a

  # TODO(mdan): Recurse into containers?
  # TODO(mdan): Anything we can do about variables? Fake a scope reuse?
  if len(args) > 1:
    return (alias_if_tensor(a) for a in args)
  elif len(args) == 1:
    return alias_if_tensor(args[0])

  raise ValueError('at least one argument required')


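# Usage sketch: a hypothetical, illustrative helper (nothing in this module
# calls it). Tensor arguments come back wrapped in identity ops, while any
# other value is returned unchanged, so the aliased results unpack directly.
def _alias_tensors_usage_sketch():
  a = ops.convert_to_tensor(1)
  b = 'not a tensor'
  # `a` is aliased through an identity op; the string passes through untouched.
  a_alias, b_unchanged = alias_tensors(a, b)
  return a_alias, b_unchanged

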
def get_range_len(start, limit, delta):
  """Computes the number of elements in range(start, limit, delta) as a Tensor.

  Equivalent to max(ceil((limit - start) / delta), 0), the length of the
  corresponding Python range.
  """
  dist = ops.convert_to_tensor(limit - start)
  unadjusted_len = dist // delta
  # Round up: add one whenever delta does not evenly divide the distance.
  adjustment = math_ops.cast(
      gen_math_ops.not_equal(dist % delta,
                             array_ops.zeros_like(unadjusted_len)), dist.dtype)
  final_len = unadjusted_len + adjustment
  # Clamp at zero so empty ranges do not yield negative lengths.
  return gen_math_ops.maximum(final_len, array_ops.zeros_like(final_len))
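

# Usage sketch: a hypothetical, illustrative helper (nothing in this module
# calls it). The result matches the length of the corresponding Python range,
# e.g. len(range(0, 10, 3)) == 4, but is computed with TF ops.
def _get_range_len_usage_sketch():
  start = ops.convert_to_tensor(0)
  limit = ops.convert_to_tensor(10)
  delta = ops.convert_to_tensor(3)
  # Evaluates to 4: ceil((10 - 0) / 3), clamped at zero for empty ranges.
  return get_range_len(start, limit, delta)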