# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Uniform distribution."""

import importlib

import numpy as np

from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import uniform as uniform_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging


def try_import(name):  # pylint: disable=invalid-name
  module = None
  try:
    module = importlib.import_module(name)
  except ImportError as e:
    tf_logging.warning("Could not import %s: %s" % (name, str(e)))
  return module

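# scipy.stats is an optional dependency; the tests below that compare against
# it return early when the import fails.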
stats = try_import("scipy.stats")


class UniformTest(test.TestCase):

  @test_util.run_in_graph_and_eager_modes
  def testUniformRange(self):
    a = 3.0
    b = 10.0
    uniform = uniform_lib.Uniform(low=a, high=b)
    self.assertAllClose(a, self.evaluate(uniform.low))
    self.assertAllClose(b, self.evaluate(uniform.high))
    self.assertAllClose(b - a, self.evaluate(uniform.range()))

  @test_util.run_in_graph_and_eager_modes
  def testUniformPDF(self):
    a = constant_op.constant([-3.0] * 5 + [15.0])
    b = constant_op.constant([11.0] * 5 + [20.0])
    uniform = uniform_lib.Uniform(low=a, high=b)

    a_v = -3.0
    b_v = 11.0
    x = np.array([-10.5, 4.0, 0.0, 10.99, 11.3, 17.0], dtype=np.float32)

    def _expected_pdf():
      pdf = np.zeros_like(x) + 1.0 / (b_v - a_v)
      pdf[x > b_v] = 0.0
      pdf[x < a_v] = 0.0
      pdf[5] = 1.0 / (20.0 - 15.0)
      return pdf

    expected_pdf = _expected_pdf()

    pdf = uniform.prob(x)
    self.assertAllClose(expected_pdf, self.evaluate(pdf))

    log_pdf = uniform.log_prob(x)
    self.assertAllClose(np.log(expected_pdf), self.evaluate(log_pdf))

  @test_util.run_in_graph_and_eager_modes
  def testUniformShape(self):
    a = constant_op.constant([-3.0] * 5)
    b = constant_op.constant(11.0)
    uniform = uniform_lib.Uniform(low=a, high=b)

    self.assertEqual(self.evaluate(uniform.batch_shape_tensor()), (5,))
    self.assertEqual(uniform.batch_shape, tensor_shape.TensorShape([5]))
    self.assertAllEqual(self.evaluate(uniform.event_shape_tensor()), [])
    self.assertEqual(uniform.event_shape, tensor_shape.TensorShape([]))

  @test_util.run_in_graph_and_eager_modes
  def testUniformPDFWithScalarEndpoint(self):
    a = constant_op.constant([0.0, 5.0])
    b = constant_op.constant(10.0)
    uniform = uniform_lib.Uniform(low=a, high=b)

    x = np.array([0.0, 8.0], dtype=np.float32)
    expected_pdf = np.array([1.0 / (10.0 - 0.0), 1.0 / (10.0 - 5.0)])

    pdf = uniform.prob(x)
    self.assertAllClose(expected_pdf, self.evaluate(pdf))

  @test_util.run_in_graph_and_eager_modes
  def testUniformCDF(self):
    batch_size = 6
    a = constant_op.constant([1.0] * batch_size)
    b = constant_op.constant([11.0] * batch_size)
    a_v = 1.0
    b_v = 11.0
    x = np.array([-2.5, 2.5, 4.0, 0.0, 10.99, 12.0], dtype=np.float32)

    uniform = uniform_lib.Uniform(low=a, high=b)

    def _expected_cdf():
      cdf = (x - a_v) / (b_v - a_v)
      cdf[x >= b_v] = 1
      cdf[x < a_v] = 0
      return cdf

    cdf = uniform.cdf(x)
    self.assertAllClose(_expected_cdf(), self.evaluate(cdf))

    log_cdf = uniform.log_cdf(x)
    self.assertAllClose(np.log(_expected_cdf()), self.evaluate(log_cdf))

  @test_util.run_in_graph_and_eager_modes
  def testUniformEntropy(self):
    a_v = np.array([1.0, 1.0, 1.0])
    b_v = np.array([[1.5, 2.0, 3.0]])
    uniform = uniform_lib.Uniform(low=a_v, high=b_v)

    expected_entropy = np.log(b_v - a_v)
    self.assertAllClose(expected_entropy, self.evaluate(uniform.entropy()))

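  # With validate_args=True, constructing a Uniform whose high is not strictly
  # greater than its low should raise an InvalidArgumentError.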
  @test_util.run_in_graph_and_eager_modes
  def testUniformAssertMaxGtMin(self):
    a_v = np.array([1.0, 1.0, 1.0], dtype=np.float32)
    b_v = np.array([1.0, 2.0, 3.0], dtype=np.float32)

    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
                                             "x < y"):
      uniform = uniform_lib.Uniform(low=a_v, high=b_v, validate_args=True)
      self.evaluate(uniform.low)

  @test_util.run_in_graph_and_eager_modes
  def testUniformSample(self):
    a = constant_op.constant([3.0, 4.0])
    b = constant_op.constant(13.0)
    a1_v = 3.0
    a2_v = 4.0
    b_v = 13.0
    n = constant_op.constant(100000)
    uniform = uniform_lib.Uniform(low=a, high=b)

    samples = uniform.sample(n, seed=137)
    sample_values = self.evaluate(samples)
    self.assertEqual(sample_values.shape, (100000, 2))
    self.assertAllClose(
        sample_values[::, 0].mean(), (b_v + a1_v) / 2, atol=1e-1, rtol=0.)
    self.assertAllClose(
        sample_values[::, 1].mean(), (b_v + a2_v) / 2, atol=1e-1, rtol=0.)
    self.assertFalse(
        np.any(sample_values[::, 0] < a1_v) or np.any(sample_values >= b_v))
    self.assertFalse(
        np.any(sample_values[::, 1] < a2_v) or np.any(sample_values >= b_v))

  @test_util.run_in_graph_and_eager_modes
  def _testUniformSampleMultiDimensional(self):
    # DISABLED: Please enable this test once b/issues/30149644 is resolved.
    batch_size = 2
    a_v = [3.0, 22.0]
    b_v = [13.0, 35.0]
    a = constant_op.constant([a_v] * batch_size)
    b = constant_op.constant([b_v] * batch_size)

    uniform = uniform_lib.Uniform(low=a, high=b)

    n_v = 100000
    n = constant_op.constant(n_v)
    samples = uniform.sample(n)
    self.assertEqual(samples.get_shape(), (n_v, batch_size, 2))

    sample_values = self.evaluate(samples)

    self.assertFalse(
        np.any(sample_values[:, 0, 0] < a_v[0]) or
        np.any(sample_values[:, 0, 0] >= b_v[0]))
    self.assertFalse(
        np.any(sample_values[:, 0, 1] < a_v[1]) or
        np.any(sample_values[:, 0, 1] >= b_v[1]))

    self.assertAllClose(
        sample_values[:, 0, 0].mean(), (a_v[0] + b_v[0]) / 2, atol=1e-2)
    self.assertAllClose(
        sample_values[:, 0, 1].mean(), (a_v[1] + b_v[1]) / 2, atol=1e-2)

  @test_util.run_in_graph_and_eager_modes
  def testUniformMean(self):
    a = 10.0
    b = 100.0
    uniform = uniform_lib.Uniform(low=a, high=b)
    if not stats:
      return
    s_uniform = stats.uniform(loc=a, scale=b - a)
    self.assertAllClose(self.evaluate(uniform.mean()), s_uniform.mean())

  @test_util.run_in_graph_and_eager_modes
  def testUniformVariance(self):
    a = 10.0
    b = 100.0
    uniform = uniform_lib.Uniform(low=a, high=b)
    if not stats:
      return
    s_uniform = stats.uniform(loc=a, scale=b - a)
    self.assertAllClose(self.evaluate(uniform.variance()), s_uniform.var())

  @test_util.run_in_graph_and_eager_modes
  def testUniformStd(self):
    a = 10.0
    b = 100.0
    uniform = uniform_lib.Uniform(low=a, high=b)
    if not stats:
      return
    s_uniform = stats.uniform(loc=a, scale=b - a)
    self.assertAllClose(self.evaluate(uniform.stddev()), s_uniform.std())

  @test_util.run_in_graph_and_eager_modes
  def testUniformNans(self):
    a = 10.0
    b = [11.0, 100.0]
    uniform = uniform_lib.Uniform(low=a, high=b)

    no_nans = constant_op.constant(1.0)
    nans = constant_op.constant(0.0) / constant_op.constant(0.0)
    self.assertTrue(self.evaluate(math_ops.is_nan(nans)))
    with_nans = array_ops.stack([no_nans, nans])

    pdf = uniform.prob(with_nans)

    is_nan = self.evaluate(math_ops.is_nan(pdf))
    self.assertFalse(is_nan[0])
    self.assertTrue(is_nan[1])

  @test_util.run_in_graph_and_eager_modes
  def testUniformSamplePdf(self):
    a = 10.0
    b = [11.0, 100.0]
    uniform = uniform_lib.Uniform(a, b)
    self.assertTrue(
        self.evaluate(
            math_ops.reduce_all(uniform.prob(uniform.sample(10)) > 0)))

  @test_util.run_in_graph_and_eager_modes
  def testUniformBroadcasting(self):
    a = 10.0
    b = [11.0, 20.0]
    uniform = uniform_lib.Uniform(a, b)

    pdf = uniform.prob([[10.5, 11.5], [9.0, 19.0], [10.5, 21.0]])
    expected_pdf = np.array([[1.0, 0.1], [0.0, 0.1], [1.0, 0.0]])
    self.assertAllClose(expected_pdf, self.evaluate(pdf))

  @test_util.run_in_graph_and_eager_modes
  def testUniformSampleWithShape(self):
    a = 10.0
    b = [11.0, 20.0]
    uniform = uniform_lib.Uniform(a, b)

    pdf = uniform.prob(uniform.sample((2, 3)))
    # pylint: disable=bad-continuation
    expected_pdf = [
        [[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
        [[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
    ]
    # pylint: enable=bad-continuation
    self.assertAllClose(expected_pdf, self.evaluate(pdf))

    pdf = uniform.prob(uniform.sample())
    expected_pdf = [1.0, 0.1]
    self.assertAllClose(expected_pdf, self.evaluate(pdf))

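  # Uniform sampling is reparameterized, so gradients of samples with respect
  # to `low` and `high` should be defined.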
  def testFullyReparameterized(self):
    a = constant_op.constant(0.1)
    b = constant_op.constant(0.8)
    with backprop.GradientTape() as tape:
      tape.watch(a)
      tape.watch(b)
      uniform = uniform_lib.Uniform(a, b)
      samples = uniform.sample(100)
    grad_a, grad_b = tape.gradient(samples, [a, b])
    self.assertIsNotNone(grad_a)
    self.assertIsNotNone(grad_b)

  # Eager doesn't pass due to a type mismatch in one of the ops.
  def testUniformFloat64(self):
    uniform = uniform_lib.Uniform(
        low=np.float64(0.), high=np.float64(1.))

    self.assertAllClose(
        [1., 1.],
        self.evaluate(uniform.prob(np.array([0.5, 0.6], dtype=np.float64))))

    self.assertAllClose(
        [0.5, 0.6],
        self.evaluate(uniform.cdf(np.array([0.5, 0.6], dtype=np.float64))))

    self.assertAllClose(0.5, self.evaluate(uniform.mean()))
    self.assertAllClose(1 / 12., self.evaluate(uniform.variance()))
    self.assertAllClose(0., self.evaluate(uniform.entropy()))


if __name__ == "__main__":
  test.main()