/external/tensorflow/tensorflow/python/ops/ |
D | linalg_ops_impl.py |
      35  batch_shape=None,  (argument)
      43  name, default_name='eye', values=[num_rows, num_columns, batch_shape]):
      45  batch_shape = [] if batch_shape is None else batch_shape
      62  if isinstance(batch_shape, ops.Tensor) or isinstance(diag_size, ops.Tensor):
      63  batch_shape = ops.convert_to_tensor(
      64  batch_shape, name='shape', dtype=dtypes.int32)
      65  diag_shape = array_ops.concat((batch_shape, [diag_size]), axis=0)
      67  shape = array_ops.concat((batch_shape, [num_rows, num_columns]), axis=0)
      70  batch_shape = list(batch_shape)
      71  diag_shape = batch_shape + [diag_size]
      [all …]
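The snippets above are the batched construction path of tf.eye. A minimal usage sketch of the public API, assuming a recent TensorFlow (shapes noted in comments):

    import tensorflow as tf

    # batch_shape prepends leading dimensions to the (possibly rectangular) identity.
    plain = tf.eye(num_rows=2, num_columns=3)         # shape (2, 3)
    batched = tf.eye(num_rows=2, batch_shape=[4, 3])  # shape (4, 3, 2, 2)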
|
/external/tensorflow/tensorflow/contrib/distributions/python/ops/ |
D | independent.py |
      168  batch_shape = self.distribution.batch_shape_tensor()
      170  batch_shape.shape.with_rank_at_least(1)[0])
      173  else array_ops.shape(batch_shape)[0])
      174  return batch_shape[:batch_ndims - self.reinterpreted_batch_ndims]
      177  batch_shape = self.distribution.batch_shape
      179  or batch_shape.ndims is None):
      181  d = batch_shape.ndims - self._static_reinterpreted_batch_ndims
      182  return batch_shape[:d]
      186  batch_shape = self.distribution.batch_shape_tensor()
      188  batch_shape.shape.with_rank_at_least(1)[0])
      [all …]
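These lines implement Independent's batch-shape bookkeeping: the rightmost reinterpreted_batch_ndims batch dimensions of the wrapped distribution are folded into the event. A sketch using TensorFlow Probability, the successor of this contrib module:

    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions

    base = tfd.Normal(loc=tf.zeros([3, 2]), scale=tf.ones([3, 2]))  # batch_shape (3, 2)
    ind = tfd.Independent(base, reinterpreted_batch_ndims=1)
    print(ind.batch_shape)  # (3,)  last batch dim absorbed into the event
    print(ind.event_shape)  # (2,)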
|
D | batch_reshape.py |
      86  batch_shape,  (argument)
      117  with ops.name_scope(name, values=[batch_shape]) as name:
      120  batch_shape, dtype=dtypes.int32, name="batch_shape")
      122  batch_shape, batch_shape_static, runtime_assertions = calculate_reshape(
      126  self._batch_shape_ = batch_shape
      225  if self.batch_shape.ndims is None else self.batch_shape.ndims)
      255  self.batch_shape.ndims is not None):
      256  new_shape = static_sample_shape.concatenate(self.batch_shape)
      274  if (self.batch_shape.ndims is not None and
      280  self.batch_shape.concatenate(event_shape))
      [all …]
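BatchReshape reinterprets an existing batch shape without changing the underlying samples. A sketch with TensorFlow Probability, whose BatchReshape mirrors the contrib class indexed here:

    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions

    base = tfd.Normal(loc=tf.zeros([6]), scale=tf.ones([6]))  # batch_shape (6,)
    reshaped = tfd.BatchReshape(base, batch_shape=[2, 3])
    print(reshaped.batch_shape)  # (2, 3)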
|
/external/tensorflow/tensorflow/python/kernel_tests/linalg/ |
D | linear_operator_zeros_test.py |
      59  batch_shape = shape[:-2]
      63  num_rows, batch_shape=batch_shape, dtype=dtype)
      104  linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=2)
      108  linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=[2.])
      112  linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=[-2])
      140  batch_shape = array_ops.placeholder(dtypes.int32)
      142  num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
      144  operator.to_dense().eval(feed_dict={batch_shape: 2})
      149  batch_shape = array_ops.placeholder(dtypes.int32)
      151  num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
      [all …]
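The constructor checks exercised above boil down to: batch_shape must be a 1-D, non-negative integer vector (statically, or at runtime when assert_proper_shapes=True). A sketch with the public tf.linalg API:

    import tensorflow as tf

    operator = tf.linalg.LinearOperatorZeros(
        num_rows=2, batch_shape=[3], dtype=tf.float32)
    print(operator.shape)        # (3, 2, 2)
    zeros = operator.to_dense()  # a [3, 2, 2] tensor of zeros

    # The failing cases above are rejected at construction time:
    # batch_shape=2 (not 1-D), batch_shape=[2.] (not integer), batch_shape=[-2] (negative).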
|
D | linear_operator_identity_test.py |
      55  batch_shape = shape[:-2]
      59  num_rows, batch_shape=batch_shape, dtype=dtype)
      60  mat = linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=dtype)
      106  linalg_lib.LinearOperatorIdentity(num_rows=2, batch_shape=2)
      110  linalg_lib.LinearOperatorIdentity(num_rows=2, batch_shape=[2.])
      114  linalg_lib.LinearOperatorIdentity(num_rows=2, batch_shape=[-2])
      137  batch_shape = array_ops.placeholder(dtypes.int32)
      139  num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
      141  operator.to_dense().eval(feed_dict={batch_shape: 2})
      146  batch_shape = array_ops.placeholder(dtypes.int32)
      [all …]
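LinearOperatorIdentity applies the same batch_shape validation and, as line 60 shows, is compared against linalg_ops.eye for its dense form. A usage sketch:

    import tensorflow as tf

    operator = tf.linalg.LinearOperatorIdentity(
        num_rows=2, batch_shape=[3], dtype=tf.float32)
    print(operator.shape)        # (3, 2, 2)
    dense = operator.to_dense()  # same values as tf.eye(2, batch_shape=[3])
    x = tf.random.normal([3, 2, 4])
    y = operator.matmul(x)       # identity operator: y equals x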
|
/external/tensorflow/tensorflow/contrib/distributions/python/kernel_tests/ |
D | quantized_distribution_test.py |
      133  batch_shape = (5, 5)
      141  low=array_ops.zeros(batch_shape, dtype=dtypes.float32),
      142  high=10 * array_ops.ones(batch_shape, dtype=dtypes.float32))
      147  x = rng.randint(-3, 13, size=batch_shape).astype(np.float32)
      151  expected_pmf = (1 / 10) * np.ones(batch_shape)
      167  batch_shape = (2,)
      171  batch_shape, dtype=dtypes.float32),
      173  batch_shape, dtype=dtypes.float32))
      243  batch_shape = (3, 3)
      244  mu = rng.randn(*batch_shape)
      [all …]
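The pmf being checked here: quantizing a Uniform(0, 10) spreads probability 1/10 over each unit-wide bucket. A hedged sketch with TensorFlow Probability (parameter values are illustrative, not the exact test fixtures):

    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions

    batch_shape = (5, 5)
    uniform = tfd.Uniform(low=tf.zeros(batch_shape), high=10. * tf.ones(batch_shape))
    quantized = tfd.QuantizedDistribution(distribution=uniform)
    print(quantized.batch_shape)                     # (5, 5)
    pmf = quantized.prob(3. * tf.ones(batch_shape))  # ~0.1 everywhere, i.e. 1/10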
|
D | mixture_test.py |
      110  def make_univariate_mixture(batch_shape, num_components, use_static_graph):  (argument)
      111  batch_shape = ops.convert_to_tensor(batch_shape, dtypes.int32)
      113  array_ops.concat((batch_shape, [num_components]), axis=0),
      117  loc=random_ops.random_normal(batch_shape),
      118  scale=10 * random_ops.random_uniform(batch_shape))
      125  def make_multivariate_mixture(batch_shape, num_components, event_shape,  (argument)
      128  batch_shape_tensor = batch_shape
      134  tensor_shape.TensorShape(batch_shape).concatenate(num_components))
      136  tensor_shape.TensorShape(batch_shape).concatenate(event_shape))
      156  for batch_shape in ([], [1], [2, 3, 4]):
      [all …]
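A small univariate mixture in the spirit of make_univariate_mixture above; batch_shape comes from the parameters of the Categorical and of the components. A sketch using TensorFlow Probability:

    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions

    batch_shape = [4]
    mix = tfd.Mixture(
        cat=tfd.Categorical(logits=tf.zeros(batch_shape + [2])),  # 2 components
        components=[
            tfd.Normal(loc=tf.zeros(batch_shape), scale=tf.ones(batch_shape)),
            tfd.Normal(loc=2. * tf.ones(batch_shape), scale=tf.ones(batch_shape)),
        ])
    print(mix.batch_shape)  # (4,)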
|
D | relaxed_onehot_categorical_test.py |
      32  def make_relaxed_categorical(batch_shape, num_classes, dtype=dtypes.float32):  (argument)
      34  list(batch_shape) + [num_classes], -10, 10, dtype=dtype) - 50.
      36  list(batch_shape), 0.1, 10, dtype=dtypes.float32)
      140  for batch_shape in ([], [1], [2, 3, 4]):
      141  dist = make_relaxed_categorical(batch_shape, 10)
      142  self.assertAllEqual(batch_shape, dist.batch_shape.as_list())
      143  self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
      147  for batch_shape in ([], [1], [2, 3, 4]):
      149  batch_shape, constant_op.constant(10, dtype=dtypes.int32))
      150  self.assertAllEqual(len(batch_shape), dist.batch_shape.ndims)
      [all …]
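The assertions above verify that the relaxed categorical's batch_shape is the logits shape minus the trailing class axis. A sketch:

    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions

    logits = tf.zeros([2, 3, 4, 10])  # 10 classes
    dist = tfd.RelaxedOneHotCategorical(temperature=0.5, logits=logits)
    print(dist.batch_shape)  # (2, 3, 4)
    print(dist.event_shape)  # (10,)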
|
D | cauchy_test.py |
      99  self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
      100  self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
      105  self.assertAllEqual(cauchy.batch_shape, pdf.shape)
      106  self.assertAllEqual(cauchy.batch_shape, pdf.eval().shape)
      129  self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
      130  self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
      137  self.assertAllEqual(cauchy.batch_shape, pdf.shape)
      138  self.assertAllEqual(cauchy.batch_shape, pdf_values.shape)
      157  self.assertAllEqual(cauchy.batch_shape, cdf.shape)
      158  self.assertAllEqual(cauchy.batch_shape, cdf.eval().shape)
      [all …]
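All of these assertions check one invariant: when the input broadcasts against the parameters, log_prob, prob and cdf come back with exactly the distribution's batch_shape. A sketch:

    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions

    cauchy = tfd.Cauchy(loc=tf.zeros([5]), scale=tf.ones([5]))
    log_pdf = cauchy.log_prob(2.5)  # scalar input broadcast against the batch
    print(cauchy.batch_shape)       # (5,)
    print(log_pdf.shape)            # (5,)  matches batch_shape, as asserted above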
|
D | mvn_full_covariance_test.py |
      114  self.assertEqual((3, 5), tuple(mvn.batch_shape.as_list()))
      120  def _random_mu_and_sigma(self, batch_shape, event_shape):  (argument)
      122  mat_shape = batch_shape + event_shape + event_shape
      128  mu_shape = batch_shape + event_shape
      134  batch_shape = [2]
      137  mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
      138  mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)
      149  self.assertEqual(batch_shape, kl.get_shape())
      160  batch_shape = [2]
      163  mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
      [all …]
|
D | transformed_distribution_test.py |
      310  batch_shape=(),  (argument)
      322  feed_dict = {batch_shape_pl: np.array(batch_shape, dtype=np.int32),
      328  batch_shape=batch_shape_pl,
      336  batch_shape=batch_shape,
      355  self.assertAllEqual([2], fake_mvn_static.batch_shape)
      360  fake_mvn_dynamic.batch_shape)
      419  batch_shape=[2],
      428  batch_shape=[2],
      437  batch_shape=[2],
      454  batch_shape=[2],
      [all …]
|
D | mvn_tril_test.py |
      218  self.assertEqual((3, 5), tuple(mvn.batch_shape.as_list()))
      224  def _random_mu_and_sigma(self, batch_shape, event_shape):  (argument)
      226  mat_shape = batch_shape + event_shape + event_shape
      232  mu_shape = batch_shape + event_shape
      238  batch_shape = []
      241  mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
      242  mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)
      253  self.assertEqual(batch_shape, kl.get_shape())
      260  batch_shape = [2]
      263  mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
      [all …]
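The KL checks above rely on kl_divergence being computed per batch member, so the result carries the distributions' batch_shape. A sketch with TensorFlow Probability's MultivariateNormalTriL:

    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions

    batch_shape, event_shape = [2], [3]
    mvn_a = tfd.MultivariateNormalTriL(
        loc=tf.zeros(batch_shape + event_shape),
        scale_tril=tf.eye(3, batch_shape=batch_shape))
    mvn_b = tfd.MultivariateNormalTriL(
        loc=tf.ones(batch_shape + event_shape),
        scale_tril=2. * tf.eye(3, batch_shape=batch_shape))
    kl = tfd.kl_divergence(mvn_a, mvn_b)
    print(kl.shape)  # (2,)  one KL value per batch member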
|
D | autoregressive_test.py |
      54  batch_shape = []
      57  batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0)
      68  batch_shape = np.int32([])
      71  batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0)
      84  batch_shape=batch_shape,
      87  [sample_shape, batch_shape, [event_size]], axis=0)
|
D | batch_reshape_test.py |
      54  batch_shape=new_batch_shape_ph,
      66  batch_shape = reshape_wishart.batch_shape_tensor()
      86  batch_shape,
      100  self.assertAllEqual(new_batch_shape, reshape_wishart.batch_shape)
      181  batch_shape=new_batch_shape_ph,
      192  batch_shape = reshape_normal.batch_shape_tensor()
      212  batch_shape,
      225  self.assertAllEqual(new_batch_shape, reshape_normal.batch_shape)
      302  batch_shape=new_batch_shape_ph,
      313  batch_shape = reshape_mvn.batch_shape_tensor()
      [all …]
|
/external/tensorflow/tensorflow/core/ops/ |
D | linalg_ops.cc |
      37  ShapeHandle batch_shape;  (local, in MakeBatchSquareMatrix())
      38  TF_RETURN_IF_ERROR(c->Subshape(s, 0, -2, &batch_shape));  (in MakeBatchSquareMatrix())
      39  TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(d, d), out));  (in MakeBatchSquareMatrix())
      95  ShapeHandle batch_shape;  (local, in SelfAdjointEigV2ShapeFn())
      96  TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &batch_shape));  (in SelfAdjointEigV2ShapeFn())
      98  TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Vector(n), &e_shape));  (in SelfAdjointEigV2ShapeFn())
      104  TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(n, n), &v_shape));  (in SelfAdjointEigV2ShapeFn())
      122  ShapeHandle batch_shape;  (local, in LuShapeFn())
      123  TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &batch_shape));  (in LuShapeFn())
      128  TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(n, n), &lu_shape));  (in LuShapeFn())
      [all …]
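These shape functions split the input into batch_shape + [n, n] and rebuild the output shapes from the batch part. The same contract is visible from Python through tf.linalg.eigh, the front end of SelfAdjointEigV2 (a sketch, kept in Python to match the other examples here):

    import tensorflow as tf

    matrices = tf.eye(3, batch_shape=[2])  # shape (2, 3, 3)
    e, v = tf.linalg.eigh(matrices)
    print(e.shape)  # (2, 3)     batch_shape + [n]
    print(v.shape)  # (2, 3, 3)  batch_shape + [n, n]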
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | linalg_ops_test.py |
      154  batch_shape = (2, 3)
      157  linalg_ops.eye(num_rows=2, batch_shape=batch_shape).shape)
      161  num_rows=2, num_columns=3, batch_shape=batch_shape).shape)
      179  batch_shape = (2, 3)
      183  batch_shape=batch_shape)
      210  def test_eye_no_placeholder(self, num_rows, num_columns, batch_shape, dtype):  (argument)
      212  if batch_shape is not None:
      213  eye_np = np.tile(eye_np, batch_shape + [1, 1])
      217  batch_shape=batch_shape,
      241  self, num_rows, num_columns, batch_shape, dtype):  (argument)
      [all …]
|
D | matrix_solve_ls_op_test.py |
      46  batch_shape = matrix_shape[:-2]
      55  np.tile(matrix, batch_shape + (1, 1)), trainable=False)
      56  rhs = variables.Variable(np.tile(rhs, batch_shape + (1, 1)), trainable=False)
      88  batch_shape=()):  (argument)
      107  if batch_shape is not ():
      108  a = np.tile(a, batch_shape + (1, 1))
      109  b = np.tile(b, batch_shape + (1, 1))
      110  np_ans = np.tile(np_ans, batch_shape + (1, 1))
      111  np_r_norm = np.tile(np_r_norm, batch_shape)
      183  for batch_shape in (), (2, 3):
      [all …]
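The test tiles one least-squares system across a leading batch_shape and solves every copy at once. A sketch with tf.linalg.lstsq, the public wrapper of the op under test:

    import numpy as np
    import tensorflow as tf

    batch_shape = (2, 3)
    a = np.tile(np.random.randn(4, 2), batch_shape + (1, 1))  # (2, 3, 4, 2)
    b = np.tile(np.random.randn(4, 1), batch_shape + (1, 1))  # (2, 3, 4, 1)
    x = tf.linalg.lstsq(a, b, l2_regularizer=0.0)
    print(x.shape)  # (2, 3, 2, 1)  batch_shape + [num_columns, num_rhs]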
|
/external/tensorflow/tensorflow/python/ops/linalg/ |
D | linear_operator_kronecker.py |
      250  batch_shape = self.operators[0].batch_shape
      252  batch_shape = common_shapes.broadcast_shape(
      253  batch_shape, operator.batch_shape)
      255  return batch_shape.concatenate(matrix_shape)
      270  batch_shape = self.operators[0].batch_shape_tensor()
      272  batch_shape = array_ops.broadcast_dynamic_shape(
      273  batch_shape, operator.batch_shape_tensor())
      275  return array_ops.concat((batch_shape, matrix_shape), 0)
      319  batch_shape = array_ops.concat(
      321  x += array_ops.zeros(batch_shape, dtype=x.dtype.base_dtype)
      [all …]
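The code above broadcasts the factors' batch shapes and appends the Kronecker matrix shape. A sketch of the resulting static shape (operators and shapes chosen purely for illustration):

    import tensorflow as tf

    op1 = tf.linalg.LinearOperatorFullMatrix(tf.random.normal([2, 1, 2, 2]))  # batch (2, 1)
    op2 = tf.linalg.LinearOperatorFullMatrix(tf.random.normal([3, 3, 3]))     # batch (3,)
    kron = tf.linalg.LinearOperatorKronecker([op1, op2])
    print(kron.shape)  # (2, 3, 6, 6)  broadcast batch shape + [2 * 3, 2 * 3]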
|
D | linear_operator_composition.py |
      204  batch_shape = self.operators[0].batch_shape
      206  batch_shape = common_shapes.broadcast_shape(
      207  batch_shape, operator.batch_shape)
      209  return batch_shape.concatenate(matrix_shape)
      229  batch_shape = array_ops.shape(zeros)
      231  return array_ops.concat((batch_shape, matrix_shape), 0)
|
D | linear_operator_zeros.py |
      129  batch_shape=None,  (argument)
      221  if batch_shape is None:
      225  batch_shape, name="batch_shape_arg")
      236  batch_shape = tensor_shape.TensorShape(self._batch_shape_static)
      237  return batch_shape.concatenate(matrix_shape)
      275  special_shape = self.batch_shape.concatenate([1, 1])
      319  if self.batch_shape.is_fully_defined():
      320  return array_ops.zeros(shape=self.batch_shape, dtype=self.dtype)
      326  if self.batch_shape.is_fully_defined():
      327  return array_ops.zeros(shape=self.batch_shape, dtype=self.dtype)
      [all …]
|
D | linear_operator_identity.py |
      92  d_shape = self.batch_shape.concatenate([self._min_matrix_dim()])
      209  batch_shape=None,  (argument)
      281  if batch_shape is None:
      285  batch_shape, name="batch_shape_arg")
      296  batch_shape = tensor_shape.TensorShape(self._batch_shape_static)
      297  return batch_shape.concatenate(matrix_shape)
      331  special_shape = self.batch_shape.concatenate([1, 1])
      367  if self.batch_shape.is_fully_defined():
      368  batch_of_ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype)
      630  batch_shape = self.multiplier.get_shape()
      [all …]
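The last match (line 630) reads the multiplier's shape, which is the batch_shape of the scaled-identity operator defined in the same file. A sketch:

    import tensorflow as tf

    operator = tf.linalg.LinearOperatorScaledIdentity(
        num_rows=2, multiplier=tf.constant([1., 2., 3.]))
    print(operator.batch_shape)  # (3,)
    print(operator.shape)        # (3, 2, 2)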
|
/external/tensorflow/tensorflow/compiler/tests/ |
D | matrix_band_part_test.py |
      169  def testMatrixBandPart(self, batch_shape, rows, cols):  (argument)
      171  if self.device == 'XLA_CPU' and cols == 7 and rows == 1 and batch_shape == [
      177  mat = np.ones(batch_shape + [rows, cols]).astype(dtype)
      178  batch_mat = np.tile(mat, batch_shape + [1, 1])
      186  if batch_shape:
      187  band_np = np.tile(band_np, batch_shape + [1, 1])
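The op under test keeps a band of diagonals and leaves any leading batch dimensions untouched. A sketch with tf.linalg.band_part, the public wrapper:

    import tensorflow as tf

    batch_shape = [2, 3]
    mat = tf.ones(batch_shape + [4, 4])
    band = tf.linalg.band_part(mat, 1, 0)  # keep the main diagonal and one subdiagonal
    print(band.shape)  # (2, 3, 4, 4)  batch dims unchanged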
|
/external/tensorflow/tensorflow/contrib/distributions/python/ops/bijectors/ |
D | fill_triangular.py |
      109  batch_shape, d = (input_shape[:-1],
      115  return batch_shape.concatenate([n, n])
      118  batch_shape, n1, n2 = (output_shape[:-2],
      127  return batch_shape.concatenate([m])
      130  batch_shape, d = input_shape_tensor[:-1], input_shape_tensor[-1]
      132  return array_ops.concat([batch_shape, [n, n]], axis=0)
      135  batch_shape, n = output_shape_tensor[:-2], output_shape_tensor[-1]
      142  return array_ops.concat([batch_shape, [d]], axis=0)
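The shape arithmetic above converts, per batch member, between a vector of length d = n * (n + 1) / 2 and an n x n triangular matrix. A sketch with the TensorFlow Probability bijector of the same name:

    import tensorflow as tf
    import tensorflow_probability as tfp

    bij = tfp.bijectors.FillTriangular()
    x = tf.zeros([7, 6])         # batch_shape (7,), d = 6 = 3 * (3 + 1) / 2
    y = bij.forward(x)
    print(y.shape)               # (7, 3, 3)  batch_shape + [n, n]
    print(bij.inverse(y).shape)  # (7, 6)     back to batch_shape + [d]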
|
/external/tensorflow/tensorflow/python/kernel_tests/distributions/ |
D | normal_test.py |
      124  self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())
      125  self.assertAllEqual(normal.batch_shape, self.evaluate(log_pdf).shape)
      133  self.assertAllEqual(normal.batch_shape, pdf.get_shape())
      134  self.assertAllEqual(normal.batch_shape, self.evaluate(pdf).shape)
      160  self.assertAllEqual(normal.batch_shape, log_pdf.get_shape())
      161  self.assertAllEqual(normal.batch_shape, self.evaluate(log_pdf).shape)
      170  self.assertAllEqual(normal.batch_shape, pdf.get_shape())
      171  self.assertAllEqual(normal.batch_shape, pdf_values.shape)
      194  self.assertAllEqual(normal.batch_shape, cdf.get_shape())
      195  self.assertAllEqual(normal.batch_shape, self.evaluate(cdf).shape)
      [all …]
|
/external/tensorflow/tensorflow/core/kernels/ |
D | linalg_ops_common.cc |
      92  TensorShape batch_shape;  (local, in Compute())
      93  AnalyzeInputs(context, &inputs, &input_matrix_shapes, &batch_shape);  (in Compute())
      97  PrepareOutputs(context, input_matrix_shapes, batch_shape, &outputs,  (in Compute())
      110  batch_shape.num_elements(), GetCostPerUnit(input_matrix_shapes), shard);  (in Compute())
      117  TensorShape* batch_shape) {  (argument, in AnalyzeInputs())
      131  batch_shape->AddDim(in.dim_size(dim));  (in AnalyzeInputs())
      140  context, in.dim_size(dim) == batch_shape->dim_size(dim),  (in AnalyzeInputs())
      161  const TensorShape& batch_shape, TensorOutputs* outputs,  (argument, in PrepareOutputs())
      193  output_tensor_shape = batch_shape;  (in PrepareOutputs())
|