/external/tensorflow/tensorflow/python/keras/utils/
conv_utils_test.py
    169  ndims = len(input_shape)
    170  strides = (1,) * ndims
    184  ndims = len(input_shape)
    185  kernel_shape = (1,) * ndims
    186  strides = (1,) * ndims
    203  ndims = len(input_shape)
    204  kernel_shape = (1,) * ndims
    224  ndims = len(input_shape)
    225  kernel_shape = (1,) * ndims
    247  ndims = len(input_shape)
    [all …]

/external/tensorflow/tensorflow/python/ops/ragged/ |
ragged_util.py
    43   def get_positive_axis(axis, ndims):   argument
    65   if ndims is not None:
    66   if 0 <= axis < ndims:
    68   elif -ndims <= axis < 0:
    69   return axis + ndims
    72   "axis=%s out of bounds: expected %s<=axis<%s" % (axis, -ndims, ndims))
    152  axis = get_positive_axis(axis, data.shape.ndims)
    155  if repeats.shape.ndims == 1:
    159  if repeats.shape.ndims == 0:
    167  if repeats.shape.ndims != axis + 1:
    [all …]

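The matched lines from ragged_util.py above outline the usual axis-normalization idiom: a possibly negative axis is mapped into the range [0, ndims) when the rank is statically known. A minimal standalone Python sketch of that idiom follows; the branch for an unknown rank (ndims is None) is an assumption on my part, since those lines are not part of the match.

    def get_positive_axis(axis, ndims):
      """Map a possibly negative `axis` into [0, ndims) for a rank-`ndims` tensor."""
      if ndims is not None:
        if 0 <= axis < ndims:
          return axis
        elif -ndims <= axis < 0:
          return axis + ndims
        raise ValueError(
            "axis=%s out of bounds: expected %s<=axis<%s" % (axis, -ndims, ndims))
      elif axis < 0:
        # Assumed behaviour: a negative axis cannot be resolved without a known rank.
        raise ValueError("axis may only be negative if ndims is statically known")
      return axis

For example, get_positive_axis(-1, 4) returns 3, so callers such as the repeat helper at line 152 can treat the axis as non-negative afterwards.
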
ragged_batch_gather_with_default_op.py
    86   if (default_value.shape.ndims is not 0
    87   and default_value.shape.ndims is not 1):
    90   if indices.shape.ndims is None:
    92   if params.shape.ndims is None:
    95   num_batch_dimensions = indices.shape.ndims - 1
    125  params_shape[num_batch_dimensions + 1:params.shape.ndims]
    149  num_batch_dimensions = indices.shape.ndims - 1
    154  if params.shape.ndims == indices.shape.ndims:

ragged_concat_ops.py
    147  ndims = None
    149  if ndims is None:
    150  ndims = rt.shape.ndims
    152  rt.shape.assert_has_rank(ndims)
    154  out_ndims = ndims if (ndims is None or not stack_values) else ndims + 1
    161  if ndims is not None and (axis == out_ndims - 1 or axis == ndims - 1):

ragged_tensor_shape.py
    113  if dimension_size.shape.ndims is None:
    117  if partitioned_dim_sizes[0].shape.ndims == 1:
    119  if partitioned_dim_sizes[-1].shape.ndims == 0:
    153  if dim_size.shape.ndims == 1:
    155  elif dim_size.shape.ndims != 0:
    195  self._partitioned_dim_sizes[axis].shape.ndims == 1)
    296  if lengths.shape.ndims is None:
    298  elif lengths.shape.ndims > 1:
    301  lengths_is_scalar = (lengths.shape.ndims == 0)
    361  if lengths.shape.ndims == 0:
    [all …]

ragged_array_ops.py
    127  if mask.shape.ndims is None:
    129  elif mask.shape.ndims == 0:
    149  if mask.shape.ndims > 2:
    173  elif ragged_tensor.is_ragged(data) and mask.shape.ndims == 1:
    196  mask, ragged_rank=min(data.ragged_rank, mask.shape.ndims - 1))
    204  if mask.shape.ndims >= 2 and keepdims:
    215  if mask.shape.ndims > 2 and keepdims:
    218  for dim in range(mask.shape.ndims - 3, -1, -1):
    472  ndims = None if input.shape.ndims is None else input.shape.ndims + 1
    473  axis = ragged_util.get_positive_axis(axis, ndims)

/external/tensorflow/tensorflow/contrib/distributions/python/ops/ |
shape.py
    260  ndims = x.get_shape().ndims
    261  if ndims is None:
    263  return ops.convert_to_tensor(ndims, dtype=dtypes.int32, name="ndims")
    279  ndims = self.get_ndims(x, name=name)
    280  if self._is_all_constant_helper(ndims, self.batch_ndims,
    282  ndims = tensor_util.constant_value(ndims)
    283  sample_ndims = (ndims - self._batch_ndims_static -
    288  (self._batch_ndims_static, self._event_ndims_static, ndims))
    292  sample_ndims = ndims - self.batch_ndims - self.event_ndims
    359  if (x.get_shape().ndims is not None and
    [all …]

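The shape.py lines above split a tensor's total rank into sample, batch, and event parts (line 292: sample_ndims = ndims - batch_ndims - event_ndims). A minimal sketch of that bookkeeping; the helper name split_ndims and the error check are my own additions, not part of the matched code.

    def split_ndims(ndims, batch_ndims, event_ndims):
      """Return the number of leading sample dimensions given a total rank."""
      sample_ndims = ndims - batch_ndims - event_ndims
      if sample_ndims < 0:
        # Assumed check: the fixed batch and event ranks cannot exceed the total rank.
        raise ValueError("expected batch_ndims + event_ndims <= ndims, got "
                         "%d + %d > %d" % (batch_ndims, event_ndims, ndims))
      return sample_ndims

So a tensor of shape [2, 3, 5, 5] with batch_ndims=1 and event_ndims=2 has one leading sample dimension.
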
batch_reshape.py
    219  x_ndims = (array_ops.rank(x) if x.shape.ndims is None else x.shape.ndims)
    221  if self.event_shape.ndims is None
    222  else self.event_shape.ndims)
    225  if self.batch_shape.ndims is None else self.batch_shape.ndims)
    254  if (static_sample_shape.ndims is not None and
    255  self.batch_shape.ndims is not None):
    274  if (self.batch_shape.ndims is not None and
    275  self.event_shape.ndims is not None):
    287  x_ndims = (array_ops.rank(x) if x.shape.ndims is None else x.shape.ndims)
    289  if self.event_shape.ndims is None
    [all …]

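Line 219 above (and mixture_same_family.py line 330 further down) shows a pattern that recurs throughout these files: use the statically known rank when shape inference has determined it, otherwise fall back to a runtime rank op. A sketch of that pattern written against the public tf API rather than the internal array_ops module used in the originals; the helper name is hypothetical.

    import tensorflow as tf

    def static_or_dynamic_ndims(x):
      """Prefer the statically known rank; fall back to tf.rank at run time."""
      ndims = x.shape.ndims
      if ndims is None:
        ndims = tf.rank(x)  # 0-D int32 tensor, only known when the graph runs
      return ndims
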
sample_stats.py
    402  ndims = x.get_shape().ndims
    403  if ndims is None:
    406  ndims = shape_const.ndim
    408  if ndims is None:
    418  if ndims != expect_ndims:
    425  if ndims < expect_ndims_at_least:
    432  if ndims > expect_ndims_no_more_than:
    435  return ndims
    468  def _make_static_axis_non_negative(axis, ndims):   argument
    484  if d >= ndims:
    [all …]

mixture_same_family.py
    161  if (mixture_distribution.event_shape.ndims is not None
    162  and mixture_distribution.event_shape.ndims != 0):
    174  if mdbs.ndims != 0 and mdbs != cdbs:
    258  self._event_shape().ndims)  # [n, B, k, [1]*e]
    275  self._event_shape().ndims)  # [B, k, [1]*e]
    293  self._event_shape().ndims)  # [B, k, [1]*e]
    305  static_event_ndims = self.event_shape.ndims
    315  self._event_shape().ndims),
    317  self._event_shape().ndims)  # [B, k, 1, 1]
    330  ndims = x.shape.ndims if x.shape.ndims is not None else array_ops.rank(x)
    [all …]

independent.py
    179  or batch_shape.ndims is None):
    181  d = batch_shape.ndims - self._static_reinterpreted_batch_ndims
    200  or batch_shape.ndims is None):
    202  d = batch_shape.ndims - self._static_reinterpreted_batch_ndims
    238  batch_ndims = distribution.batch_shape.ndims
    267  ndims = distribution.batch_shape.ndims
    268  if ndims is None:
    270  ndims = array_ops.shape(distribution.batch_shape_tensor())[0]
    273  return which_maximum(0, ndims - 1)
    316  num_reduce_dims = a.event_shape.ndims - p.event_shape.ndims

wishart.py
    138  if (self._scale_operator.shape.ndims is None or
    227  ndims = batch_ndims + 3  # sample_ndims=1, event_ndims=2
    259  perm = array_ops.concat([math_ops.range(1, ndims), [0]], 0)
    274  perm = array_ops.concat([[ndims - 1], math_ops.range(0, ndims - 1)], 0)
    292  ndims = array_ops.rank(x_sqrt)
    294  sample_ndims = ndims - array_ops.shape(batch_shape)[0] - 2
    311  perm = array_ops.concat([math_ops.range(sample_ndims, ndims),
    331  perm = array_ops.concat([math_ops.range(ndims - sample_ndims, ndims),
    332  math_ops.range(0, ndims - sample_ndims)], 0)
    359  if x.get_shape().ndims is not None:
    [all …]

/external/tensorflow/tensorflow/core/kernels/ |
tile_functor_gpu.h
    35   const int32 ndims, T* dst) {   in TileKernel() argument
    37   const int32* out_strides = buf + ndims;   in TileKernel()
    38   const int32* in_dim_sizes = buf + ndims * 2;   in TileKernel()
    42   for (int i = 0; i < ndims; ++i) {   in TileKernel()
    58   const int32 ndims = in.dims();   in TileSimple() local
    59   gtl::InlinedVector<int32, 24> host_buf(ndims * 3);   in TileSimple()
    62   for (int i = 0; i < ndims; ++i) {   in TileSimple()
    64   host_buf[ndims + i] = out_strides[i];   in TileSimple()
    65   host_buf[ndims * 2 + i] = in.dim_size(i);   in TileSimple()
    81   reinterpret_cast<const int32*>(dev_buf), ndims, q));   in TileSimple()

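TileSimple above packs three int32 arrays of length ndims (input strides, output strides, input dimension sizes) into a single host buffer, copies it to the device, and TileKernel recovers them as buf, buf + ndims, and buf + ndims * 2. A Python sketch of the per-element index arithmetic that layout supports; the kernel body itself is not part of the match, so the arithmetic shown is an assumption about how a flat tiled-output index is usually mapped back to the input.

    def tile_source_index(o_idx, in_strides, out_strides, in_dim_sizes):
      """Map a flat output index of a tiled tensor back to its flat input index."""
      i_idx = 0
      for stride_in, stride_out, size_in in zip(in_strides, out_strides, in_dim_sizes):
        coord = o_idx // stride_out                # output coordinate along this dimension
        o_idx -= coord * stride_out
        i_idx += (coord % size_in) * stride_in     # wrap back into the smaller input
      return i_idx

    # e.g. a [2, 3] input tiled to [4, 6]: output element (3, 4) reads input (1, 1),
    # so tile_source_index(22, [3, 1], [6, 1], [2, 3]) == 4.
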
qr_op_impl.h
    141  const int ndims = input.dims();   in ComputeAsync() local
    142  const int64 m = input.dim_size(ndims - 2);   in ComputeAsync()
    143  const int64 n = input.dim_size(ndims - 1);   in ComputeAsync()
    150  context, ndims >= 2,   in ComputeAsync()
    151  errors::InvalidArgument("Input must have rank >= 2, got ", ndims),   in ComputeAsync()
    159  q_shape.set_dim(ndims - 1, full_matrices_ ? m : min_size);   in ComputeAsync()
    164  r_shape.set_dim(ndims - 2, full_matrices_ ? m : min_size);   in ComputeAsync()
    179  transposed_shape.set_dim(ndims - 2, input.dim_size(ndims - 1));   in ComputeAsync()
    180  transposed_shape.set_dim(ndims - 1, input.dim_size(ndims - 2));   in ComputeAsync()

transpose_functor_gpu.cu.cc
    36   const int32 ndims, T* dst) {   in TransposeKernel() argument
    38   const int32* out_strides = buf + ndims;   in TransposeKernel()
    39   const int32* perm = buf + ndims * 2;   in TransposeKernel()
    43   for (int32 i = 0; i < ndims; ++i) {   in TransposeKernel()
    63   const int32 ndims = in.dims();   in TransposeSimple() local
    64   gtl::InlinedVector<int32, 24> host_buf(ndims * 3);   in TransposeSimple()
    68   for (int i = 0; i < ndims; ++i) {   in TransposeSimple()
    70   host_buf[ndims + i] = out_strides[i];   in TransposeSimple()
    71   host_buf[ndims * 2 + i] = perm[i];   in TransposeSimple()
    86   reinterpret_cast<const int32*>(dev_buf), ndims, q));   in TransposeSimple()

determinant_op.cc
    135  const int ndims = input.dims();   in ComputeAsync() local
    136  const int64 n = input.dim_size(ndims - 1);   in ComputeAsync()
    139  context, ndims >= 2,   in ComputeAsync()
    140  errors::InvalidArgument("Input must have rank >= 2, got ", ndims),   in ComputeAsync()
    143  context, input.dim_size(ndims - 2) == n,   in ComputeAsync()
    145  input.dim_size(ndims - 2), " != ", n),   in ComputeAsync()
    150  for (int dim = 0; dim < ndims - 2; ++dim) {   in ComputeAsync()
    275  const int ndims = input.dims();   in ComputeAsync() local
    276  const int64 n = input.dim_size(ndims - 1);   in ComputeAsync()
    279  context, ndims >= 2,   in ComputeAsync()
    [all …]

matrix_solve_op.cc
    131  const int ndims = input.dims();   in ComputeAsync() local
    132  const int64 n = input.dim_size(ndims - 1);   in ComputeAsync()
    133  const int64 nrhs = rhs.dim_size(ndims - 1);   in ComputeAsync()
    136  context, ndims >= 2,   in ComputeAsync()
    137  errors::InvalidArgument("Input must have rank >= 2, got ", ndims),   in ComputeAsync()
    139  OP_REQUIRES_ASYNC(context, rhs.dims() == ndims,   in ComputeAsync()
    142  ndims, " != ", rhs.dims()),   in ComputeAsync()
    145  context, input.dim_size(ndims - 2) == n,   in ComputeAsync()
    147  input.dim_size(ndims - 2), " != ", n),   in ComputeAsync()
    149  OP_REQUIRES_ASYNC(context, rhs.dim_size(ndims - 2) == n,   in ComputeAsync()
    [all …]

/external/tensorflow/tensorflow/stream_executor/ |
dnn.cc
    238  BatchDescriptor::BatchDescriptor(int ndims)   in BatchDescriptor() argument
    242  tensor_.mutable_dimensions()->Resize(ndims + 2, 0);   in BatchDescriptor()
    249  std::vector<int64> bdyx_dims(ndims() + 2);   in full_dims()
    269  phys_strides[ndims() + 1] = 1;   in full_strides()
    270  for (int i = ndims(); i >= 0; i--) {   in full_strides()
    285  for (int i = 0; i < ndims(); i++) {   in ToString()
    303  for (int i = 0; i < ndims(); i++) {   in ToShortString()
    334  for (int i = 0; i < ndims(); i++) {   in NodesPerFeatureMap()
    383  FilterDescriptor::FilterDescriptor(int ndims) {   in FilterDescriptor() argument
    384  tensor_.mutable_dimensions()->Resize(ndims + 2, 0);   in FilterDescriptor()
    [all …]

/external/tensorflow/tensorflow/compiler/xla/service/ |
triangular_solve_expander.cc
    43   int ndims = shape.rank();   in DiagonalBlocks() local
    52   std::vector<int64> permutation(ndims);   in DiagonalBlocks()
    68   MakeEdgePaddingConfig({{0, 0}, {ndims - 2, 0}});   in DiagonalBlocks()
    73   std::vector<int64> slice_sizes(ndims);   in DiagonalBlocks()
    75   for (int i = 0; i < ndims - 2; ++i) {   in DiagonalBlocks()
    80   slice_sizes[ndims - 2] = slice_sizes[ndims - 1] = block_size;   in DiagonalBlocks()
    81   dim_numbers.add_offset_dims(ndims - 1);   in DiagonalBlocks()
    82   dim_numbers.add_offset_dims(ndims);   in DiagonalBlocks()
    83   dim_numbers.add_start_index_map(ndims - 2);   in DiagonalBlocks()
    84   dim_numbers.add_start_index_map(ndims - 1);   in DiagonalBlocks()
    [all …]

/external/tensorflow/tensorflow/python/ops/distributions/ |
distribution.py
    1212  if self.batch_shape.ndims is not None
    1215  if self.event_shape.ndims is not None
    1247  ndims = x.get_shape().ndims  # != sample_ndims
    1248  if ndims is None:
    1250  ndims = array_ops.rank(x)
    1252  math_ops.equal(ndims, 0),
    1255  elif ndims == 0:
    1263  elif ndims != 1:
    1274  ndims = x.get_shape().ndims
    1275  sample_ndims = sample_shape.ndims
    [all …]

/external/tensorflow/tensorflow/python/tpu/ |
tpu_sharding.py
    173  ndims = shape.ndims
    174  if ndims is None:
    176  if ndims <= self._shard_dimension:
    209  ndims = shape.ndims
    210  if ndims is None:
    212  if ndims <= self._shard_dimension:

/external/tensorflow/tensorflow/contrib/gan/python/features/python/ |
virtual_batchnorm_impl.py
    84   if reference_batch.shape.ndims is None:
    87   ndims = reference_batch.shape.ndims
    89   used_axis = ndims + axis
    92   if used_axis < 0 or used_axis >= ndims:
    94   ' is out of range for input with rank ' + str(ndims))
    198  ndims = input_shape.ndims
    199  reduction_axes = list(range(ndims))
    205  self._example_reduction_axes = list(range(ndims))
    214  sorted(self._example_reduction_axes) != list(range(ndims))[:-2])

/external/tensorflow/tensorflow/core/framework/ |
ops_util.h
    91   const int ndims = shape.dims();   in ComputeStride() local
    92   gtl::InlinedVector<T, 8> strides(ndims);   in ComputeStride()
    94   for (int i = ndims - 1; i >= 0; --i) {   in ComputeStride()
    104  const int ndims = shape.rank();   in ComputeEigenStrides() local
    105  gtl::InlinedVector<T, 8> strides(ndims);   in ComputeEigenStrides()
    107  for (int i = ndims - 1; i >= 0; --i) {   in ComputeEigenStrides()

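ComputeStride above walks the dimensions from last to first, which is the standard way to build row-major strides (the stride of the last dimension is 1, and each earlier stride is the product of the sizes that follow it). A small Python equivalent of that loop, with an example value:

    def compute_strides(dims):
      """Row-major strides: strides[i] is the flat-index step for one unit along dim i."""
      strides = [0] * len(dims)
      stride = 1
      for i in range(len(dims) - 1, -1, -1):
        strides[i] = stride
        stride *= dims[i]
      return strides

    # compute_strides([2, 3, 4]) == [12, 4, 1]
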
/external/tensorflow/tensorflow/go/ |
operation.go
    100  ndims := C.TF_GraphGetTensorNumDims(p.Op.g.c, port, status.c)
    107  if ndims < 0 {
    110  if ndims == 0 {
    113  dims := make([]C.int64_t, ndims)
    114  C.TF_GraphGetTensorShape(p.Op.g.c, port, &dims[0], ndims, status.c)
    119  ret := Shape{dims: make([]int64, ndims)}
    120  for i := 0; i < int(ndims); i++ {

/external/tensorflow/tensorflow/python/ops/ |
image_ops_impl.py
    217  if image.get_shape().ndims is None:
    325  if shape.ndims == 3 or shape.ndims is None:
    335  elif shape.ndims == 4:
    414  if shape.ndims == 3 or shape.ndims is None:
    416  elif shape.ndims == 4:
    446  if shape.ndims == 3 or shape.ndims is None:
    448  elif shape.ndims == 4:
    544  if shape.ndims == 3 or shape.ndims is None:
    546  elif shape.ndims == 4:
    589  rank = image.get_shape().ndims
    [all …]