/external/tensorflow/tensorflow/python/keras/utils/ |
D | conv_utils_test.py |
    169  ndims = len(input_shape)
    170  strides = (1,) * ndims
    184  ndims = len(input_shape)
    185  kernel_shape = (1,) * ndims
    186  strides = (1,) * ndims
    203  ndims = len(input_shape)
    204  kernel_shape = (1,) * ndims
    224  ndims = len(input_shape)
    225  kernel_shape = (1,) * ndims
    247  ndims = len(input_shape)
    [all …]
|
/external/tensorflow/tensorflow/core/kernels/ |
D | tile_functor_gpu.h |
    34   const int32* __restrict__ buf, const int32 ndims,  in TileKernel() argument
    37   const int32* out_strides = buf + ndims;  in TileKernel()
    38   const int32* in_dim_sizes = buf + ndims * 2;  in TileKernel()
    42   for (int i = 0; i < ndims; ++i) {  in TileKernel()
    58   const int32 ndims = in.dims();  in TileSimple() local
    59   gtl::InlinedVector<int32, 24> host_buf(ndims * 3);  in TileSimple()
    62   for (int i = 0; i < ndims; ++i) {  in TileSimple()
    64   host_buf[ndims + i] = out_strides[i];  in TileSimple()
    65   host_buf[ndims * 2 + i] = in.dim_size(i);  in TileSimple()
    81   reinterpret_cast<const int32*>(dev_buf), ndims, q));  in TileSimple()
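The TileKernel/TileSimple pair above packs in_strides, out_strides and in_dim_sizes into one flat buffer of length ndims * 3 and recovers each section on the device by offsetting the base pointer by ndims. A minimal NumPy sketch of that index arithmetic, assuming a row-major layout (tile_reference and the pure-Python loop are illustrative, not the CUDA kernel):

    import numpy as np

    def tile_reference(x, multiples):
        ndims = x.ndim
        out_shape = tuple(d * m for d, m in zip(x.shape, multiples))
        in_strides = [int(np.prod(x.shape[i + 1:])) for i in range(ndims)]
        out_strides = [int(np.prod(out_shape[i + 1:])) for i in range(ndims)]
        in_dim_sizes = list(x.shape)
        # host_buf layout used by the kernel: [in_strides | out_strides | in_dim_sizes]
        buf = in_strides + out_strides + in_dim_sizes

        src = x.ravel()
        dst = np.empty(int(np.prod(out_shape)), dtype=x.dtype)
        for o in range(dst.size):
            t, i_idx = o, 0
            for i in range(ndims):
                coord = t // buf[ndims + i]                     # output coordinate along dim i
                t %= buf[ndims + i]
                i_idx += (coord % buf[2 * ndims + i]) * buf[i]  # wrap back into the input dim
            dst[o] = src[i_idx]
        return dst.reshape(out_shape)

    x = np.arange(6).reshape(2, 3)
    assert (tile_reference(x, (2, 2)) == np.tile(x, (2, 2))).all()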
|
D | qr_op_impl.h |
    141  const int ndims = input.dims();  in ComputeAsync() local
    142  const int64 m = input.dim_size(ndims - 2);  in ComputeAsync()
    143  const int64 n = input.dim_size(ndims - 1);  in ComputeAsync()
    150  context, ndims >= 2,  in ComputeAsync()
    151  errors::InvalidArgument("Input must have rank >= 2, got ", ndims),  in ComputeAsync()
    159  q_shape.set_dim(ndims - 1, full_matrices_ ? m : min_size);  in ComputeAsync()
    164  r_shape.set_dim(ndims - 2, full_matrices_ ? m : min_size);  in ComputeAsync()
    179  transposed_shape.set_dim(ndims - 2, input.dim_size(ndims - 1));  in ComputeAsync()
    180  transposed_shape.set_dim(ndims - 1, input.dim_size(ndims - 2));  in ComputeAsync()
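The shape bookkeeping above reads m and n from the last two dimensions and sizes Q and R according to full_matrices_: Q is [..., m, m] and R is [..., m, n] in the full case, otherwise Q is [..., m, min(m, n)] and R is [..., min(m, n), n]. The same output shapes can be checked with NumPy's QR on a single matrix (a small illustration, not the TensorFlow kernel):

    import numpy as np

    m, n = 5, 3                                        # the last two dims of the op's input
    min_size = min(m, n)
    a = np.random.randn(m, n)

    q, r = np.linalg.qr(a, mode="reduced")             # full_matrices_ == false
    assert q.shape == (m, min_size) and r.shape == (min_size, n)

    q_full, r_full = np.linalg.qr(a, mode="complete")  # full_matrices_ == true
    assert q_full.shape == (m, m) and r_full.shape == (m, n)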
|
D | transpose_functor_gpu.cu.cc |
    37   const int32 ndims, T* __restrict__ dst) {  in TransposeKernel() argument
    39   const int32* out_strides = buf + ndims;  in TransposeKernel()
    40   const int32* perm = buf + ndims * 2;  in TransposeKernel()
    44   for (int32 i = 0; i < ndims; ++i) {  in TransposeKernel()
    64   const int32 ndims = in.dims();  in TransposeSimple() local
    65   gtl::InlinedVector<int32, 24> host_buf(ndims * 3);  in TransposeSimple()
    69   for (int i = 0; i < ndims; ++i) {  in TransposeSimple()
    71   host_buf[ndims + i] = out_strides[i];  in TransposeSimple()
    72   host_buf[ndims * 2 + i] = perm[i];  in TransposeSimple()
    87   reinterpret_cast<const int32*>(dev_buf), ndims, q));  in TransposeSimple()
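TransposeKernel uses the same flat-buffer trick as the tile kernel, but its third section holds the permutation: output dimension i takes its coordinate from input dimension perm[i]. A pure-Python/NumPy sketch of that mapping (transpose_reference is an illustrative name, not the device code):

    import numpy as np

    def transpose_reference(x, perm):
        ndims = x.ndim
        out_shape = tuple(x.shape[p] for p in perm)
        in_strides = [int(np.prod(x.shape[i + 1:])) for i in range(ndims)]
        out_strides = [int(np.prod(out_shape[i + 1:])) for i in range(ndims)]
        src = x.ravel()
        dst = np.empty(src.size, dtype=x.dtype)
        for o in range(src.size):
            t, i_idx = o, 0
            for i in range(ndims):
                coord = t // out_strides[i]            # coordinate along output dim i
                t %= out_strides[i]
                i_idx += coord * in_strides[perm[i]]   # same coordinate lives in input dim perm[i]
            dst[o] = src[i_idx]
        return dst.reshape(out_shape)

    x = np.arange(24).reshape(2, 3, 4)
    assert (transpose_reference(x, (2, 0, 1)) == np.transpose(x, (2, 0, 1))).all()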
|
D | determinant_op.cc |
    135  const int ndims = input.dims();  in ComputeAsync() local
    136  const int64 n = input.dim_size(ndims - 1);  in ComputeAsync()
    139  context, ndims >= 2,  in ComputeAsync()
    140  errors::InvalidArgument("Input must have rank >= 2, got ", ndims),  in ComputeAsync()
    143  context, input.dim_size(ndims - 2) == n,  in ComputeAsync()
    145  input.dim_size(ndims - 2), " != ", n),  in ComputeAsync()
    150  for (int dim = 0; dim < ndims - 2; ++dim) {  in ComputeAsync()
    275  const int ndims = input.dims();  in ComputeAsync() local
    276  const int64 n = input.dim_size(ndims - 1);  in ComputeAsync()
    279  context, ndims >= 2,  in ComputeAsync()
    [all …]
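The determinant, matrix-solve and matrix-inverse entries in this listing all repeat the same two checks on ndims: the input must have rank at least 2, and its last two dimensions must be square; everything in front of them is treated as batch dimensions. A plain-Python sketch of that validation (check_square_batch_matrix is a hypothetical helper, not TensorFlow code):

    def check_square_batch_matrix(shape):
        ndims = len(shape)
        if ndims < 2:
            raise ValueError("Input must have rank >= 2, got %d" % ndims)
        n = shape[ndims - 1]
        if shape[ndims - 2] != n:
            raise ValueError("Input matrices must be square, got %d != %d"
                             % (shape[ndims - 2], n))
        return shape[:ndims - 2], n        # (batch dimensions, matrix size)

    assert check_square_batch_matrix((7, 3, 3)) == ((7,), 3)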
|
D | matrix_solve_op.cc |
    131  const int ndims = input.dims();  in ComputeAsync() local
    132  const int64 n = input.dim_size(ndims - 1);  in ComputeAsync()
    133  const int64 nrhs = rhs.dim_size(ndims - 1);  in ComputeAsync()
    136  context, ndims >= 2,  in ComputeAsync()
    137  errors::InvalidArgument("Input must have rank >= 2, got ", ndims),  in ComputeAsync()
    139  OP_REQUIRES_ASYNC(context, rhs.dims() == ndims,  in ComputeAsync()
    142  ndims, " != ", rhs.dims()),  in ComputeAsync()
    145  context, input.dim_size(ndims - 2) == n,  in ComputeAsync()
    147  input.dim_size(ndims - 2), " != ", n),  in ComputeAsync()
    149  OP_REQUIRES_ASYNC(context, rhs.dim_size(ndims - 2) == n,  in ComputeAsync()
    [all …]
|
D | tridiagonal_solve_op_gpu.cu.cc |
    264  const int ndims = lhs.dims();  in Compute() local
    266  const int64 matrix_size = lhs.dim_size(ndims - 1);  in Compute()
    268  for (int i = 0; i < ndims - 2; i++) {  in Compute()
    293  const int ndims = lhs.dims();  in ComputeWithGtsvBatched() local
    304  int matrix_size = lhs.dim_size(ndims - 1);  in ComputeWithGtsvBatched()
    343  const int ndims = lhs.dims();  in TransposeLhsForGtsvBatched() local
    347  std::vector<int> perm(ndims);  in TransposeLhsForGtsvBatched()
    348  perm[0] = ndims - 2;  in TransposeLhsForGtsvBatched()
    349  for (int i = 0; i < ndims - 2; ++i) {  in TransposeLhsForGtsvBatched()
    352  perm[ndims - 1] = ndims - 1;  in TransposeLhsForGtsvBatched()
    [all …]
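TransposeLhsForGtsvBatched builds a permutation that moves the diagonals axis (ndims - 2) to the front and keeps the innermost matrix axis last. The loop body at lines 350-351 is elided in this listing, so the sketch below assumes it fills perm[i + 1] = i for the batch axes (plain Python, hypothetical helper name):

    def gtsv_batched_perm(ndims):
        perm = [0] * ndims
        perm[0] = ndims - 2               # the three-diagonals axis goes first
        for i in range(ndims - 2):        # assumed: batch axes follow in original order
            perm[i + 1] = i
        perm[ndims - 1] = ndims - 1       # the matrix-size axis stays last
        return perm

    assert gtsv_batched_perm(4) == [2, 0, 1, 3]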
|
D | reduction_ops_common.h |
    98   int ndims() const { return data_reshape_.size(); }
    156  CHECK_GE(helper.ndims(), 0);
    159  bool is_trivial = helper.ndims() == 0 ||
    160  (helper.ndims() == 1 && !helper.reduce_first_axis());
    204  } else if ((helper.ndims() == 1) && helper.reduce_first_axis()) {
    208  } else if ((helper.ndims() == 2) && helper.reduce_first_axis()) {
    212  } else if ((helper.ndims() == 2) && !helper.reduce_first_axis()) {
    216  } else if ((helper.ndims() == 3) && helper.reduce_first_axis()) {
    221  } else if ((helper.ndims() == 3) && !helper.reduce_first_axis()) {
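Note that ndims() here is the size of data_reshape_, i.e. the rank of a reshaped view of the input rather than the original tensor: runs of adjacent axes that are either all reduced or all kept can be collapsed into one axis, which is why the dispatch above only needs a handful of (ndims, reduce_first_axis) cases. An illustrative Python sketch of that collapsing (collapse_axes is not the actual ReductionHelper API):

    def collapse_axes(shape, reduce_axes):
        reduce_axes = {a % len(shape) for a in reduce_axes}
        groups, flags = [], []
        for axis, size in enumerate(shape):
            reduced = axis in reduce_axes
            if flags and flags[-1] == reduced:
                groups[-1] *= size            # merge with the previous run of axes
            else:
                groups.append(size)
                flags.append(reduced)
        reduce_first_axis = bool(flags) and flags[0]
        return groups, reduce_first_axis

    # Reducing axes (1, 2) of a [2, 3, 4, 5] tensor becomes a 3-D [2, 12, 5] problem
    # that reduces its middle axis, i.e. ndims == 3 and reduce_first_axis == False.
    assert collapse_axes((2, 3, 4, 5), (1, 2)) == ([2, 12, 5], False)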
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | matrix_inverse_op.cc |
    32   int64 ndims = input_shape.dims();  in Compile() local
    34   ctx, ndims >= 2,  in Compile()
    35   errors::InvalidArgument("Input must have rank >= 2, got ", ndims));  in Compile()
    37   ctx, input_shape.dim_size(ndims - 2) == input_shape.dim_size(ndims - 1),  in Compile()
    39   input_shape.dim_size(ndims - 2),  in Compile()
    40   " != ", input_shape.dim_size(ndims - 1)));  in Compile()
|
/external/tensorflow/tensorflow/stream_executor/ |
D | dnn.cc |
    264  BatchDescriptor::BatchDescriptor(int ndims)  in BatchDescriptor() argument
    268  tensor_.mutable_dimensions()->Resize(ndims + 2, 0);  in BatchDescriptor()
    275  std::vector<int64> bdyx_dims(ndims() + 2);  in full_dims()
    295  phys_strides[ndims() + 1] = 1;  in full_strides()
    296  for (int i = ndims(); i >= 0; i--) {  in full_strides()
    311  for (int i = 0; i < ndims(); i++) {  in ToString()
    329  for (int i = 0; i < ndims(); i++) {  in ToShortString()
    360  for (int i = 0; i < ndims(); i++) {  in NodesPerFeatureMap()
    409  FilterDescriptor::FilterDescriptor(int ndims) {  in FilterDescriptor() argument
    410  tensor_.mutable_dimensions()->Resize(ndims + 2, 0);  in FilterDescriptor()
    [all …]
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | triangular_solve_expander.cc |
    43   int ndims = shape.rank();  in DiagonalBlocks() local
    52   std::vector<int64> permutation(ndims);  in DiagonalBlocks()
    68   MakeEdgePaddingConfig({{0, 0}, {ndims - 2, 0}});  in DiagonalBlocks()
    73   std::vector<int64> slice_sizes(ndims);  in DiagonalBlocks()
    75   for (int i = 0; i < ndims - 2; ++i) {  in DiagonalBlocks()
    80   slice_sizes[ndims - 2] = slice_sizes[ndims - 1] = block_size;  in DiagonalBlocks()
    81   dim_numbers.add_offset_dims(ndims - 1);  in DiagonalBlocks()
    82   dim_numbers.add_offset_dims(ndims);  in DiagonalBlocks()
    83   dim_numbers.add_start_index_map(ndims - 2);  in DiagonalBlocks()
    84   dim_numbers.add_start_index_map(ndims - 1);  in DiagonalBlocks()
    [all …]
|
/external/tensorflow/tensorflow/python/ops/distributions/ |
D | distribution.py |
    1212  if self.batch_shape.ndims is not None
    1215  if self.event_shape.ndims is not None
    1247  ndims = x.get_shape().ndims  # != sample_ndims
    1248  if ndims is None:
    1250  ndims = array_ops.rank(x)
    1252  math_ops.equal(ndims, 0),
    1255  elif ndims == 0:
    1263  elif ndims != 1:
    1274  ndims = x.get_shape().ndims
    1275  sample_ndims = sample_shape.ndims
    [all …]
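A pattern that recurs throughout the Python entries in this listing appears here explicitly: read the static rank from get_shape().ndims and fall back to a dynamic rank op only when it is None. A minimal sketch using the public TF 2.x API (the internal code above uses array_ops/math_ops directly; the helper name is illustrative):

    import tensorflow as tf

    def rank_static_or_dynamic(x):
        ndims = x.shape.ndims        # Python int, or None when unknown at graph-build time
        if ndims is not None:
            return ndims
        return tf.rank(x)            # dynamic fallback, evaluated at run time

    x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    assert rank_static_or_dynamic(x) == 2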
|
D | util.py |
    626  ndims = x.get_shape().ndims
    627  if ndims is not None and shift_value_static is not None:
    628  if ndims < 2:
    631  abs(shift_value_static) % ndims)
    634  perm = np.roll(np.arange(ndims), shift_value_static)
    650  ndims = array_ops.rank(x)
    652  math_ops.less(shift, 0), math_ops.mod(-shift, ndims),
    653  ndims - math_ops.mod(shift, ndims))
    655  last = math_ops.range(shift, ndims)
    898  ndims = prefer_static_rank(x)
    [all …]
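The static branch above rolls the identity permutation to rotate a tensor's dimensions; the dynamic branch rebuilds the same permutation with rank/mod ops when ndims is only known at run time. A small check of the static case with public APIs (illustrative, not the helper itself):

    import numpy as np
    import tensorflow as tf

    x = tf.zeros([2, 3, 4])
    ndims = x.shape.ndims
    perm = np.roll(np.arange(ndims), 1)     # shift dims right by one: [2, 0, 1]
    assert tf.transpose(x, perm).shape.as_list() == [4, 2, 3]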
|
/external/tensorflow/tensorflow/python/ops/ragged/ |
D | ragged_batch_gather_with_default_op.py |
    84   if (default_value.shape.ndims is not 0
    85   and default_value.shape.ndims is not 1):
    88   if indices.shape.ndims is None:
    90   if params.shape.ndims is None:
    93   num_batch_dimensions = indices.shape.ndims - 1
    123  params_shape[num_batch_dimensions + 1:params.shape.ndims]
    147  num_batch_dimensions = indices.shape.ndims - 1
    152  if params.shape.ndims == indices.shape.ndims:
|
D | ragged_concat_ops.py |
    156  ndims = None
    158  if ndims is None:
    159  ndims = rt.shape.ndims
    161  rt.shape.assert_has_rank(ndims)
    163  out_ndims = ndims if (ndims is None or not stack_values) else ndims + 1
    166  if stack_values and ndims == 1 and axis == 0:
    176  if ndims is not None and (axis == out_ndims - 1 or axis == ndims - 1):
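out_ndims above is ndims + 1 only when stack_values is set, because stacking adds an output dimension while concatenation preserves the rank of its inputs. A quick check through the public ragged API (illustrative):

    import tensorflow as tf

    rt1 = tf.ragged.constant([[1, 2], [3]])
    rt2 = tf.ragged.constant([[4], [5, 6]])
    assert tf.concat([rt1, rt2], axis=0).shape.ndims == 2   # same rank as the inputs
    assert tf.stack([rt1, rt2], axis=0).shape.ndims == 3    # one extra dimension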
|
D | ragged_tensor_shape.py |
    117  if dimension_size.shape.ndims is None:
    121  if partitioned_dim_sizes[0].shape.ndims == 1:
    123  if partitioned_dim_sizes[-1].shape.ndims == 0:
    130  p.dtype for p in partitioned_dim_sizes if p.shape.ndims == 1)
    173  if dim_size.shape.ndims == 1:
    175  elif dim_size.shape.ndims != 0:
    216  self._partitioned_dim_sizes[axis].shape.ndims == 1)
    323  if lengths.shape.ndims is None:
    325  elif lengths.shape.ndims > 1:
    328  lengths_is_scalar = (lengths.shape.ndims == 0)
    [all …]
|
D | ragged_array_ops.py |
    101  if mask.shape.ndims is None:
    103  elif mask.shape.ndims == 0:
    124  if mask.shape.ndims > 2:
    147  elif ragged_tensor.is_ragged(data) and mask.shape.ndims == 1:
    171  mask, ragged_rank=min(data.ragged_rank, mask.shape.ndims - 1),
    180  if mask.shape.ndims >= 2:
    192  if mask.shape.ndims > 2:
    195  for dim in range(mask.shape.ndims - 3, -1, -1):
    447  ndims = None if input.shape.ndims is None else input.shape.ndims + 1
    448  axis = ragged_util.get_positive_axis(axis, ndims)
    [all …]
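The mask.shape.ndims branches above distinguish row masks from element masks: a rank-1 mask drops whole rows of the data, while a higher-rank mask drops individual elements and makes the result ragged. Both cases via the public API (illustrative):

    import tensorflow as tf

    data = [[1, 2, 3], [4, 5, 6]]
    # rank-1 mask: keeps or drops whole rows
    assert tf.ragged.boolean_mask(data, [True, False]).to_list() == [[1, 2, 3]]
    # rank-2 mask: keeps or drops individual elements, producing ragged rows
    assert tf.ragged.boolean_mask(
        data, [[True, False, True], [False, True, False]]).to_list() == [[1, 3], [5]]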
|
D | ragged_string_ops.py |
    65   rank = input.shape.ndims
    123  if input_tensor.shape.ndims is None:
    126  if input_tensor.flat_values.shape.ndims > 1:
    148  if input_tensor.shape.ndims == 2:
    153  elif input_tensor.shape.ndims > 2:
    162  elif input_tensor.shape.ndims == 0:
    401  input_ndims = input.shape.ndims
    500  rank = input.shape.ndims
    748  if data.shape.ndims is None:
    750  elif data.shape.ndims == 0:
    [all …]
|
/external/tensorflow/tensorflow/python/tpu/ |
D | tpu_sharding.py |
    172  ndims = shape.ndims
    173  if ndims is None:
    175  if ndims <= self._shard_dimension:
    208  ndims = shape.ndims
    209  if ndims is None:
    211  if ndims <= self._shard_dimension:
|
/external/tensorflow/tensorflow/core/framework/ |
D | ops_util.h |
    91   const int ndims = shape.dims();  in ComputeStride() local
    92   gtl::InlinedVector<T, 8> strides(ndims);  in ComputeStride()
    94   for (int i = ndims - 1; i >= 0; --i) {  in ComputeStride()
    104  const int ndims = shape.rank();  in ComputeEigenStrides() local
    105  gtl::InlinedVector<T, 8> strides(ndims);  in ComputeEigenStrides()
    107  for (int i = ndims - 1; i >= 0; --i) {  in ComputeEigenStrides()
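ComputeStride walks the dimensions from last to first, so stride[i] ends up as the product of all dimension sizes to the right of i (row-major layout). A plain-Python sketch of the same loop (compute_strides is an illustrative name):

    def compute_strides(dims):
        strides = [0] * len(dims)
        stride = 1
        for i in range(len(dims) - 1, -1, -1):   # innermost dimension is contiguous
            strides[i] = stride
            stride *= dims[i]
        return strides

    assert compute_strides([2, 3, 4]) == [12, 4, 1]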
|
/external/tensorflow/tensorflow/go/ |
D | operation.go |
    100  ndims := C.TF_GraphGetTensorNumDims(p.Op.g.c, port, status.c)
    107  if ndims < 0 {
    110  if ndims == 0 {
    113  dims := make([]C.int64_t, ndims)
    114  C.TF_GraphGetTensorShape(p.Op.g.c, port, &dims[0], ndims, status.c)
    119  ret := Shape{dims: make([]int64, ndims)}
    120  for i := 0; i < int(ndims); i++ {
|
/external/tensorflow/tensorflow/python/ops/ |
D | array_ops.py |
    804   if optimize and input_shape.ndims is not None:
    805   return constant(input_shape.ndims, dtypes.int32, name=name)
    1498  if value_shape.ndims is not None:
    1499  if axis < -value_shape.ndims or axis >= value_shape.ndims:
    1501  (axis, -value_shape.ndims, value_shape.ndims))
    1661  ndims_mask = shape_mask.ndims
    2188  ndims = a_shape.ndims
    2189  if ndims is not None:
    2190  if ndims < 2:
    2194  perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
    [all …]
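The perm built at line 2194 swaps only the last two axes and leaves every batch dimension in place, which is the behaviour exposed publicly as tf.linalg.matrix_transpose. A quick check with public APIs:

    import tensorflow as tf

    x = tf.zeros([2, 3, 4, 5])
    ndims = x.shape.ndims
    perm = list(range(ndims - 2)) + [ndims - 1, ndims - 2]      # [0, 1, 3, 2]
    assert tf.transpose(x, perm).shape.as_list() == [2, 3, 5, 4]
    assert tf.linalg.matrix_transpose(x).shape.as_list() == [2, 3, 5, 4]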
|
D | image_ops_impl.py |
    212  if image.get_shape().ndims is None:
    277  if image.get_shape().ndims is None:
    425  if shape.ndims == 3 or shape.ndims is None:
    434  elif shape.ndims == 4:
    540  if shape.ndims == 3 or shape.ndims is None:
    542  elif shape.ndims == 4:
    580  if shape.ndims == 3 or shape.ndims is None:
    582  elif shape.ndims == 4:
    701  if shape.ndims == 3 or shape.ndims is None:
    703  elif shape.ndims == 4:
    [all …]
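The repeated ndims == 3 / ndims == 4 branches reflect the image-op convention of accepting either a single [H, W, C] image or an [N, H, W, C] batch, dispatching on the static rank when it is known. A small check with one public op (illustrative):

    import tensorflow as tf

    single = tf.zeros([8, 8, 3])        # ndims == 3: one HWC image
    batch = tf.zeros([2, 8, 8, 3])      # ndims == 4: a batch of images
    assert tf.image.flip_left_right(single).shape.as_list() == [8, 8, 3]
    assert tf.image.flip_left_right(batch).shape.as_list() == [2, 8, 8, 3]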
|
/external/tensorflow/tensorflow/python/framework/ |
D | common_shapes.py |
    84   if shape_x.ndims is None or shape_y.ndims is None:
    102  if shape_x.ndims is None or shape_y.ndims is None:
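Both broadcast helpers bail out to an unknown shape when either rank is None; otherwise dimensions are matched from the right. A check through the public wrapper around this module (illustrative):

    import tensorflow as tf

    a = tf.TensorShape([5, 1, 3])
    b = tf.TensorShape([4, 3])
    assert tf.broadcast_static_shape(a, b).as_list() == [5, 4, 3]

    unknown = tf.TensorShape(None)      # ndims is None
    assert tf.broadcast_static_shape(a, unknown).rank is None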
|
/external/tensorflow/tensorflow/python/ops/linalg/ |
D | linear_operator_util.py |
    242  if sh.ndims is not None and sh.ndims < 2:
    389  if a.shape.ndims is None or b.shape.ndims is None:
    393  if a.shape.ndims >= b.shape.ndims:
    401  b_extra_ndims = b.shape.ndims - a.shape.ndims
    449  (np.arange(b_extra_ndims, b.shape.ndims),
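The a_extra_ndims / b_extra_ndims bookkeeping above counts the leading batch axes that one operand has and the other lacks, so they can be broadcast before the matmul. NumPy's @ operator implements the same batch-broadcast semantics, which makes the target shape easy to check (illustrative, not the TF helper):

    import numpy as np

    a = np.zeros([7, 2, 5, 5])     # two batch dims
    b = np.zeros([2, 5, 3])        # one batch dim: one extra leading axis on a
    assert (a @ b).shape == (7, 2, 5, 3)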
|