/external/tensorflow/tensorflow/core/util/ |
D | cuda_launch_config.h |
    197 dim3 virtual_thread_count = dim3(0, 0, 0);
    198 dim3 thread_per_block = dim3(0, 0, 0);
    199 dim3 block_count = dim3(0, 0, 0);
    220 config.virtual_thread_count = dim3(xdim, ydim, 1); in GetCuda2DLaunchConfig()
    221 config.thread_per_block = dim3(block_cols, block_rows, 1); in GetCuda2DLaunchConfig()
    225 config.block_count = dim3( in GetCuda2DLaunchConfig()
    276 config.virtual_thread_count = dim3(xdim, ydim, zdim); in GetCuda3DLaunchConfig()
    277 config.thread_per_block = dim3(threadsx, threadsy, threadsz); in GetCuda3DLaunchConfig()
    278 config.block_count = dim3(blocksx, blocksy, blocksz); in GetCuda3DLaunchConfig()
    331 Status CudaLaunchKernel(void (*function)(Ts...), dim3 grid_dim, dim3 block_dim, in CudaLaunchKernel()
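The GetCudaNDLaunchConfig helpers all follow the same pattern: record the logical extent (virtual_thread_count), pick a block shape, and derive the grid by ceiling division. A minimal sketch of that pattern, with an invented DivUp helper and a fixed 32x8 block (the real helpers also consult occupancy and device limits):

    #include <cuda_runtime.h>

    static unsigned DivUp(unsigned a, unsigned b) { return (a + b - 1) / b; }

    struct Launch2D {
      dim3 virtual_thread_count;  // logical work extent
      dim3 thread_per_block;      // threads per block
      dim3 block_count;           // grid size
    };

    Launch2D MakeLaunch2D(unsigned xdim, unsigned ydim) {
      const unsigned block_cols = 32, block_rows = 8;  // 256 threads per block
      Launch2D config;
      config.virtual_thread_count = dim3(xdim, ydim, 1);
      config.thread_per_block = dim3(block_cols, block_rows, 1);
      config.block_count = dim3(DivUp(xdim, block_cols), DivUp(ydim, block_rows), 1);
      return config;
    }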
|
/external/clang/test/PCH/Inputs/ |
D | cuda.h |
    12 struct dim3 { struct
    14 __host__ __device__ dim3(unsigned x, unsigned y = 1, unsigned z = 1) : x(x), y(y), z(z) {} in x() argument
    19 int cudaConfigureCall(dim3 gridSize, dim3 blockSize, size_t sharedSize = 0,
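This stub mirrors the real dim3 just enough for the tests: the defaulted y/z arguments make a scalar convertible to dim3, which is what lets a launch like kern<<<16, 256>>>() construct dim3(16, 1, 1) and dim3(256, 1, 1) implicitly before clang lowers the <<<>>> syntax to cudaConfigureCall. A hypothetical kernel exercising it:

    // kern is invented for illustration; the launch below is lowered to
    // cudaConfigureCall(dim3(4, 4), dim3(32, 8)) plus the kernel stub call.
    __global__ void kern(int *out) { *out = 1; }

    void host_side(int *out) {
      kern<<<dim3(4, 4), dim3(32, 8)>>>(out);
    }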
|
/external/clang/test/SemaCUDA/Inputs/ |
D | cuda.h |
    15 struct dim3 { struct
    17 __host__ __device__ dim3(unsigned x, unsigned y = 1, unsigned z = 1) : x(x), y(y), z(z) {} in x() argument
    22 int cudaConfigureCall(dim3 gridSize, dim3 blockSize, size_t sharedSize = 0,
|
/external/aac/libFDK/src/ |
D | FDK_matrixCalloc.cpp |
    236 void ***fdkCallocMatrix3D(UINT dim1, UINT dim2, UINT dim3, UINT size) { in fdkCallocMatrix3D() argument
    242 if (!dim1 || !dim2 || !dim3) return NULL; in fdkCallocMatrix3D()
    252 if ((p3 = (char *)fdkCallocMatrix1D(dim1 * dim2 * dim3, size)) == NULL) { in fdkCallocMatrix3D()
    263 p3 += dim3 * size; in fdkCallocMatrix3D()
    278 void ***fdkCallocMatrix3D_int(UINT dim1, UINT dim2, UINT dim3, UINT size, in fdkCallocMatrix3D_int() argument
    285 if (!dim1 || !dim2 || !dim3) return NULL; in fdkCallocMatrix3D_int()
    297 if ((p3 = (char *)fdkCallocMatrix1D_int(dim1 * dim2 * dim3, size, s)) == in fdkCallocMatrix3D_int()
    309 p3 += dim3 * size; in fdkCallocMatrix3D_int()
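fdkCallocMatrix3D builds a three-level structure over one contiguous payload: a table of row pointers, a table of column pointers, and dim1*dim2*dim3 elements that the innermost pointers stride through in dim3*size steps (the `p3 += dim3 * size` above). A minimal sketch of that layout using plain calloc, without the library's integer-aligned variant or its partial-failure cleanup:

    #include <stdlib.h>

    void ***CallocMatrix3D(unsigned dim1, unsigned dim2, unsigned dim3, unsigned size) {
      if (!dim1 || !dim2 || !dim3) return NULL;
      void ***p1 = (void ***)calloc(dim1, sizeof(void **));      /* level 1: rows */
      void **p2 = (void **)calloc(dim1 * dim2, sizeof(void *));  /* level 2: columns */
      char *p3 = (char *)calloc(dim1 * dim2 * dim3, size);       /* contiguous payload */
      if (!p1 || !p2 || !p3) { free(p1); free(p2); free(p3); return NULL; }
      for (unsigned i = 0; i < dim1; i++) {
        p1[i] = p2 + i * dim2;
        for (unsigned j = 0; j < dim2; j++) {
          p2[i * dim2 + j] = p3;
          p3 += dim3 * size;  /* same stride as the excerpt above */
        }
      }
      return p1;
    }

Note that freeing this structure means releasing all three blocks; the real library pairs these allocators with matching free functions.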
|
/external/clang/test/CodeGenCUDA/Inputs/ |
D | cuda.h |
    12 struct dim3 { struct
    14 __host__ __device__ dim3(unsigned x, unsigned y = 1, unsigned z = 1) : x(x), y(y), z(z) {} in x() argument
    19 int cudaConfigureCall(dim3 gridSize, dim3 blockSize, size_t sharedSize = 0,
|
/external/aac/libFDK/include/ |
D | FDK_matrixCalloc.h |
    140 void*** fdkCallocMatrix3D(UINT dim1, UINT dim2, UINT dim3, UINT size);
    143 void*** fdkCallocMatrix3D_int(UINT dim1, UINT dim2, UINT dim3, UINT size,
    212 #define FDK_ALLOCATE_MEMORY_3D(a, dim1, dim2, dim3, type) \ argument
    213 if (((a) = (type***)fdkCallocMatrix3D((dim1), (dim2), (dim3), \
    218 #define FDK_ALLOCATE_MEMORY_3D_INT(a, dim1, dim2, dim3, type, s) \ argument
    219 if (((a) = (type***)fdkCallocMatrix3D_int((dim1), (dim2), (dim3), \
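The macro bodies are truncated here, but from the visible `if (... == NULL)` pattern they presumably branch to an error path on allocation failure, so call sites reduce to a single line (the variable and dimensions below are hypothetical):

    /* `coeffs` and the 4x8x16 shape are invented for illustration. */
    FIXP_DBL ***coeffs = NULL;
    FDK_ALLOCATE_MEMORY_3D(coeffs, 4, 8, 16, FIXP_DBL);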
|
/external/clang/lib/Headers/ |
D | __clang_cuda_runtime_wrapper.h |
    289 __device__ inline __cuda_builtin_blockDim_t::operator dim3() const { in dim3() function
    290 return dim3(x, y, z); in dim3()
    293 __device__ inline __cuda_builtin_gridDim_t::operator dim3() const { in dim3() function
    294 return dim3(x, y, z); in dim3()
    308 #define dim3 __cuda_builtin_blockDim_t macro
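These conversion operators are what let the builtin blockDim/gridDim objects decay to plain dim3 values inside device code:

    // Sketch: copy the builtin variables into ordinary dim3 locals.
    __global__ void show_dims(unsigned *out) {
      dim3 bd = blockDim;  // __cuda_builtin_blockDim_t::operator dim3()
      dim3 gd = gridDim;   // __cuda_builtin_gridDim_t::operator dim3()
      *out = bd.x * gd.x;  // total threads along x
    }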
|
D | cuda_builtin_vars.h |
    29 struct dim3;
    94 __attribute__((device)) operator dim3() const;
    105 __attribute__((device)) operator dim3() const;
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/tests_data/ |
D | saxpy.ll |
    5 %struct.dim3 = type { i32, i32, i32 }
    8 @blockDim = external addrspace(1) global %struct.dim3
    36 … getelementptr inbounds (%struct.dim3, %struct.dim3* addrspacecast (%struct.dim3 addrspace(1)* @bl…
    88 … getelementptr inbounds (%struct.dim3, %struct.dim3* addrspacecast (%struct.dim3 addrspace(1)* @bl…
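The IR reads @blockDim through an addrspacecast because the builtin lives in address space 1 (device global memory). CUDA source of the kind that lowers to this test input is the conventional saxpy kernel (the exact original source is an assumption):

    __global__ void saxpy(int n, float a, const float *x, float *y) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;  // reads @blockDim et al.
      if (i < n) y[i] = a * x[i] + y[i];
    }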
|
/external/eigen/unsupported/test/ |
D | cxx11_tensor_ifft.cpp |
    100 static void test_sub_fft_ifft_invariant(int dim0, int dim1, int dim2, int dim3) { in test_sub_fft_ifft_invariant() argument
    101 Tensor<double, 4, DataLayout> tensor(dim0, dim1, dim2, dim3); in test_sub_fft_ifft_invariant()
    117 VERIFY_IS_EQUAL(tensor_after_fft.dimension(3), dim3); in test_sub_fft_ifft_invariant()
    121 VERIFY_IS_EQUAL(tensor_after_fft_ifft.dimension(3), dim3); in test_sub_fft_ifft_invariant()
    126 for (int l = 0; l < dim3; ++l) { in test_sub_fft_ifft_invariant()
|
D | cxx11_tensor_index_list.cpp |
    274 int dim3 = 0; in test_dynamic_index_list() local
    276 auto reduction_axis = make_index_list(dim1, dim2, dim3); in test_dynamic_index_list()
|
D | cxx11_tensor_morphing.cpp |
    30 Tensor<float, 2>::Dimensions dim3(2,21); in test_simple_reshape() local
    31 tensor4 = tensor1.reshape(dim1).reshape(dim3); in test_simple_reshape()
|
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/ |
D | Tensor.h |
    358 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3) in Tensor() argument
    359 : m_storage(dim1*dim2*dim3, array<Index, 3>(dim1, dim2, dim3)) in Tensor()
    363 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4) in Tensor() argument
    364 : m_storage(dim1*dim2*dim3*dim4, array<Index, 4>(dim1, dim2, dim3, dim4)) in Tensor()
    368 …EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index… in Tensor() argument
    369 : m_storage(dim1*dim2*dim3*dim4*dim5, array<Index, 5>(dim1, dim2, dim3, dim4, dim5)) in Tensor()
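These constructors size m_storage to the product of the dimensions, so a rank-3 tensor owns dim1*dim2*dim3 elements up front:

    #include <unsupported/Eigen/CXX11/Tensor>

    Eigen::Tensor<float, 3> t(4, 5, 6);  // allocates 4*5*6 = 120 floats
    t.setZero();
    t(3, 4, 5) = 1.0f;                   // last valid element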
|
D | TensorMap.h |
    82 …erArgType dataPtr, Index dim1, Index dim2, Index dim3) : m_data(dataPtr), m_dimensions(dim1, dim2,… in TensorMap() argument
    86 …ype dataPtr, Index dim1, Index dim2, Index dim3, Index dim4) : m_data(dataPtr), m_dimensions(dim1,… in TensorMap() argument
    90 …taPtr, Index dim1, Index dim2, Index dim3, Index dim4, Index dim5) : m_data(dataPtr), m_dimensions… in TensorMap() argument
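TensorMap takes the same dimension arguments but wraps a caller-owned buffer instead of allocating; the caller must guarantee the buffer holds at least dim1*dim2*dim3 elements:

    #include <unsupported/Eigen/CXX11/Tensor>

    float data[4 * 5 * 6] = {};                                     // caller-owned storage
    Eigen::TensorMap<Eigen::Tensor<float, 3>> view(data, 4, 5, 6);  // no copy
    view(0, 0, 0) = 1.0f;                                           // writes data[0]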
|
D | TensorConvolution.h |
    867 dim3 block_size;
    898 dim3 num_blocks(num_x_blocks, numext::mini<int>(num_y_blocks, ceil(numP, block_size.y)));
    943 dim3 block_size;
    956 …dim3 num_blocks(num_x_blocks, num_y_blocks, numext::mini<int>(num_z_blocks, ceil(numP, block_size.…
    1022 dim3 block_size;
    1026 dim3 num_blocks(ceil(numX, maxX), ceil(numY, maxY), ceil(numZ, maxZ));
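The launches at lines 898 and 956 cap the grid along one axis by both a precomputed block budget and the work actually remaining, ceil(numP / block_size.y). A hedged sketch of that capping pattern with invented names:

    #include <algorithm>
    #include <cuda_runtime.h>

    dim3 CapGridY(int num_x_blocks, int max_y_blocks, int numP, dim3 block_size) {
      int y_needed = (numP + (int)block_size.y - 1) / (int)block_size.y;  // ceil
      return dim3(num_x_blocks, std::min(max_y_blocks, y_needed), 1);
    }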
|
D | TensorContractionCuda.h |
    1320 const dim3 num_blocks(m_blocks, n_blocks, 1);
    1321 const dim3 block_size(8, 8, 8);
    1331 const dim3 num_blocks(m_blocks, n_blocks, 1);
    1332 const dim3 block_size(16, 16, 1);
    1337 const dim3 num_blocks(m_blocks, n_blocks, 1);
    1338 const dim3 block_size(8, 32, 1);
|
/external/eigen/test/ |
D | cuda_common.h |
    11 dim3 threadIdx, blockDim, blockIdx;
    49 dim3 Blocks(128); in run_on_cuda()
    50 dim3 Grids( (n+int(Blocks.x)-1)/int(Blocks.x) ); in run_on_cuda()
|
/external/tensorflow/tensorflow/python/keras/layers/ |
D | convolutional.py |
    2029 dim3 = self.size[2] * input_shape[
    2032 [input_shape[0], input_shape[1], dim1, dim2, dim3])
    2038 dim3 = self.size[2] * input_shape[
    2041 [input_shape[0], dim1, dim2, dim3, input_shape[4]])
    2283 dim3 = input_shape[4] + 2 * self.padding[2][0]
    2285 dim3 = None
    2287 [input_shape[0], input_shape[1], dim1, dim2, dim3])
    2298 dim3 = input_shape[3] + 2 * self.padding[2][1]
    2300 dim3 = None
    2302 [input_shape[0], dim1, dim2, dim3, input_shape[4]])
    [all …]
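These branches compute the third spatial output dimension for channels_first (spatial axes shifted past the channel axis) versus channels_last layouts, propagating None when the input extent is unknown. For example, UpSampling3D with size=(2, 2, 2) maps a channels_last input of shape (None, 4, 4, 4, 8) to (None, 8, 8, 8, 8), while ZeroPadding3D with one cell of padding on each side of every spatial axis maps it to (None, 6, 6, 6, 8).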
|
/external/tensorflow/tensorflow/contrib/reduce_slice_ops/kernels/ |
D | reduce_slice_ops.cc |
    65 Index dim3 = output.dimension(2); \
    66 Index size = dim1 * dim2 * dim3; \
    76 XYZ xyz = global_index_to_xyz(global, XYZ(dim1, dim2, dim3)); \
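global_index_to_xyz recovers 3-D coordinates from a flattened loop index over the dim1*dim2*dim3 elements. A hedged sketch assuming row-major order with dim3 fastest-varying (the actual layout in this kernel is not shown here):

    struct XYZ { long long x, y, z; };

    XYZ GlobalIndexToXYZ(long long global, XYZ dims) {
      XYZ r;
      r.z = global % dims.z;             // fastest axis: dim3
      r.y = (global / dims.z) % dims.y;  // middle axis: dim2
      r.x = global / (dims.z * dims.y);  // slowest axis: dim1
      return r;
    }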
|
/external/tensorflow/tensorflow/compiler/xla/ |
D | literal_util.h |
    515 int64 dim3 = 0; in CreateR4Projected() local
    517 array(dim0, dim1, dim2, dim3) = value; in CreateR4Projected()
    518 ++dim3; in CreateR4Projected()
    520 CHECK_EQ(dim3_size, dim3); in CreateR4Projected()
|
/external/tensorflow/tensorflow/contrib/rnn/kernels/ |
D | lstm_ops_gpu.cu.cc |
    260 dim3 block_dim_2d(std::min(batch_size, 8), 32); in LSTMBlockCellFpropWithCUDA()
    261 dim3 grid_dim_2d(Eigen::divup(batch_size, static_cast<int>(block_dim_2d.x)), in LSTMBlockCellFpropWithCUDA()
    376 dim3 block_dim_2d(std::min(batch_size, 8), 32); in LSTMBlockCellBpropWithCUDA()
    377 dim3 grid_dim_2d(Eigen::divup(batch_size, static_cast<int>(block_dim_2d.x)), in LSTMBlockCellBpropWithCUDA()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | reduction_gpu_kernels.cu.h |
    621 dim3 block_dim(32, std::min(Eigen::divup(extent_x, rows_per_warp), 32), 1);
    622 dim3 grid_dim(1,
    650 dim3 new_grid_dim((grid_dim.y * extent_y + 31) / 32, 1, 1);
    651 dim3 num_threads(128, 1, 1);
    663 dim3 block_dim(32, std::min(extent_x, 32), 1);
    664 dim3 grid_dim((extent_y + 31) / 32, 1, 1);
    689 dim3 new_grid_dim((grid_dim.y * extent_y + 31) / 32, 1, 1);
    690 dim3 num_threads(128, 1, 1);
|
D | depthwise_conv_op_gpu.h |
    605 dim3 block_dim; in LaunchDepthwiseConv2dGPUSmall()
    610 block_dim = dim3(kBlockDepth, args.in_cols, block_height); in LaunchDepthwiseConv2dGPUSmall()
    619 block_dim = dim3(args.in_cols, block_height, kBlockDepth); in LaunchDepthwiseConv2dGPUSmall()
    1600 dim3 block_dim;
    1605 block_dim = dim3(kBlockDepth, args.in_cols, block_height);
    1613 block_dim = dim3(args.in_cols, block_height, kBlockDepth);
|
/external/skia/src/compute/hs/cuda/ |
D | hs_cuda.inl |
    657 dim3 grid;
    752 dim3 grid;
|
/external/skqp/src/compute/hs/cuda/ |
D | hs_cuda.inl |
    657 dim3 grid;
    752 dim3 grid;
|