/external/clang/test/SemaCUDA/
D | cuda-builtin-vars.cu
    14  out[i++] = blockIdx.x;  in kernel()
    15  blockIdx.x = 0; // expected-error {{no setter defined for property 'x'}}  in kernel()
    16  out[i++] = blockIdx.y;  in kernel()
    17  blockIdx.y = 0; // expected-error {{no setter defined for property 'y'}}  in kernel()
    18  out[i++] = blockIdx.z;  in kernel()
    19  blockIdx.z = 0; // expected-error {{no setter defined for property 'z'}}  in kernel()
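This SemaCUDA test pins down that the grid built-ins are read-only: every component of blockIdx can be read, but any assignment is rejected during semantic analysis. A minimal sketch of the behavior being tested (kernel and buffer names are hypothetical):

    __global__ void read_only_demo(unsigned *out) {
      out[blockIdx.x] = blockIdx.x;  // OK: components of blockIdx are readable
      // blockIdx.x = 0;             // rejected: no setter defined for property 'x'
    }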
|
/external/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ |
D | WorkList.h
    32  unsigned blockIdx; // This is the index of the next statement.  variable
    40  blockIdx(idx) {}  in WorkListUnit()
    46  blockIdx(0) {}  in WorkListUnit()
    58  unsigned getIndex() const { return blockIdx; }  in getIndex()
|
/external/clang/test/CodeGenCUDA/ |
D | cuda-builtin-vars.cu
    13  out[i++] = blockIdx.x; // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()  in kernel()
    14  out[i++] = blockIdx.y; // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.y()  in kernel()
    15  out[i++] = blockIdx.z; // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.z()  in kernel()
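The CodeGen test fixes the lowering: each read of a blockIdx component compiles to a call of the matching NVVM intrinsic. A hedged sketch of the equivalence, calling Clang's __nvvm_read_ptx_sreg_ctaid_x builtin directly (kernel name hypothetical):

    __global__ void ctaid_demo(unsigned *out) {
      out[0] = blockIdx.x;                      // emits @llvm.nvvm.read.ptx.sreg.ctaid.x()
      out[1] = __nvvm_read_ptx_sreg_ctaid_x();  // the same intrinsic, called explicitly
    }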
|
/external/tensorflow/tensorflow/core/kernels/ |
D | concat_lib_gpu_impl.cu.cc
    40  IntType gidx = blockIdx.x * blockDim.x + threadIdx.x;  in concat_fixed_kernel()
    43  IntType gidy = blockIdx.y * blockDim.y + threadIdx.y;  in concat_fixed_kernel()
    68  IntType gidx = blockIdx.x * blockDim.x + threadIdx.x;  in concat_variable_kernel()
    107 IntType gidy = blockIdx.y * blockDim.y + threadIdx.y;  in concat_variable_kernel()
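Both concat kernels compute the standard two-dimensional global index: the block's offset in the grid times the block size, plus the thread's offset within the block. A self-contained sketch of the pattern (all names hypothetical):

    __global__ void index_2d_demo(float *dst, int width, int height) {
      int gidx = blockIdx.x * blockDim.x + threadIdx.x;  // global column
      int gidy = blockIdx.y * blockDim.y + threadIdx.y;  // global row
      if (gidx < width && gidy < height)                 // guard partial edge blocks
        dst[gidy * width + gidx] = 0.0f;
    }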
|
D | bias_op_gpu.cu.cc
    126 for (int32 index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;  in BiasGradNHWC_SharedAtomics()
    154 int32 bias_index = blockIdx.x % bias_size;  in BiasGradNCHW_SharedAtomics()
    155 int32 group_index = blockIdx.x / bias_size;  in BiasGradNCHW_SharedAtomics()
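BiasGradNCHW_SharedAtomics decomposes the linear block index with modulus and division, so consecutive blocks cycle through the bias channels while each run of bias_size blocks advances to the next slice of data. A sketch of the decomposition (kernel body hypothetical):

    __global__ void block_decomp_demo(float *out, int bias_size) {
      int bias_index  = blockIdx.x % bias_size;  // channel handled by this block
      int group_index = blockIdx.x / bias_size;  // data slice handled by this block
      out[group_index * bias_size + bias_index] = 0.0f;
    }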
|
D | reduction_gpu_kernels.cu.h
    180 const int bid = blockIdx.x;
    218 const int row = (blockIdx.x * blockDim.x + threadIdx.x) / 32;
    222 int gid = threadIdx.x + blockIdx.x * blockDim.x;
    260 rows_per_warp * (blockIdx.y * blockDim.y + threadIdx.y);
    301 out[col * gridDim.y + blockIdx.y] = s;
    311 int row = blockIdx.y * blockDim.y + threadIdx.y;
    312 int col = blockIdx.x * 32 + threadIdx.x;
    347 min(blockDim.y, num_rows - blockIdx.y * blockDim.y);
    353 out[col * gridDim.y + blockIdx.y] = s;
    365 const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    [all …]
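Several of these reduction kernels assign one 32-thread warp per row (line 218): the global thread id divided by 32 selects the row, and the lane id strides across the columns. A sketch of that scheme, assuming blockDim.x is a multiple of 32 (names hypothetical):

    __global__ void warp_per_row_demo(const float *in, float *out, int rows, int cols) {
      int row  = (blockIdx.x * blockDim.x + threadIdx.x) / 32;
      int lane = threadIdx.x % 32;
      if (row >= rows) return;               // whole warp exits together
      float sum = 0.f;
      for (int col = lane; col < cols; col += 32)
        sum += in[row * cols + col];         // each lane sums a strided slice of the row
      for (int offset = 16; offset > 0; offset /= 2)
        sum += __shfl_down_sync(0xffffffff, sum, offset);  // reduce within the warp
      if (lane == 0) out[row] = sum;
    }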
|
D | random_op_gpu.cu.cc
    134 const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in Run()
    175 const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in Run()
|
D | check_numerics_op_gpu.cu.cc
    39  const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in CheckNumericsKernel()
|
D | split_lib_gpu.cu.cc
    118 IntType gidx = blockIdx.x * blockDim.x + threadIdx.x;  in split_v_kernel()
    157 IntType gidy = blockIdx.y * blockDim.y + threadIdx.y;  in split_v_kernel()
|
D | adjust_hsv_gpu.cu.h
    103 const int64 idx = (blockDim.x * blockIdx.x + threadIdx.x) * 3;  in adjust_hsv_nhwc()
|
D | softmax_op_gpu.cu.cc
    41  const int tid = blockIdx.x * blockDim.x + threadIdx.x;  in GenerateNormalizedProb()
|
D | parameterized_truncated_normal_op_gpu.cu.cc
    59  const int32 initial_offset = blockIdx.x * blockDim.x + threadIdx.x;  in TruncatedNormalKernel()
|
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/ |
D | TensorReductionCuda.h
    115 const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in ReductionInitKernel()
    129 const Index first_index = blockIdx.x * BlockSize * NumPerThread + threadIdx.x;  in FullReductionKernel()
    205 const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in ReductionInitKernelHalfFloat()
    223 const Index first_index = blockIdx.x * BlockSize * NumPerThread + 2*threadIdx.x;  in FullReductionKernelHalfFloat()
    390 const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    400 for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
    461 const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    476 for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
    670 const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
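Lines 400 and 476 stride over input blocks with blockIdx.x itself: each CUDA block starts at its own index and jumps ahead by gridDim.x, so any grid size covers all input blocks. A sketch of that block-stride loop (the per-block work is a hypothetical stand-in):

    __global__ void block_stride_demo(float *partials, int num_input_blocks) {
      for (int i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
        if (threadIdx.x == 0) partials[i] += 1.0f;  // stand-in for per-block work
      }
    }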
|
D | TensorConvolution.h
    579 const int first_x = blockIdx.x * maxX;
    584 const int first_plane = blockIdx.y * blockDim.y;
    628 const int first_x = blockIdx.x * maxX;
    633 const int first_y = blockIdx.y * maxY;
    638 const int first_plane = blockIdx.z * blockDim.z;
    697 const int first_x = blockIdx.x * maxX;
    701 const int first_y = blockIdx.y * maxY;
    705 const int first_z = blockIdx.z * maxZ;
|
D | TensorRandom.h
    24  blockIdx.x * blockDim.x + threadIdx.x +  in get_random_seed()
    25  gridDim.x * blockDim.x * (blockIdx.y * blockDim.y + threadIdx.y);  in get_random_seed()
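get_random_seed flattens a two-dimensional launch into one unique id, so every thread seeds its generator differently: the global x index, plus the full x extent of the grid times the global y index. A sketch of that computation as a standalone device function (name hypothetical):

    __device__ unsigned unique_thread_id_2d() {
      unsigned x = blockIdx.x * blockDim.x + threadIdx.x;  // global x index
      unsigned y = blockIdx.y * blockDim.y + threadIdx.y;  // global y index
      return x + gridDim.x * blockDim.x * y;               // row-major flattening
    }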
|
D | TensorContractionCuda.h
    26   const Index m_block_idx = blockIdx.x;  in EigenContractionKernelInternal()
    27   const Index n_block_idx = blockIdx.y;  in EigenContractionKernelInternal()
    509  const Index m_block_idx = blockIdx.x;  in EigenContractionKernel()
    510  const Index n_block_idx = blockIdx.y;  in EigenContractionKernel()
    1143 const Index m_block_idx = blockIdx.x;  in EigenFloatContractionKernel()
    1144 const Index n_block_idx = blockIdx.y;  in EigenFloatContractionKernel()
    1183 const Index m_block_idx = blockIdx.x;  in EigenFloatContractionKernel16x16()
    1184 const Index n_block_idx = blockIdx.y;  in EigenFloatContractionKernel16x16()
|
/external/tensorflow/tensorflow/contrib/rnn/kernels/ |
D | lstm_ops_gpu.cu.cc
    48  const int batch_id = blockIdx.x * blockDim.x + threadIdx.x;  in lstm_gates()
    49  const int act_id = blockIdx.y * blockDim.y + threadIdx.y;  in lstm_gates()
    160 const int gid = blockDim.x * blockIdx.x + threadIdx.x;  in concat_xh()
    254 const int batch_id = blockIdx.x * blockDim.x + threadIdx.x;  in lstm_gates_bprop()
    255 const int act_id = blockIdx.y * blockDim.y + threadIdx.y;  in lstm_gates_bprop()
|
/external/eigen/test/ |
D | cuda_common.h
    11  dim3 threadIdx, blockDim, blockIdx;  variable
    26  int i = threadIdx.x + blockIdx.x*blockDim.x;  in run_on_cuda_meta_kernel()
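The declaration at line 11 exists so the same kernel source also builds as plain C++: when no CUDA compiler is present, threadIdx, blockDim, and blockIdx become ordinary file-scope variables and the test's meta kernel can be exercised on the host. A sketch of that fallback (the __CUDACC__ guard is an assumption; Eigen's actual header may differ):

    #ifndef __CUDACC__
    struct dim3 { unsigned x, y, z; };
    dim3 threadIdx, blockDim, blockIdx;  // stand-ins so device code parses on the host
    #endif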
|
/external/tensorflow/tensorflow/examples/adding_an_op/ |
D | cuda_op_kernel.cu.cc
    21  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;  in AddOneKernel()
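AddOneKernel uses the canonical grid-stride loop from the TensorFlow custom-op example: every thread starts at its global index and advances by the total number of launched threads, so one kernel is correct for any launch configuration. A generic sketch in the same shape (the loop body is hypothetical, since the match is truncated here):

    __global__ void grid_stride_demo(const int *in, int *out, int N) {
      for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
           i += blockDim.x * gridDim.x) {
        out[i] = in[i] + 1;  // per-element work; any grid size covers all N elements
      }
    }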
|
/external/tensorflow/tensorflow/tools/ci_build/builds/user_ops/ |
D | cuda_op_kernel.cu.cc
    21  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;  in AddOneKernel()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/tests_data/ |
D | saxpy.ll
    7   @blockIdx = external addrspace(1) global %struct.uint3
    35  …truct.uint3, %struct.uint3* addrspacecast (%struct.uint3 addrspace(1)* @blockIdx to %struct.uint3*…
    87  …truct.uint3, %struct.uint3* addrspacecast (%struct.uint3 addrspace(1)* @blockIdx to %struct.uint3*…
|
/external/clang/lib/Headers/ |
D | cuda_builtin_vars.h
    113 __CUDA_BUILTIN_VAR __cuda_builtin_blockIdx_t blockIdx;  variable
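This header is where blockIdx itself comes from in Clang's CUDA mode: not a plain struct instance but a property-style wrapper whose x/y/z reads call the special-register intrinsics, which is also why the SemaCUDA test above reports "no setter defined for property". A heavily simplified sketch of the declaration pattern (the real header builds this with macros and differs in detail):

    struct __cuda_builtin_blockIdx_t {
      __declspec(property(get = __fetch_x)) unsigned int x;  // read-only property
      static __device__ unsigned int __fetch_x() {
        return __nvvm_read_ptx_sreg_ctaid_x();               // lowers to the intrinsic
      }
      // y and z follow the same pattern
    };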
|
/external/tensorflow/tensorflow/contrib/mpi_collectives/kernels/ |
D | ring.cu.cc
    90  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < N;  in elemwise_accum()
|
/external/tensorflow/tensorflow/core/util/ |
D | cuda_device_functions.h
    90  return detail::CudaGridRange<T>(blockIdx.x * blockDim.x + threadIdx.x,  in CudaGridRangeX()
    98  return detail::CudaGridRange<T>(blockIdx.y * blockDim.y + threadIdx.y,  in CudaGridRangeY()
    106 return detail::CudaGridRange<T>(blockIdx.z * blockDim.z + threadIdx.z,  in CudaGridRangeZ()
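CudaGridRangeX wraps the grid-stride pattern in a C++ range, so kernels can iterate with a range-for instead of writing the index arithmetic by hand. A usage sketch assuming this header's API (kernel name hypothetical):

    #include "tensorflow/core/util/cuda_device_functions.h"

    __global__ void fill_kernel(float *out, int n, float value) {
      for (int i : tensorflow::CudaGridRangeX(n)) {
        out[i] = value;  // visits every index below n regardless of launch shape
      }
    }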
|
/external/spirv-llvm/lib/SPIRV/ |
D | OCLUtil.cpp
    365 getBlockInvokeTy(Function * F, unsigned blockIdx) {  in getBlockInvokeTy()  argument
    367 PointerType * funcPtr = cast<PointerType>(params[blockIdx]);  in getBlockInvokeTy()
|