/external/llvm-project/clang/test/SemaCUDA/ |
D | cuda-builtin-vars.cu |
    14  out[i++] = blockIdx.x;  in kernel()
    15  blockIdx.x = 0; // expected-error {{no setter defined for property 'x'}}  in kernel()
    16  out[i++] = blockIdx.y;  in kernel()
    17  blockIdx.y = 0; // expected-error {{no setter defined for property 'y'}}  in kernel()
    18  out[i++] = blockIdx.z;  in kernel()
    19  blockIdx.z = 0; // expected-error {{no setter defined for property 'z'}}  in kernel()
|
/external/clang/test/SemaCUDA/ |
D | cuda-builtin-vars.cu |
    14  out[i++] = blockIdx.x;  in kernel()
    15  blockIdx.x = 0; // expected-error {{no setter defined for property 'x'}}  in kernel()
    16  out[i++] = blockIdx.y;  in kernel()
    17  blockIdx.y = 0; // expected-error {{no setter defined for property 'y'}}  in kernel()
    18  out[i++] = blockIdx.z;  in kernel()
    19  blockIdx.z = 0; // expected-error {{no setter defined for property 'z'}}  in kernel()
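Both copies of this Sema test pin down the same contract: blockIdx is read-only, and assigning to any of its components is rejected with the "no setter defined for property" diagnostic (the header entries at the end of this listing show why it is a property at all). A minimal sketch of the pattern under test, with kernel and buffer names illustrative rather than copied from the test:

    // Reading a component compiles; writing one is a compile-time error.
    __global__ void kernel(unsigned *out) {
      out[0] = blockIdx.x;  // OK: the property getter reads a special register
      // blockIdx.x = 0;    // error: no setter defined for property 'x'
    }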
|
/external/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ |
D | WorkList.h |
    32  unsigned blockIdx; // This is the index of the next statement.  variable
    40  blockIdx(idx) {}  in WorkListUnit()
    46  blockIdx(0) {}  in WorkListUnit()
    58  unsigned getIndex() const { return blockIdx; }  in getIndex()
|
/external/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ |
D | WorkList.h |
    31  unsigned blockIdx; // This is the index of the next statement.  variable
    39  blockIdx(idx) {}  in WorkListUnit()
    45  blockIdx(0) {}  in WorkListUnit()
    57  unsigned getIndex() const { return blockIdx; }  in getIndex()
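Despite the name, the static analyzer's blockIdx has nothing to do with CUDA: it is the index of the next statement to execute inside a CFG block, carried by each work-list entry. A rough, self-contained analogue of the record (member types simplified; the real class also stores the ExplodedNode and a block counter):

    struct CFGBlock;  // opaque here; the real type lives in clang/Analysis
    struct WorkListUnit {
      const CFGBlock *block = nullptr;  // block being walked, if any
      unsigned blockIdx = 0;            // index of the next statement in block
      unsigned getIndex() const { return blockIdx; }
    };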
|
/external/clang/test/CodeGenCUDA/ |
D | cuda-builtin-vars.cu |
    13  out[i++] = blockIdx.x; // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()  in kernel()
    14  out[i++] = blockIdx.y; // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.y()  in kernel()
    15  out[i++] = blockIdx.z; // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.z()  in kernel()
|
/external/llvm-project/clang/test/CodeGenCUDA/ |
D | cuda-builtin-vars.cu |
    13  out[i++] = blockIdx.x; // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()  in kernel()
    14  out[i++] = blockIdx.y; // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.y()  in kernel()
    15  out[i++] = blockIdx.z; // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.ctaid.z()  in kernel()
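Both CodeGen tests fix the lowering: each read of a blockIdx component compiles to a call to the matching NVVM special-register intrinsic (ctaid.x/y/z). A sketch of what the CHECK lines assert, written out by hand; under clang the two stores below should produce the same intrinsic call:

    __global__ void kernel(unsigned *out) {
      out[0] = blockIdx.x;                      // via the builtin-variable header
      out[1] = __nvvm_read_ptx_sreg_ctaid_x();  // the underlying clang builtin
    }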
|
/external/llvm-project/mlir/test/Transforms/ |
D | parametric-mapping.mlir |
    26  // blockIdx.x * blockDim.x
    29  // threadIdx.x + blockIdx.x * blockDim.x
    32  // thread_offset = step * (threadIdx.x + blockIdx.x * blockDim.x)
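The comments in this MLIR test spell out the classic loop-to-GPU mapping: each thread claims the iteration at step * (threadIdx.x + blockIdx.x * blockDim.x) past the loop's lower bound. As a worked instance with blockDim.x = 128, blockIdx.x = 2, threadIdx.x = 5 and step = 4: the global id is 5 + 2 * 128 = 261, so the thread's offset is 4 * 261 = 1044. The same mapping as a hand-written kernel (a sketch; names are illustrative):

    __global__ void mapped_loop(float *data, int lb, int step) {
      int gid = threadIdx.x + blockIdx.x * blockDim.x;  // global thread id
      int i = lb + step * gid;  // the loop iteration this thread owns
      data[i] += 1.0f;          // placeholder loop body
    }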
|
/external/tensorflow/tensorflow/core/kernels/ |
D | debug_ops_gpu.cu.cc |
    41  const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in CurtHealthKernel()
    59  const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in ConciseHealthKernel()
    90  const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in FullHealthKernel()
    133 const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in ReduceInfNanThreeSlotsKernel()
|
D | concat_lib_gpu_impl.cu.cc |
    41  IntType gidx = blockIdx.x * blockDim.x + threadIdx.x;  in concat_fixed_kernel()
    44  IntType gidy = blockIdx.y * blockDim.y + threadIdx.y;  in concat_fixed_kernel()
    69  IntType gidx = blockIdx.x * blockDim.x + threadIdx.x;  in concat_variable_kernel()
    108 IntType gidy = blockIdx.y * blockDim.y + threadIdx.y;  in concat_variable_kernel()
|
D | check_numerics_op_gpu.cu.cc |
    41  const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in CheckNumericsKernel()
    64  const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in CheckNumericsKernelV2()
|
D | reduction_gpu_kernels.cu.h |
    167 const int bid = blockIdx.x;
    209 const int row = blockIdx.x * warps_per_block + warp_index;
    213 int gid = threadIdx.x + blockIdx.x * blockDim.x;
    279 rows_per_warp * (blockIdx.y * blockDim.y + threadIdx.y);
    333 out[col * gridDim.y + blockIdx.y] = s;
    343 int row = blockIdx.y * blockDim.y + threadIdx.y;
    344 int col = blockIdx.x * TF_RED_WARPSIZE + threadIdx.x;
    390 min(static_cast<int>(blockDim.y), num_rows - blockIdx.y * blockDim.y);
    397 out[col * gridDim.y + blockIdx.y] = s;
    409 const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    [all …]
|
D | bias_op_gpu.cu.cc |
    138 for (int32 index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;  in BiasGradNHWC_SharedAtomics()
    165 int32 bias_index = blockIdx.x % bias_size;  in BiasGradNCHW_SharedAtomics()
    166 int32 group_index = blockIdx.x / bias_size;  in BiasGradNCHW_SharedAtomics()
|
D | softmax_op_gpu.cu.cc |
    76  int tid = blockIdx.x * blockDim.x + threadIdx.x;  in GenerateNormalizedProb()
    93  tid = blockIdx.x * blockDim.x + threadIdx.x;  in GenerateNormalizedProb()
    115 int tid = blockIdx.x * blockDim.x + threadIdx.x;  in GenerateNormalizedProb()
|
D | random_op_gpu.h |
    146 const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    188 const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;
|
D | relu_op_gpu.cu.cc |
    49  int32 index = blockIdx.x * blockDim.x + threadIdx.x;  in ReluGradHalfKernel()
    102 int32 index = blockIdx.x * blockDim.x + threadIdx.x;  in ReluGradHalfKernelVector()
|
D | split_lib_gpu.cu.cc |
    120 IntType gidx = blockIdx.x * blockDim.x + threadIdx.x;  in split_v_kernel()
    159 IntType gidy = blockIdx.y * blockDim.y + threadIdx.y;  in split_v_kernel()
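All the kernels in this directory lean on the same idiom: a per-dimension global index blockIdx * blockDim + threadIdx, bounds-checked against the problem size. A minimal 2-D sketch of that shared pattern (not TensorFlow code; names are illustrative):

    __global__ void index_demo(const float *in, float *out, int nx, int ny) {
      int gidx = blockIdx.x * blockDim.x + threadIdx.x;  // fastest-varying axis
      int gidy = blockIdx.y * blockDim.y + threadIdx.y;  // second axis
      if (gidx < nx && gidy < ny)
        out[gidy * nx + gidx] = in[gidy * nx + gidx];    // row-major addressing
    }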
|
/external/eigen/test/ |
D | cuda_common.h |
    11  dim3 threadIdx, blockDim, blockIdx;  variable
    26  int i = threadIdx.x + blockIdx.x*blockDim.x;  in run_on_cuda_meta_kernel()
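Here blockIdx is declared as an ordinary dim3 variable: the Eigen test header fakes the CUDA builtins so the same kernel body can also be compiled and exercised as plain host code. A sketch of the trick under that assumption (simplified; the guard macro and dim3 stand-in are illustrative):

    #ifndef __CUDACC__
    struct dim3 { unsigned x = 0, y = 0, z = 0; };  // stand-in for CUDA's dim3
    dim3 threadIdx, blockDim, blockIdx;             // plain globals on the host
    #endif
    // With these in scope, threadIdx.x + blockIdx.x * blockDim.x parses the
    // same way in host-side test code as it does inside a real kernel.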
|
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/ |
D | TensorReductionCuda.h |
    115 const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in ReductionInitKernel()
    129 const Index first_index = blockIdx.x * BlockSize * NumPerThread + threadIdx.x;  in FullReductionKernel()
    205 const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in ReductionInitKernelHalfFloat()
    223 const Index first_index = blockIdx.x * BlockSize * NumPerThread + 2*threadIdx.x;  in FullReductionKernelHalfFloat()
    390 const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    400 for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
    461 const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    476 for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) {
    670 const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
|
D | TensorRandom.h |
    24  blockIdx.x * blockDim.x + threadIdx.x +  in get_random_seed()
    25  gridDim.x * blockDim.x * (blockIdx.y * blockDim.y + threadIdx.y);  in get_random_seed()
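The seed computation at lines 24-25 flattens a 2-D launch into one linear id: the global x index, plus the full x extent (gridDim.x * blockDim.x) times the global y index, so every thread contributes a distinct value. The same arithmetic as a stand-alone helper (a sketch, not Eigen's code):

    __device__ unsigned linear_thread_id() {
      unsigned gx = blockIdx.x * blockDim.x + threadIdx.x;  // global id along x
      unsigned gy = blockIdx.y * blockDim.y + threadIdx.y;  // global id along y
      return gx + gridDim.x * blockDim.x * gy;              // row-major flatten
    }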
|
/external/tensorflow/tensorflow/tools/ci_build/builds/user_ops/ |
D | cuda_op_kernel.cu.cc |
    22  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;  in AddOneKernel()
|
/external/tensorflow/tensorflow/examples/adding_an_op/ |
D | cuda_op_kernel.cu.cc |
    23  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;  in AddOneKernel()
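Both sample op kernels use a grid-stride loop: start at the global thread index and advance by the whole grid's worth of threads, so one fixed-size launch covers any N. A sketch of the full loop that the truncated matches above belong to (reconstructed under that assumption, not copied from the files):

    __global__ void AddOneKernel(const int *in, const int N, int *out) {
      for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
           i += blockDim.x * gridDim.x) {
        out[i] = in[i] + 1;  // thread handles i, i + stride, i + 2 * stride, ...
      }
    }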
|
/external/tensorflow/tensorflow/core/kernels/rnn/ |
D | lstm_ops_gpu.cu.cc |
    90  const int batch_id = blockIdx.x * blockDim.x + threadIdx.x;  in lstm_gates()
    91  const int act_id = blockIdx.y * blockDim.y + threadIdx.y;  in lstm_gates()
    211 const int gid = blockDim.x * blockIdx.x + threadIdx.x;  in concat_xh()
    311 const int batch_id = blockIdx.x * blockDim.x + threadIdx.x;  in lstm_gates_bprop()
    312 const int act_id = blockIdx.y * blockDim.y + threadIdx.y;  in lstm_gates_bprop()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/tests_data/ |
D | saxpy.ll |
    7   @blockIdx = external addrspace(1) global %struct.uint3
    35  …truct.uint3, %struct.uint3* addrspacecast (%struct.uint3 addrspace(1)* @blockIdx to %struct.uint3*…
    87  …truct.uint3, %struct.uint3* addrspacecast (%struct.uint3 addrspace(1)* @blockIdx to %struct.uint3*…
|
/external/clang/lib/Headers/ |
D | cuda_builtin_vars.h |
    113 __CUDA_BUILTIN_VAR __cuda_builtin_blockIdx_t blockIdx;  variable
|
/external/llvm-project/clang/lib/Headers/ |
D | __clang_cuda_builtin_vars.h |
    107 __CUDA_BUILTIN_VAR __cuda_builtin_blockIdx_t blockIdx;  variable
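These two headers are where blockIdx itself comes from under clang: not a runtime-provided struct, but a compiler-declared object whose x/y/z members are getter-only properties wired to the NVVM intrinsics, which is exactly why the Sema test at the top reports "no setter defined for property". A condensed sketch of the mechanism (simplified from the real header, which generates this through macros):

    struct __cuda_builtin_blockIdx_t {
      __declspec(property(get = __fetch_x)) unsigned int x;  // getter only, so
      static __device__ unsigned int __fetch_x() {           // writes fail Sema
        return __nvvm_read_ptx_sreg_ctaid_x();
      }
      // ...likewise for y and z...
    };
    extern const __device__ __cuda_builtin_blockIdx_t blockIdx;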
|