/external/eigen/unsupported/Eigen/CXX11/src/Tensor/ |
D | TensorContractionGpu.h |
    67   const Index lhs_store_idx_base = threadIdx.y * 72 + threadIdx.x * 9 + threadIdx.z;  in EigenContractionKernelInternal()
    68   const Index rhs_store_idx_base = threadIdx.y * 72 + threadIdx.z * 8 + threadIdx.x;  in EigenContractionKernelInternal()
    97   const Index load_idx_vert = threadIdx.x + 8 * threadIdx.y;  in EigenContractionKernelInternal()
    121  const Index lhs_horiz_0 = base_k + threadIdx.z + 0 * 8; \  in EigenContractionKernelInternal()
    122  const Index lhs_horiz_1 = base_k + threadIdx.z + 1 * 8; \  in EigenContractionKernelInternal()
    123  const Index lhs_horiz_2 = base_k + threadIdx.z + 2 * 8; \  in EigenContractionKernelInternal()
    124  const Index lhs_horiz_3 = base_k + threadIdx.z + 3 * 8; \  in EigenContractionKernelInternal()
    125  const Index lhs_horiz_4 = base_k + threadIdx.z + 4 * 8; \  in EigenContractionKernelInternal()
    126  const Index lhs_horiz_5 = base_k + threadIdx.z + 5 * 8; \  in EigenContractionKernelInternal()
    127  const Index lhs_horiz_6 = base_k + threadIdx.z + 6 * 8; \  in EigenContractionKernelInternal()
    [all …]
|
D | TensorReductionGpu.h |
    124  const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in ReductionInitKernel()
    138  const Index first_index = blockIdx.x * BlockSize * NumPerThread + threadIdx.x;  in FullReductionKernel()
    145  if (threadIdx.x == 0) {  in FullReductionKernel()
    196  if ((threadIdx.x & (warpSize - 1)) == 0) {  in FullReductionKernel()
    200  if (gridDim.x > 1 && threadIdx.x == 0) {  in FullReductionKernel()
    242  const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in ReductionInitKernelHalfFloat()
    267  blockIdx.x * BlockSize * NumPerThread + packet_width * threadIdx.x;  in FullReductionKernelHalfFloat()
    339  if ((threadIdx.x & (warpSize - 1)) == 0) {  in FullReductionKernelHalfFloat()
    361  eigen_assert(threadIdx.x == 1);  in ReductionCleanupKernelHalfFloat()
    494  const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    [all …]
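The reduction hits above combine a per-grid thread id with a warp-leader check, `(threadIdx.x & (warpSize - 1)) == 0`, which is true only for lane 0 of each warp. A minimal sketch of that pattern, independent of Eigen's reducer machinery (the kernel name and the use of atomicAdd are illustrative assumptions, not Eigen's actual FullReductionKernel):

    #include <cuda_runtime.h>

    // Each warp reduces its values with shuffles; only the warp leader
    // (lane 0) publishes the warp's partial sum to the global accumulator.
    __global__ void BlockSumSketch(const float* in, float* out, int n) {
      const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
      float v = (thread_id < n) ? in[thread_id] : 0.0f;

      // Tree reduction within the warp.
      for (int offset = warpSize / 2; offset > 0; offset /= 2) {
        v += __shfl_down_sync(0xffffffff, v, offset);
      }

      // threadIdx.x & (warpSize - 1) is the lane index within the warp.
      if ((threadIdx.x & (warpSize - 1)) == 0) {
        atomicAdd(out, v);
      }
    }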
|
D | TensorConvolution.h |
    601  for (int p = first_plane + threadIdx.y; p < numPlanes; p += plane_stride) {
    604  const int plane_kernel_offset = threadIdx.y * num_x_input;
    606  for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) {
    617  for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) {
    659  for (int p = first_plane + threadIdx.z; p < numPlanes; p += plane_stride) {
    662  const int plane_kernel_offset = threadIdx.z * num_y_input;
    666  for (int j = threadIdx.y; j < num_y_input; j += blockDim.y) {
    669  for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) {
    681  for (int j = threadIdx.y; j < num_y_output; j += blockDim.y) {
    683  for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) {
    [all …]
|
D | TensorRandom.h |
    23  gpu_assert(threadIdx.z == 0);  in get_random_seed()
    24  return blockIdx.x * blockDim.x + threadIdx.x  in get_random_seed()
    25  + gridDim.x * blockDim.x * (blockIdx.y * blockDim.y + threadIdx.y);  in get_random_seed()
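Lines 24–25 derive a unique per-thread seed by linearizing the 2-D thread coordinates: global column index plus global row index times the grid width in threads. The same formula as a standalone helper (the function name is illustrative, not Eigen's):

    // Linear id of a thread in a 2-D grid of 2-D blocks.
    __device__ unsigned int GlobalThreadId2D() {
      unsigned int col   = blockIdx.x * blockDim.x + threadIdx.x;  // global x
      unsigned int row   = blockIdx.y * blockDim.y + threadIdx.y;  // global y
      unsigned int width = gridDim.x * blockDim.x;                 // threads per row
      return col + width * row;
    }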
|
/external/clang/test/SemaCUDA/ |
D | cuda-builtin-vars.cu |
    7   out[i++] = threadIdx.x;  in kernel()
    8   threadIdx.x = 0; // expected-error {{no setter defined for property 'x'}}  in kernel()
    9   out[i++] = threadIdx.y;  in kernel()
    10  threadIdx.y = 0; // expected-error {{no setter defined for property 'y'}}  in kernel()
    11  out[i++] = threadIdx.z;  in kernel()
    12  threadIdx.z = 0; // expected-error {{no setter defined for property 'z'}}  in kernel()
    43  …__cuda_builtin_threadIdx_t y = threadIdx; // expected-error {{calling a private constructor of cla…  in kernel()
    46  …threadIdx = threadIdx; // expected-error {{'operator=' is a private member of '__cuda_builtin_thre…  in kernel()
    49  …void *ptr = &threadIdx; // expected-error {{'operator&' is a private member of '__cuda_builtin_thr…  in kernel()
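The expected-error lines document how Clang models the builtin: threadIdx is an object of the special class __cuda_builtin_threadIdx_t whose x/y/z components are read-only, and which cannot be copied, assigned, or have its address taken. A minimal device-code sketch of the only supported usage (the kernel name is illustrative):

    __global__ void read_thread_idx(unsigned int* out) {
      // Reading the components is the only supported use of threadIdx.
      out[0] = threadIdx.x;
      out[1] = threadIdx.y;
      out[2] = threadIdx.z;
      // threadIdx.x = 0;       // rejected: no setter defined for property 'x'
      // void* p = &threadIdx;  // rejected: 'operator&' is a private member
    }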
|
/external/angle/src/tests/gl_tests/ |
D | VulkanMultithreadingTest.cpp |
    74  for (size_t threadIdx = 0; threadIdx < threadCount; threadIdx++)  in runMultithreadedGLTest() local
    76  threads[threadIdx] = std::thread([&, threadIdx]() {  in runMultithreadedGLTest()
    97  testBody(surface, threadIdx);  in runMultithreadedGLTest()
|
D | MultithreadingTest.cpp |
    74   for (size_t threadIdx = 0; threadIdx < threadCount; threadIdx++)  in runMultithreadedGLTest() local
    76   threads[threadIdx] = std::thread([&, threadIdx]() {  in runMultithreadedGLTest()
    97   testBody(surface, threadIdx);  in runMultithreadedGLTest()
    512  for (size_t threadIdx = 0; threadIdx < kThreadCount; threadIdx++)  in TEST_P() local
    514  threads[threadIdx] = std::thread([&, threadIdx]() {  in TEST_P()
    515  contexts[threadIdx] = EGL_NO_CONTEXT;  in TEST_P()
    517  contexts[threadIdx] = createMultithreadedContext(window, EGL_NO_CONTEXT);  in TEST_P()
    518  EXPECT_NE(EGL_NO_CONTEXT, contexts[threadIdx]);  in TEST_P()
    528  EXPECT_TRUE(eglDestroyContext(dpy, contexts[threadIdx]));  in TEST_P()
    597  for (size_t threadIdx = 0; threadIdx < threadCount; threadIdx++)  in TEST_P() local
    [all …]
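In the ANGLE tests threadIdx is an ordinary size_t loop counter (host code), not the CUDA builtin. The detail worth noting is the capture list [&, threadIdx]: the index is captured by value so each worker keeps its own copy instead of racing on the mutating loop variable. A minimal host-side sketch of the same pattern (names are illustrative):

    #include <cstdio>
    #include <thread>
    #include <vector>

    void RunWorkers(size_t threadCount) {
      std::vector<std::thread> threads(threadCount);
      for (size_t threadIdx = 0; threadIdx < threadCount; threadIdx++) {
        // Capture threadIdx by value; capturing it by reference would race
        // with the loop's increments.
        threads[threadIdx] = std::thread([threadIdx]() {
          std::printf("worker %zu\n", threadIdx);
        });
      }
      for (auto& t : threads) {
        t.join();
      }
    }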
|
/external/clang/test/CodeGenCUDA/ |
D | cuda-builtin-vars.cu |
    9   out[i++] = threadIdx.x; // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.tid.x()  in kernel()
    10  out[i++] = threadIdx.y; // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.tid.y()  in kernel()
    11  out[i++] = threadIdx.z; // CHECK: call i32 @llvm.nvvm.read.ptx.sreg.tid.z()  in kernel()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | concat_lib_gpu_impl.cu.cc |
    41   IntType gidx = blockIdx.x * blockDim.x + threadIdx.x;  in concat_fixed_kernel()
    44   IntType gidy = blockIdx.y * blockDim.y + threadIdx.y;  in concat_fixed_kernel()
    69   IntType gidx = blockIdx.x * blockDim.x + threadIdx.x;  in concat_variable_kernel()
    77   IntType lidx = threadIdx.y * blockDim.x + threadIdx.x;  in concat_variable_kernel()
    108  IntType gidy = blockIdx.y * blockDim.y + threadIdx.y;  in concat_variable_kernel()
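These kernels use two index flavors: global 2-D coordinates (gidx, gidy) for addressing the tensors, and a flattened intra-block index (lidx = threadIdx.y * blockDim.x + threadIdx.x) for cooperative work such as filling a shared-memory staging buffer. A minimal sketch showing how the three are derived (not the actual concat kernel):

    __global__ void IndexingSketch(int* out, int width, int height) {
      // Global 2-D coordinates of this thread in the launch grid.
      int gidx = blockIdx.x * blockDim.x + threadIdx.x;  // column
      int gidy = blockIdx.y * blockDim.y + threadIdx.y;  // row
      // Flattened position of this thread within its own block.
      int lidx = threadIdx.y * blockDim.x + threadIdx.x;

      if (gidx < width && gidy < height) {
        out[gidy * width + gidx] = lidx;
      }
    }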
|
D | bias_op_gpu.cu.cc |
    136  for (int32 index = threadIdx.x; index < bias_size; index += blockDim.x) {  in BiasGradNHWC_SharedAtomics()
    141  for (int32 index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;  in BiasGradNHWC_SharedAtomics()
    148  for (int32 index = threadIdx.x; index < bias_size; index += blockDim.x) {  in BiasGradNHWC_SharedAtomics()
    161  for (int32 index = threadIdx.x; index < kSDataSize; index += blockDim.x) {  in BiasGradNCHW_SharedAtomics()
    172  for (int32 index = group_index * blockDim.x + threadIdx.x;  in BiasGradNCHW_SharedAtomics()
    183  int bias_offset = threadIdx.x % 32;  in BiasGradNCHW_SharedAtomics()
    189  int32 thread_index = threadIdx.x;  in BiasGradNCHW_SharedAtomics()
|
D | reduction_gpu_kernels.cu.h |
    168  const int tid = threadIdx.x;
    208  int warp_index = threadIdx.x / TF_RED_WARPSIZE;
    210  const int lane = threadIdx.x % TF_RED_WARPSIZE;
    213  int gid = threadIdx.x + blockIdx.x * blockDim.x;
    275  const int lane = threadIdx.x % TF_RED_WARPSIZE;
    279  rows_per_warp * (blockIdx.y * blockDim.y + threadIdx.y);
    314  sum, static_cast<int>(threadIdx.x + i * num_cols), 0xffffffff);
    319  partial_sums[lane * (TF_RED_WARPSIZE + 1) + threadIdx.y] = sum;
    323  if (threadIdx.y == 0 && threadIdx.x < num_cols) {
    324  value_type s = partial_sums[threadIdx.x * (TF_RED_WARPSIZE + 1)];
    [all …]
|
D | debug_ops_gpu.cu.cc |
    41   const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in CurtHealthKernel()
    59   const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in ConciseHealthKernel()
    90   const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in FullHealthKernel()
    133  const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in ReduceInfNanThreeSlotsKernel()
|
D | split_lib_gpu.cu.cc |
    120  IntType gidx = blockIdx.x * blockDim.x + threadIdx.x;  in split_v_kernel()
    128  IntType lidx = threadIdx.y * blockDim.x + threadIdx.x;  in split_v_kernel()
    159  IntType gidy = blockIdx.y * blockDim.y + threadIdx.y;  in split_v_kernel()
|
D | check_numerics_op_gpu.cu.cc |
    41  const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in CheckNumericsKernel()
    64  const int32 thread_id = blockIdx.x * blockDim.x + threadIdx.x;  in CheckNumericsKernelV2()
|
D | tensor_to_hash_bucket_op_gpu.cu.cc |
    74  s + threadIdx.x * kSharedMemBufferSizePerThread);  in ComputeHashes()
    76  s + threadIdx.x * kSharedMemBufferSizePerThread, size);  in ComputeHashes()
|
D | bucketize_op_gpu.cu.cc | 45 int32 lidx = threadIdx.y * blockDim.x + threadIdx.x; in BucketizeCustomKernel()
|
D | softmax_op_gpu.cu.cc |
    76   int tid = blockIdx.x * blockDim.x + threadIdx.x;  in GenerateNormalizedProb()
    93   tid = blockIdx.x * blockDim.x + threadIdx.x;  in GenerateNormalizedProb()
    115  int tid = blockIdx.x * blockDim.x + threadIdx.x;  in GenerateNormalizedProb()
|
D | depthwise_conv_op_gpu.h |
    237   const int thread_depth = threadIdx.x;
    238   const int thread_col = threadIdx.y;
    239   const int thread_row = threadIdx.z;
    527   const int thread_col = threadIdx.x;
    528   const int thread_row = threadIdx.y;
    529   const int thread_depth = threadIdx.z;
    1209  const int thread_depth = threadIdx.x;
    1210  const int thread_col = threadIdx.y;
    1211  const int thread_row = threadIdx.z;
    1352  for (int i = threadIdx.x; i < out_spatial_size; i += blockDim.x) {
    [all …]
|
/external/llvm/test/Analysis/DivergenceAnalysis/NVPTX/ |
D | diverge.ll |
    6   ; return (n < 0 ? a + threadIdx.x : b + threadIdx.x)
    26  ; if (threadIdx.x < 5) // divergent: data dependent
    45  ; if (threadIdx.x >= 5) { // divergent
    48  ; // c here is divergent because it is sync dependent on threadIdx.x >= 5
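The comments in diverge.ll describe CUDA-level sources of divergence: a branch whose condition depends on threadIdx is taken by only part of a warp, and values defined after the threads re-converge remain sync-dependent on that condition. Roughly the CUDA the first two comments correspond to (a sketch, not the exact source the test was generated from):

    __device__ int select_example(int n, int a, int b) {
      // Divergent select: threads in the same warp can pick different arms.
      return (n < 0) ? a + threadIdx.x : b + threadIdx.x;
    }

    __global__ void branch_example(int* out) {
      int c = 0;
      if (threadIdx.x < 5) {  // divergent: condition depends on the thread id
        c = 1;
      }
      // c is divergent here: its value depends on which side of the
      // threadIdx.x < 5 branch each thread took.
      out[threadIdx.x] = c;
    }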
|
/external/tensorflow/tensorflow/core/kernels/image/ |
D | non_max_suppression_op.cu.cc |
    177  const int i = i_block_offset + threadIdx.x;  in NMSKernel()
    180  if (threadIdx.y == 0) {  in NMSKernel()
    183  shared_i_boxes[threadIdx.x] = box;  in NMSKernel()
    184  shared_i_areas[threadIdx.x] = (box.x2 - box.x1) * (box.y2 - box.y1);  in NMSKernel()
    189  kNmsBoxesPerThread * (blockIdx.y * blockDim.y + threadIdx.y);  in NMSKernel()
    206  const Box i_box = shared_i_boxes[threadIdx.x];  in NMSKernel()
    208  if (OverThreshold<float>(&i_box, &j_box, shared_i_areas[threadIdx.x],  in NMSKernel()
|
/external/tensorflow/tensorflow/tools/ci_build/builds/user_ops/ |
D | cuda_op_kernel.cu.cc | 22 for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; in AddOneKernel()
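The loop above is the standard grid-stride pattern: each thread starts at its global index and advances by the total number of launched threads, so any grid size covers all N elements. The listing truncates the loop; a sketch of the full idiom (the kernel name and body here are the usual pattern, not a verbatim copy of the file):

    // Grid-stride loop: start at the global thread index and step by the
    // total thread count (blockDim.x * gridDim.x) until N elements are done.
    __global__ void AddOneSketch(const int* in, int* out, int N) {
      for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
           i += blockDim.x * gridDim.x) {
        out[i] = in[i] + 1;
      }
    }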
|
/external/tensorflow/tensorflow/examples/adding_an_op/ |
D | cuda_op_kernel.cu.cc | 23 for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; in AddOneKernel()
|
/external/eigen/test/ |
D | gpu_common.h |
    19  dim3 threadIdx, blockDim, blockIdx;  variable
    35  int i = threadIdx.x + blockIdx.x*blockDim.x;  in run_on_gpu_meta_kernel()
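Line 19 is the interesting hit: when Eigen's GPU tests are built without a device compiler, the header declares ordinary host variables named threadIdx, blockDim and blockIdx so the same kernel-style code (line 35) still compiles and runs on the CPU. A rough sketch of that trick for a single emulated thread (assumed structure and defaults, not Eigen's exact header):

    #ifndef __CUDACC__
    // Host-only build: stand-ins for the CUDA builtins so kernel code
    // compiles with a plain C++ compiler and behaves as one thread.
    #define __global__
    #define __device__
    struct dim3 {
      unsigned int x, y, z;
      dim3(unsigned int x_ = 0, unsigned int y_ = 0, unsigned int z_ = 0)
          : x(x_), y(y_), z(z_) {}
    };
    dim3 threadIdx(0, 0, 0), blockIdx(0, 0, 0), blockDim(1, 1, 1);
    #endif

    __global__ void scale_kernel(float* data, int n, float factor) {
      int i = threadIdx.x + blockIdx.x * blockDim.x;  // 0 in the CPU emulation
      if (i < n) data[i] *= factor;
    }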
|
/external/tensorflow/tensorflow/stream_executor/rocm/ |
D | rocm_helpers.cu.cc | 29 int idx = threadIdx.x + blockIdx.x * blockDim.x; in __xla_MakeBatchPointers()
|
/external/tensorflow/tensorflow/core/kernels/rnn/ |
D | lstm_ops_gpu.cu.cc |
    90   const int batch_id = blockIdx.x * blockDim.x + threadIdx.x;  in lstm_gates()
    91   const int act_id = blockIdx.y * blockDim.y + threadIdx.y;  in lstm_gates()
    211  const int gid = blockDim.x * blockIdx.x + threadIdx.x;  in concat_xh()
    311  const int batch_id = blockIdx.x * blockDim.x + threadIdx.x;  in lstm_gates_bprop()
    312  const int act_id = blockIdx.y * blockDim.y + threadIdx.y;  in lstm_gates_bprop()
|