| /external/tensorflow/tensorflow/core/kernels/ |
| D | bias_op_gpu.cu.cc |
|     54   __global__ void BiasNHWCKernel(int32 nthreads, const T* __restrict__ input,   in BiasNHWCKernel()
|     64   __global__ void BiasNCHWKernel(int32 nthreads, const T* __restrict__ input,   in BiasNCHWKernel()
|    103   __global__ void BiasGradNHWC_Naive(int32 nthreads,   in BiasGradNHWC_Naive()
|    115   __global__ void BiasGradNCHW_Naive(int32 nthreads,   in BiasGradNCHW_Naive()
|    128   int32 nthreads, const T* __restrict__ output_backprop,   in BiasGradNHWC_SharedAtomics()
|
| D | resize_nearest_neighbor_op_gpu.cu.cc |
|     36   const int nthreads, const T* __restrict__ bottom_data, const int in_height,   in ResizeNearestNeighborNHWC()
|     67   const int nthreads, const T* __restrict__ bottom_data, const int in_height,   in LegacyResizeNearestNeighborNHWC()
|     96   const int nthreads, const T* __restrict__ top_diff, const int in_height,   in ResizeNearestNeighborBackwardNHWC()
|    127   const int nthreads, const T* __restrict__ top_diff, const int in_height,   in LegacyResizeNearestNeighborBackwardNHWC()
|
| D | maxpooling_op_gpu.cu.cc |
|     68   const int nthreads, const dtype* __restrict__ bottom_data,   in MaxPoolForwardNCHW()
|    112   const int nthreads, const int32* __restrict__ bottom_data, const int height,   in MaxPoolForwardNoMaskKernel_NCHW_VECT_C()
|    145   const int nthreads, const dtype* __restrict__ bottom_data, const int height,   in MaxPoolForwardNHWC()
|    185   const int nthreads, const dtype* __restrict__ bottom_data, const int height,   in MaxPoolBackwardNoMaskNHWC()
|    245   __global__ void MaxPoolBackward(const int nthreads,   in MaxPoolBackward()
|    274   const int nthreads, const dtype* __restrict__ bottom_data,   in MaxPoolGradBackwardNoMaskNCHW()
|    315   const int nthreads, const dtype* __restrict__ bottom_data,   in MaxPoolGradBackwardNoMaskNHWC()
|    376   __global__ void MaxPoolGradBackward(const int nthreads,   in MaxPoolGradBackward()
|
| D | spacetodepth_op_gpu.cu.cc |
|     32   __global__ void S2D_NHWC(const int32 nthreads,   in S2D_NHWC()
|     64   __global__ void S2D_NCHW(const int32 nthreads,   in S2D_NCHW()
|    102   __global__ void S2D_NCHW_LOOP(const int32 nthreads,   in S2D_NCHW_LOOP()
|
| D | depthtospace_op_gpu.cu.cc |
|     33   __global__ void D2S_NHWC(const int32 nthreads,   in D2S_NHWC()
|     64   __global__ void D2S_NCHW(const int32 nthreads,   in D2S_NCHW()
|    101   __global__ void D2S_NCHW_LOOP(const int32 nthreads,   in D2S_NCHW_LOOP()
|
| D | resize_bilinear_op_gpu.cu.cc |
|    117   const int32 nthreads, const T* __restrict__ images, float height_scale,   in ResizeBilinearKernel()
|    167   __global__ void ResizeBilinearGradKernel(const int32 nthreads,   in ResizeBilinearGradKernel()
|    233   const int32 nthreads, const T* __restrict__ images, float height_scale,   in LegacyResizeBilinearKernel()
|    283   const int32 nthreads, const float* __restrict__ input_grad,   in LegacyResizeBilinearGradKernel()
|
| D | tile_functor_gpu.h |
|     33   __global__ void TileKernel(int nthreads, const T* __restrict__ src,   in TileKernel()
|
| D | dilation_ops_gpu.cu.cc |
|     39   const int32 nthreads, const T* __restrict__ input_ptr,   in DilationKernel()
|     78   const int32 nthreads, const T* __restrict__ input_ptr,   in DilationBackpropInputKernel()
|    128   const int32 nthreads, const T* __restrict__ input_ptr,   in DilationBackpropFilterKernel()
|
| D | inplace_ops_functor_gpu.cu.cc |
|     30   __global__ void DoParallelConcatOpKernel(int nthreads, const int64 rows,   in DoParallelConcatOpKernel()
|     83   __global__ void DoInplaceOpKernel(int nthreads, const int64 rows,   in DoInplaceOpKernel()
|
| D | roll_op_gpu.cu.cc |
|     33   __global__ void RollKernel(const int32 nthreads, const int32 num_dims,   in RollKernel()
|
| D | multinomial_op_gpu.cu.cc |
|     47   __global__ void MultinomialKernel(int32 nthreads, const int32 num_classes,   in MultinomialKernel()
|
| D | crop_and_resize_op_gpu.cu.cc |
|     41   const int32 nthreads, const T* __restrict__ image_ptr,   in CropAndResizeKernel()
|    134   const int32 nthreads, const float* __restrict__ grads_ptr,   in CropAndResizeBackpropImageKernel()
|    230   const int32 nthreads, const float* __restrict__ grads_ptr,   in CropAndResizeBackpropBoxesKernel()
|
| D | pooling_ops_3d_gpu.cu.cc |
|     31   const int nthreads, const dtype* __restrict__ bottom_data,   in MaxPoolGradBackwardNoMaskNCDHW()
|     82   const int nthreads, const dtype* __restrict__ bottom_data,   in MaxPoolGradBackwardNoMaskNDHWC()
|
| D | spacetobatch_functor_gpu.cu.cc |
|     47   __global__ void S2B(const int32 nthreads, T* __restrict__ space_tensor_ptr,   in S2B()
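
Every kernel indexed above takes nthreads as the total number of output elements and iterates over them with a CUDA grid-stride loop (TensorFlow wraps this pattern in its GPU_1D_KERNEL_LOOP, formerly CUDA_1D_KERNEL_LOOP, macro). The sketch below is a minimal, self-contained illustration of that pattern, not code from any of the files listed; AddBiasNHWC and all of its parameters are hypothetical stand-ins.

    #include <cstdio>
    #include <vector>

    // Grid-stride loop over `nthreads` logical elements; the kernels listed
    // above follow this general shape, with nthreads = number of outputs.
    // AddBiasNHWC is an illustrative stand-in, not one of those kernels.
    __global__ void AddBiasNHWC(int nthreads, const float* __restrict__ input,
                                const float* __restrict__ bias, int channels,
                                float* __restrict__ output) {
      for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
           index += blockDim.x * gridDim.x) {
        int c = index % channels;  // channel is the innermost NHWC dimension
        output[index] = input[index] + bias[c];
      }
    }

    int main() {
      const int n = 8, c = 4, nthreads = n * c;
      std::vector<float> h_in(nthreads, 1.0f), h_bias(c), h_out(nthreads);
      for (int i = 0; i < c; ++i) h_bias[i] = 0.1f * i;

      float *d_in, *d_bias, *d_out;
      cudaMalloc(&d_in, nthreads * sizeof(float));
      cudaMalloc(&d_bias, c * sizeof(float));
      cudaMalloc(&d_out, nthreads * sizeof(float));
      cudaMemcpy(d_in, h_in.data(), nthreads * sizeof(float), cudaMemcpyHostToDevice);
      cudaMemcpy(d_bias, h_bias.data(), c * sizeof(float), cudaMemcpyHostToDevice);

      // Launch enough blocks to cover nthreads; the stride loop stays correct
      // even if the grid is capped at fewer threads than elements.
      const int block = 256;
      const int grid = (nthreads + block - 1) / block;
      AddBiasNHWC<<<grid, block>>>(nthreads, d_in, d_bias, c, d_out);
      cudaMemcpy(h_out.data(), d_out, nthreads * sizeof(float), cudaMemcpyDeviceToHost);

      printf("out[0]=%g out[%d]=%g\n", h_out[0], nthreads - 1, h_out[nthreads - 1]);
      cudaFree(d_in); cudaFree(d_bias); cudaFree(d_out);
      return 0;
    }

The grid-stride form lets the caller cap the grid size without affecting kernel correctness, which is why all of these kernels can share one launch-configuration helper and simply pass the element count through as nthreads.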
|
| /external/grpc-grpc/test/core/gpr/ |
| D | cpu_test.cc |
|     59   int nthreads;   member
|    115   uint32_t nthreads = ct.ncores * 3;   in cpu_test()  local
|
| /external/ltp/testcases/realtime/perf/latency/ |
| D | pthread_cond_many.c |
|     55   int nthreads = 0;   variable
|    186   void test_signal(long iter, long nthreads)   in test_signal()
|
| /external/python/cpython2/Tools/ccbench/ |
| D | ccbench.py |
|    196   def run_throughput_test(func, args, nthreads):   argument
|    300   def run_latency_test(func, args, nthreads):   argument
|    432   def run_bandwidth_test(func, args, nthreads):   argument
|
| /external/python/cpython3/Tools/ccbench/ |
| D | ccbench.py |
|    194   def run_throughput_test(func, args, nthreads):   argument
|    301   def run_latency_test(func, args, nthreads):   argument
|    434   def run_bandwidth_test(func, args, nthreads):   argument
|
| /external/compiler-rt/test/asan/TestCases/Posix/ |
| D | halt_on_error-torture.cc |
|     29   size_t nthreads = 10;   variable
|
| /external/ltp/testcases/open_posix_testsuite/stress/threads/pthread_create/ |
| D | s-c1.c |
|    104   int nthreads;   member
|    147   int nthreads, ctl, i, tmp;   in main()  local
|
| /external/ltp/testcases/kernel/fs/fs_fill/ |
| D | fs_fill.c |
|     23   static unsigned int nthreads;   variable
|
| /external/autotest/client/tests/monotonic_time/src/ |
| D | threads.c |
|     86   int create_threads(int nthreads, thread_func_t func, void *arg)   in create_threads()
|
| /external/ltp/testcases/open_posix_testsuite/stress/threads/pthread_cond_timedwait/ |
| D | s-c.c |
|    175   int nthreads;   member
|    290   long do_threads_test(int nthreads, mes_t * measure)   in do_threads_test()
|
| /external/python/cpython2/Python/ |
| D | thread_sgi.h |
|     18   static int nthreads; /* protected by count_lock */   variable
|
| /external/eigen/test/ |
| D | cuda_basic.cu |
|    146   int nthreads = 100;   in test_cuda_basic()  local
|