
Searched refs:nthreads (Results 1 – 25 of 57) sorted by relevance


/external/python/cpython2/Tools/ccbench/
ccbench.py
196 def run_throughput_test(func, args, nthreads): argument
197 assert nthreads >= 1
206 if nthreads == 1:
230 for i in range(nthreads):
238 while len(ready) < nthreads:
243 start_cond.notify(nthreads)
254 nthreads = 1
256 while nthreads <= max_threads:
257 results = run_throughput_test(func, args, nthreads)
261 print("threads=%d: %d" % (nthreads, speed), end="")
[all …]
/external/python/cpython3/Tools/ccbench/
ccbench.py
194 def run_throughput_test(func, args, nthreads): argument
195 assert nthreads >= 1
204 if nthreads == 1:
228 for i in range(nthreads):
236 while len(ready) < nthreads:
241 start_cond.notify(nthreads)
252 nthreads = 1
254 while nthreads <= max_threads:
255 results = run_throughput_test(func, args, nthreads)
259 print("threads=%d: %d" % (nthreads, speed), end="")
[all …]
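
Both ccbench copies above (Python 2 and 3) use the same start-gate idiom: each worker registers itself as ready, then blocks on start_cond until the main thread releases all nthreads at once, so the timed region begins simultaneously. Below is a minimal C sketch of that idiom, assuming pthreads; the original is Python, and all names here are ours, not ccbench's.

    #include <pthread.h>
    #include <stdio.h>

    #define NTHREADS 4

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t ready_cond = PTHREAD_COND_INITIALIZER;
    static pthread_cond_t start_cond = PTHREAD_COND_INITIALIZER;
    static int nready = 0;   /* workers parked at the gate */
    static int started = 0;  /* set once by main to open the gate */

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        nready++;
        pthread_cond_signal(&ready_cond);    /* tell main we reached the gate */
        while (!started)                     /* loop guards spurious wakeups */
            pthread_cond_wait(&start_cond, &lock);
        pthread_mutex_unlock(&lock);
        /* ...timed benchmark loop would run here... */
        return NULL;
    }

    int main(void)
    {
        pthread_t th[NTHREADS];
        for (int i = 0; i < NTHREADS; i++)
            pthread_create(&th[i], NULL, worker, NULL);

        pthread_mutex_lock(&lock);
        while (nready < NTHREADS)            /* mirrors "while len(ready) < nthreads" */
            pthread_cond_wait(&ready_cond, &lock);
        started = 1;
        pthread_cond_broadcast(&start_cond); /* mirrors start_cond.notify(nthreads) */
        pthread_mutex_unlock(&lock);

        for (int i = 0; i < NTHREADS; i++)
            pthread_join(th[i], NULL);
        puts("all workers released together");
        return 0;
    }
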
/external/ltp/testcases/realtime/perf/latency/
pthread_cond_many.c
55 int nthreads = 0; variable
186 void test_signal(long iter, long nthreads) in test_signal() argument
197 stats_container_init(&dat, iter * nthreads); in test_signal()
199 pt = malloc(sizeof(*pt) * nthreads); in test_signal()
204 for (j = 0; j < nthreads; j++) { in test_signal()
208 for (i = 0; i < (iter - 1) * nthreads; i += nthreads) { in test_signal()
209 for (j = 0, k = i; j < nthreads; j++, k++) { in test_signal()
219 for (j = 0; j < nthreads; j++) { in test_signal()
231 for (i = 0; i < iter * nthreads; i++) { in test_signal()
271 nthreads = atoi(v); in parse_args()
[all …]
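
pthread_cond_many.c measures how quickly threads signalled through condition variables actually wake, keeping iter * nthreads latency samples. A stripped-down sketch of one such measurement for a single thread, assuming CLOCK_MONOTONIC and pthreads; the names are ours, not LTP's.

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int go = 0;
    static struct timespec t_signal;  /* written by main under the lock */

    static long long ns_between(struct timespec a, struct timespec b)
    {
        return (b.tv_sec - a.tv_sec) * 1000000000LL + (b.tv_nsec - a.tv_nsec);
    }

    static void *sleeper(void *arg)
    {
        struct timespec t_wake;
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!go)
            pthread_cond_wait(&cond, &lock);
        /* woken and holding the lock again; stamp the wakeup time now */
        clock_gettime(CLOCK_MONOTONIC, &t_wake);
        pthread_mutex_unlock(&lock);
        printf("wakeup latency: %lld ns\n", ns_between(t_signal, t_wake));
        return NULL;
    }

    int main(void)
    {
        pthread_t th;
        struct timespec settle = { 0, 100 * 1000 * 1000 };
        pthread_create(&th, NULL, sleeper, NULL);
        nanosleep(&settle, NULL);  /* crude: let the sleeper park on the condvar */

        pthread_mutex_lock(&lock);
        go = 1;
        clock_gettime(CLOCK_MONOTONIC, &t_signal);
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);

        pthread_join(th, NULL);
        return 0;
    }
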
/external/eigen/test/
cuda_basic.cu
146 int nthreads = 100; in test_cuda_basic() local
150 int data_size = nthreads * 512; in test_cuda_basic()
155 CALL_SUBTEST( run_and_compare_to_cuda(coeff_wise<Vector3f>(), nthreads, in, out) ); in test_cuda_basic()
156 CALL_SUBTEST( run_and_compare_to_cuda(coeff_wise<Array44f>(), nthreads, in, out) ); in test_cuda_basic()
158 CALL_SUBTEST( run_and_compare_to_cuda(replicate<Array4f>(), nthreads, in, out) ); in test_cuda_basic()
159 CALL_SUBTEST( run_and_compare_to_cuda(replicate<Array33f>(), nthreads, in, out) ); in test_cuda_basic()
161 CALL_SUBTEST( run_and_compare_to_cuda(redux<Array4f>(), nthreads, in, out) ); in test_cuda_basic()
162 CALL_SUBTEST( run_and_compare_to_cuda(redux<Matrix3f>(), nthreads, in, out) ); in test_cuda_basic()
164 CALL_SUBTEST( run_and_compare_to_cuda(prod_test<Matrix3f,Matrix3f>(), nthreads, in, out) ); in test_cuda_basic()
165 CALL_SUBTEST( run_and_compare_to_cuda(prod_test<Matrix4f,Vector4f>(), nthreads, in, out) ); in test_cuda_basic()
[all …]
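
cuda_basic.cu runs each functor over nthreads inputs on the GPU and compares against a CPU run of the same function. A hypothetical reduction of that run-and-compare pattern; square, square_kernel, and the sizes are ours, not Eigen's.

    #include <cmath>
    #include <cstdio>

    __host__ __device__ float square(float x) { return x * x; }

    __global__ void square_kernel(int n, const float* in, float* out) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) out[i] = square(in[i]);
    }

    int main() {
      const int n = 100 * 512;  // mirrors data_size = nthreads * 512
      float *in, *out;          // unified memory keeps the sketch short
      cudaMallocManaged(&in, n * sizeof(float));
      cudaMallocManaged(&out, n * sizeof(float));
      for (int i = 0; i < n; i++) in[i] = i * 0.001f;

      square_kernel<<<(n + 255) / 256, 256>>>(n, in, out);
      cudaDeviceSynchronize();

      // Compare against the host reference; loose tolerance since device
      // and host rounding can differ slightly.
      for (int i = 0; i < n; i++) {
        if (fabsf(out[i] - square(in[i])) > 1e-3f) {
          printf("mismatch at %d\n", i);
          return 1;
        }
      }
      printf("device and host agree\n");
      cudaFree(in);
      cudaFree(out);
      return 0;
    }
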
/external/ltp/testcases/kernel/fs/fs_fill/
fs_fill.c
35 static unsigned int nthreads; variable
78 pthread_t threads[nthreads]; in testrun()
82 for (i = 0; i < nthreads; i++) in testrun()
96 for (i = 0; i < nthreads; i++) in testrun()
106 nthreads = tst_ncpus_conf() + 2; in setup()
107 workers = SAFE_MALLOC(sizeof(struct worker) * nthreads); in setup()
109 for (i = 0; i < nthreads; i++) { in setup()
115 tst_res(TINFO, "Running %i writer threads", nthreads); in setup()
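
fs_fill.c sizes its worker pool at tst_ncpus_conf() + 2 so the writer threads slightly oversubscribe the CPUs. A sketch of the same sizing without LTP's helpers, assuming sysconf(_SC_NPROCESSORS_CONF) as the stand-in:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void *writer(void *arg)
    {
        (void)arg;
        /* ...fill-the-filesystem work loop would go here... */
        return NULL;
    }

    int main(void)
    {
        /* sysconf stands in for LTP's tst_ncpus_conf() */
        long ncpus = sysconf(_SC_NPROCESSORS_CONF);
        unsigned int nthreads = (ncpus > 0 ? (unsigned int)ncpus : 1) + 2;
        /* fs_fill.c uses a VLA here; malloc keeps the sketch portable */
        pthread_t *threads = (pthread_t *)malloc(sizeof(*threads) * nthreads);

        printf("Running %u writer threads\n", nthreads);
        for (unsigned int i = 0; i < nthreads; i++)
            pthread_create(&threads[i], NULL, writer, NULL);
        for (unsigned int i = 0; i < nthreads; i++)
            pthread_join(threads[i], NULL);
        free(threads);
        return 0;
    }
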
/external/ltp/testcases/open_posix_testsuite/stress/threads/pthread_create/
s-c1.c
104 int nthreads; member
147 int nthreads, ctl, i, tmp; in main() local
197 nthreads = 0; in main()
247 pthread_create(&th[nthreads], in main()
258 nthreads++; in main()
268 if (nthreads > my_max) { in main()
298 if ((nthreads % RESOLUTION) == 0) { in main()
307 m_tmp->nthreads = in main()
308 nthreads; in main()
331 sc, nthreads, in main()
[all …]
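
s-c1.c probes scalability by creating threads until pthread_create() fails, sampling the cost every RESOLUTION threads. A reduced sketch of that probe loop; the cap and names are ours, and the real test also records timing at each step.

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    #define MAX_PROBE 50000  /* hypothetical cap so the sketch always terminates */

    static void *idle(void *arg)
    {
        (void)arg;
        pause();  /* park forever; we only measure creation */
        return NULL;
    }

    int main(void)
    {
        pthread_t *th = (pthread_t *)malloc(sizeof(*th) * MAX_PROBE);
        int nthreads = 0, rc = 0;

        while (nthreads < MAX_PROBE) {
            rc = pthread_create(&th[nthreads], NULL, idle, NULL);
            if (rc != 0)  /* typically EAGAIN once resource limits are hit */
                break;
            nthreads++;
        }
        printf("created %d threads before failure (%s)\n",
               nthreads, rc ? strerror(rc) : "cap reached");
        _exit(0);  /* skip joining thousands of parked threads */
    }
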
/external/grpc-grpc/test/core/gpr/
cpu_test.cc
59 int nthreads; member
94 ct->nthreads--; in worker_thread()
95 if (ct->nthreads == 0) { in worker_thread()
108 ct.nthreads = static_cast<int>(ct.ncores) * 3; in cpu_test()
115 uint32_t nthreads = ct.ncores * 3; in cpu_test() local
117 static_cast<grpc_core::Thread*>(gpr_malloc(sizeof(*thd) * nthreads)); in cpu_test()
119 for (i = 0; i < nthreads; i++) { in cpu_test()
128 for (i = 0; i < nthreads; i++) { in cpu_test()
sync_test.cc
138 int nthreads; /* number of threads */ member
163 static struct test* test_new(int nthreads, int64_t iterations, int incr_step) { in test_new() argument
165 m->nthreads = nthreads; in test_new()
167 gpr_malloc(sizeof(*m->threads) * nthreads)); in test_new()
171 m->done = nthreads; in test_new()
179 gpr_ref_init(&m->thread_refcount, nthreads); in test_new()
197 for (i = 0; i != m->nthreads; i++) { in test_create_threads()
210 for (int i = 0; i != m->nthreads; i++) { in test_wait()
268 if (m->counter != m->nthreads * m->iterations * m->incr_step) { in test()
270 static_cast<long>(m->counter), m->nthreads, in test()
[all …]
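
sync_test.cc validates its synchronization primitives by checking counter == nthreads * iterations * incr_step once every worker has finished. A minimal sketch of that invariant with a plain mutex, assuming incr_step == 1:

    #include <pthread.h>
    #include <stdio.h>

    #define NTHREADS 8
    #define ITERATIONS 100000

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static long counter = 0;

    static void *incr(void *arg)
    {
        (void)arg;
        for (int i = 0; i < ITERATIONS; i++) {
            pthread_mutex_lock(&lock);
            counter++;
            pthread_mutex_unlock(&lock);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t th[NTHREADS];
        for (int i = 0; i < NTHREADS; i++)
            pthread_create(&th[i], NULL, incr, NULL);
        for (int i = 0; i < NTHREADS; i++)
            pthread_join(th[i], NULL);

        long expected = (long)NTHREADS * ITERATIONS;  /* incr_step == 1 here */
        if (counter != expected)
            printf("counter %ld != %ld: the lock is broken\n", counter, expected);
        else
            printf("ok: %ld\n", counter);
        return 0;
    }
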
/external/compiler-rt/test/asan/TestCases/Posix/
halt_on_error-torture.cc
29 size_t nthreads = 10; variable
61 nthreads = (size_t)strtoul(argv[1], 0, 0); in main()
64 pthread_t *tids = new pthread_t[nthreads]; in main()
66 for (size_t i = 0; i < nthreads; ++i) { in main()
73 for (size_t i = 0; i < nthreads; ++i) { in main()
/external/tensorflow/tensorflow/core/kernels/
maxpooling_op_gpu.cu.cc
67 const int nthreads, const dtype* bottom_data, const int channels, in MaxPoolForwardNCHW() argument
72 CUDA_1D_KERNEL_LOOP(index, nthreads) { in MaxPoolForwardNCHW()
109 const int nthreads, const int32* bottom_data, const int height, in MaxPoolForwardNoMaskKernel_NCHW_VECT_C() argument
116 CUDA_1D_KERNEL_LOOP(index, nthreads) { in MaxPoolForwardNoMaskKernel_NCHW_VECT_C()
141 const int nthreads, const dtype* bottom_data, const int height, in MaxPoolForwardNHWC() argument
146 CUDA_1D_KERNEL_LOOP(index, nthreads) { in MaxPoolForwardNHWC()
180 const int nthreads, const dtype* bottom_data, const int height, in MaxPoolBackwardNoMaskNHWC() argument
185 CUDA_1D_KERNEL_LOOP(index, nthreads) { in MaxPoolBackwardNoMaskNHWC()
240 __global__ void MaxPoolBackward(const int nthreads, const dtype* top_diff, in MaxPoolBackward() argument
244 CUDA_1D_KERNEL_LOOP(index, nthreads) { in MaxPoolBackward()
[all …]
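
In this file and in every TensorFlow kernel listed below, nthreads is the total number of work items, not a thread count: CUDA_1D_KERNEL_LOOP(index, nthreads) expands to a grid-stride loop, so a fixed launch geometry covers any nthreads. A self-contained sketch of what the macro does; scale_kernel and the sizes are ours.

    #include <cstdio>

    // Grid-stride loop: each thread handles index, index + stride,
    // index + 2*stride, ..., so the kernel is correct whether the grid
    // is smaller or larger than nthreads.
    __global__ void scale_kernel(int nthreads, const float* in, float* out,
                                 float a) {
      for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
           index += blockDim.x * gridDim.x) {
        out[index] = a * in[index];
      }
    }

    int main() {
      const int nthreads = 1 << 20;  // number of elements, per the TF naming
      float *in, *out;
      cudaMallocManaged(&in, nthreads * sizeof(float));
      cudaMallocManaged(&out, nthreads * sizeof(float));
      for (int i = 0; i < nthreads; i++) in[i] = 1.0f;

      // Deliberately launch far fewer threads than elements;
      // the stride loop covers the rest.
      scale_kernel<<<64, 256>>>(nthreads, in, out, 2.0f);
      cudaDeviceSynchronize();
      printf("out[0]=%g out[last]=%g\n", out[0], out[nthreads - 1]);

      cudaFree(in);
      cudaFree(out);
      return 0;
    }
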
bias_op_gpu.cu.cc
56 __global__ void BiasNHWCKernel(int32 nthreads, const T* input, const T* bias, in BiasNHWCKernel() argument
58 CUDA_1D_KERNEL_LOOP(index, nthreads) { in BiasNHWCKernel()
65 __global__ void BiasNCHWKernel(int32 nthreads, const T* input, const T* bias, in BiasNCHWKernel() argument
67 CUDA_1D_KERNEL_LOOP(index, nthreads) { in BiasNCHWKernel()
102 __global__ void BiasGradNHWC_Naive(int32 nthreads, const T* output_backprop, in BiasGradNHWC_Naive() argument
104 CUDA_1D_KERNEL_LOOP(index, nthreads) { in BiasGradNHWC_Naive()
112 __global__ void BiasGradNCHW_Naive(int32 nthreads, const T* output_backprop, in BiasGradNCHW_Naive() argument
115 CUDA_1D_KERNEL_LOOP(index, nthreads) { in BiasGradNCHW_Naive()
125 __global__ void BiasGradNHWC_SharedAtomics(int32 nthreads, in BiasGradNHWC_SharedAtomics() argument
135 for (int32 index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; in BiasGradNHWC_SharedAtomics()
resize_nearest_neighbor_op_gpu.cu.cc
37 const int nthreads, const T* bottom_data, const int in_height, in ResizeNearestNeighborNHWC() argument
41 CUDA_1D_KERNEL_LOOP(index, nthreads) { in ResizeNearestNeighborNHWC()
68 const int nthreads, const T* bottom_data, const int in_height, in LegacyResizeNearestNeighborNHWC() argument
72 CUDA_1D_KERNEL_LOOP(index, nthreads) { in LegacyResizeNearestNeighborNHWC()
97 const int nthreads, const T* top_diff, const int in_height, in ResizeNearestNeighborBackwardNHWC() argument
101 CUDA_1D_KERNEL_LOOP(index, nthreads) { in ResizeNearestNeighborBackwardNHWC()
128 const int nthreads, const T* top_diff, const int in_height, in LegacyResizeNearestNeighborBackwardNHWC() argument
132 CUDA_1D_KERNEL_LOOP(index, nthreads) { in LegacyResizeNearestNeighborBackwardNHWC()
fused_batch_norm_op.cu.cc
29 __global__ void VarianceToInvVarianceKernel(int nthreads, const T* input, in VarianceToInvVarianceKernel() argument
31 CUDA_1D_KERNEL_LOOP(index, nthreads) { in VarianceToInvVarianceKernel()
47 __global__ void InvVarianceToVarianceKernel(int nthreads, double epsilon, in InvVarianceToVarianceKernel() argument
49 CUDA_1D_KERNEL_LOOP(index, nthreads) { in InvVarianceToVarianceKernel()
resize_bilinear_op_gpu.cu.cc
36 __global__ void ResizeBilinearKernel(const int32 nthreads, const T* images, in ResizeBilinearKernel() argument
41 CUDA_1D_KERNEL_LOOP(out_idx, nthreads) { in ResizeBilinearKernel()
89 const int32 nthreads, const float* input_grad, float height_scale, in ResizeBilinearGradKernel() argument
92 CUDA_1D_KERNEL_LOOP(in_idx, nthreads) { in ResizeBilinearGradKernel()
150 __global__ void LegacyResizeBilinearKernel(const int32 nthreads, in LegacyResizeBilinearKernel() argument
156 CUDA_1D_KERNEL_LOOP(out_idx, nthreads) { in LegacyResizeBilinearKernel()
203 const int32 nthreads, const float* input_grad, float height_scale, in LegacyResizeBilinearGradKernel() argument
206 CUDA_1D_KERNEL_LOOP(in_idx, nthreads) { in LegacyResizeBilinearGradKernel()
depthtospace_op_gpu.cu.cc
34 __global__ void D2S_NHWC(const int32 nthreads, in D2S_NHWC() argument
41 CUDA_1D_KERNEL_LOOP(out_idx, nthreads) { in D2S_NHWC()
65 __global__ void D2S_NCHW(const int32 nthreads, in D2S_NCHW() argument
70 CUDA_1D_KERNEL_LOOP(input_idx, nthreads) { in D2S_NCHW()
102 __global__ void D2S_NCHW_LOOP(const int32 nthreads, in D2S_NCHW_LOOP() argument
108 CUDA_1D_KERNEL_LOOP(thread_idx, nthreads) { in D2S_NCHW_LOOP()
spacetodepth_op_gpu.cu.cc
33 __global__ void S2D_NHWC(const int32 nthreads, const dtype* input_ptr, in S2D_NHWC() argument
39 CUDA_1D_KERNEL_LOOP(inp_idx, nthreads) { in S2D_NHWC()
64 __global__ void S2D_NCHW(const int32 nthreads, in S2D_NCHW() argument
69 CUDA_1D_KERNEL_LOOP(input_idx, nthreads) { in S2D_NCHW()
102 __global__ void S2D_NCHW_LOOP(const int32 nthreads, in S2D_NCHW_LOOP() argument
108 CUDA_1D_KERNEL_LOOP(thread_idx, nthreads) { in S2D_NCHW_LOOP()
dilation_ops_gpu.cu.cc
39 __global__ void DilationKernel(const int32 nthreads, const T* input_ptr, in DilationKernel() argument
46 CUDA_1D_KERNEL_LOOP(out_idx, nthreads) { in DilationKernel()
80 const int32 nthreads, const T* input_ptr, const T* filter_ptr, in DilationBackpropInputKernel() argument
85 CUDA_1D_KERNEL_LOOP(out_idx, nthreads) { in DilationBackpropInputKernel()
129 const int32 nthreads, const T* input_ptr, const T* filter_ptr, in DilationBackpropFilterKernel() argument
134 CUDA_1D_KERNEL_LOOP(out_idx, nthreads) { in DilationBackpropFilterKernel()
pooling_ops_3d_gpu.cu.cc
31 const int nthreads, const dtype* bottom_data, const dtype* output_data, in MaxPoolGradBackwardNoMaskNCDHW() argument
38 CUDA_1D_KERNEL_LOOP(index, nthreads) { in MaxPoolGradBackwardNoMaskNCDHW()
81 const int nthreads, const dtype* bottom_data, const dtype* output_data, in MaxPoolGradBackwardNoMaskNDHWC() argument
88 CUDA_1D_KERNEL_LOOP(index, nthreads) { in MaxPoolGradBackwardNoMaskNDHWC()
inplace_ops_functor_gpu.cu.cc
30 __global__ void DoParallelConcatOpKernel(int nthreads, const int64 rows, in DoParallelConcatOpKernel() argument
33 CUDA_1D_KERNEL_LOOP(idx, nthreads) { in DoParallelConcatOpKernel()
82 __global__ void DoInplaceOpKernel(int nthreads, const int64 rows, in DoInplaceOpKernel() argument
85 CUDA_1D_KERNEL_LOOP(idx, nthreads) { in DoInplaceOpKernel()
crop_and_resize_op_gpu.cu.cc
42 const int32 nthreads, const T* image_ptr, const float* boxes_ptr, in CropAndResizeKernel() argument
46 CUDA_1D_KERNEL_LOOP(out_idx, nthreads) { in CropAndResizeKernel()
134 const int32 nthreads, const float* grads_ptr, const float* boxes_ptr, in CropAndResizeBackpropImageKernel() argument
138 CUDA_1D_KERNEL_LOOP(out_idx, nthreads) { in CropAndResizeBackpropImageKernel()
229 const int32 nthreads, const float* grads_ptr, const T* image_ptr, in CropAndResizeBackpropBoxesKernel() argument
233 CUDA_1D_KERNEL_LOOP(out_idx, nthreads) { in CropAndResizeBackpropBoxesKernel()
/external/jemalloc_new/test/unit/
retained.c
109 unsigned nthreads = ncpus * 2; in TEST_BEGIN() local
110 VARIABLE_ARRAY(thd_t, threads, nthreads); in TEST_BEGIN()
111 for (unsigned i = 0; i < nthreads; i++) { in TEST_BEGIN()
121 while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) { in TEST_BEGIN()
132 size_t allocated = esz * nthreads * PER_THD_NALLOCS; in TEST_BEGIN()
169 for (unsigned i = 0; i < nthreads; i++) { in TEST_BEGIN()
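
retained.c has each worker bump an atomic nfinished counter while the main thread spins until it reaches nthreads, so it can sample allocator state between polls instead of blocking in join. A sketch of that rendezvous using std::atomic in place of jemalloc's atomic_* wrappers:

    #include <atomic>
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    #define NTHREADS 8

    static std::atomic<unsigned int> nfinished(0);

    static void *worker(void *arg)
    {
        (void)arg;
        /* ...per-thread allocation work would go here... */
        nfinished.fetch_add(1, std::memory_order_release);
        return NULL;
    }

    int main(void)
    {
        pthread_t th[NTHREADS];
        for (int i = 0; i < NTHREADS; i++)
            pthread_create(&th[i], NULL, worker, NULL);

        /* poll instead of joining, so this thread could inspect shared
         * state (e.g. allocator stats) on every pass */
        while (nfinished.load(std::memory_order_acquire) < NTHREADS)
            sched_yield();  /* stand-in for jemalloc's adaptive spin */

        printf("all %d workers finished\n", NTHREADS);
        for (int i = 0; i < NTHREADS; i++)
            pthread_join(th[i], NULL);
        return 0;
    }
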
/external/ltp/testcases/open_posix_testsuite/stress/threads/pthread_cond_timedwait/
s-c.c
175 int nthreads; member
290 long do_threads_test(int nthreads, mes_t * measure) in do_threads_test() argument
317 th = (pthread_t *) calloc(nthreads, sizeof(pthread_t)); in do_threads_test()
322 output("%d", nthreads); in do_threads_test()
396 for (i = 0; i < nthreads; i++) { in do_threads_test()
444 while (tnum < nthreads) { in do_threads_test()
495 for (i = 0; i < nthreads; i++) { in do_threads_test()
538 output("%5d threads; %d.%09d s (%i loops)\n", nthreads, ts_cumul.tv_sec, in do_threads_test()
617 m_tmp->nthreads = nth; in main()
737 Xavg += (double)cur->nthreads; in parse_measure()
[all …]
/external/autotest/client/tests/monotonic_time/src/
threads.c
86 int create_threads(int nthreads, thread_func_t func, void *arg) in create_threads() argument
88 if (nthreads > MAX_THREADS) in create_threads()
89 nthreads = MAX_THREADS; in create_threads()
91 while (--nthreads >= 0) { in create_threads()
time_test.c
235 int nthreads; in run_test() local
257 nthreads = create_per_cpu_threads(cpus, test_loop, test); in run_test()
258 if (nthreads != ncpus) { in run_test()
260 ncpus, nthreads); in run_test()
261 if (nthreads) { in run_test()
/external/python/cpython2/Python/
thread_sgi.h
18 static int nthreads; /* protected by count_lock */ variable
158 nthreads++; in PyThread_start_new_thread()
182 nthreads--; in PyThread_exit_thread()
190 if (nthreads < 0) { in PyThread_exit_thread()
196 dprintf(("waiting for other threads (%d)\n", nthreads)); in PyThread_exit_thread()
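
cpu_test.cc above and thread_sgi.h here share one more idiom: a live-thread count guarded by a lock, incremented on spawn and decremented on exit, with the last thread out waking whoever waits for zero. A condensed sketch; the names are ours.

    #include <pthread.h>
    #include <stdio.h>

    #define NTHREADS 6

    static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t all_done = PTHREAD_COND_INITIALIZER;
    static int nthreads = 0;  /* live threads, protected by count_lock */

    static void *worker(void *arg)
    {
        (void)arg;
        /* ...real work... */
        pthread_mutex_lock(&count_lock);
        nthreads--;            /* mirrors ct->nthreads-- / nthreads-- */
        if (nthreads == 0)     /* last one out signals the waiter */
            pthread_cond_signal(&all_done);
        pthread_mutex_unlock(&count_lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t th[NTHREADS];
        /* hold the lock across creation so no worker can decrement
         * before the count is fully built up */
        pthread_mutex_lock(&count_lock);
        for (int i = 0; i < NTHREADS; i++) {
            pthread_create(&th[i], NULL, worker, NULL);
            nthreads++;        /* mirrors PyThread_start_new_thread */
        }
        while (nthreads > 0)   /* wait releases the lock while sleeping */
            pthread_cond_wait(&all_done, &count_lock);
        pthread_mutex_unlock(&count_lock);
        printf("all workers exited\n");

        for (int i = 0; i < NTHREADS; i++)
            pthread_join(th[i], NULL);
        return 0;
    }
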
