/external/gemmlowp/meta/ |
D | legacy_multi_thread_gemv.h |
     94  std::int32_t max_threads) {    in gemv_q8_scratch() argument
     95  return internal::ResolveMaxThreads(max_threads) *    in gemv_q8_scratch()
     99  void multi_thread_gemv_q8(gemmlowp::WorkersPool* pool, std::int32_t max_threads,    in multi_thread_gemv_q8() argument
    106  max_threads = internal::ResolveMaxThreads(max_threads);    in multi_thread_gemv_q8()
    109  if (max_threads == 1) {    in multi_thread_gemv_q8()
    112  internal::MultiThreadedMatrixMatrix(pool, max_threads, scratch, lhs, rhs, 1,    in multi_thread_gemv_q8()
    118  std::int32_t max_threads) {    in gemv_f_scratch() argument
    119  return internal::ResolveMaxThreads(max_threads) *    in gemv_f_scratch()
    123  void multi_thread_gemv_f(gemmlowp::WorkersPool* pool, std::int32_t max_threads,    in multi_thread_gemv_f() argument
    129  max_threads = internal::ResolveMaxThreads(max_threads);    in multi_thread_gemv_f()
    [all …]
|
D | legacy_multi_thread_gemm.h |
    153  std::int32_t max_threads) {    in gemm_q8_scratch() argument
    154  return internal::ResolveMaxThreads(max_threads) *    in gemm_q8_scratch()
    158  void multi_thread_gemm_q8(gemmlowp::WorkersPool* pool, std::int32_t max_threads,    in multi_thread_gemm_q8() argument
    166  multi_thread_gemv_q8(pool, max_threads, scratch, lhs, rhs, n, k, lhs_offset,    in multi_thread_gemm_q8()
    170  multi_thread_gemv_q8(pool, max_threads, scratch, rhs, lhs, m, k, rhs_offset,    in multi_thread_gemm_q8()
    175  max_threads = internal::ResolveMaxThreads(max_threads);    in multi_thread_gemm_q8()
    178  if (max_threads == 1) {    in multi_thread_gemm_q8()
    182  internal::MultiThreadedMatrixMatrix(pool, max_threads, scratch, lhs, rhs, m,    in multi_thread_gemm_q8()
    188  std::int32_t max_threads) {    in gemm_f_scratch() argument
    189  return internal::ResolveMaxThreads(max_threads) *    in gemm_f_scratch()
    [all …]
|
D | legacy_multi_thread_common.h |
     74  std::int32_t ResolveMaxThreads(std::int32_t max_threads) {    in ResolveMaxThreads() argument
     75  if (max_threads == 0) {    in ResolveMaxThreads()
     80  return max_threads;    in ResolveMaxThreads()
    117  std::int32_t max_threads, std::uint8_t* scratch,    in MultiThreadedMatrixMatrix() argument
    122  max_threads = internal::ResolveMaxThreads(max_threads);    in MultiThreadedMatrixMatrix()
    125  internal::PrepareTasks(max_threads, m, n, k, &task_rects);    in MultiThreadedMatrixMatrix()
|
D | multi_thread_common.h |
     23  inline int ResolveMaxThreads(int max_threads) {    in ResolveMaxThreads() argument
     24  if (max_threads == 0) {    in ResolveMaxThreads()
     35  return max_threads;    in ResolveMaxThreads()
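Note: both gemmlowp trees above share one convention: a max_threads of 0 means "let the library decide", and is resolved to the hardware thread count. A minimal sketch of that convention (the function name and the clamp-to-1 fallback are ours, not gemmlowp's):

    #include <algorithm>
    #include <thread>

    // "0 means auto": resolve a caller-supplied max_threads of 0 to the
    // hardware thread count; pass anything else through (clamped to >= 1).
    inline int ResolveMaxThreadsSketch(int max_threads) {
      if (max_threads == 0) {
        // hardware_concurrency() may return 0 when unknown; fall back to 1.
        const unsigned hw = std::thread::hardware_concurrency();
        return hw ? static_cast<int>(hw) : 1;
      }
      return std::max(1, max_threads);
    }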
|
D | multi_thread_transform.h |
     36  const int max_threads = ResolveMaxThreads(context->max_num_threads());    in PrepareTransform1DTasks() local
     41  const int real_tasks = std::max(1, std::min(max_threads, max_tasks_by_size));    in PrepareTransform1DTasks()
|
/external/ComputeLibrary/src/runtime/ |
D | SchedulerUtils.cpp |
     35  std::pair<unsigned, unsigned> split_2d(unsigned max_threads, std::size_t m, std::size_t n)    in split_2d() argument
     51  std::sqrt(max_threads * ratio));    in split_2d()
     58  if(max_threads % adj_down == 0)    in split_2d()
     60  return { adj_down, max_threads / adj_down };    in split_2d()
     65  if(max_threads % adj_up == 0)    in split_2d()
     67  return { adj_up, max_threads / adj_up };    in split_2d()
     74  return { std::min<unsigned>(m, max_threads), 1 };    in split_2d()
     78  return { 1, std::min<unsigned>(n, max_threads) };    in split_2d()
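Note: split_2d() factors a thread budget into a (rows, cols) grid whose aspect ratio tracks the m:n shape of the work, starting near sqrt(max_threads * m/n) and probing nearby divisors (the adj_down/adj_up pair above). A simplified reconstruction of that idea, not the ComputeLibrary implementation itself:

    #include <cmath>
    #include <cstddef>
    #include <utility>

    // Pick a divisor pair (a, b) of max_threads with a/b close to m/n,
    // so each thread ends up with a roughly square tile of the problem.
    std::pair<unsigned, unsigned> split_2d_sketch(unsigned max_threads,
                                                  std::size_t m, std::size_t n) {
      const double ratio = static_cast<double>(m) / static_cast<double>(n);
      const unsigned ideal =
          static_cast<unsigned>(std::sqrt(max_threads * ratio));
      // Walk downward from the ideal split looking for an exact divisor.
      for (unsigned a = ideal; a >= 1; --a)
        if (max_threads % a == 0) return {a, max_threads / a};
      return {1, max_threads};  // a == 1 always divides the budget
    }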
|
/external/gemmlowp/internal/ |
D | platform.h |
     62  inline int GetHardwareConcurrency(int max_threads) {    in GetHardwareConcurrency() argument
     63  if (max_threads == 0) {    in GetHardwareConcurrency()
     68  return max_threads;    in GetHardwareConcurrency()
     91  inline int GetHardwareConcurrency(int max_threads) {
     92  if (max_threads == 0) {
     97  return max_threads;
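Note: platform.h carries one GetHardwareConcurrency() per platform branch; both treat 0 as "ask the OS". A sketch of the POSIX flavor of that query (the function name is ours; _SC_NPROCESSORS_ONLN is standard POSIX):

    #include <unistd.h>

    // POSIX fallback for platforms without a usable
    // std::thread::hardware_concurrency(); reports currently online CPUs.
    inline int GetHardwareConcurrencySketch(int max_threads) {
      if (max_threads == 0) {
        const long n = sysconf(_SC_NPROCESSORS_ONLN);
        return n > 0 ? static_cast<int>(n) : 1;
      }
      return max_threads;
    }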
|
/external/ComputeLibrary/src/cpu/ |
D | CpuContext.cpp |
    123  int32_t max_threads)    in populate_capabilities() argument
    139  ARM_COMPUTE_UNUSED(max_threads);    in populate_capabilities()
    140  caps.max_threads = 1;    in populate_capabilities()
    142  caps.max_threads = (max_threads > 0) ? max_threads : std::thread::hardware_concurrency();    in populate_capabilities()
|
/external/pytorch/aten/src/ATen/ |
D | Parallel-inl.h |
     60  const auto max_threads = at::get_num_threads();    in parallel_reduce() local
     63  max_threads > 1);    in parallel_reduce()
     70  c10::SmallVector<scalar_t, 64> results(max_threads, ident);    in parallel_reduce()
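Note: parallel_reduce() sizes one identity-initialized slot per worker (results(max_threads, ident)) so threads never contend on the hot path, then combines the partials once at the end. A hedged sketch of that shape using plain std::thread instead of ATen's thread pool:

    #include <algorithm>
    #include <cstddef>
    #include <numeric>
    #include <thread>
    #include <vector>

    // One partial result per worker, no locks while reducing, a single
    // sequential combine afterwards.
    template <typename T, typename F>
    T parallel_reduce_sketch(const std::vector<T>& data, T ident, F combine,
                             int max_threads) {
      std::vector<T> partials(max_threads, ident);
      std::vector<std::thread> workers;
      const std::size_t chunk = (data.size() + max_threads - 1) / max_threads;
      for (int t = 0; t < max_threads; ++t) {
        workers.emplace_back([&, t] {
          const std::size_t lo = t * chunk;
          const std::size_t hi = std::min(data.size(), lo + chunk);
          for (std::size_t i = lo; i < hi; ++i)
            partials[t] = combine(partials[t], data[i]);
        });
      }
      for (auto& w : workers) w.join();
      return std::accumulate(partials.begin(), partials.end(), ident, combine);
    }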
|
/external/ltp/lib/ |
D | tst_pid.c |
    140  int max_pids, max_session_pids, max_threads, used_pids = get_used_pids(cleanup_fn);    in tst_get_free_pids_() local
    143  SAFE_FILE_SCANF(cleanup_fn, THREADS_MAX_PATH, "%d", &max_threads);    in tst_get_free_pids_()
    144  max_pids = MIN(max_pids, max_threads);    in tst_get_free_pids_()
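Note: the LTP helper caps its free-PID estimate by kernel/threads-max as well as pid_max, since every thread occupies a PID. A minimal standalone sketch of that calculation (error handling intentionally thin; the procfs paths are the real ones):

    #include <algorithm>
    #include <cstdio>

    // Effective PID ceiling is min(pid_max, threads-max); subtract what is
    // already in use to estimate how many more processes/threads fit.
    int free_pid_estimate_sketch(int used_pids) {
      int pid_max = 0, threads_max = 0;
      if (std::FILE* f = std::fopen("/proc/sys/kernel/pid_max", "r")) {
        std::fscanf(f, "%d", &pid_max);
        std::fclose(f);
      }
      if (std::FILE* f = std::fopen("/proc/sys/kernel/threads-max", "r")) {
        std::fscanf(f, "%d", &threads_max);
        std::fclose(f);
      }
      return std::max(0, std::min(pid_max, threads_max) - used_pids);
    }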
|
/external/libcxx/utils/google-benchmark/src/ |
D | benchmark_register.cc |
    434  Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {    in ThreadRange() argument
    436  CHECK_GE(max_threads, min_threads);    in ThreadRange()
    438  AddRange(&thread_counts_, min_threads, max_threads, 2);    in ThreadRange()
    442  Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads,    in DenseThreadRange() argument
    445  CHECK_GE(max_threads, min_threads);    in DenseThreadRange()
    448  for (auto i = min_threads; i < max_threads; i += stride) {    in DenseThreadRange()
    451  thread_counts_.push_back(max_threads);    in DenseThreadRange()
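Note: these two registration helpers are part of google-benchmark's public API. ThreadRange() grows the thread count geometrically (the AddRange(..., 2) above), while DenseThreadRange() walks in fixed strides and always appends the endpoint. Typical usage:

    #include <benchmark/benchmark.h>

    static void BM_Spin(benchmark::State& state) {
      long x = 0;
      for (auto _ : state) benchmark::DoNotOptimize(++x);
    }
    // Runs at 1, 2, 4, and 8 threads (geometric, multiplier 2).
    BENCHMARK(BM_Spin)->ThreadRange(1, 8);
    // Runs at 1, 3, and 5 threads (stride 2, endpoint always included).
    BENCHMARK(BM_Spin)->DenseThreadRange(1, 5, 2);
    BENCHMARK_MAIN();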
|
/external/crosvm/cros_async/src/blocking/ |
D | pool.rs |
    102  max_threads: usize,    field
    129  if state.num_threads < self.max_threads {    in spawn()
    217  pub fn new(max_threads: usize, keepalive: Duration) -> BlockingPool {    in new()
    232  max_threads,    in new()
    239  pub fn with_capacity(max_threads: usize, keepalive: Duration) -> BlockingPool {    in with_capacity()
    248  worker_threads: Slab::with_capacity(max_threads),    in with_capacity()
    254  max_threads,    in with_capacity()
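Note: BlockingPool grows lazily: spawn() starts a new worker only while num_threads < self.max_threads, otherwise the task waits for an idle worker. A compact C++ analogue of just that admission decision (the real pool also tracks idle workers and a keepalive timeout):

    #include <cstddef>
    #include <mutex>

    // Grow-on-demand gate: returns true when the caller should start a new
    // worker thread; false means the pool is at capacity and the task queues.
    class SpawnGateSketch {
     public:
      explicit SpawnGateSketch(std::size_t max_threads)
          : max_threads_(max_threads) {}

      bool should_spawn_worker() {
        std::lock_guard<std::mutex> lock(mu_);
        if (num_threads_ < max_threads_) {
          ++num_threads_;  // reserve the slot before the thread exists
          return true;
        }
        return false;
      }

     private:
      std::mutex mu_;
      std::size_t num_threads_ = 0;
      const std::size_t max_threads_;
    };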
|
D | cancellable_pool.rs |
    142  pub fn new(max_threads: usize, keepalive: Duration) -> CancellableBlockingPool {    in new()
    145  blocking_pool: BlockingPool::new(max_threads, keepalive),    in new()
    153  pub fn with_capacity(max_threads: usize, keepalive: Duration) -> CancellableBlockingPool {    in with_capacity()
    156  blocking_pool: BlockingPool::with_capacity(max_threads, keepalive),    in with_capacity()
|
/external/cronet/stable/third_party/google_benchmark/src/src/ |
D | benchmark_register.cc |
    445  Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {    in ThreadRange() argument
    447  BM_CHECK_GE(max_threads, min_threads);    in ThreadRange()
    449  AddRange(&thread_counts_, min_threads, max_threads, 2);    in ThreadRange()
    453  Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads,    in DenseThreadRange() argument
    456  BM_CHECK_GE(max_threads, min_threads);    in DenseThreadRange()
    459  for (auto i = min_threads; i < max_threads; i += stride) {    in DenseThreadRange()
    462  thread_counts_.push_back(max_threads);    in DenseThreadRange()
|
/external/cronet/tot/third_party/google_benchmark/src/src/ |
D | benchmark_register.cc |
    445  Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {    in ThreadRange() argument
    447  BM_CHECK_GE(max_threads, min_threads);    in ThreadRange()
    449  AddRange(&thread_counts_, min_threads, max_threads, 2);    in ThreadRange()
    453  Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads,    in DenseThreadRange() argument
    456  BM_CHECK_GE(max_threads, min_threads);    in DenseThreadRange()
    459  for (auto i = min_threads; i < max_threads; i += stride) {    in DenseThreadRange()
    462  thread_counts_.push_back(max_threads);    in DenseThreadRange()
|
/external/google-benchmark/src/ |
D | benchmark_register.cc |
    445  Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {    in ThreadRange() argument
    447  BM_CHECK_GE(max_threads, min_threads);    in ThreadRange()
    449  AddRange(&thread_counts_, min_threads, max_threads, 2);    in ThreadRange()
    453  Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads,    in DenseThreadRange() argument
    456  BM_CHECK_GE(max_threads, min_threads);    in DenseThreadRange()
    459  for (auto i = min_threads; i < max_threads; i += stride) {    in DenseThreadRange()
    462  thread_counts_.push_back(max_threads);    in DenseThreadRange()
|
/external/rust/crabbyavif/src/capi/ |
D | reformat.rs |
     36  pub max_threads: i32,    field
     53  max_threads: rgb.max_threads,    in from()
     71  max_threads: rgb.max_threads,    in from()
|
/external/rust/crabbyavif/examples/ |
D | crabby_decode.rs |
    288  fn max_threads(jobs: &Option<u32>) -> u32 {    in max_threads() function
    308  max_threads: max_threads(&args.jobs),    in create_decoder_and_parse()
    392  let max_threads = max_threads(&args.jobs);    in decode() local
    395  if max_threads == 1 { "" } else { "s" }    in decode()
|
/external/libfuse/lib/ |
D | fuse_loop_mt.c |
     64  int max_threads;    member
    169  if (mt->numavail == 0 && mt->numworker < mt->max_threads)    in fuse_do_work()
    375  mt.max_threads = config->max_threads;    in fuse_session_loop_mt_312()
    462  config->max_threads = FUSE_LOOP_MT_DEF_MAX_THREADS;    in fuse_loop_cfg_create()
    506  config->max_threads = value;    in fuse_loop_cfg_set_max_threads()
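Note: this is the libfuse >= 3.12 configuration path: fuse_loop_cfg_create() seeds max_threads with FUSE_LOOP_MT_DEF_MAX_THREADS, fuse_loop_cfg_set_max_threads() overrides it, and fuse_do_work() only spawns while numworker < max_threads. A hedged usage sketch (assumes an already-mounted fuse_session *se; fuse_loop_cfg_destroy() is the matching cleanup call in the same config API):

    #include <fuse_lowlevel.h>

    // Cap the multi-threaded session loop at 8 worker threads.
    int run_capped_loop(struct fuse_session *se) {
      struct fuse_loop_config *cfg = fuse_loop_cfg_create();
      fuse_loop_cfg_set_max_threads(cfg, 8);
      int err = fuse_session_loop_mt(se, cfg);
      fuse_loop_cfg_destroy(cfg);
      return err;
    }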
|
/external/pytorch/aten/src/ATen/native/cuda/ |
D | LossCTC.cu |
    294  …constexpr int max_threads = std::is_same<scalar_t, float>::value ? 1024 : 768; // we need 72 or so…    in ctc_loss_gpu_template() local
    295  int threads_target = max_threads;    in ctc_loss_gpu_template()
    299  int threads_batch = std::min(max_threads / threads_target, (int) batch_size);    in ctc_loss_gpu_template()
    646  …constexpr int max_threads = std::is_same<scalar_t, float>::value ? 1024 : 896; // we need 72 or so…    in ctc_loss_backward_gpu_template() local
    647  int threads_target = max_threads;    in ctc_loss_backward_gpu_template()
    651  int threads_batch = std::min(max_threads / threads_target, (int) batch_size);    in ctc_loss_backward_gpu_template()
    699  int threads_target = max_threads;    in ctc_loss_backward_gpu_template()
    703  int threads_batch = std::min(max_threads / threads_target, (int) batch_size);    in ctc_loss_backward_gpu_template()
    726  int threads_input = max_threads;    in ctc_loss_backward_gpu_template()
    730  threads_batch = std::min(max_threads / threads_input, (int) batch_size);    in ctc_loss_backward_gpu_template()
    [all …]
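Note: the CTC kernels split a fixed per-block thread budget between the target and batch dimensions: start with every thread on targets, shrink while a smaller count still covers the longest target, then spend the leftover factor on batch rows. A reconstruction of that heuristic from the lines above (not the exact PyTorch code):

    #include <algorithm>

    struct Block2D { int threads_target; int threads_batch; };

    // max_threads is the CUDA block budget (1024 for float, 768/896 for
    // double above); the product of the two returned sizes never exceeds it.
    Block2D choose_ctc_block_sketch(int max_threads, int max_target_length,
                                    int batch_size) {
      int threads_target = max_threads;
      // Halve while half the threads still cover the longest target.
      while (threads_target / 2 >= max_target_length && threads_target > 1) {
        threads_target /= 2;
      }
      const int threads_batch =
          std::min(max_threads / threads_target, batch_size);
      return {threads_target, threads_batch};
    }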
|
D | AdaptiveAveragePooling.cu |
    490  const int max_threads = std::min<int>(    in adaptive_avg_pool2d_out_cuda_template() local
    508  maxThreadsDim[1], std::min<int>(lastPow2(osizeW), max_threads / block_x));    in adaptive_avg_pool2d_out_cuda_template()
    510  maxThreadsDim[2], std::min<int>(lastPow2(osizeH), max_threads / block_x / block_y));    in adaptive_avg_pool2d_out_cuda_template()
    512  maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z));    in adaptive_avg_pool2d_out_cuda_template()
    642  int max_threads = std::min<int>(    in adaptive_avg_pool2d_backward_out_cuda_template() local
    662  maxThreadsDim[1], std::min<int>(lastPow2(isizeW), max_threads / block_x)), 1);    in adaptive_avg_pool2d_backward_out_cuda_template()
    664  … maxThreadsDim[2], std::min<int>(lastPow2(isizeH), max_threads / block_x / block_y)), 1);    in adaptive_avg_pool2d_backward_out_cuda_template()
    666  … maxThreadsDim[0], std::min<int>(lastPow2(sizeC), max_threads / block_y / block_z)), 1);    in adaptive_avg_pool2d_backward_out_cuda_template()
    700  max_threads /= 2;    in adaptive_avg_pool2d_backward_out_cuda_template()
    704  } while (!done && max_threads);    in adaptive_avg_pool2d_backward_out_cuda_template()
|
/external/pthreadpool/bench/ |
D | latency.cc |
      8  const int max_threads = std::thread::hardware_concurrency();    in SetNumberOfThreads() local
      9  for (int t = 1; t <= max_threads; t++) {    in SetNumberOfThreads()
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | runtime_conv2d_acl.cc |
     92  const int max_threads = tpd->numThreads();    in ACLDepthwiseConvImpl() local
     97  arm_compute::Scheduler::get().set_num_threads(max_threads);    in ACLDepthwiseConvImpl()
    196  const int max_threads = tpd->numThreads();    in ACLGemmConvImpl() local
    201  arm_compute::Scheduler::get().set_num_threads(max_threads);    in ACLGemmConvImpl()
|
/external/libvpx/vp9/decoder/ |
D | vp9_decoder.h |
    120  int max_threads;    member
    166  VP9_COMMON *cm, int num_sbs, int max_threads,
|
/external/libvpx/test/ |
D | vpx_temporal_svc_encoder.sh |
     44  local max_threads="4"
     55  for threads in $(seq $max_threads); do
|