/external/tensorflow/tensorflow/core/distributed_runtime/
    collective_param_resolver_distributed_test.cc
        138  void DefineWorkers(int num_workers, int num_devices,    in DefineWorkers() argument
        140  for (int w = 0; w < num_workers; ++w) {    in DefineWorkers()
        178  void DefineCollectiveParams(int num_workers, int num_devices,    in DefineCollectiveParams() argument
        180  for (int wi = 0; wi < num_workers; ++wi) {    in DefineCollectiveParams()
        186  CreateCollectiveParams(num_workers, num_devices, device_type);    in DefineCollectiveParams()
        191  CollectiveParams* CreateCollectiveParams(int num_workers, int num_devices,    in CreateCollectiveParams() argument
        197  cp->group.group_size = num_workers * num_devices;    in CreateCollectiveParams()
        199  cp->group.num_tasks = num_workers;    in CreateCollectiveParams()
        208  void IssueRequests(int num_workers, int num_devices) {    in IssueRequests() argument
        213  int group_size = num_workers * num_devices;    in IssueRequests()
        [all …]
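The arithmetic these matches repeat is the core of the test's setup: a collective group spans num_workers * num_devices participants, with one task per worker. A minimal C++ sketch of that layout, using a hypothetical GroupParams struct rather than TensorFlow's actual CollectiveParams:

    #include <iostream>

    // Hypothetical stand-in for the group fields set in CreateCollectiveParams().
    struct GroupParams {
      int group_size;  // total participating devices across all workers
      int num_tasks;   // one task per worker process
    };

    GroupParams MakeGroupParams(int num_workers, int num_devices) {
      // Every worker contributes num_devices devices, so the collective
      // group has num_workers * num_devices members in num_workers tasks.
      GroupParams gp;
      gp.group_size = num_workers * num_devices;
      gp.num_tasks = num_workers;
      return gp;
    }

    int main() {
      GroupParams gp = MakeGroupParams(/*num_workers=*/3, /*num_devices=*/2);
      std::cout << gp.group_size << " devices across " << gp.num_tasks
                << " tasks\n";  // prints: 6 devices across 3 tasks
    }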
/external/tensorflow/tensorflow/python/distribute/
    combinations_test.py
        45  "HasClusterParams", lambda: None, has_chief=True, num_workers=2),
        48  def testClusterParams(self, distribution, has_chief, num_workers):    argument
        50  self.assertEqual(num_workers, 2)
        57  def testClusterParamsHasDefault(self, distribution, has_chief, num_workers):    argument
        59  self.assertEqual(num_workers, 1)
        64  def testClusterParamsNoStrategy(self, v, has_chief, num_workers):    argument
        66  self.assertEqual(num_workers, 1)
        71  "WithClusterParams", lambda: None, has_chief=True, num_workers=2),
        82  "Strategy1", lambda: None, has_chief=True, num_workers=0),
        84  "Strategy2", lambda: None, has_chief=False, num_workers=1),
        [all …]
    multi_process_runner_test.py
        84  num_workers=2, num_ps=3, has_chief=True))
        97  multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
        104  cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
        117  multi_worker_test_base.create_cluster_spec(num_workers=1),
        134  multi_worker_test_base.create_cluster_spec(num_workers=2),
        154  multi_worker_test_base.create_cluster_spec(num_workers=2),
        180  multi_worker_test_base.create_cluster_spec(num_workers=2),
        213  has_chief=True, num_workers=2, num_ps=2),
        261  has_chief=True, num_workers=1),
        282  multi_worker_test_base.create_cluster_spec(num_workers=2),
        [all …]
    combinations.py
        87  v.num_workers) > 1:
        94  num_workers = strategy.num_workers
        99  if "num_workers" in kwargs and kwargs["num_workers"] != num_workers:
        104  num_workers = kwargs.get("num_workers", 1)
        113  update["num_workers"] = num_workers
        260  num_workers=1,    argument
        284  self.num_workers = num_workers
        484  def decorator(self, has_chief, num_workers, runner, **kwargs):    argument
        485  if _num_total_workers(has_chief, num_workers) == 1 or _running_in_worker:
        521  num_workers=num_workers,
        [all …]
/external/tensorflow/tensorflow/python/data/experimental/kernel_tests/
    data_service_ops_test.py
        60  num_workers=1,
        70  cluster = self.create_cluster(num_workers=1)
        82  cluster = self.create_cluster(num_workers=1)
        96  cluster = self.create_cluster(num_workers=2)
        116  cluster = self.create_cluster(num_workers=1)
        124  cluster = self.create_cluster(num_workers=1)
        134  cluster = self.create_cluster(num_workers=1)
        154  cluster = self.create_cluster(num_workers=1)
        177  num_workers = 3
        178  cluster = self.create_cluster(num_workers=num_workers)
        [all …]
    data_service_ops_ft_test.py
        43  cluster = self.create_cluster(num_workers=1)
        58  cluster = self.create_cluster(num_workers=1)
        67  cluster = self.create_cluster(num_workers=1)
        82  cluster = self.create_cluster(num_workers=1)
        98  cluster = self.create_cluster(num_workers=1)
        118  cluster = self.create_cluster(num_workers=1)
        127  cluster = self.create_cluster(num_workers=1)
        141  cluster = self.create_cluster(num_workers=1)
        157  cluster = self.create_cluster(num_workers=starting_workers)
        208  num_workers = 2
        [all …]
/external/tensorflow/tensorflow/python/distribute/v1/
    all_reduce_test.py
        89  def _buildInput(self, num_workers, num_gpus):    argument
        95  for w in range(0, num_workers):
        144  def _buildRing(self, num_workers, num_gpus, subdiv):    argument
        147  x, num_workers, subdiv, gpu_perm, math_ops.add, un_op)
        149  def _testAllReduce(self, num_workers, num_gpus, shape, build_f):    argument
        151  num_devices = num_workers * num_gpus
        165  def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv):    argument
        167  build_f = self._buildRing(num_workers, num_gpus, subdiv)
        168  self._testAllReduce(num_workers, num_gpus, shape, build_f)
        172  (num_workers, num_gpus, shape, subdiv, elapsed))
        [all …]
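The test drives a ring all-reduce across num_devices = num_workers * num_gpus participants. As a reminder of what that builds, here is a single-threaded C++ simulation of the generic ring algorithm (reduce-scatter, then all-gather); it is a sketch of the technique, not TensorFlow's implementation, and all names are illustrative:

    #include <cassert>
    #include <iostream>
    #include <numeric>
    #include <vector>

    // Single-threaded simulation of ring all-reduce across
    // num_devices = num_workers * num_gpus equal-sized buffers.
    void RingAllReduce(std::vector<std::vector<double>>& dev) {
      const int n = static_cast<int>(dev.size());
      const size_t len = dev[0].size();
      assert(len % n == 0);          // one equal chunk per device
      const size_t chunk = len / n;
      // Phase 1: reduce-scatter. At step s, device d adds its partial sum
      // of chunk (d - s) mod n into neighbor (d + 1) mod n.
      for (int s = 0; s < n - 1; ++s) {
        auto snap = dev;             // read sends from start-of-step state
        for (int d = 0; d < n; ++d) {
          const int c = ((d - s) % n + n) % n;
          const int to = (d + 1) % n;
          for (size_t i = 0; i < chunk; ++i)
            dev[to][c * chunk + i] += snap[d][c * chunk + i];
        }
      }
      // Phase 2: all-gather. Device d now owns the fully reduced chunk
      // (d + 1) mod n and forwards completed chunks around the ring.
      for (int s = 0; s < n - 1; ++s) {
        auto snap = dev;
        for (int d = 0; d < n; ++d) {
          const int c = ((d + 1 - s) % n + n) % n;
          const int to = (d + 1) % n;
          for (size_t i = 0; i < chunk; ++i)
            dev[to][c * chunk + i] = snap[d][c * chunk + i];
        }
      }
    }

    int main() {
      const int num_workers = 2, num_gpus = 2;
      const int num_devices = num_workers * num_gpus;  // as in _testAllReduce
      std::vector<std::vector<double>> dev(num_devices, std::vector<double>(8));
      for (int d = 0; d < num_devices; ++d)
        std::iota(dev[d].begin(), dev[d].end(), double(d));  // distinct inputs
      RingAllReduce(dev);
      // Every device now holds the elementwise sum of all inputs.
      for (double v : dev[0]) std::cout << v << ' ';  // prints: 6 10 14 ...
      std::cout << '\n';
    }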
/external/tensorflow/tensorflow/core/kernels/data/experimental/
    auto_shard_dataset_op.cc
        47  int64 index, num_workers, auto_shard_policy, num_replicas;    in MakeDataset() local
        48  OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kNumWorkers, &num_workers));    in MakeDataset()
        50  ctx, num_workers > 0,    in MakeDataset()
        55  ctx, index >= 0 && index < num_workers,    in MakeDataset()
        56  errors::InvalidArgument("index must be between 0 and ", num_workers - 1));    in MakeDataset()
        60  auto config_factory = [num_workers, index, auto_shard_policy,    in MakeDataset()
        62  return CreateConfig(num_workers, index, auto_shard_policy, num_replicas);    in MakeDataset()
        73  RewriterConfig AutoShardDatasetOp::CreateConfig(int64 num_workers, int64 index,    in CreateConfig() argument
        85  {{kNumWorkers, num_workers},    in CreateConfig()
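The checks in MakeDataset() reduce to two invariants: num_workers must be positive, and index must lie in [0, num_workers). A hedged C++ sketch of that validation together with the element-skipping shard it ultimately configures; ShardByIndex is an illustrative helper, not TensorFlow's API:

    #include <iostream>
    #include <stdexcept>
    #include <vector>

    // Validate (num_workers, index) the way MakeDataset() does, then take
    // every element whose position mod num_workers equals index.
    std::vector<int> ShardByIndex(const std::vector<int>& data,
                                  long num_workers, long index) {
      if (num_workers <= 0)
        throw std::invalid_argument("num_workers must be greater than 0");
      if (index < 0 || index >= num_workers)
        throw std::invalid_argument("index must be between 0 and num_workers - 1");
      std::vector<int> shard;
      for (size_t i = index; i < data.size(); i += num_workers)
        shard.push_back(data[i]);  // this worker's slice of the dataset
      return shard;
    }

    int main() {
      std::vector<int> data{0, 1, 2, 3, 4, 5, 6, 7};
      for (int v : ShardByIndex(data, /*num_workers=*/3, /*index=*/1))
        std::cout << v << ' ';  // prints: 1 4 7
      std::cout << '\n';
    }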
/external/tensorflow/tensorflow/core/kernels/boosted_trees/quantiles/
    weighted_quantiles_stream_test.cc
        196  int32 num_workers, double eps, int64 max_elements,    in TestDistributedStreams() argument
        204  for (int32 i = 0; i < num_workers; ++i) {    in TestDistributedStreams()
        206  worker_summary_generator(i, max_elements / num_workers, &total_weight,    in TestDistributedStreams()
        237  const int32 num_workers = 10;    in TEST() local
        239  const int64 max_elements = num_workers * (1 << 16);    in TEST()
        241  num_workers, eps, max_elements, GenerateFixedUniformSummary,    in TEST()
        246  const int32 num_workers = 10;    in TEST() local
        248  const int64 max_elements = num_workers * (1 << 16);    in TEST()
        249  TestDistributedStreams(num_workers, eps, max_elements,    in TEST()
        258  const int32 num_workers = 10;    in TEST() local
        [all …]
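The distributed test splits max_elements evenly across num_workers, lets each worker build a local summary, then merges the summaries and queries quantiles. A toy C++ version of that flow, with exact sorted vectors standing in for the stream's epsilon-approximate summaries:

    #include <algorithm>
    #include <iostream>
    #include <vector>

    // Merge per-worker summaries and answer a quantile query q in [0, 1].
    // Exact (sort-based) stand-in for the approximate merge in the test.
    double MergedQuantile(const std::vector<std::vector<double>>& summaries,
                          double q) {
      std::vector<double> merged;
      for (const auto& s : summaries)  // merge all worker summaries
        merged.insert(merged.end(), s.begin(), s.end());
      std::sort(merged.begin(), merged.end());
      size_t rank = static_cast<size_t>(q * (merged.size() - 1));
      return merged[rank];
    }

    int main() {
      const int num_workers = 10;
      const long max_elements = num_workers * (1 << 10);
      std::vector<std::vector<double>> summaries(num_workers);
      for (int i = 0; i < num_workers; ++i)  // each worker summarizes a slice
        for (long j = 0; j < max_elements / num_workers; ++j)
          summaries[i].push_back(i + j * num_workers);  // interleaved uniform data
      // Median of 0..max_elements-1 should be about max_elements / 2.
      std::cout << MergedQuantile(summaries, 0.5) << '\n';
    }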
/external/libaom/libaom/av1/encoder/
    ethread.c
        213  MultiThreadHandle *multi_thread_ctxt, int num_tiles, int num_workers) {    in assign_tile_to_thread() argument
        217  for (i = 0; i < num_workers; i++) {    in assign_tile_to_thread()
        375  t += cpi->num_workers) {    in enc_worker_hook()
        389  static AOM_INLINE void create_enc_workers(AV1_COMP *cpi, int num_workers) {    in create_enc_workers() argument
        395  aom_malloc(num_workers * sizeof(*cpi->workers)));    in create_enc_workers()
        398  aom_calloc(num_workers, sizeof(*cpi->tile_thr_data)));    in create_enc_workers()
        410  for (int i = num_workers - 1; i >= 0; i--) {    in create_enc_workers()
        414  ++cpi->num_workers;    in create_enc_workers()
        511  static AOM_INLINE void launch_enc_workers(AV1_COMP *cpi, int num_workers) {    in launch_enc_workers() argument
        514  for (int i = num_workers - 1; i >= 0; i--) {    in launch_enc_workers()
        [all …]
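The `t += cpi->num_workers` stride in enc_worker_hook() is the whole scheduling policy: worker w starts at item w and jumps num_workers ahead each time, dealing tiles round-robin with no queue or scheduler. A generic C++ sketch of that pattern (names are illustrative, not libaom's):

    #include <iostream>
    #include <thread>
    #include <vector>

    int main() {
      const int num_workers = 4;
      const int num_tiles = 10;
      std::vector<int> owner(num_tiles, -1);  // which worker handled each tile
      std::vector<std::thread> workers;
      for (int w = 0; w < num_workers; ++w) {
        workers.emplace_back([&, w] {
          // Worker w handles tiles w, w + num_workers, w + 2 * num_workers, ...
          // Each thread writes disjoint slots, so no locking is needed.
          for (int t = w; t < num_tiles; t += num_workers) owner[t] = w;
        });
      }
      for (auto& th : workers) th.join();
      for (int t = 0; t < num_tiles; ++t)
        std::cout << "tile " << t << " -> worker " << owner[t] << '\n';
    }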
/external/libvpx/libvpx/vp9/encoder/
    vp9_ethread.c
        49  t += cpi->num_workers) {    in enc_worker_hook()
        78  static void create_enc_workers(VP9_COMP *cpi, int num_workers) {    in create_enc_workers() argument
        84  if (cpi->num_workers == 0) {    in create_enc_workers()
        85  int allocated_workers = num_workers;    in create_enc_workers()
        105  ++cpi->num_workers;    in create_enc_workers()
        140  int num_workers) {    in launch_enc_workers() argument
        144  for (i = 0; i < num_workers; i++) {    in launch_enc_workers()
        152  for (i = 0; i < num_workers; i++) {    in launch_enc_workers()
        159  if (i == cpi->num_workers - 1)    in launch_enc_workers()
        166  for (i = 0; i < num_workers; i++) {    in launch_enc_workers()
        [all …]
/external/webrtc/third_party/abseil-cpp/absl/synchronization/
    blocking_counter_test.cc
        39  const int num_workers = 10;    in TEST() local
        40  BlockingCounter counter(num_workers);    in TEST()
        43  std::vector<int> done(num_workers, 0);    in TEST()
        47  workers.reserve(num_workers);    in TEST()
        48  for (int k = 0; k < num_workers; k++) {    in TEST()
        57  for (int k = 0; k < num_workers; k++) {    in TEST()
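This same test ships with every vendored Abseil copy below. Its shape is a countdown latch: spawn num_workers threads, have each record its work and call DecrementCount(), and let the main thread Wait() for all of them. A self-contained, standard-library-only sketch of that pattern (absl::BlockingCounter offers exactly this Wait()/DecrementCount() surface, but this is not Abseil's implementation):

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>
    #include <vector>

    // Minimal countdown latch: workers call DecrementCount(), the
    // coordinator blocks in Wait() until the count reaches zero.
    class BlockingCounter {
     public:
      explicit BlockingCounter(int initial_count) : count_(initial_count) {}
      void DecrementCount() {
        std::lock_guard<std::mutex> lock(mu_);
        if (--count_ == 0) cv_.notify_all();
      }
      void Wait() {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [this] { return count_ == 0; });
      }
     private:
      std::mutex mu_;
      std::condition_variable cv_;
      int count_;
    };

    int main() {
      const int num_workers = 10;            // mirrors the test above
      BlockingCounter counter(num_workers);
      std::vector<int> done(num_workers, 0);
      std::vector<std::thread> workers;
      workers.reserve(num_workers);
      for (int k = 0; k < num_workers; k++) {
        workers.emplace_back([&, k] {
          done[k] = 1;                       // do the work, then check in
          counter.DecrementCount();
        });
      }
      counter.Wait();                        // all workers have checked in;
      for (int k = 0; k < num_workers; k++)  // the mutex handoff makes their
        std::cout << done[k];                // writes to done[] visible here
      std::cout << '\n';
      for (auto& t : workers) t.join();
    }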
/external/libtextclassifier/abseil-cpp/absl/synchronization/
    blocking_counter_test.cc
        39  const int num_workers = 10;    in TEST() local
        40  BlockingCounter counter(num_workers);    in TEST()
        43  std::vector<int> done(num_workers, 0);    in TEST()
        47  workers.reserve(num_workers);    in TEST()
        48  for (int k = 0; k < num_workers; k++) {    in TEST()
        57  for (int k = 0; k < num_workers; k++) {    in TEST()
/external/abseil-cpp/absl/synchronization/
    blocking_counter_test.cc
        39  const int num_workers = 10;    in TEST() local
        40  BlockingCounter counter(num_workers);    in TEST()
        43  std::vector<int> done(num_workers, 0);    in TEST()
        47  workers.reserve(num_workers);    in TEST()
        48  for (int k = 0; k < num_workers; k++) {    in TEST()
        57  for (int k = 0; k < num_workers; k++) {    in TEST()
/external/rust/crates/grpcio-sys/grpc/third_party/abseil-cpp/absl/synchronization/
    blocking_counter_test.cc
        39  const int num_workers = 10;    in TEST() local
        40  BlockingCounter counter(num_workers);    in TEST()
        43  std::vector<int> done(num_workers, 0);    in TEST()
        47  workers.reserve(num_workers);    in TEST()
        48  for (int k = 0; k < num_workers; k++) {    in TEST()
        57  for (int k = 0; k < num_workers; k++) {    in TEST()
/external/openscreen/third_party/abseil/src/absl/synchronization/
    blocking_counter_test.cc
        39  const int num_workers = 10;    in TEST() local
        40  BlockingCounter counter(num_workers);    in TEST()
        43  std::vector<int> done(num_workers, 0);    in TEST()
        47  workers.reserve(num_workers);    in TEST()
        48  for (int k = 0; k < num_workers; k++) {    in TEST()
        57  for (int k = 0; k < num_workers; k++) {    in TEST()
/external/rust/crates/grpcio-sys/grpc/src/core/lib/iomgr/poller/
    eventmanager_libuv.cc
        26  grpc::experimental::LibuvEventManager::Options::Options(int num_workers)    in Options() argument
        27  : num_workers_(num_workers) {}    in Options()
        31  int num_workers = options_.num_workers();    in LibuvEventManager() local
        34  if (num_workers <= 0) num_workers = 32;    in LibuvEventManager()
        36  for (int i = 0; i < num_workers; i++) {    in LibuvEventManager()
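The notable detail here is the defaulting rule: a non-positive num_workers means "pick for me", and the manager falls back to 32 worker threads. A small C++ sketch of that option-defaulting pattern; the EventManager class is illustrative, and only the <= 0 fallback mirrors the source:

    #include <iostream>
    #include <thread>
    #include <vector>

    class EventManager {
     public:
      explicit EventManager(int num_workers) {
        if (num_workers <= 0) num_workers = 32;  // default when unspecified
        for (int i = 0; i < num_workers; i++)
          workers_.emplace_back([] { /* poll for events... */ });
      }
      ~EventManager() {
        for (auto& w : workers_) w.join();
      }
      size_t size() const { return workers_.size(); }
     private:
      std::vector<std::thread> workers_;
    };

    int main() {
      EventManager em(/*num_workers=*/0);       // caller left it unset
      std::cout << em.size() << " workers\n";   // prints: 32 workers
    }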
/external/angle/third_party/abseil-cpp/absl/synchronization/
    blocking_counter_test.cc
        39  const int num_workers = 10;    in TEST() local
        40  BlockingCounter counter(num_workers);    in TEST()
        43  std::vector<int> done(num_workers, 0);    in TEST()
        47  workers.reserve(num_workers);    in TEST()
        48  for (int k = 0; k < num_workers; k++) {    in TEST()
        57  for (int k = 0; k < num_workers; k++) {    in TEST()
/external/rust/crates/tokio/src/runtime/thread_pool/
    idle.rs
        20  num_workers: usize,    field
        31  pub(super) fn new(num_workers: usize) -> Idle {    in new()
        32  let init = State::new(num_workers);    in new()
        36  sleepers: Mutex::new(Vec::with_capacity(num_workers)),    in new()
        37  num_workers,    in new()
        93  if 2 * state.num_searching() >= self.num_workers {    in transition_worker_to_searching()
        137  state.num_searching() == 0 && state.num_unparked() < self.num_workers    in notify_should_wakeup()
        142  fn new(num_workers: usize) -> State {    in new()
        144  let ret = State(num_workers << UNPARK_SHIFT);    in new()
        145  debug_assert_eq!(num_workers, ret.num_unparked());    in new()
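idle.rs packs two counters into one integer so both can be read in a single load: the number of unparked workers lives in the high bits (seeded by num_workers << UNPARK_SHIFT) and the number of searching workers in the low bits. A non-atomic C++ sketch of the same packing; the 16-bit shift width is an assumption, not tokio's actual constant:

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    constexpr uint64_t kUnparkShift = 16;                  // assumed width
    constexpr uint64_t kSearchMask = (1u << kUnparkShift) - 1;

    // Two counters packed into one word: unparked workers in the high
    // bits, searching workers in the low bits.
    struct State {
      uint64_t bits;
      explicit State(uint64_t num_workers) : bits(num_workers << kUnparkShift) {}
      uint64_t num_unparked() const { return bits >> kUnparkShift; }
      uint64_t num_searching() const { return bits & kSearchMask; }
      void inc_searching() { bits += 1; }                    // low field
      void dec_unparked() { bits -= 1ull << kUnparkShift; }  // high field
    };

    int main() {
      State s(/*num_workers=*/8);
      assert(s.num_unparked() == 8);  // mirrors the debug_assert_eq! above
      s.inc_searching();
      s.dec_unparked();
      std::cout << s.num_unparked() << " unparked, "
                << s.num_searching() << " searching\n";  // 7 unparked, 1 searching
    }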
/external/rust/crates/rayon-core/src/
    log.rs
        114  pub(super) fn new(num_workers: usize) -> Logger {    in new()
        130  Self::tail_logger_thread(num_workers, filename, 10_000, receiver)    in new()
        133  ::std::thread::spawn(move || Self::all_logger_thread(num_workers, receiver));    in new()
        137  Self::profile_logger_thread(num_workers, filename, 10_000, receiver)    in new()
        164  num_workers: usize,    in profile_logger_thread()
        174  let mut state = SimulatorState::new(num_workers);    in profile_logger_thread()
        207  num_workers: usize,    in tail_logger_thread()
        217  let mut state = SimulatorState::new(num_workers);    in tail_logger_thread()
        261  fn all_logger_thread(num_workers: usize, receiver: Receiver<Event>) {    in all_logger_thread()
        263  let mut state = SimulatorState::new(num_workers);    in all_logger_thread()
        [all …]
/external/tensorflow/tensorflow/core/grappler/optimizers/data/
    auto_shard.cc
        123  Status OptimizeGraph(const GrapplerItem& item, int64 num_workers, int64 index,
        141  int64 num_workers, int64 index) {    in AddShardNode() argument
        149  graph_utils::AddScalarConstNode<int64>(num_workers, graph);    in AddShardNode()
        382  int64 num_workers, int64 index) {    in ProcessDatasetSourceNode() argument
        390  TF_RETURN_IF_ERROR(AddShardNode(graph, node, num_workers, index));    in ProcessDatasetSourceNode()
        423  const NodeDef* node, int64 num_workers, int64 index,    in FindFuncAndTensorSliceDataset() argument
        445  return FindFuncAndTensorSliceDataset(input_node, num_workers, index, flib,    in FindFuncAndTensorSliceDataset()
        449  Status RecursivelyHandleOp(const NodeDef& node, int64 num_workers, int64 index,    in RecursivelyHandleOp() argument
        459  return RecursivelyHandleOp(*input_node, num_workers, index, flib, graph,    in RecursivelyHandleOp()
        471  TF_RETURN_IF_ERROR(RecursivelyHandleOp(*input_node, num_workers, index,    in RecursivelyHandleOp()
        [all …]
/external/libaom/libaom/av1/common/
    thread_common.c
        56  int width, int num_workers) {    in loop_filter_alloc() argument
        88  aom_malloc(num_workers * sizeof(*(lf_sync->lfdata))));    in loop_filter_alloc()
        89  lf_sync->num_workers = num_workers;    in loop_filter_alloc()
        423  const int num_workers = nworkers;    in loop_filter_rows_mt() local
        427  num_workers > lf_sync->num_workers) {    in loop_filter_rows_mt()
        429  loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);    in loop_filter_rows_mt()
        445  for (i = 0; i < num_workers; ++i) {    in loop_filter_rows_mt()
        465  if (i == num_workers - 1) {    in loop_filter_rows_mt()
        473  for (i = 0; i < num_workers; ++i) {    in loop_filter_rows_mt()
        484  AVxWorker *workers, int num_workers,    in av1_loop_filter_frame_mt() argument
        [all …]
    thread_common.h
        47  int num_workers;    member
        87  int num_workers;    member
        109  AVxWorker *workers, int num_workers,
        114  int num_workers, AV1LrSync *lr_sync,
        116  void av1_loop_restoration_dealloc(AV1LrSync *lr_sync, int num_workers);
/external/tensorflow/tensorflow/python/keras/benchmarks/
    distribution_util.py
        152  num_workers = (
        157  num_workers = len(workers)
        158  if num_workers > 1 and task_index < 0:
        160  task_index = 0 if num_workers == 1 else task_index
        171  num_workers = 1
        172  return num_workers
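The helper derives num_workers from the worker list and only tolerates a missing task_index when there is a single worker. A C++ sketch of that resolution rule, under the assumption that the cluster is given as a plain list of worker addresses; the function and its signature are illustrative:

    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Resolve worker count and task index from a worker list. A missing
    // task_index (negative) is only acceptable when it is unambiguous.
    int ResolveNumWorkers(const std::vector<std::string>& workers,
                          int& task_index) {
      if (workers.empty()) {  // no cluster configured: run as one local worker
        task_index = 0;
        return 1;
      }
      const int num_workers = static_cast<int>(workers.size());
      if (num_workers > 1 && task_index < 0)
        throw std::invalid_argument(
            "task_index must be set when there is more than one worker");
      if (num_workers == 1) task_index = 0;  // unambiguous, so default it
      return num_workers;
    }

    int main() {
      std::vector<std::string> workers{"host0:2222"};
      int task_index = -1;  // caller didn't set one
      int n = ResolveNumWorkers(workers, task_index);
      std::cout << n << " worker(s), task_index=" << task_index << '\n';
    }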
/external/tensorflow/tensorflow/python/training/
    sync_replicas_optimizer_test.py
        35  def get_workers(num_workers, replicas_to_aggregate, workers):    argument
        39  for worker_id in range(num_workers):
        64  total_num_replicas=num_workers)
        72  is_chief, num_tokens=num_workers)
        95  num_workers = 2
        98  workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
        101  sessions, graphs, train_ops = get_workers(num_workers,
        187  num_workers = 3
        190  workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
        193  sessions, graphs, train_ops = get_workers(num_workers,
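These tests configure the optimizer to wait for gradients from all num_workers replicas (total_num_replicas equals num_workers) before applying an averaged update. A toy single-threaded C++ sketch of that accumulate-then-apply rule; the class and method names are illustrative, not TensorFlow's API:

    #include <iostream>
    #include <vector>

    // Collect gradients until replicas_to_aggregate have reported, then
    // emit their average; earlier calls just accumulate.
    class GradientAccumulator {
     public:
      explicit GradientAccumulator(int replicas_to_aggregate)
          : needed_(replicas_to_aggregate) {}
      bool Report(const std::vector<double>& grad, std::vector<double>* avg) {
        if (sum_.empty()) sum_.assign(grad.size(), 0.0);
        for (size_t i = 0; i < grad.size(); ++i) sum_[i] += grad[i];
        if (++received_ < needed_) return false;  // keep waiting
        avg->resize(sum_.size());
        for (size_t i = 0; i < sum_.size(); ++i) (*avg)[i] = sum_[i] / needed_;
        sum_.clear();
        received_ = 0;  // ready for the next training step
        return true;
      }
     private:
      int needed_;
      int received_ = 0;
      std::vector<double> sum_;
    };

    int main() {
      const int num_workers = 2;  // as in the first test case above
      GradientAccumulator acc(/*replicas_to_aggregate=*/num_workers);
      std::vector<double> avg;
      acc.Report({1.0, 2.0}, &avg);       // first replica: not enough yet
      if (acc.Report({3.0, 4.0}, &avg))   // second replica completes the step
        std::cout << avg[0] << ' ' << avg[1] << '\n';  // prints: 2 3
    }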