Home
last modified time | relevance | path

Searched refs:num_workers (Results 1 – 25 of 140) sorted by relevance

Pages: 1 2 3 4 5 6

/external/tensorflow/tensorflow/core/distributed_runtime/
Dcollective_param_resolver_distributed_test.cc138 void DefineWorkers(int num_workers, int num_devices, in DefineWorkers() argument
140 for (int w = 0; w < num_workers; ++w) { in DefineWorkers()
178 void DefineCollectiveParams(int num_workers, int num_devices, in DefineCollectiveParams() argument
180 for (int wi = 0; wi < num_workers; ++wi) { in DefineCollectiveParams()
186 CreateCollectiveParams(num_workers, num_devices, device_type); in DefineCollectiveParams()
191 CollectiveParams* CreateCollectiveParams(int num_workers, int num_devices, in CreateCollectiveParams() argument
197 cp->group.group_size = num_workers * num_devices; in CreateCollectiveParams()
199 cp->group.num_tasks = num_workers; in CreateCollectiveParams()
208 void IssueRequests(int num_workers, int num_devices) { in IssueRequests() argument
213 int group_size = num_workers * num_devices; in IssueRequests()
[all …]
/external/tensorflow/tensorflow/python/distribute/
Dcombinations_test.py45 "HasClusterParams", lambda: None, has_chief=True, num_workers=2),
48 def testClusterParams(self, distribution, has_chief, num_workers): argument
50 self.assertEqual(num_workers, 2)
57 def testClusterParamsHasDefault(self, distribution, has_chief, num_workers): argument
59 self.assertEqual(num_workers, 1)
64 def testClusterParamsNoStrategy(self, v, has_chief, num_workers): argument
66 self.assertEqual(num_workers, 1)
71 "WithClusterParams", lambda: None, has_chief=True, num_workers=2),
82 "Strategy1", lambda: None, has_chief=True, num_workers=0),
84 "Strategy2", lambda: None, has_chief=False, num_workers=1),
[all …]
Dmulti_process_runner_test.py84 num_workers=2, num_ps=3, has_chief=True))
97 multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
104 cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
117 multi_worker_test_base.create_cluster_spec(num_workers=1),
134 multi_worker_test_base.create_cluster_spec(num_workers=2),
154 multi_worker_test_base.create_cluster_spec(num_workers=2),
180 multi_worker_test_base.create_cluster_spec(num_workers=2),
213 has_chief=True, num_workers=2, num_ps=2),
261 has_chief=True, num_workers=1),
282 multi_worker_test_base.create_cluster_spec(num_workers=2),
[all …]
Dcombinations.py87 v.num_workers) > 1:
94 num_workers = strategy.num_workers
99 if "num_workers" in kwargs and kwargs["num_workers"] != num_workers:
104 num_workers = kwargs.get("num_workers", 1)
113 update["num_workers"] = num_workers
260 num_workers=1, argument
284 self.num_workers = num_workers
484 def decorator(self, has_chief, num_workers, runner, **kwargs): argument
485 if _num_total_workers(has_chief, num_workers) == 1 or _running_in_worker:
521 num_workers=num_workers,
[all …]
/external/tensorflow/tensorflow/python/data/experimental/kernel_tests/
Ddata_service_ops_test.py60 num_workers=1,
70 cluster = self.create_cluster(num_workers=1)
82 cluster = self.create_cluster(num_workers=1)
96 cluster = self.create_cluster(num_workers=2)
116 cluster = self.create_cluster(num_workers=1)
124 cluster = self.create_cluster(num_workers=1)
134 cluster = self.create_cluster(num_workers=1)
154 cluster = self.create_cluster(num_workers=1)
177 num_workers = 3
178 cluster = self.create_cluster(num_workers=num_workers)
[all …]
Ddata_service_ops_ft_test.py43 cluster = self.create_cluster(num_workers=1)
58 cluster = self.create_cluster(num_workers=1)
67 cluster = self.create_cluster(num_workers=1)
82 cluster = self.create_cluster(num_workers=1)
98 cluster = self.create_cluster(num_workers=1)
118 cluster = self.create_cluster(num_workers=1)
127 cluster = self.create_cluster(num_workers=1)
141 cluster = self.create_cluster(num_workers=1)
157 cluster = self.create_cluster(num_workers=starting_workers)
208 num_workers = 2
[all …]
/external/tensorflow/tensorflow/python/distribute/v1/
Dall_reduce_test.py89 def _buildInput(self, num_workers, num_gpus): argument
95 for w in range(0, num_workers):
144 def _buildRing(self, num_workers, num_gpus, subdiv): argument
147 x, num_workers, subdiv, gpu_perm, math_ops.add, un_op)
149 def _testAllReduce(self, num_workers, num_gpus, shape, build_f): argument
151 num_devices = num_workers * num_gpus
165 def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv): argument
167 build_f = self._buildRing(num_workers, num_gpus, subdiv)
168 self._testAllReduce(num_workers, num_gpus, shape, build_f)
172 (num_workers, num_gpus, shape, subdiv, elapsed))
[all …]
/external/tensorflow/tensorflow/core/kernels/data/experimental/
Dauto_shard_dataset_op.cc47 int64 index, num_workers, auto_shard_policy, num_replicas; in MakeDataset() local
48 OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kNumWorkers, &num_workers)); in MakeDataset()
50 ctx, num_workers > 0, in MakeDataset()
55 ctx, index >= 0 && index < num_workers, in MakeDataset()
56 errors::InvalidArgument("index must be between 0 and ", num_workers - 1)); in MakeDataset()
60 auto config_factory = [num_workers, index, auto_shard_policy, in MakeDataset()
62 return CreateConfig(num_workers, index, auto_shard_policy, num_replicas); in MakeDataset()
73 RewriterConfig AutoShardDatasetOp::CreateConfig(int64 num_workers, int64 index, in CreateConfig() argument
85 {{kNumWorkers, num_workers}, in CreateConfig()
/external/tensorflow/tensorflow/core/kernels/boosted_trees/quantiles/
Dweighted_quantiles_stream_test.cc196 int32 num_workers, double eps, int64 max_elements, in TestDistributedStreams() argument
204 for (int32 i = 0; i < num_workers; ++i) { in TestDistributedStreams()
206 worker_summary_generator(i, max_elements / num_workers, &total_weight, in TestDistributedStreams()
237 const int32 num_workers = 10; in TEST() local
239 const int64 max_elements = num_workers * (1 << 16); in TEST()
241 num_workers, eps, max_elements, GenerateFixedUniformSummary, in TEST()
246 const int32 num_workers = 10; in TEST() local
248 const int64 max_elements = num_workers * (1 << 16); in TEST()
249 TestDistributedStreams(num_workers, eps, max_elements, in TEST()
258 const int32 num_workers = 10; in TEST() local
[all …]
/external/libaom/libaom/av1/encoder/
Dethread.c213 MultiThreadHandle *multi_thread_ctxt, int num_tiles, int num_workers) { in assign_tile_to_thread() argument
217 for (i = 0; i < num_workers; i++) { in assign_tile_to_thread()
375 t += cpi->num_workers) { in enc_worker_hook()
389 static AOM_INLINE void create_enc_workers(AV1_COMP *cpi, int num_workers) { in create_enc_workers() argument
395 aom_malloc(num_workers * sizeof(*cpi->workers))); in create_enc_workers()
398 aom_calloc(num_workers, sizeof(*cpi->tile_thr_data))); in create_enc_workers()
410 for (int i = num_workers - 1; i >= 0; i--) { in create_enc_workers()
414 ++cpi->num_workers; in create_enc_workers()
511 static AOM_INLINE void launch_enc_workers(AV1_COMP *cpi, int num_workers) { in launch_enc_workers() argument
514 for (int i = num_workers - 1; i >= 0; i--) { in launch_enc_workers()
[all …]
/external/libvpx/libvpx/vp9/encoder/
Dvp9_ethread.c49 t += cpi->num_workers) { in enc_worker_hook()
78 static void create_enc_workers(VP9_COMP *cpi, int num_workers) { in create_enc_workers() argument
84 if (cpi->num_workers == 0) { in create_enc_workers()
85 int allocated_workers = num_workers; in create_enc_workers()
105 ++cpi->num_workers; in create_enc_workers()
140 int num_workers) { in launch_enc_workers() argument
144 for (i = 0; i < num_workers; i++) { in launch_enc_workers()
152 for (i = 0; i < num_workers; i++) { in launch_enc_workers()
159 if (i == cpi->num_workers - 1) in launch_enc_workers()
166 for (i = 0; i < num_workers; i++) { in launch_enc_workers()
[all …]
/external/webrtc/third_party/abseil-cpp/absl/synchronization/
Dblocking_counter_test.cc39 const int num_workers = 10; in TEST() local
40 BlockingCounter counter(num_workers); in TEST()
43 std::vector<int> done(num_workers, 0); in TEST()
47 workers.reserve(num_workers); in TEST()
48 for (int k = 0; k < num_workers; k++) { in TEST()
57 for (int k = 0; k < num_workers; k++) { in TEST()
/external/libtextclassifier/abseil-cpp/absl/synchronization/
Dblocking_counter_test.cc39 const int num_workers = 10; in TEST() local
40 BlockingCounter counter(num_workers); in TEST()
43 std::vector<int> done(num_workers, 0); in TEST()
47 workers.reserve(num_workers); in TEST()
48 for (int k = 0; k < num_workers; k++) { in TEST()
57 for (int k = 0; k < num_workers; k++) { in TEST()
/external/abseil-cpp/absl/synchronization/
Dblocking_counter_test.cc39 const int num_workers = 10; in TEST() local
40 BlockingCounter counter(num_workers); in TEST()
43 std::vector<int> done(num_workers, 0); in TEST()
47 workers.reserve(num_workers); in TEST()
48 for (int k = 0; k < num_workers; k++) { in TEST()
57 for (int k = 0; k < num_workers; k++) { in TEST()
/external/rust/crates/grpcio-sys/grpc/third_party/abseil-cpp/absl/synchronization/
Dblocking_counter_test.cc39 const int num_workers = 10; in TEST() local
40 BlockingCounter counter(num_workers); in TEST()
43 std::vector<int> done(num_workers, 0); in TEST()
47 workers.reserve(num_workers); in TEST()
48 for (int k = 0; k < num_workers; k++) { in TEST()
57 for (int k = 0; k < num_workers; k++) { in TEST()
/external/openscreen/third_party/abseil/src/absl/synchronization/
Dblocking_counter_test.cc39 const int num_workers = 10; in TEST() local
40 BlockingCounter counter(num_workers); in TEST()
43 std::vector<int> done(num_workers, 0); in TEST()
47 workers.reserve(num_workers); in TEST()
48 for (int k = 0; k < num_workers; k++) { in TEST()
57 for (int k = 0; k < num_workers; k++) { in TEST()
/external/rust/crates/grpcio-sys/grpc/src/core/lib/iomgr/poller/
Deventmanager_libuv.cc26 grpc::experimental::LibuvEventManager::Options::Options(int num_workers) in Options() argument
27 : num_workers_(num_workers) {} in Options()
31 int num_workers = options_.num_workers(); in LibuvEventManager() local
34 if (num_workers <= 0) num_workers = 32; in LibuvEventManager()
36 for (int i = 0; i < num_workers; i++) { in LibuvEventManager()
/external/angle/third_party/abseil-cpp/absl/synchronization/
Dblocking_counter_test.cc39 const int num_workers = 10; in TEST() local
40 BlockingCounter counter(num_workers); in TEST()
43 std::vector<int> done(num_workers, 0); in TEST()
47 workers.reserve(num_workers); in TEST()
48 for (int k = 0; k < num_workers; k++) { in TEST()
57 for (int k = 0; k < num_workers; k++) { in TEST()
/external/rust/crates/tokio/src/runtime/thread_pool/
Didle.rs20 num_workers: usize, field
31 pub(super) fn new(num_workers: usize) -> Idle { in new()
32 let init = State::new(num_workers); in new()
36 sleepers: Mutex::new(Vec::with_capacity(num_workers)), in new()
37 num_workers, in new()
93 if 2 * state.num_searching() >= self.num_workers { in transition_worker_to_searching()
137 state.num_searching() == 0 && state.num_unparked() < self.num_workers in notify_should_wakeup()
142 fn new(num_workers: usize) -> State { in new()
144 let ret = State(num_workers << UNPARK_SHIFT); in new()
145 debug_assert_eq!(num_workers, ret.num_unparked()); in new()
/external/rust/crates/rayon-core/src/
Dlog.rs114 pub(super) fn new(num_workers: usize) -> Logger { in new()
130 Self::tail_logger_thread(num_workers, filename, 10_000, receiver) in new()
133 ::std::thread::spawn(move || Self::all_logger_thread(num_workers, receiver)); in new()
137 Self::profile_logger_thread(num_workers, filename, 10_000, receiver) in new()
164 num_workers: usize, in profile_logger_thread()
174 let mut state = SimulatorState::new(num_workers); in profile_logger_thread()
207 num_workers: usize, in tail_logger_thread()
217 let mut state = SimulatorState::new(num_workers); in tail_logger_thread()
261 fn all_logger_thread(num_workers: usize, receiver: Receiver<Event>) { in all_logger_thread()
263 let mut state = SimulatorState::new(num_workers); in all_logger_thread()
[all …]
/external/tensorflow/tensorflow/core/grappler/optimizers/data/
Dauto_shard.cc123 Status OptimizeGraph(const GrapplerItem& item, int64 num_workers, int64 index,
141 int64 num_workers, int64 index) { in AddShardNode() argument
149 graph_utils::AddScalarConstNode<int64>(num_workers, graph); in AddShardNode()
382 int64 num_workers, int64 index) { in ProcessDatasetSourceNode() argument
390 TF_RETURN_IF_ERROR(AddShardNode(graph, node, num_workers, index)); in ProcessDatasetSourceNode()
423 const NodeDef* node, int64 num_workers, int64 index, in FindFuncAndTensorSliceDataset() argument
445 return FindFuncAndTensorSliceDataset(input_node, num_workers, index, flib, in FindFuncAndTensorSliceDataset()
449 Status RecursivelyHandleOp(const NodeDef& node, int64 num_workers, int64 index, in RecursivelyHandleOp() argument
459 return RecursivelyHandleOp(*input_node, num_workers, index, flib, graph, in RecursivelyHandleOp()
471 TF_RETURN_IF_ERROR(RecursivelyHandleOp(*input_node, num_workers, index, in RecursivelyHandleOp()
[all …]
/external/libaom/libaom/av1/common/
Dthread_common.c56 int width, int num_workers) { in loop_filter_alloc() argument
88 aom_malloc(num_workers * sizeof(*(lf_sync->lfdata)))); in loop_filter_alloc()
89 lf_sync->num_workers = num_workers; in loop_filter_alloc()
423 const int num_workers = nworkers; in loop_filter_rows_mt() local
427 num_workers > lf_sync->num_workers) { in loop_filter_rows_mt()
429 loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers); in loop_filter_rows_mt()
445 for (i = 0; i < num_workers; ++i) { in loop_filter_rows_mt()
465 if (i == num_workers - 1) { in loop_filter_rows_mt()
473 for (i = 0; i < num_workers; ++i) { in loop_filter_rows_mt()
484 AVxWorker *workers, int num_workers, in av1_loop_filter_frame_mt() argument
[all …]
Dthread_common.h47 int num_workers; member
87 int num_workers; member
109 AVxWorker *workers, int num_workers,
114 int num_workers, AV1LrSync *lr_sync,
116 void av1_loop_restoration_dealloc(AV1LrSync *lr_sync, int num_workers);
/external/tensorflow/tensorflow/python/keras/benchmarks/
Ddistribution_util.py152 num_workers = (
157 num_workers = len(workers)
158 if num_workers > 1 and task_index < 0:
160 task_index = 0 if num_workers == 1 else task_index
171 num_workers = 1
172 return num_workers
/external/tensorflow/tensorflow/python/training/
Dsync_replicas_optimizer_test.py35 def get_workers(num_workers, replicas_to_aggregate, workers): argument
39 for worker_id in range(num_workers):
64 total_num_replicas=num_workers)
72 is_chief, num_tokens=num_workers)
95 num_workers = 2
98 workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
101 sessions, graphs, train_ops = get_workers(num_workers,
187 num_workers = 3
190 workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
193 sessions, graphs, train_ops = get_workers(num_workers,

Pages: 1 2 3 4 5 6