
Searched refs: num_workers (Results 1 – 25 of 91) sorted by relevance


/external/tensorflow/tensorflow/core/distributed_runtime/
collective_param_resolver_distributed_test.cc
145  void DefineWorkers(int num_workers, int num_devices, in DefineWorkers() argument
148 for (int w = 0; w < num_workers; ++w) { in DefineWorkers()
186 void DefineCollectiveParams(int num_workers, int num_devices) { in DefineCollectiveParams() argument
189 for (int wi = 0; wi < num_workers; ++wi) { in DefineCollectiveParams()
196 cp.group.group_size = num_workers * num_devices; in DefineCollectiveParams()
198 cp.group.num_tasks = num_workers; in DefineCollectiveParams()
208 void IssueRequests(int num_workers, int num_devices) { in IssueRequests() argument
209 const int device_count = num_workers * num_devices; in IssueRequests()
217 for (int wi = 0; wi < num_workers; ++wi) { in IssueRequests()
219 IssueRequest(num_workers, num_devices, idx); in IssueRequests()
[all …]
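The test above sets up one collective group covering every device on every worker; lines 196 and 198 encode the key arithmetic (group_size = num_workers * num_devices, num_tasks = num_workers). A minimal Python sketch of that relationship, with illustrative names:

def collective_group(num_workers, num_devices):
    # One task (process) per worker; every device on every worker participates.
    return {
        "group_size": num_workers * num_devices,
        "num_tasks": num_workers,
    }

print(collective_group(num_workers=2, num_devices=3))  # {'group_size': 6, 'num_tasks': 2}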
device_resolver_distributed_test.cc
147  void DefineWorkers(int num_workers, int num_devices, in DefineWorkers() argument
150 for (int w = 0; w < num_workers; ++w) { in DefineWorkers()
196 const int num_workers, const int num_devices, const string& worker_prefix, in ResolveIncarnationsAndValidate() argument
199 for (int w = 0; w < num_workers; ++w) { in ResolveIncarnationsAndValidate()
204 for (int peer_w = 0; peer_w < num_workers; ++peer_w) { in ResolveIncarnationsAndValidate()
274 constexpr int num_workers = 3; in TEST_F() local
279 DefineWorkers(num_workers, num_devices, device_type, device_incarnation_base); in TEST_F()
285 std::vector<std::vector<uint64>> expected_incarnations(num_workers); in TEST_F()
286 for (int w = 0; w < num_workers; ++w) { in TEST_F()
293 ResolveIncarnationsAndValidate(num_workers, num_devices, worker_prefix, in TEST_F()
[all …]
/external/tensorflow/tensorflow/python/distribute/
all_reduce_test.py
90  def _buildInput(self, num_workers, num_gpus): argument
96 for w in range(0, num_workers):
145 def _buildRing(self, num_workers, num_gpus, subdiv): argument
148 x, num_workers, subdiv, gpu_perm, math_ops.add, un_op)
150 def _testAllReduce(self, num_workers, num_gpus, shape, build_f): argument
152 num_devices = num_workers * num_gpus
166 def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv): argument
168 build_f = self._buildRing(num_workers, num_gpus, subdiv)
169 self._testAllReduce(num_workers, num_gpus, shape, build_f)
173 (num_workers, num_gpus, shape, subdiv, elapsed))
[all …]
multi_process_runner_test.py
63  num_workers=2, num_ps=3, has_eval=1),
77 multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
84 cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
97 multi_worker_test_base.create_cluster_spec(num_workers=1),
112 multi_worker_test_base.create_cluster_spec(num_workers=2),
130 multi_worker_test_base.create_cluster_spec(num_workers=1),
140 multi_worker_test_base.create_cluster_spec(num_workers=1),
158 multi_worker_test_base.create_cluster_spec(num_workers=2),
181 multi_worker_test_base.create_cluster_spec(num_workers=2),
multi_worker_test_base.py
82  def _create_cluster(num_workers, argument
93 worker_ports = [pick_unused_port() for _ in range(num_workers)]
97 if num_workers > 0:
108 for i in range(num_workers):
147 def create_in_process_cluster(num_workers, argument
154 gpu_mem_frac = 0.7 / (num_workers + int(has_chief) + int(has_eval))
186 num_workers,
205 num_workers=1, argument
218 if num_workers:
220 'localhost:%s' % pick_unused_port() for _ in range(num_workers)
[all …]
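These helpers assemble a local cluster definition from num_workers (plus optional chief/PS/evaluator tasks). The sketch below shows the kind of cluster spec and per-task TF_CONFIG they correspond to; the port numbers are placeholders, and the exact dict layout returned by create_cluster_spec is an assumption based on the standard TF_CONFIG format.

import json
import os

num_workers = 2
num_ps = 1
cluster = {
    # One address per worker / parameter-server task; ports are placeholders.
    "worker": ["localhost:%d" % (20000 + i) for i in range(num_workers)],
    "ps": ["localhost:%d" % (21000 + i) for i in range(num_ps)],
}
# Worker 0 would then see a TF_CONFIG like this:
os.environ["TF_CONFIG"] = json.dumps({
    "cluster": cluster,
    "task": {"type": "worker", "index": 0},
})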
multi_worker_continuous_run_test.py
45  num_workers = 5
65 expected_mean = (num_workers - 1) / 2
81 cluster_spec=test_base.create_cluster_spec(num_workers=num_workers))
all_reduce.py
193  def _ring_permutations(num_workers, num_subchunks, gpu_perm): argument
224 devices = num_workers * num_gpus
235 for w in range(0, num_workers):
254 def build_ring_all_reduce(input_tensors, num_workers, num_subchunks, argument
282 num_workers, num_subchunks, gpu_perm)
733 num_workers = len(per_worker_devices)
734 up_values = [None for w in range(0, num_workers)]
738 for w in range(0, num_workers):
751 for w in range(0, num_workers):
824 num_workers = len(per_worker_devices)
[all …]
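build_ring_all_reduce assembles TF subgraphs that implement a ring all-reduce across num_workers * num_gpus devices. Below is a pure-Python reference simulation of the single-subchunk case, intended only to illustrate the reduce-scatter / all-gather structure; the function name and segment layout are illustrative, and the real helper additionally handles num_subchunks > 1 and per-worker GPU permutations.

def ring_all_reduce(vectors):
    """Sum equal-length vectors held by n 'devices' arranged in a ring."""
    n = len(vectors)
    length = len(vectors[0])
    assert all(len(v) == length for v in vectors)
    seg = -(-length // n)  # ceiling division: one segment per device
    bounds = [(d * seg, min((d + 1) * seg, length)) for d in range(n)]
    local = [list(v) for v in vectors]

    # Reduce-scatter: after n-1 steps, device d owns the full sum of segment (d+1) % n.
    for step in range(n - 1):
        for d in range(n):
            lo, hi = bounds[(d - step) % n]   # segment device d forwards this step
            nxt = (d + 1) % n
            for k in range(lo, hi):
                local[nxt][k] += local[d][k]

    # All-gather: circulate the completed segments until every device has all of them.
    for step in range(n - 1):
        for d in range(n):
            lo, hi = bounds[(d + 1 - step) % n]
            nxt = (d + 1) % n
            for k in range(lo, hi):
                local[nxt][k] = local[d][k]
    return local

print(ring_all_reduce([[1.0, 2.0, 3.0], [10.0, 20.0, 30.0], [100.0, 200.0, 300.0]])[0])
# [111.0, 222.0, 333.0]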
/external/tensorflow/tensorflow/core/kernels/data/experimental/
auto_shard_dataset_op.cc
43  int64 index, num_workers, auto_shard_policy; in MakeDataset() local
44 OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kNumWorkers, &num_workers)); in MakeDataset()
46 ctx, num_workers > 0, in MakeDataset()
51 ctx, index >= 0 && index < num_workers, in MakeDataset()
52 errors::InvalidArgument("index must be between 0 and ", num_workers - 1)); in MakeDataset()
55 auto config_factory = [num_workers, index, auto_shard_policy]() { in MakeDataset()
56 return CreateConfig(num_workers, index, auto_shard_policy); in MakeDataset()
68 RewriterConfig AutoShardDatasetOp::CreateConfig(int64 num_workers, int64 index, in CreateConfig() argument
78 num_workers_attr.set_i(num_workers); in CreateConfig()
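MakeDataset validates num_workers > 0 and 0 <= index < num_workers before sharding the input pipeline. The same contract, sketched with the public tf.data API (the helper name is illustrative; in practice this rewrite is applied automatically by the auto-shard policy rather than called by hand):

import tensorflow as tf

def shard_for_worker(dataset, num_workers, index):
    if num_workers <= 0:
        raise ValueError("num_workers must be greater than 0")
    if not 0 <= index < num_workers:
        raise ValueError("index must be between 0 and %d" % (num_workers - 1))
    # Worker `index` keeps every num_workers-th element, starting at `index`.
    return dataset.shard(num_shards=num_workers, index=index)

ds = tf.data.Dataset.range(8)
print(list(shard_for_worker(ds, num_workers=2, index=0).as_numpy_iterator()))  # [0, 2, 4, 6]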
/external/libaom/libaom/av1/encoder/
ethread.c
213  MultiThreadHandle *multi_thread_ctxt, int num_tiles, int num_workers) { in assign_tile_to_thread() argument
217 for (i = 0; i < num_workers; i++) { in assign_tile_to_thread()
375 t += cpi->num_workers) { in enc_worker_hook()
389 static AOM_INLINE void create_enc_workers(AV1_COMP *cpi, int num_workers) { in create_enc_workers() argument
395 aom_malloc(num_workers * sizeof(*cpi->workers))); in create_enc_workers()
398 aom_calloc(num_workers, sizeof(*cpi->tile_thr_data))); in create_enc_workers()
410 for (int i = num_workers - 1; i >= 0; i--) { in create_enc_workers()
414 ++cpi->num_workers; in create_enc_workers()
511 static AOM_INLINE void launch_enc_workers(AV1_COMP *cpi, int num_workers) { in launch_enc_workers() argument
514 for (int i = num_workers - 1; i >= 0; i--) { in launch_enc_workers()
[all …]
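In enc_worker_hook (line 375) each worker walks the tile list with a stride of cpi->num_workers, so worker w processes tiles w, w + num_workers, w + 2*num_workers, and so on. A tiny sketch of that round-robin split, with illustrative names:

def tiles_for_worker(worker_id, num_workers, num_tiles):
    # Same stride-by-num_workers walk as the encoder hook, expressed as a range.
    return list(range(worker_id, num_tiles, num_workers))

print(tiles_for_worker(1, num_workers=3, num_tiles=10))  # [1, 4, 7]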
/external/tensorflow/tensorflow/core/kernels/boosted_trees/quantiles/
weighted_quantiles_stream_test.cc
196  int32 num_workers, double eps, int64 max_elements, in TestDistributedStreams() argument
204 for (int32 i = 0; i < num_workers; ++i) { in TestDistributedStreams()
206 worker_summary_generator(i, max_elements / num_workers, &total_weight, in TestDistributedStreams()
237 const int32 num_workers = 10; in TEST() local
239 const int64 max_elements = num_workers * (1 << 16); in TEST()
241 num_workers, eps, max_elements, GenerateFixedUniformSummary, in TEST()
246 const int32 num_workers = 10; in TEST() local
248 const int64 max_elements = num_workers * (1 << 16); in TEST()
249 TestDistributedStreams(num_workers, eps, max_elements, in TEST()
258 const int32 num_workers = 10; in TEST() local
[all …]
/external/libvpx/libvpx/vp9/encoder/
vp9_ethread.c
49  t += cpi->num_workers) { in enc_worker_hook()
78 static void create_enc_workers(VP9_COMP *cpi, int num_workers) { in create_enc_workers() argument
84 if (cpi->num_workers == 0) { in create_enc_workers()
85 int allocated_workers = num_workers; in create_enc_workers()
105 ++cpi->num_workers; in create_enc_workers()
140 int num_workers) { in launch_enc_workers() argument
144 for (i = 0; i < num_workers; i++) { in launch_enc_workers()
152 for (i = 0; i < num_workers; i++) { in launch_enc_workers()
159 if (i == cpi->num_workers - 1) in launch_enc_workers()
166 for (i = 0; i < num_workers; i++) { in launch_enc_workers()
[all …]
/external/libaom/libaom/av1/common/
thread_common.c
56  int width, int num_workers) { in loop_filter_alloc() argument
88 aom_malloc(num_workers * sizeof(*(lf_sync->lfdata)))); in loop_filter_alloc()
89 lf_sync->num_workers = num_workers; in loop_filter_alloc()
423 const int num_workers = nworkers; in loop_filter_rows_mt() local
427 num_workers > lf_sync->num_workers) { in loop_filter_rows_mt()
429 loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers); in loop_filter_rows_mt()
445 for (i = 0; i < num_workers; ++i) { in loop_filter_rows_mt()
465 if (i == num_workers - 1) { in loop_filter_rows_mt()
473 for (i = 0; i < num_workers; ++i) { in loop_filter_rows_mt()
484 AVxWorker *workers, int num_workers, in av1_loop_filter_frame_mt() argument
[all …]
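loop_filter_rows_mt only re-runs loop_filter_alloc when the requested worker count exceeds what lf_sync already holds (lines 427-429). A sketch of that grow-only allocation pattern; the class and field names are illustrative stand-ins for AV1LfSync:

class LfSync:
    def __init__(self):
        self.num_workers = 0
        self.lfdata = []

    def ensure_capacity(self, num_workers):
        # Re-allocate per-worker state only when the request grows past the
        # previously allocated count; otherwise reuse the existing buffers.
        if num_workers > self.num_workers:
            self.lfdata = [{} for _ in range(num_workers)]
            self.num_workers = num_workers

sync = LfSync()
sync.ensure_capacity(4)
sync.ensure_capacity(2)   # no re-allocation; 4 slots remain available
print(sync.num_workers)   # 4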
thread_common.h
47  int num_workers; member
87 int num_workers; member
109 AVxWorker *workers, int num_workers,
114 int num_workers, AV1LrSync *lr_sync,
116 void av1_loop_restoration_dealloc(AV1LrSync *lr_sync, int num_workers);
/external/tensorflow/tensorflow/core/grappler/optimizers/data/
auto_shard.cc
110  Status OptimizeGraph(const GrapplerItem& item, int64 num_workers, int64 index,
123 int64 num_workers, int64 index) { in AddShardNode() argument
131 graph_utils::AddScalarConstNode<int64>(num_workers, graph); in AddShardNode()
306 int64 num_workers, int64 index) { in ProcessDatasetSourceNode() argument
314 TF_RETURN_IF_ERROR(AddShardNode(graph, node, num_workers, index)); in ProcessDatasetSourceNode()
335 Status RecursivelyHandleOp(const NodeDef& node, int64 num_workers, int64 index, in RecursivelyHandleOp() argument
347 TF_RETURN_IF_ERROR(RecursivelyHandleOp(*input_node, num_workers, index, in RecursivelyHandleOp()
363 return ProcessDatasetSourceNode(graph, node, nodes_to_delete, num_workers, in RecursivelyHandleOp()
369 return ProcessDatasetSourceNode(graph, node, nodes_to_delete, num_workers, in RecursivelyHandleOp()
386 return RecursivelyHandleOp(*input_node, num_workers, index, flib, graph, in RecursivelyHandleOp()
[all …]
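This grappler pass walks the dataset graph down to a suitable source node and splices in a ShardDataset node, with num_workers and index materialized as scalar constants. From user code, the usual way to reach it is through the auto-shard policy on a distributed dataset; a minimal sketch with the public API (the policy choice here is only an example, and a tf.distribute strategy supplies each worker's actual num_workers/index pair):

import tensorflow as tf

options = tf.data.Options()
# DATA sharding inserts the element-wise shard; FILE shards by input files.
options.experimental_distribute.auto_shard_policy = (
    tf.data.experimental.AutoShardPolicy.DATA)

dataset = tf.data.Dataset.range(100).batch(10).with_options(options)
# When a tf.distribute strategy distributes `dataset`, the auto_shard rewrite
# fills in (num_workers, index) for each worker automatically.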
/external/tensorflow/tensorflow/python/training/
sync_replicas_optimizer_test.py
35  def get_workers(num_workers, replicas_to_aggregate, workers): argument
39 for worker_id in range(num_workers):
64 total_num_replicas=num_workers)
72 is_chief, num_tokens=num_workers)
94 num_workers = 2
97 workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
100 sessions, graphs, train_ops = get_workers(num_workers,
185 num_workers = 3
188 workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
191 sessions, graphs, train_ops = get_workers(num_workers,
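get_workers builds one TF1-style graph per worker and wraps the base optimizer so that gradients from the replicas are aggregated before a step is applied. A condensed sketch of that wiring under the tf.compat.v1 API; the learning rate and the choice to aggregate one gradient per worker are illustrative:

import tensorflow.compat.v1 as tf1

num_workers = 2
base_opt = tf1.train.GradientDescentOptimizer(learning_rate=0.1)
opt = tf1.train.SyncReplicasOptimizer(
    base_opt,
    replicas_to_aggregate=num_workers,  # wait for one gradient per worker
    total_num_replicas=num_workers)
# Each worker then attaches the hook that manages the shared token queue:
# hook = opt.make_session_run_hook(is_chief, num_tokens=num_workers)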
localhost_cluster_performance_test.py
40  workers, _ = test.create_local_cluster(num_workers=2, num_ps=2)
67 test.create_local_cluster(num_workers=1, num_ps=10)
83 workers, _ = test.create_local_cluster(num_workers=1, num_ps=100)
/external/tensorflow/tensorflow/python/data/experimental/ops/
distribute.py
47  def __init__(self, input_dataset, num_workers, index): argument
56 num_workers=num_workers,
64 num_workers=num_workers,
74 def _AutoShardDatasetV1(input_dataset, num_workers, index): # pylint: disable=invalid-name argument
76 _AutoShardDataset(input_dataset, num_workers, index))
/external/libvpx/libvpx/vp9/common/
vp9_thread_common.h
39  int num_workers; // number of allocated workers. member
53 int width, int num_workers);
63 int num_workers, VP9LfSync *lf_sync);
67 int frame_filter_level, int num_workers);
vp9_thread_common.c
170  const int num_workers = VPXMIN(nworkers, VPXMIN(num_tile_cols, sb_rows)); in loop_filter_rows_mt() local
174 num_workers > lf_sync->num_workers) { in loop_filter_rows_mt()
176 vp9_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers); in loop_filter_rows_mt()
178 lf_sync->num_active_workers = num_workers; in loop_filter_rows_mt()
191 for (i = 0; i < num_workers; ++i) { in loop_filter_rows_mt()
206 if (i == num_workers - 1) { in loop_filter_rows_mt()
214 for (i = 0; i < num_workers; ++i) { in loop_filter_rows_mt()
223 int num_workers, VP9LfSync *lf_sync) { in vp9_loop_filter_frame_mt() argument
239 workers, num_workers, lf_sync); in vp9_loop_filter_frame_mt()
243 int num_workers) { in vp9_lpf_mt_init() argument
[all …]
/external/tensorflow/tensorflow/python/keras/distribute/
multi_worker_test.py
213  num_workers = 2
216 cluster_spec = test_base.create_cluster_spec(num_workers=num_workers)
221 num_epoch=num_epoch, num_worker=num_workers)
269 num_workers = 2
272 num_workers=num_workers, num_ps=2)
277 num_epoch=num_epoch, num_worker=num_workers)
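These Keras tests drive end-to-end training across num_workers processes. The public-API shape of what they exercise looks roughly like the sketch below; the model and data are placeholders, and it assumes TF_CONFIG has already been set for each of the num_workers processes (run single-process, it simply behaves as one worker; older TF 2.x versions expose the strategy under tf.distribute.experimental):

import tensorflow as tf

strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer="sgd", loss="mse")

features = tf.random.uniform((64, 4))
labels = tf.random.uniform((64, 1))
dataset = tf.data.Dataset.from_tensor_slices((features, labels)).batch(8)
model.fit(dataset, epochs=2)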
multi_worker_fault_tolerance_test.py
229  num_workers = 2
253 cluster_spec = test_base.create_cluster_spec(num_workers=num_workers)
289 cluster_spec = test_base.create_cluster_spec(num_workers=num_workers)
294 for _ in range(num_workers)
multi_worker_callback_tf2_test.py
109  cluster_spec=test_base.create_cluster_spec(num_workers=2),
145 cluster_spec=test_base.create_cluster_spec(num_workers=2),
174 cluster_spec=test_base.create_cluster_spec(num_workers=2),
203 cluster_spec=test_base.create_cluster_spec(num_workers=2),
/external/tensorflow/tensorflow/core/util/
work_sharder_test.cc
30  void RunSharding(int64 num_workers, int64 total, int64 cost_per_unit, in RunSharding() argument
37 Shard(num_workers, threads, total, cost_per_unit, in RunSharding()
50 LOG(INFO) << num_workers << " " << total << " " << cost_per_unit << " " in RunSharding()
53 if (std::min(num_workers, per_thread_max_parallelism) < in RunSharding()
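Shard() divides `total` work units among at most num_workers threads (further capped by the pool's per-thread parallelism, as line 53 checks), handing each one a contiguous [start, limit) range via the work callback. A simplified single-threaded sketch of that partitioning; the real implementation also weighs block sizes by cost_per_unit:

def shard(num_workers, total, work):
    num_shards = max(1, min(num_workers, total))
    block = -(-total // num_shards)  # ceiling division
    for start in range(0, total, block):
        work(start, min(start + block, total))

shard(3, 10, lambda start, limit: print("work on [%d, %d)" % (start, limit)))
# work on [0, 4)
# work on [4, 8)
# work on [8, 10)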
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_ExperimentalAutoShardDataset.pbtxt
11  name: "num_workers"
19 A scalar representing the index of the current worker out of num_workers.
24 Creates a dataset that shards the input dataset by num_workers, returning a
api_def_AutoShardDataset.pbtxt
11  name: "num_workers"
19 A scalar representing the index of the current worker out of num_workers.
24 Creates a dataset that shards the input dataset by num_workers, returning a
