/external/tensorflow/tensorflow/core/distributed_runtime/ |
D | collective_param_resolver_distributed_test.cc |
    145  void DefineWorkers(int num_workers, int num_devices,  in DefineWorkers() argument
    148  for (int w = 0; w < num_workers; ++w) {  in DefineWorkers()
    183  void DefineCollectiveParams(int num_workers, int num_devices) {  in DefineCollectiveParams() argument
    186  for (int wi = 0; wi < num_workers; ++wi) {  in DefineCollectiveParams()
    193  cp.group.group_size = num_workers * num_devices;  in DefineCollectiveParams()
    195  cp.group.num_tasks = num_workers;  in DefineCollectiveParams()
    205  void IssueRequests(int num_workers, int num_devices) {  in IssueRequests() argument
    206  const int device_count = num_workers * num_devices;  in IssueRequests()
    214  for (int wi = 0; wi < num_workers; ++wi) {  in IssueRequests()
    216  IssueRequest(num_workers, num_devices, idx);  in IssueRequests()
    [all …]
|
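DefineCollectiveParams() above sizes the collective group as every device on every worker (group_size = num_workers * num_devices, num_tasks = num_workers). A minimal Python sketch of that bookkeeping, with an illustrative device-name scheme rather than the actual TensorFlow CollectiveParams struct:

```python
# Sketch only: mirrors the sizing arithmetic in DefineCollectiveParams(),
# not the real API. Device names are illustrative.
def define_collective_params(num_workers, num_devices):
    params = {}
    for wi in range(num_workers):
        for di in range(num_devices):
            dev = "/job:worker/task:%d/device:CPU:%d" % (wi, di)
            params[dev] = {
                "group_size": num_workers * num_devices,  # all devices on all workers
                "num_tasks": num_workers,                 # one task per worker
            }
    return params

print(len(define_collective_params(2, 2)))  # 4 devices, each with group_size 4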
/external/tensorflow/tensorflow/python/distribute/ |
D | all_reduce_test.py |
    90   def _buildInput(self, num_workers, num_gpus):  argument
    96   for w in range(0, num_workers):
    145  def _buildRing(self, num_workers, num_gpus, subdiv):  argument
    148  x, num_workers, subdiv, gpu_perm, math_ops.add, un_op)
    150  def _testAllReduce(self, num_workers, num_gpus, shape, build_f):  argument
    152  num_devices = num_workers * num_gpus
    166  def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv):  argument
    168  build_f = self._buildRing(num_workers, num_gpus, subdiv)
    169  self._testAllReduce(num_workers, num_gpus, shape, build_f)
    173  (num_workers, num_gpus, shape, subdiv, elapsed))
    [all …]
|
D | all_reduce.py |
    193  def _ring_permutations(num_workers, num_subchunks, gpu_perm):  argument
    224  devices = num_workers * num_gpus
    235  for w in range(0, num_workers):
    254  def build_ring_all_reduce(input_tensors, num_workers, num_subchunks,  argument
    282  num_workers, num_subchunks, gpu_perm)
    727  num_workers = len(per_worker_devices)
    728  up_values = [None for w in range(0, num_workers)]
    732  for w in range(0, num_workers):
    745  for w in range(0, num_workers):
    818  num_workers = len(per_worker_devices)
    [all …]
|
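all_reduce.py builds a ring all-reduce as a TensorFlow graph; the hits show its inputs (num_workers, num_subchunks, a GPU permutation). Below is a plain-Python sketch of the algorithm itself, with lists standing in for tensors and the subchunk count fixed at one per device. It illustrates the data movement of reduce-scatter followed by all-gather, not the library's graph construction:

```python
def ring_all_reduce(device_tensors):
    """All-reduce (sum) across equal-length lists, one list per device."""
    n = len(device_tensors)
    length = len(device_tensors[0])
    assert length % n == 0, "tensor length must split into n equal chunks"
    chunk = length // n
    bufs = [list(t) for t in device_tensors]  # work on copies

    # Phase 1: reduce-scatter. After n-1 steps, device d holds the fully
    # summed chunk (d + 1) % n.
    for step in range(n - 1):
        sends = []
        for d in range(n):
            c = (d - step) % n                        # chunk device d forwards
            sends.append((c, bufs[d][c * chunk:(c + 1) * chunk]))
        for d in range(n):
            c, payload = sends[(d - 1) % n]           # receive from left neighbor
            for i, v in enumerate(payload):
                bufs[d][c * chunk + i] += v

    # Phase 2: all-gather. Each fully summed chunk circulates around the
    # ring until every device has every chunk.
    for step in range(n - 1):
        sends = []
        for d in range(n):
            c = (d + 1 - step) % n
            sends.append((c, bufs[d][c * chunk:(c + 1) * chunk]))
        for d in range(n):
            c, payload = sends[(d - 1) % n]
            bufs[d][c * chunk:(c + 1) * chunk] = payload
    return bufs

print(ring_all_reduce([[1, 2, 3, 4], [10, 20, 30, 40]]))
# -> [[11, 22, 33, 44], [11, 22, 33, 44]]
```

Each device sends and receives only one chunk per step, which is why the ring schedule keeps per-link bandwidth constant as num_workers * num_gpus grows.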
/external/libaom/libaom/av1/encoder/ |
D | ethread.c |
    198  int num_tiles, int num_workers) {  in assign_tile_to_thread() argument
    202  for (i = 0; i < num_workers; i++) {  in assign_tile_to_thread()
    363  t += cpi->num_workers) {  in enc_worker_hook()
    377  static void create_enc_workers(AV1_COMP *cpi, int num_workers) {  in create_enc_workers() argument
    382  aom_malloc(num_workers * sizeof(*cpi->workers)));  in create_enc_workers()
    385  aom_calloc(num_workers, sizeof(*cpi->tile_thr_data)));  in create_enc_workers()
    397  for (int i = num_workers - 1; i >= 0; i--) {  in create_enc_workers()
    401  ++cpi->num_workers;  in create_enc_workers()
    484  static void launch_enc_workers(AV1_COMP *cpi, int num_workers) {  in launch_enc_workers() argument
    487  for (int i = num_workers - 1; i >= 0; i--) {  in launch_enc_workers()
    [all …]
|
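The assign_tile_to_thread() and enc_worker_hook() hits show libaom striding tiles across encoder workers (the `t += cpi->num_workers` loop). A short sketch of that round-robin split, assuming plain integer tile indices:

```python
# Round-robin tile assignment, as in assign_tile_to_thread(): worker w
# takes tiles w, w + num_workers, w + 2*num_workers, ...
def assign_tiles(num_tiles, num_workers):
    return [list(range(w, num_tiles, num_workers)) for w in range(num_workers)]

# Four workers over ten tiles: worker 0 encodes tiles 0, 4, 8; worker 1
# encodes 1, 5, 9; and so on.
print(assign_tiles(10, 4))
```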
/external/libvpx/libvpx/vp9/encoder/ |
D | vp9_ethread.c |
    49   t += cpi->num_workers) {  in enc_worker_hook()
    78   static void create_enc_workers(VP9_COMP *cpi, int num_workers) {  in create_enc_workers() argument
    84   if (cpi->num_workers == 0) {  in create_enc_workers()
    85   int allocated_workers = num_workers;  in create_enc_workers()
    105  ++cpi->num_workers;  in create_enc_workers()
    140  int num_workers) {  in launch_enc_workers() argument
    144  for (i = 0; i < num_workers; i++) {  in launch_enc_workers()
    152  for (i = 0; i < num_workers; i++) {  in launch_enc_workers()
    159  if (i == cpi->num_workers - 1)  in launch_enc_workers()
    166  for (i = 0; i < num_workers; i++) {  in launch_enc_workers()
    [all …]
|
/external/tensorflow/tensorflow/contrib/boosted_trees/lib/quantiles/ |
D | weighted_quantiles_stream_test.cc |
    197  int32 num_workers, double eps, int64 max_elements,  in TestDistributedStreams() argument
    205  for (int32 i = 0; i < num_workers; ++i) {  in TestDistributedStreams()
    207  worker_summary_generator(i, max_elements / num_workers, &total_weight,  in TestDistributedStreams()
    238  const int32 num_workers = 10;  in TEST() local
    240  const int64 max_elements = num_workers * (1 << 16);  in TEST()
    242  num_workers, eps, max_elements, GenerateFixedUniformSummary,  in TEST()
    247  const int32 num_workers = 10;  in TEST() local
    249  const int64 max_elements = num_workers * (1 << 16);  in TEST()
    250  TestDistributedStreams(num_workers, eps, max_elements,  in TEST()
    259  const int32 num_workers = 10;  in TEST() local
    [all …]
|
/external/tensorflow/tensorflow/core/kernels/boosted_trees/quantiles/ |
D | weighted_quantiles_stream_test.cc |
    196  int32 num_workers, double eps, int64 max_elements,  in TestDistributedStreams() argument
    204  for (int32 i = 0; i < num_workers; ++i) {  in TestDistributedStreams()
    206  worker_summary_generator(i, max_elements / num_workers, &total_weight,  in TestDistributedStreams()
    237  const int32 num_workers = 10;  in TEST() local
    239  const int64 max_elements = num_workers * (1 << 16);  in TEST()
    241  num_workers, eps, max_elements, GenerateFixedUniformSummary,  in TEST()
    246  const int32 num_workers = 10;  in TEST() local
    248  const int64 max_elements = num_workers * (1 << 16);  in TEST()
    249  TestDistributedStreams(num_workers, eps, max_elements,  in TEST()
    258  const int32 num_workers = 10;  in TEST() local
    [all …]
|
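Both copies of weighted_quantiles_stream_test.cc (the contrib and core kernels trees) drive the same scenario: max_elements inputs split evenly across num_workers local summaries, which are then merged into one stream whose quantile answers must stay within eps of the truth. A toy version of that pattern, with exact sorted lists standing in for the compressed summaries the real library builds:

```python
# Simplified distributed-quantiles flow: per-worker summary, merge, query.
# Exact lists replace the library's epsilon-bounded compressed summaries.
def worker_summary(values):
    return sorted(values)

def merge_summaries(summaries):
    merged = []
    for s in summaries:
        merged.extend(s)
    merged.sort()
    return merged

def quantile(summary, q):
    idx = min(len(summary) - 1, int(q * len(summary)))
    return summary[idx]

num_workers, max_elements = 10, 1 << 8
per_worker = max_elements // num_workers          # as in the tests above
summaries = [worker_summary(range(w * per_worker, (w + 1) * per_worker))
             for w in range(num_workers)]
stream = merge_summaries(summaries)
print(quantile(stream, 0.5))  # median over everything the workers saw
```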
/external/tensorflow/tensorflow/core/kernels/data/experimental/ |
D | auto_shard_dataset_op.cc |
    37  int64 num_workers;  in MakeDataset() local
    38  OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, "num_workers", &num_workers));  in MakeDataset()
    40  ctx, num_workers > 0,  in MakeDataset()
    44  OP_REQUIRES(ctx, index >= 0 && index < num_workers,  in MakeDataset()
    46  num_workers - 1));  in MakeDataset()
    48  Dataset* dataset = new Dataset(ctx, input, num_workers, index,  in MakeDataset()
    64  const int64 num_workers, const int64 index,  in Dataset() argument
    68  num_workers_(num_workers),  in Dataset()
|
D | rebatch_dataset_op.cc |
    36  int64 num_workers;  in MakeDataset() local
    37  OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, "num_workers", &num_workers));  in MakeDataset()
    39  ctx, num_workers > 0,  in MakeDataset()
    43  new Dataset(ctx, input, num_workers, output_types_, output_shapes_);  in MakeDataset()
    57  const int64 num_workers, const DataTypeVector& output_types,  in Dataset() argument
    60  num_workers_(num_workers) {}  in Dataset()
|
/external/tensorflow/tensorflow/core/grappler/optimizers/data/ |
D | rebatch.cc |
    140  Status MutateBatchSize(const NodeDef& node, int64 num_workers,  in MutateBatchSize() argument
    159  if (batch_size % num_workers != 0) {  in MutateBatchSize()
    162  " is not divisible by num_workers: ", num_workers);  in MutateBatchSize()
    164  batch_size /= num_workers;  in MutateBatchSize()
    175  Status OptimizeGraph(const GrapplerItem& item, int64 num_workers,
    186  Status RecursivelyHandleOp(const NodeDef& node, int64 num_workers,  in RecursivelyHandleOp() argument
    190  return MutateBatchSize(node, num_workers, graph);  in RecursivelyHandleOp()
    196  RecursivelyHandleOp(*input_node, num_workers, flib, graph));  in RecursivelyHandleOp()
    203  RecursivelyHandleOp(*input_node, num_workers, flib, graph));  in RecursivelyHandleOp()
    211  Status s = OptimizeGraph(f_item, num_workers, &optimized_func_graph);  in RecursivelyHandleOp()
    [all …]
|
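MutateBatchSize() above rewrites the batch-size constant in the dataset graph, failing when it does not divide evenly by num_workers; the dataset kernels in the previous section guard the same inputs (num_workers > 0). The same check-and-divide in a few lines of Python, with a hypothetical function name mirroring the error string in the hits:

```python
# Hypothetical helper modeling MutateBatchSize()'s validation and rewrite.
def rebatch(batch_size, num_workers):
    if num_workers <= 0:
        raise ValueError("num_workers must be greater than zero")
    if batch_size % num_workers != 0:
        raise ValueError("batch size %d is not divisible by num_workers: %d"
                         % (batch_size, num_workers))
    return batch_size // num_workers

print(rebatch(32, 4))  # each of 4 workers sees batches of 8
```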
D | auto_shard.cc |
    94   Status OptimizeGraph(const GrapplerItem& item, int64 num_workers, int64 index,
    107  int64 num_workers, int64 index) {  in AddShardNode() argument
    115  graph_utils::AddScalarConstNode<int64>(num_workers, graph);  in AddShardNode()
    176  Status RecursivelyHandleOp(const NodeDef& node, int64 num_workers, int64 index,  in RecursivelyHandleOp() argument
    188  TF_RETURN_IF_ERROR(RecursivelyHandleOp(*input_node, num_workers, index,  in RecursivelyHandleOp()
    204  TF_RETURN_IF_ERROR(AddShardNode(graph, node, num_workers, index));  in RecursivelyHandleOp()
    211  TF_RETURN_IF_ERROR(AddShardNode(graph, node, num_workers, index));  in RecursivelyHandleOp()
    223  return RecursivelyHandleOp(*input_node, num_workers, index, flib, graph,  in RecursivelyHandleOp()
    227  Status OptimizeGraph(const GrapplerItem& item, int64 num_workers, int64 index,  in OptimizeGraph() argument
    245  TF_RETURN_IF_ERROR(RecursivelyHandleOp(sink_node, num_workers, index, &flib,  in OptimizeGraph()
|
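AddShardNode() splices a Shard op plus a num_workers constant into the dataset graph. Its element-level semantics match the public Dataset.shard(): worker `index` keeps every num_workers-th element, and the index must satisfy the same bounds that auto_shard_dataset_op.cc checks above (0 <= index < num_workers). A plain sketch of those semantics:

```python
# Element-selection rule behind the inserted Shard node: worker `index`
# keeps elements at positions congruent to `index` modulo num_workers.
def shard(elements, num_workers, index):
    if not 0 <= index < num_workers:
        raise ValueError("index must be between 0 and %d" % (num_workers - 1))
    return [x for i, x in enumerate(elements) if i % num_workers == index]

print(shard(range(10), num_workers=4, index=1))  # [1, 5, 9]
```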
/external/tensorflow/tensorflow/contrib/opt/python/training/ |
D | drop_stale_gradient_optimizer_test.py |
    34   def _get_workers(num_workers, staleness):  argument
    35   worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
    44   for ix in range(num_workers)
    60   for worker_id in range(num_workers):
    83   [compute_gradients_queue.dequeue_many(num_workers - 1)]):
    118  num_workers = 1
    119  sessions, graphs, train_ops = _get_workers(num_workers, 0)
    141  num_workers = 1
    142  sessions, graphs, train_ops = _get_workers(num_workers, -1)
    164  num_workers = 2
    [all …]
|
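These tests parameterize workers by a staleness bound. One plausible reading of the check they exercise, written as a standalone predicate: a gradient is tagged with the global step at which it was computed and dropped once the variable has moved too far past that step. The exact comparison lives inside the optimizer; this predicate is an assumption for illustration:

```python
# Assumed freshness rule, not the optimizer's verbatim logic: keep a
# gradient only while the variable has advanced at most `staleness`
# steps since the gradient was computed.
def keep_gradient(grad_step, current_step, staleness):
    return (current_step - grad_step) <= staleness

print(keep_gradient(grad_step=4, current_step=4, staleness=0))  # True: current
print(keep_gradient(grad_step=1, current_step=4, staleness=2))  # False: stale
```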
D | agn_optimizer_test.py |
    38   def create_local_cluster(num_workers, num_ps, protocol="grpc"):  argument
    40   worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
    51   for ix in range(num_workers)
    64   def _get_workers(num_workers, period, workers, num_ps=1):  argument
    68   for worker_id in range(num_workers):
    107  num_worker=num_workers,
    139  num_workers = 1
    142  _, workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
    144  sessions, graphs, train_ops = _get_workers(num_workers,
    200  num_workers = 2
    [all …]
|
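agn_optimizer_test.py above, like elastic_average_optimizer_test.py below, passes a `period` into _get_workers(): workers take local steps and only synchronize with the global copy every `period` steps. A toy schedule showing the effect of that communication period (the averaging rule here is a generic stand-in, not either optimizer's exact update):

```python
# Periodic model averaging, illustrative only: local steps accumulate,
# and every `period` steps the workers average into a global copy.
def run(num_workers, period, steps):
    global_w = 0.0
    local_w = [0.0] * num_workers
    for step in range(1, steps + 1):
        for w in range(num_workers):
            local_w[w] += 1.0            # stand-in for one local update
        if step % period == 0:           # communication round
            global_w = sum(local_w) / num_workers
            local_w = [global_w] * num_workers
    return global_w

print(run(num_workers=2, period=4, steps=8))  # 8.0: two sync rounds happened
```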
D | elastic_average_optimizer_test.py |
    41   def create_local_cluster(num_workers, num_ps, protocol="grpc"):  argument
    43   worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
    54   for ix in range(num_workers)
    67   def _get_workers(num_workers, period, workers, moving_rate, num_ps=1):  argument
    72   for worker_id in range(num_workers):
    114  num_worker=num_workers,
    151  num_workers = 1
    155  num_workers=num_workers, num_ps=num_ps)
    158  num_workers, communication_period, workers, 1.0)
    214  num_workers = 2
    [all …]
|
D | model_average_optimizer_test.py |
    35   def create_local_cluster(num_workers, num_ps, protocol="grpc"):  argument
    37   worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
    48   for ix in range(num_workers)
    61   def _get_workers(num_workers, steps, workers):  argument
    65   for worker_id in range(num_workers):
    93   num_worker=num_workers,
    118  num_workers = 2
    122  num_workers=num_workers, num_ps=num_ps)
    124  sessions, graphs, train_ops = _get_workers(num_workers, steps, workers)
|
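The three optimizer tests above share the same create_local_cluster() setup: pick one unused port per worker and per parameter server, then assemble the job-to-address map. A condensed sketch of that helper (portpicker is the module the tests themselves import and must be installed; the dict layout follows the usual tf.train.ClusterSpec job/task convention):

```python
import portpicker  # same dependency the tests use

# Condensed version of the shared create_local_cluster() port-picking step.
def create_cluster_spec(num_workers, num_ps):
    worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
    ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
    return {
        "worker": ["localhost:%d" % p for p in worker_ports],
        "ps": ["localhost:%d" % p for p in ps_ports],
    }

print(create_cluster_spec(num_workers=2, num_ps=1))
```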
/external/libaom/libaom/av1/common/ |
D | thread_common.c |
    56   int width, int num_workers) {  in loop_filter_alloc() argument
    88   aom_malloc(num_workers * sizeof(*(lf_sync->lfdata))));  in loop_filter_alloc()
    89   lf_sync->num_workers = num_workers;  in loop_filter_alloc()
    416  const int num_workers = nworkers;  in loop_filter_rows_mt() local
    420  num_workers > lf_sync->num_workers) {  in loop_filter_rows_mt()
    422  loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);  in loop_filter_rows_mt()
    438  for (i = 0; i < num_workers; ++i) {  in loop_filter_rows_mt()
    458  if (i == num_workers - 1) {  in loop_filter_rows_mt()
    466  for (i = 0; i < num_workers; ++i) {  in loop_filter_rows_mt()
    477  AVxWorker *workers, int num_workers,  in av1_loop_filter_frame_mt() argument
    [all …]
|
D | thread_common.h |
    47   int num_workers;  member
    87   int num_workers;  member
    109  AVxWorker *workers, int num_workers,
    116  int num_workers, AV1LrSync *lr_sync,
    116  void av1_loop_restoration_dealloc(AV1LrSync *lr_sync, int num_workers);
|
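thread_common.{c,h} coordinate the row-parallel loop filter: per-worker data is reallocated whenever the requested num_workers exceeds what lf_sync last allocated, rows are striped across workers, and the last worker runs on the calling thread rather than being spawned (the `i == num_workers - 1` branch in the hits). A Python sketch of that dispatch shape, with a trivial marker standing in for the actual filtering:

```python
import threading

# Interleaved-row dispatch, mirroring loop_filter_rows_mt(): worker w
# handles superblock rows w, w + num_workers, w + 2*num_workers, ...
def filter_rows(worker_id, num_workers, sb_rows, results):
    for row in range(worker_id, sb_rows, num_workers):
        results[row] = worker_id  # stand-in for filtering one row

def filter_frame_mt(sb_rows, num_workers):
    results = [None] * sb_rows
    threads = [threading.Thread(target=filter_rows,
                                args=(w, num_workers, sb_rows, results))
               for w in range(num_workers - 1)]
    for t in threads:
        t.start()
    # The last worker runs on the calling thread, as in the C code.
    filter_rows(num_workers - 1, num_workers, sb_rows, results)
    for t in threads:
        t.join()
    return results

print(filter_frame_mt(sb_rows=8, num_workers=3))
```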
/external/tensorflow/tensorflow/python/training/ |
D | sync_replicas_optimizer_test.py |
    35   def get_workers(num_workers, replicas_to_aggregate, workers):  argument
    39   for worker_id in range(num_workers):
    64   total_num_replicas=num_workers)
    72   is_chief, num_tokens=num_workers)
    94   num_workers = 2
    97   workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
    100  sessions, graphs, train_ops = get_workers(num_workers,
    185  num_workers = 3
    188  workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
    191  sessions, graphs, train_ops = get_workers(num_workers,
|
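These tests set both total_num_replicas and num_tokens equal to num_workers: each global step waits for one gradient per worker, applies their average once, and then releases one token per worker so all of them may proceed. A toy model of a single synchronous step (illustrative names, not the SyncReplicasOptimizer API):

```python
# One synchronous training step: aggregate every replica's gradient,
# apply a single averaged update, hand out one token per worker.
def sync_step(worker_grads, weight, lr=0.1):
    num_workers = len(worker_grads)
    avg = sum(worker_grads) / num_workers
    weight -= lr * avg
    tokens = num_workers        # mirrors num_tokens=num_workers in the tests
    return weight, tokens

print(sync_step([0.5, 1.5], weight=1.0))  # (0.9, 2)
```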
/external/tensorflow/tensorflow/python/data/experimental/kernel_tests/ |
D | rebatch_dataset_test.py |
    45   rebatched_dataset = batching._RebatchDataset(dataset, num_workers=4)
    59   batching._RebatchDataset(dataset, num_workers=4)
    66   rebatched_dataset = batching._RebatchDataset(dataset, num_workers=5)
    74   rebatched_dataset = batching._RebatchDataset(dataset, num_workers=4)
    84   rebatched_dataset = batching._RebatchDataset(dataset, num_workers=4)
    93   rebatched_dataset = batching._RebatchDataset(dataset, num_workers=4)
    107  rebatched_dataset = batching._RebatchDataset(dataset, num_workers=4)
    147  rebatched_dataset = batching._RebatchDataset(dataset, num_workers=4)
    161  rebatched_dataset = batching._RebatchDataset(dataset, num_workers=4)
    187  rebatched_dataset = batching._RebatchDataset(dataset, num_workers=4)
    [all …]
|
/external/libvpx/libvpx/vp9/common/ |
D | vp9_thread_common.c |
    170  const int num_workers = VPXMIN(nworkers, VPXMIN(num_tile_cols, sb_rows));  in loop_filter_rows_mt() local
    174  num_workers > lf_sync->num_workers) {  in loop_filter_rows_mt()
    176  vp9_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);  in loop_filter_rows_mt()
    178  lf_sync->num_active_workers = num_workers;  in loop_filter_rows_mt()
    191  for (i = 0; i < num_workers; ++i) {  in loop_filter_rows_mt()
    206  if (i == num_workers - 1) {  in loop_filter_rows_mt()
    214  for (i = 0; i < num_workers; ++i) {  in loop_filter_rows_mt()
    223  int num_workers, VP9LfSync *lf_sync) {  in vp9_loop_filter_frame_mt() argument
    239  workers, num_workers, lf_sync);  in vp9_loop_filter_frame_mt()
    243  int num_workers) {  in vp9_lpf_mt_init() argument
    [all …]
|
D | vp9_thread_common.h |
    39   int num_workers;  // number of allocated workers.  member
    53   int width, int num_workers);
    63   int num_workers, VP9LfSync *lf_sync);
    67   int frame_filter_level, int num_workers);
|
/external/tensorflow/tensorflow/contrib/distribute/python/ |
D | multi_worker_test_base.py |
    69   def _create_cluster(num_workers,  argument
    79   worker_ports = [pick_unused_port() for _ in range(num_workers)]
    83   if num_workers > 0:
    94   for i in range(num_workers):
    133  def create_in_process_cluster(num_workers,  argument
    139  gpu_mem_frac = 0.7 / (num_workers + int(has_chief) + int(has_eval))
    166  num_workers,
    176  num_workers=1,  argument
    186  if num_workers:
    188  'localhost:%s' % pick_unused_port() for _ in range(num_workers)
    [all …]
|
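One line from create_in_process_cluster() is worth decoding: `gpu_mem_frac = 0.7 / (num_workers + int(has_chief) + int(has_eval))` splits a fixed 70% GPU-memory budget evenly across every task that may share the local GPUs, counting the optional chief and evaluator as extra tasks:

```python
# Same arithmetic as the hit above, pulled out as a standalone helper.
def gpu_memory_fraction(num_workers, has_chief=False, has_eval=False):
    return 0.7 / (num_workers + int(has_chief) + int(has_eval))

print(gpu_memory_fraction(num_workers=3, has_chief=True))  # 0.175
```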
/external/tensorflow/tensorflow/core/util/ |
D | work_sharder_test.cc |
    30  void RunSharding(int64 num_workers, int64 total, int64 cost_per_unit,  in RunSharding() argument
    37  Shard(num_workers, threads, total, cost_per_unit,  in RunSharding()
    50  LOG(INFO) << num_workers << " " << total << " " << cost_per_unit << " "  in RunSharding()
    53  if (std::min(num_workers, per_thread_max_parallelism) <  in RunSharding()
|
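work_sharder_test.cc exercises Shard()'s contract: effective parallelism is capped by min(num_workers, per_thread_max_parallelism), and every unit in [0, total) is processed exactly once. A rough model of that splitting (the real Shard also sizes blocks using cost_per_unit; that heuristic is omitted here, so this is a sketch of the contract, not the implementation):

```python
# Illustrative sharder: split `total` units into contiguous [start, limit)
# blocks, one per effective thread, and invoke work_fn on each block.
def shard(num_workers, max_parallelism, total, work_fn):
    threads = max(1, min(num_workers, max_parallelism))
    block = (total + threads - 1) // threads      # ceiling division
    for start in range(0, total, block):
        work_fn(start, min(start + block, total))

done = []
shard(num_workers=4, max_parallelism=2, total=10,
      work_fn=lambda lo, hi: done.extend(range(lo, hi)))
assert done == list(range(10))  # every unit covered exactly once
```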
/external/tensorflow/tensorflow/python/data/experimental/ops/ |
D | distribute.py |
    43  def __init__(self, input_dataset, num_workers, index):  argument
    49  num_workers=num_workers,
|
/external/v8/tools/testrunner/local/ |
D | pool.py |
    104  def __init__(self, num_workers, heartbeat_timeout=1):  argument
    105  self.num_workers = num_workers
    153  for w in xrange(self.num_workers):
    196  while self.processing_count < self.num_workers * self.BUFFER_FACTOR:
|
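The loop condition in pool.py, `processing_count < num_workers * BUFFER_FACTOR`, is a backpressure rule: keep only a small multiple of the worker count in flight so every worker has a short queue without materializing the whole test list up front. A minimal illustration (the `xrange` in the hits marks the original as Python 2; this sketch is Python 3, and BUFFER_FACTOR's value here is assumed):

```python
BUFFER_FACTOR = 2  # assumed value; the pool defines its own constant

# Yield pending work only while the in-flight count stays under budget.
def feed(pending, num_workers, in_flight):
    while pending and in_flight < num_workers * BUFFER_FACTOR:
        yield pending.pop(0)
        in_flight += 1

pending = list(range(10))
print(list(feed(pending, num_workers=2, in_flight=0)))  # first 4 items
print(pending)                                          # the rest wait their turn
```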