/external/tensorflow/tensorflow/python/distribute/v1/
D  all_reduce_test.py
     85  def _buildInput(self, num_workers, num_gpus):  argument
    140  def _buildRing(self, num_workers, num_gpus, subdiv):  argument
    145  def _testAllReduce(self, num_workers, num_gpus, shape, build_f):  argument
    161  def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv):  argument
    185  def _buildShuffle(self, num_workers, num_gpus, num_shards):  argument
    192  def _testShuffleAllReduce(self, num_workers, num_gpus, shape, num_shards):  argument
    212  def _buildRecursiveHD(self, num_workers, num_gpus):  argument
    218  def _testRecursiveHDAllReduce(self, num_workers, num_gpus, shape):  argument

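The tests above build ring, shuffle, and recursive halving-doubling all-reduce graphs parameterized by num_workers and num_gpus. For orientation, a minimal pure-Python sketch of the ring all-reduce idea behind the ring variant; the ring_all_reduce helper and its list-of-lists representation are illustrative, not the TensorFlow API.

    # Minimal ring all-reduce sketch (illustrative helper, not the TF API).
    # Each worker holds a vector; every worker ends with the elementwise sum,
    # moving one chunk per neighbor per step: 2*(n-1) steps in total.
    def ring_all_reduce(worker_values):
        n = len(worker_values)
        chunks = [list(v) for v in worker_values]
        length = len(chunks[0])
        bounds = [(i * length // n, (i + 1) * length // n) for i in range(n)]

        # Scatter-reduce: after n-1 steps, worker w owns the full sum
        # of chunk (w + 1) % n.
        for step in range(n - 1):
            for w in range(n):
                lo, hi = bounds[(w - step) % n]
                dst = (w + 1) % n
                for k in range(lo, hi):
                    chunks[dst][k] += chunks[w][k]

        # All-gather: circulate each fully reduced chunk around the ring.
        for step in range(n - 1):
            for w in range(n):
                lo, hi = bounds[(w + 1 - step) % n]
                dst = (w + 1) % n
                chunks[dst][lo:hi] = chunks[w][lo:hi]
        return chunks

    print(ring_all_reduce([[1.0, 2.0, 3.0, 4.0], [10.0, 20.0, 30.0, 40.0]]))
    # [[11.0, 22.0, 33.0, 44.0], [11.0, 22.0, 33.0, 44.0]]
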
/external/tensorflow/tensorflow/core/distributed_runtime/
D  collective_param_resolver_distributed_test.cc
    116  void DefineWorkers(int num_workers, int num_devices,  in DefineWorkers()
    163  void DefineCollectiveParams(int num_workers, int num_devices,  in DefineCollectiveParams()
    180  CollectiveParams* CreateCollectiveParams(int num_workers, int num_devices,  in CreateCollectiveParams()
    200  void IssueRequests(int num_workers, int num_devices) {  in IssueRequests()
    236  void ValidateCollectiveParams(int num_workers, int num_devices) {  in ValidateCollectiveParams()
    282  void RestartWorker(int worker_idx, int num_workers, int num_devices,  in RestartWorker()
    323  const int num_workers = 1;  in TEST_F()  local
    332  const int num_workers = 2;  in TEST_F()  local
    341  const int num_workers = 2;  in TEST_F()  local
    354  const int num_workers = 2;  in TEST_F()  local
    [all …]

/external/libaom/av1/encoder/
D  ethread.c
    298                                              int num_tiles, int num_workers) {  in assign_tile_to_thread()
    778  int num_workers = p_mt_info->num_workers;  in av1_init_tile_thread_data()  local
    891  void av1_create_workers(AV1_PRIMARY *ppi, int num_workers) {  in av1_create_workers()
   1056      const int num_workers, const int parallel_frame_count) {  in compute_num_workers_per_frame()
   1075  int num_workers = p_mt_info->num_workers;  in prepare_fpmt_workers()  local
   1142  int num_workers = ppi->p_mt_info.p_num_workers;  in launch_fpmt_workers()  local
   1156  int num_workers = ppi->p_mt_info.p_num_workers;  in sync_fpmt_workers()  local
   1182  int num_workers = p_mt_info->num_workers;  in restore_workers_after_fpmt()  local
   1250                                        int num_workers) {  in launch_workers()
   1262                                          AV1_COMMON *const cm, int num_workers) {  in sync_enc_workers()
    [all …]

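Several of these ethread.c sites (prepare_fpmt_workers, compute_num_workers_per_frame) deal with dividing one worker pool among frames encoded in parallel. A hedged sketch of that bookkeeping, assuming a simple even split; split_workers_across_frames is a hypothetical helper, and libaom's actual heuristic is more involved.

    def split_workers_across_frames(num_workers, parallel_frame_count):
        # Hypothetical even split; any remainder goes to the leading frames.
        base = max(1, num_workers // parallel_frame_count)
        counts = [base] * parallel_frame_count
        for i in range(max(0, num_workers - base * parallel_frame_count)):
            counts[i % parallel_frame_count] += 1
        return counts

    print(split_workers_across_frames(10, 4))  # [3, 3, 2, 2]
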
/external/libaom/av1/common/
D  thread_common.c
     58                            int width, int num_workers) {  in av1_loop_filter_alloc()
    142                          int num_workers) {  in av1_alloc_cdef_sync()
    342                                 int num_workers, AV1LfSync *lf_sync,  in loop_filter_rows_mt()
    402                               int num_workers, AV1LfSync *lf_sync,  in av1_loop_filter_frame_mt()
    489                                 int num_workers, int num_rows_lr,  in av1_loop_restoration_alloc()
    553  void av1_loop_restoration_dealloc(AV1LrSync *lr_sync, int num_workers) {  in av1_loop_restoration_dealloc()
    782  const int num_workers = nworkers;  in foreach_rest_unit_in_planes_mt()  local
    826                                           AVxWorker *workers, int num_workers,  in av1_loop_restoration_filter_frame_mt()
    850                                            int num_workers) {  in launch_cdef_workers()
    863                                          int num_workers) {  in sync_cdef_workers()
    [all …]

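loop_filter_rows_mt and its AV1LfSync argument implement row-parallel loop filtering, where the worker on superblock row r must trail the worker on row r-1. A Python sketch of that synchronization pattern with illustrative names (RowSync is not the libaom type): a worker calls wait_for_above before filtering a column, mark_done after it, and marks a finished row with a large sentinel column so the row below can drain.

    import threading

    class RowSync:
        # Sketch of superblock-row sync: row r blocks until row r-1 has
        # advanced past the current column plus a safety margin.
        def __init__(self, num_rows, margin=1):
            self.progress = [-1] * num_rows  # last column finished per row
            self.margin = margin
            self.cond = threading.Condition()

        def wait_for_above(self, row, col):
            if row == 0:
                return
            with self.cond:
                while self.progress[row - 1] < col + self.margin:
                    self.cond.wait()

        def mark_done(self, row, col):
            with self.cond:
                self.progress[row] = col
                self.cond.notify_all()
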
/external/tensorflow/tensorflow/core/grappler/optimizers/data/
D  auto_shard.cc
    199                      int64_t num_workers, int64_t index) {  in AddShardNode()
    441                                  int64_t num_workers, int64_t index) {  in ProcessDatasetSourceNode()
    482      const NodeDef* node, int64_t num_workers, int64_t index,  in FindFuncAndTensorSliceDataset()
    542  Status RecursivelyHandleOp(const NodeDef& node, int64_t num_workers,  in RecursivelyHandleOp()
    647  Status ShardByFile(const NodeDef& sink_node, int64_t num_workers, int64_t index,  in ShardByFile()
    704  Status ShardByData(const NodeDef& sink_node, int64_t num_workers, int64_t index,  in ShardByData()
    726  Status ShardByHint(const NodeDef& sink_node, int64_t num_workers, int64_t index,  in ShardByHint()
    756  Status ApplyAutoShard(const NodeDef& sink_node, int64_t num_workers,  in ApplyAutoShard()
    788  Status OptimizeGraph(const GrapplerItem& item, int64_t num_workers,  in OptimizeGraph()

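The AutoShard grappler pass rewrites a tf.data pipeline so that each of num_workers workers reads a disjoint slice. In the simplest shard-by-data case the rewrite is equivalent to an explicit Dataset.shard, shown below with example values; ShardByFile instead inserts the shard earlier, over filenames, so workers never read data they would then discard.

    import tensorflow as tf

    num_workers, index = 3, 1  # example values

    dataset = tf.data.Dataset.range(10)
    dataset = dataset.shard(num_shards=num_workers, index=index)
    print(list(dataset.as_numpy_iterator()))  # [1, 4, 7]
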
/external/tensorflow/tensorflow/python/data/experimental/ops/
D  distribute.py
     70  def __init__(self, input_dataset, num_workers, index, num_replicas=None):  argument
     89  def _AutoShardDatasetV1(input_dataset, num_workers, index, num_replicas=None):  # pylint: disable=i…  argument
    365  def batch_sizes_for_worker(global_batch_size, num_workers,  argument

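batch_sizes_for_worker derives per-worker batch sizes from a global batch size; as I understand it, the library helper returns a rotating per-step schedule so that every worker agrees on the sequence of sizes over time. For intuition only, here is the simpler static split (illustrative code, not the library function):

    def static_split(global_batch_size, num_workers):
        # Split a global batch as evenly as possible; the first `extra`
        # workers each take one extra element.
        base, extra = divmod(global_batch_size, num_workers)
        return [base + (1 if w < extra else 0) for w in range(num_workers)]

    print(static_split(64, 3))  # [22, 21, 21]
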
/external/tensorflow/tensorflow/python/distribute/
D  combinations_test.py
     45  def testClusterParams(self, distribution, has_chief, num_workers):  argument
     54  def testClusterParamsHasDefault(self, distribution, has_chief, num_workers):  argument
     61  def testClusterParamsNoStrategy(self, v, has_chief, num_workers):  argument

D  combinations.py
    287               num_workers=1,  argument
    542  def decorator(self, has_chief, num_workers, num_ps, share_gpu, runner,  argument
    619  def _num_total_workers(has_chief, num_workers):  argument

D  multi_worker_test_base.py
     56  def _create_cluster(num_workers,  argument
    123  def create_in_process_cluster(num_workers,  argument
    328  def create_multi_process_cluster(num_workers,  argument
    355                        num_workers=1,  argument
    432  def setUpClass(cls, num_workers=2, num_ps=1):  # pylint: disable=g-missing-super-call  argument

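create_in_process_cluster and friends stand up a test cluster with num_workers workers and num_ps parameter servers. The topology boils down to a cluster spec like the sketch below, using the real tf.train.ClusterSpec API; the port numbers are made up.

    import tensorflow as tf

    num_workers, num_ps = 2, 1
    cluster = tf.train.ClusterSpec({
        "worker": ["localhost:%d" % (20000 + i) for i in range(num_workers)],
        "ps": ["localhost:%d" % (21000 + i) for i in range(num_ps)],
    })
    print(cluster.num_tasks("worker"))  # 2
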
D  strategy_combinations.py
    214  def _get_ps_strategy_creator(num_workers,  argument
    284                            num_workers,  argument
    483                                   num_workers,  argument

/external/tensorflow/tensorflow/core/kernels/data/experimental/
D  auto_shard_dataset_op.cc
     47  int64_t index, num_workers, auto_shard_policy, num_replicas;  in MakeDataset()  local
     80  RewriterConfig AutoShardDatasetOp::CreateConfig(int64_t num_workers,  in CreateConfig()

/external/libvpx/vp9/encoder/
D  vp9_ethread.c
     80  static void create_enc_workers(VP9_COMP *cpi, int num_workers) {  in create_enc_workers()
    141                               int num_workers) {  in launch_enc_workers()
    197  const int num_workers = VPXMIN(cpi->oxcf.max_threads, tile_cols);  in vp9_encode_tiles_mt()  local
    465  int num_workers = VPXMAX(cpi->oxcf.max_threads, 1);  in vp9_encode_fp_row_mt()  local
    551  int num_workers = cpi->num_workers ? cpi->num_workers : 1;  in vp9_temporal_filter_row_mt()  local
    624  int num_workers = VPXMAX(cpi->oxcf.max_threads, 1);  in vp9_encode_tiles_row_mt()  local

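Note the cap at line 197 of vp9_ethread.c: vp9_encode_tiles_mt never uses more workers than there are tile columns, since a tile column is the unit of parallel work there. The same logic in a Python thread-pool sketch (encode_tile is a stand-in for the per-tile work):

    from concurrent.futures import ThreadPoolExecutor

    def encode_tile(tile_col):
        return "tile %d encoded" % tile_col  # stand-in for per-tile encoding

    max_threads, tile_cols = 8, 4
    num_workers = min(max_threads, tile_cols)  # mirrors VPXMIN(max_threads, tile_cols)
    with ThreadPoolExecutor(max_workers=num_workers) as pool:
        results = list(pool.map(encode_tile, range(tile_cols)))
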
/external/libvpx/vp9/common/
D  vp9_thread_common.c
    170  const int num_workers = VPXMIN(nworkers, VPXMIN(num_tile_cols, sb_rows));  in loop_filter_rows_mt()  local
    223                              int num_workers, VP9LfSync *lf_sync) {  in vp9_loop_filter_frame_mt()
    243                     int num_workers) {  in vp9_lpf_mt_init()
    280                           int width, int num_workers) {  in vp9_loop_filter_alloc()

/external/tensorflow/tensorflow/python/data/experimental/kernel_tests/service/
D  multi_process_cluster.py
     97  def _start_local_workers(self, num_workers, worker_tags=None):  argument
    102  def _start_remote_workers(self, num_workers, worker_tags=None):  argument

D  cross_trainer_cache_ft_test.py
     84                      num_workers,  argument

D  dynamic_sharding_test.py
    277  def testChooseFromDatasets(self, num_workers):  argument
    315  def testConcatenate(self, num_workers):  argument

D  coordinated_read_test.py
     37  def testBasic(self, num_workers, num_consumers):  argument

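The four service tests above drive a tf.data service cluster: one dispatcher plus num_workers worker processes serving a shared pipeline. A minimal single-process sketch with the public API, with two in-process workers standing in for the test cluster:

    import tensorflow as tf

    dispatcher = tf.data.experimental.service.DispatchServer()
    workers = [
        tf.data.experimental.service.WorkerServer(
            tf.data.experimental.service.WorkerConfig(
                dispatcher_address=dispatcher.target.split("://")[1]))
        for _ in range(2)  # num_workers = 2
    ]

    dataset = tf.data.Dataset.range(6).apply(
        tf.data.experimental.service.distribute(
            processing_mode="distributed_epoch", service=dispatcher.target))
    print(sorted(int(x) for x in dataset.as_numpy_iterator()))  # [0, 1, 2, 3, 4, 5]
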
/external/tensorflow/tensorflow/core/common_runtime/
D  permuter_test.cc
     50  void Init(int num_workers, int num_devices,  in Init()
     85  void RunTest(DataType dtype, const DeviceType& device_type, int num_workers,  in RunTest()

D  ring_gatherer_test.cc
     47  void Init(int num_workers, int num_devices, DataType dtype,  in Init()
     79  void RunTest(DataType dtype, const DeviceType& device_type, int num_workers,  in RunTest()

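The Permuter under test delivers each device's tensor to another device according to a permutation. In pure-Python terms it amounts to the sketch below; the direction convention here (permutation[i] names device i's destination) is one common choice and may not match the internal one.

    def permute(inputs, permutation):
        # Collective permute sketch: device src's value goes to permutation[src].
        outputs = [None] * len(inputs)
        for src, dst in enumerate(permutation):
            outputs[dst] = inputs[src]
        return outputs

    print(permute(["a", "b", "c"], [1, 2, 0]))  # ['c', 'a', 'b']
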
/external/openscreen/third_party/abseil/src/absl/synchronization/
D  blocking_counter_test.cc
     39  const int num_workers = 10;  in TEST()  local

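This absl test (vendored identically here and in the copies below) starts num_workers = 10 threads that each call DecrementCount() while the main thread blocks in Wait(). The same pattern, sketched as a Python analogue of absl::BlockingCounter:

    import threading

    class BlockingCounter:
        # Python analogue of absl::BlockingCounter, for illustration only.
        def __init__(self, initial_count):
            self.count = initial_count
            self.cond = threading.Condition()

        def decrement_count(self):
            with self.cond:
                self.count -= 1
                if self.count == 0:
                    self.cond.notify_all()

        def wait(self):
            with self.cond:
                while self.count > 0:
                    self.cond.wait()

    num_workers = 10
    counter = BlockingCounter(num_workers)
    for _ in range(num_workers):
        threading.Thread(target=counter.decrement_count).start()
    counter.wait()  # returns once all 10 workers have checked in
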
/external/libtextclassifier/abseil-cpp/absl/synchronization/
D  blocking_counter_test.cc
     39  const int num_workers = 10;  in TEST()  local

/external/rust/crates/grpcio-sys/grpc/third_party/abseil-cpp/absl/synchronization/
D  blocking_counter_test.cc
     39  const int num_workers = 10;  in TEST()  local

/external/tensorflow/tensorflow/python/training/
D  sync_replicas_optimizer_test.py
     32  def get_workers(num_workers, replicas_to_aggregate, workers):  argument

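get_workers builds graphs around tf.train.SyncReplicasOptimizer, which aggregates gradients from replicas_to_aggregate workers before applying a single update. A minimal construction sketch with the TF1 compat API:

    import tensorflow.compat.v1 as tf

    num_workers = 2
    base_opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    # Aggregate gradients from all workers before applying one update.
    sync_opt = tf.train.SyncReplicasOptimizer(
        base_opt,
        replicas_to_aggregate=num_workers,
        total_num_replicas=num_workers)
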
/external/abseil-cpp/absl/synchronization/
D  blocking_counter_test.cc
     39  const int num_workers = 10;  in TEST()  local

/external/angle/third_party/abseil-cpp/absl/synchronization/
D  blocking_counter_test.cc
     39  const int num_workers = 10;  in TEST()  local