Searched refs: num_gpus (Results 1 – 25 of 57) sorted by relevance

/external/tensorflow/tensorflow/python/distribute/v1/
all_reduce_test.py
85 def _buildInput(self, num_workers, num_gpus): argument
92 for d in range(0, num_gpus):
93 dn = "/replica:0/task:%d/device:GPU:%d" % (w, d % num_gpus)
140 def _buildRing(self, num_workers, num_gpus, subdiv): argument
141 gpu_perm = range(0, num_gpus)
145 def _testAllReduce(self, num_workers, num_gpus, shape, build_f): argument
147 num_devices = num_workers * num_gpus
161 def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv): argument
163 build_f = self._buildRing(num_workers, num_gpus, subdiv)
164 self._testAllReduce(num_workers, num_gpus, shape, build_f)
[all …]
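
For context, a minimal standalone sketch of the device-name construction visible in the all_reduce_test.py hits above; the helper name and the standalone form are illustrative, not the test's own code:

    def build_device_names(num_workers, num_gpus):
        # One "/replica:0/task:W/device:GPU:D" string per (worker, gpu) pair,
        # mirroring the format string in the hit at line 93.
        return ["/replica:0/task:%d/device:GPU:%d" % (w, d % num_gpus)
                for w in range(num_workers) for d in range(num_gpus)]

    print(build_device_names(2, 2))
    # ['/replica:0/task:0/device:GPU:0', '/replica:0/task:0/device:GPU:1',
    #  '/replica:0/task:1/device:GPU:0', '/replica:0/task:1/device:GPU:1']
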
cross_device_ops_test.py
160 if context.num_gpus() < sum(1 for d in devices if "GPU" in d.upper()):
337 if context.num_gpus() < 1:
457 num_gpus=0, argument
464 if num_gpus:
465 devices = ["/device:GPU:%d" % i for i in range(num_gpus)]
485 if num_gpus:
488 for i in range(num_gpus)
502 num_accelerators={"GPU": num_gpus})
530 num_gpus, argument
538 num_gpus,
[all …]
all_reduce.py
219 num_gpus = len(gpu_perm)
220 devices = num_workers * num_gpus
223 if num_subchunks > num_gpus:
225 "num_subchunks %d must be <= num_gpus %d" % (num_subchunks, num_gpus))
226 rotation_interval = max(1, int(num_gpus / num_subchunks))
232 default_order = [(w * num_gpus) + i for i in gpu_perm]
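
As a hedged sketch, the index arithmetic from the all_reduce.py hits above can be reproduced as follows; the real routine additionally rotates sub-chunk orders (via rotation_interval), which this sketch omits:

    def default_ring_order(num_workers, gpu_perm, num_subchunks=1):
        num_gpus = len(gpu_perm)
        if num_subchunks > num_gpus:
            raise ValueError("num_subchunks %d must be <= num_gpus %d"
                             % (num_subchunks, num_gpus))
        # Flat device order: worker-major offset plus the per-worker GPU permutation.
        return [(w * num_gpus) + i for w in range(num_workers) for i in gpu_perm]

    print(default_ring_order(2, list(range(4))))  # [0, 1, 2, 3, 4, 5, 6, 7]
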
/external/tensorflow/tensorflow/python/distribute/
parameter_server_strategy_test.py
72 num_gpus=None, argument
75 if num_gpus is None:
76 num_gpus = context.num_gpus()
82 num_accelerators={'GPU': num_gpus})
88 central_storage_strategy.CentralStorageStrategy._from_num_gpus(num_gpus)
111 def _get_test_objects(self, task_type, task_id, num_gpus): argument
116 num_gpus=num_gpus,
119 def _test_device_assignment_distributed(self, task_type, task_id, num_gpus): argument
121 d, _, sess_config = self._get_test_objects(task_type, task_id, num_gpus)
132 if num_gpus == 0:
[all …]
collective_all_reduce_strategy_test.py
74 num_gpus=None, argument
76 if num_gpus is None:
77 num_gpus = context.num_gpus()
88 num_accelerators={'GPU': num_gpus, 'TPU': num_tpus})
92 ClusterSpec({}), num_accelerators={'GPU': num_gpus, 'TPU': num_tpus})
113 num_gpus=0, argument
120 num_gpus=num_gpus,
123 if use_devices_arg and num_gpus > 0:
124 devices = ['GPU:%d' % i for i in range(num_gpus)]
134 strategy._extended._retrace_functions_for_each_device = (num_gpus > 1)
[all …]
parameter_server_strategy.py
206 num_gpus = context.num_gpus()
208 num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
211 self._num_gpus_per_worker = num_gpus
228 if num_gpus > 0:
231 for i in range(num_gpus))
291 num_gpus = context.num_gpus()
293 num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
296 self._num_gpus_per_worker = num_gpus
298 compute_devices = device_util.local_devices_from_num_gpus(num_gpus)
parameter_server_strategy_v2.py
591 num_gpus = 0
593 if num_gpus > 0 and count != num_gpus:
595 num_gpus = count
597 self._num_gpus_per_worker = num_gpus
908 num_gpus = self._num_gpus_per_worker
909 if num_gpus > 0:
910 compute_devices = tuple("/device:GPU:%d" % (i,) for i in range(num_gpus))
mirrored_values_test.py
86 if context.num_gpus() < 1 and context.executing_eagerly():
203 if context.num_gpus() < 1 and context.executing_eagerly():
216 if context.num_gpus() < 1 and context.executing_eagerly():
229 if context.num_gpus() < 1 and context.executing_eagerly():
244 if context.num_gpus() < 1:
mirrored_strategy.py
164 num_gpus = None
167 if num_gpus is None:
168 num_gpus = sum(1 for d in device_in_task if _is_gpu_device(d))
171 elif num_gpus != sum(1 for d in device_in_task if _is_gpu_device(d)):
177 d_spec.device_index >= num_gpus):
180 return num_gpus
183 def all_local_devices(num_gpus=None): argument
185 if num_gpus is not None:
186 devices = devices[:num_gpus]
195 context.num_gpus())
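
A rough illustration of the per-task GPU counting seen in the mirrored_strategy.py hits above; the substring test stands in for the private _is_gpu_device helper and the device strings are made up:

    def count_gpu_devices(devices_in_task):
        # Count entries whose device string names a GPU (stand-in for _is_gpu_device).
        return sum(1 for d in devices_in_task if "GPU" in d.upper())

    print(count_gpu_devices(["/job:worker/task:0/device:CPU:0",
                             "/job:worker/task:0/device:GPU:0",
                             "/job:worker/task:0/device:GPU:1"]))  # 2
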
central_storage_strategy.py
70 def _from_num_gpus(cls, num_gpus): argument
71 return cls(device_util.local_devices_from_num_gpus(num_gpus))
device_util.py
157 def local_devices_from_num_gpus(num_gpus): argument
159 return (tuple("/device:GPU:%d" % i for i in range(num_gpus)) or
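
The device_util.py hit is truncated after the or; a plausible self-contained version (the CPU fallback is inferred, not shown in the hit) looks like this:

    def local_devices_from_num_gpus(num_gpus):
        # GPU device strings when num_gpus > 0, otherwise a single CPU device.
        return (tuple("/device:GPU:%d" % i for i in range(num_gpus)) or
                ("/device:CPU:0",))

    print(local_devices_from_num_gpus(0))  # ('/device:CPU:0',)
    print(local_devices_from_num_gpus(2))  # ('/device:GPU:0', '/device:GPU:1')
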
combinations_test.py
211 num_gpus = combinations.env().total_phsyical_gpus
212 if num_gpus != 2 and num_gpus != 4:
mirrored_strategy_test.py
1202 self.assertEqual(context.num_gpus() * 2, distribution.num_replicas_in_sync)
1221 num_gpus = context.num_gpus()
1224 expected_values = [[i+j for j in range(num_gpus)] * num_workers
1225 for i in range(0, 100, num_gpus)]
1232 expected_num_replicas_in_sync=num_workers*num_gpus,
1245 num_gpus = context.num_gpus()
1249 for i in range(0, 100, num_gpus):
1250 expected_values.append([i+j for j in range(num_gpus)] * num_workers)
1257 expected_num_replicas_in_sync=num_workers*num_gpus,
1283 i) for i in range(context.num_gpus())]),
[all …]
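
A worked example of the expected-values arithmetic in the mirrored_strategy_test.py hits, with num_workers=2 and num_gpus=2 chosen purely for illustration:

    num_workers, num_gpus = 2, 2  # illustrative values
    expected_values = [[i + j for j in range(num_gpus)] * num_workers
                       for i in range(0, 100, num_gpus)]
    print(expected_values[:2])  # [[0, 1, 0, 1], [2, 3, 2, 3]]
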
/external/tensorflow/tensorflow/core/distributed_runtime/rpc/
grpc_testlib_server.cc
39 int num_cpus, int num_gpus, int task_index, in FillServerDef() argument
71 (*config->mutable_device_count())["GPU"] = num_gpus; in FillServerDef()
83 int num_gpus = 0; in main() local
90 tensorflow::Flag("num_gpus", &num_gpus, "number of GPUs"), in main()
101 num_gpus, task_index, &def); in main()
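
A rough Python analogue of what the --num_gpus flag feeds into the test server's session config (this uses the TF1-style public API, not the C++ code above):

    import tensorflow.compat.v1 as tf1

    # Limit the number of GPU devices the in-process server exposes; 0 means CPU-only.
    config = tf1.ConfigProto(device_count={"GPU": 2})
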
grpc_testlib.cc
52 int num_gpus = 0; in MakeTestCluster() local
59 num_gpus = iter->second; in MakeTestCluster()
70 strings::StrCat("--num_gpus=", num_gpus)}); in MakeTestCluster()
/external/tensorflow/tensorflow/core/grappler/optimizers/
generic_layout_optimizer.cc
51 int num_gpus = 0; in GetNumGPUs() local
57 num_gpus++; in GetNumGPUs()
69 return {num_gpus, num_volta}; in GetNumGPUs()
108 const TransposeContext& context, int num_gpus, int num_voltas) { in GetSrcAndDstDataFormats() argument
115 ((static_cast<float>(num_voltas) / static_cast<float>(num_gpus)) >= in GetSrcAndDstDataFormats()
426 const int num_gpus = num_gpus_and_num_volta.first; in Optimize() local
433 if (num_gpus > 0) { in Optimize()
438 context, num_gpus, num_gpus_and_num_volta.second); in Optimize()
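
A hedged Python paraphrase of the ratio check in the generic_layout_optimizer.cc hits; the 0.5 threshold and the function name here are assumptions, not values taken from the C++ source:

    def mostly_voltas(num_gpus, num_voltas, threshold=0.5):
        # Decide whether enough of the visible GPUs are Volta-class to prefer a
        # different data format (threshold is assumed, see lead-in).
        if num_gpus == 0:
            return False
        return (float(num_voltas) / float(num_gpus)) >= threshold
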
/external/tensorflow/tensorflow/python/ops/
compiled_collective_ops_gpu_test.py
44 def _setup_context(self, num_gpus=2): argument
47 if len(gpus) < num_gpus:
49 num_gpus, len(gpus)))
collective_ops_gpu_test.py
46 def _setup_context(self, num_gpus=2): argument
49 if len(gpus) < num_gpus:
51 num_gpus, len(gpus)))
274 self._setup_context(num_gpus=1)
285 self._setup_context(num_gpus=2)
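
The _setup_context hits follow a common skip-if-too-few-GPUs pattern; a minimal sketch using the public tf.config API rather than the test-internal helpers:

    import unittest
    import tensorflow as tf

    def require_gpus(num_gpus=2):
        # Skip the calling test when fewer physical GPUs are visible than requested.
        gpus = tf.config.list_physical_devices("GPU")
        if len(gpus) < num_gpus:
            raise unittest.SkipTest(
                "Test requires %d GPUs, found %d" % (num_gpus, len(gpus)))
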
/external/tensorflow/tensorflow/python/eager/
run_eager_op_as_function_test.py
99 if device == GPU and not context.num_gpus():
108 if device == GPU and not context.num_gpus():
117 if device == GPU and not context.num_gpus():
192 if not context.num_gpus():
238 if context.num_gpus():
257 if not context.num_gpus():
benchmarks_test.py
174 with ops.device("GPU:0" if context.num_gpus() else "CPU:0"):
186 if context.num_gpus():
192 if context.num_gpus():
202 with ops.device("GPU:0" if context.num_gpus() else "CPU:0"):
212 with ops.device("GPU:0" if context.num_gpus() else "CPU:0"):
264 if not context.num_gpus():
269 if not context.num_gpus():
277 if not context.num_gpus():
283 if not context.num_gpus():
326 if not context.num_gpus():
[all …]
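
The eager test and benchmark hits gate on context.num_gpus() to pick a device; a minimal sketch of the same idea with the public API:

    import tensorflow as tf

    # Place work on the first GPU when one is visible, otherwise fall back to CPU.
    device = "GPU:0" if tf.config.list_physical_devices("GPU") else "CPU:0"
    with tf.device(device):
        x = tf.ones((2, 2))
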
/external/tensorflow/tensorflow/core/profiler/backends/gpu/
cupti_collector.h
205 uint32 num_gpus; member
215 explicit AnnotationMap(uint64 max_size, uint32 num_gpus) in AnnotationMap() argument
216 : max_size_(max_size), per_device_map_(num_gpus) {} in AnnotationMap()
243 annotation_map_(options.max_annotation_strings, options.num_gpus) {} in CuptiTraceCollector()
/external/mesa3d/src/amd/addrlib/src/chip/r800/
si_gb_reg.h
64 unsigned int num_gpus : 3; member
82 unsigned int num_gpus : 3; member
/external/tensorflow/tensorflow/core/distributed_runtime/
rpcbench_test.cc
57 int num_gpus = 0; in MakeGRPCCluster() local
64 num_gpus = iter->second; in MakeGRPCCluster()
69 worker_threads->Schedule([worker_idx, n, num_cpus, num_gpus, &port] { in MakeGRPCCluster()
84 (*config->mutable_device_count())["GPU"] = num_gpus; in MakeGRPCCluster()
/external/tensorflow/tensorflow/core/grappler/
devices.cc
46 int num_gpus = gpu_manager->VisibleDeviceCount(); in GetNumAvailableGPUs() local
47 for (int i = 0; i < num_gpus; i++) { in GetNumAvailableGPUs()
/external/tensorflow/tensorflow/core/grappler/clusters/
single_machine.cc
41 SingleMachine::SingleMachine(int timeout_s, int num_cpu_cores, int num_gpus) in SingleMachine() argument
44 << " Number of GPUs: " << num_gpus; in SingleMachine()
49 if (num_gpus > 0) { in SingleMachine()
50 (*options_.config.mutable_device_count())["GPU"] = num_gpus; in SingleMachine()
