Searched refs: num_gpus (Results 1 – 25 of 68) sorted by relevance

/external/tensorflow/tensorflow/contrib/distribute/python/
collective_all_reduce_strategy_test.py
74 num_gpus=None, argument
77 if num_gpus is None:
78 num_gpus = context.num_gpus()
85 num_accelerators={'GPU': num_gpus})
89 ClusterSpec({}), num_accelerators={'GPU': num_gpus})
96 num_gpus_per_worker=num_gpus)
126 num_gpus=0, argument
132 num_gpus=num_gpus,
136 group_key_start=10 * num_gpus +
138 instance_key_start=num_gpus * 100 +
[all …]
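
The hits above follow a common test-setup pattern: default num_gpus to however many GPUs the eager context can see, then hand the count to the strategy as an accelerator map. A minimal sketch of that pattern, assuming tf.config.experimental.list_physical_devices() as a public stand-in for the internal context.num_gpus() and using a made-up helper name:

import tensorflow as tf

def resolve_num_gpus(num_gpus=None):
    # Fall back to the number of GPUs the runtime can actually see.
    if num_gpus is None:
        num_gpus = len(tf.config.experimental.list_physical_devices("GPU"))
    # The test then forwards the count as num_accelerators={"GPU": num_gpus}.
    return num_gpus, {"GPU": num_gpus}
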
parameter_server_strategy_test.py
82 num_gpus=None, argument
86 if num_gpus is None:
87 num_gpus = context.num_gpus()
94 num_accelerators={'GPU': num_gpus})
98 ClusterSpec({}), num_accelerators={'GPU': num_gpus})
106 num_gpus_per_worker=num_gpus)
137 num_gpus, argument
143 num_gpus=num_gpus,
150 num_gpus, argument
154 task_type, task_id, num_gpus, use_core_strategy=use_core_strategy)
[all …]
cross_device_ops_test.py
456 num_gpus=0, argument
460 group_key_start=10 * num_gpus +
462 instance_key_start=num_gpus * 100 +
464 instance_key_with_id_start=num_gpus * 10000 +
467 if num_gpus:
468 devices = ["/device:GPU:%d" % i for i in range(num_gpus)]
476 num_gpus_per_worker=num_gpus)
482 1, num_gpus, collective_keys=collective_keys)
485 if num_gpus:
488 for i in range(num_gpus)
[all …]
mirrored_strategy.py
89 num_gpus=None, argument
95 if num_gpus is not None and num_gpus_per_worker is not None:
98 if num_gpus is None:
99 num_gpus = num_gpus_per_worker
100 extended = MirroredExtended(self, devices, num_gpus,
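
This contrib mirrored_strategy.py constructor accepts both the older num_gpus argument and num_gpus_per_worker, rejects the combination, and otherwise treats them as aliases. A simplified sketch of that reconciliation (hypothetical helper name, not the actual class method):

def _resolve_gpu_count(num_gpus=None, num_gpus_per_worker=None):
    if num_gpus is not None and num_gpus_per_worker is not None:
        raise ValueError(
            "Pass at most one of `num_gpus` and `num_gpus_per_worker`.")
    if num_gpus is None:
        num_gpus = num_gpus_per_worker
    return num_gpus
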
multi_worker_test_base.py
282 def _run_client(self, client_fn, task_type, task_id, num_gpus, *args, argument
284 result = client_fn(task_type, task_id, num_gpus, *args, **kwargs)
289 def _run_between_graph_clients(self, client_fn, cluster_spec, num_gpus, *args, argument
306 args=(client_fn, task_type, task_id, num_gpus) + args,
/external/tensorflow/tensorflow/python/distribute/
all_reduce_test.py
90 def _buildInput(self, num_workers, num_gpus): argument
97 for d in range(0, num_gpus):
98 dn = "/replica:0/task:%d/device:GPU:%d" % (w, d % num_gpus)
145 def _buildRing(self, num_workers, num_gpus, subdiv): argument
146 gpu_perm = range(0, num_gpus)
150 def _testAllReduce(self, num_workers, num_gpus, shape, build_f): argument
152 num_devices = num_workers * num_gpus
166 def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv): argument
168 build_f = self._buildRing(num_workers, num_gpus, subdiv)
169 self._testAllReduce(num_workers, num_gpus, shape, build_f)
[all …]
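
all_reduce_test.py builds one fully qualified device string per (worker, GPU) pair. A self-contained sketch of that naming scheme (hypothetical function name):

def build_device_names(num_workers, num_gpus):
    return ["/replica:0/task:%d/device:GPU:%d" % (w, d % num_gpus)
            for w in range(num_workers)
            for d in range(num_gpus)]

# build_device_names(2, 2) ->
# ['/replica:0/task:0/device:GPU:0', '/replica:0/task:0/device:GPU:1',
#  '/replica:0/task:1/device:GPU:0', '/replica:0/task:1/device:GPU:1']
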
collective_all_reduce_strategy.py
114 num_gpus = context.num_gpus()
116 num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
118 if num_gpus:
119 local_devices = tuple("/device:GPU:%d" % i for i in range(num_gpus))
130 num_gpus_per_worker=num_gpus,
144 self._num_gpus_per_worker = num_gpus
158 num_gpus = context.num_gpus()
160 num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
184 if num_gpus:
186 for i in range(num_gpus))
[all …]
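
The strategy above asks the cluster resolver for its GPU count (falling back to the eager context when no resolver is given) and builds the local device tuple from it, defaulting to the CPU when no GPUs are present. A rough sketch, with cluster_resolver taken to be any object exposing num_accelerators() -> dict and the context count passed in explicitly:

def local_replica_devices(cluster_resolver=None, context_num_gpus=0):
    if cluster_resolver is None:
        num_gpus = context_num_gpus
    else:
        num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
    if num_gpus:
        return tuple("/device:GPU:%d" % i for i in range(num_gpus))
    return ("/device:CPU:0",)
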
parameter_server_strategy.py
133 num_gpus = context.num_gpus()
135 num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
138 self._num_gpus_per_worker = num_gpus
155 if num_gpus > 0:
157 "%s/device:GPU:%d" % (worker_device, i) for i in range(num_gpus))
214 num_gpus = context.num_gpus()
216 num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
219 self._num_gpus_per_worker = num_gpus
224 if num_gpus > 0:
225 compute_devices = tuple(map("/device:GPU:{}".format, range(num_gpus)))
[all …]
mirrored_strategy.py
386 num_gpus = None
389 if num_gpus is None:
390 num_gpus = sum(1 for d in device_in_task if _is_gpu_device(d))
393 elif num_gpus != sum(1 for d in device_in_task if _is_gpu_device(d)):
399 d_spec.device_index >= num_gpus):
402 return num_gpus
405 def all_local_devices(num_gpus=None): argument
406 if num_gpus is None:
407 num_gpus = context.num_gpus()
408 return (tuple("/device:GPU:%d" % i for i in range(num_gpus)) or
[all …]
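
The core mirrored_strategy.py hits above infer num_gpus per worker from a device list and insist that every task reports the same count (and that no GPU index exceeds it). A simplified sketch of that consistency check (hypothetical names; the device-index check is omitted):

def infer_num_gpus_per_worker(devices_by_task, is_gpu_device):
    num_gpus = None
    for devices_in_task in devices_by_task:
        task_gpus = sum(1 for d in devices_in_task if is_gpu_device(d))
        if num_gpus is None:
            num_gpus = task_gpus
        elif num_gpus != task_gpus:
            raise ValueError("All workers must list the same number of GPUs.")
    return num_gpus
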
all_reduce.py
223 num_gpus = len(gpu_perm)
224 devices = num_workers * num_gpus
227 if num_subchunks > num_gpus:
229 "num_subchunks %d must be <= num_gpus %d" % (num_subchunks, num_gpus))
230 rotation_interval = max(1, int(num_gpus / num_subchunks))
236 default_order = [(w * num_gpus) + i for i in gpu_perm]
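
all_reduce.py derives the ring layout from the GPU permutation: num_gpus is the permutation length, the total device count is num_workers * num_gpus, and the subchunk count may not exceed the GPUs per worker. A sketch of that arithmetic, flattened over all workers for brevity:

def ring_order_params(num_workers, gpu_perm, num_subchunks):
    num_gpus = len(gpu_perm)
    num_devices = num_workers * num_gpus
    if num_subchunks > num_gpus:
        raise ValueError("num_subchunks %d must be <= num_gpus %d"
                         % (num_subchunks, num_gpus))
    rotation_interval = max(1, int(num_gpus / num_subchunks))
    default_order = [(w * num_gpus) + i
                     for w in range(num_workers) for i in gpu_perm]
    return num_devices, rotation_interval, default_order
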
/external/tensorflow/tensorflow/contrib/eager/python/examples/rnn_ptb/
rnn_ptb_test.py
32 return "/device:GPU:0" if tfe.num_gpus() else "/device:CPU:0"
38 model = rnn_ptb.test_model(tfe.num_gpus() > 0)
48 model = rnn_ptb.test_model(tfe.num_gpus() > 0)
54 if tfe.num_gpus():
98 if not tfe.num_gpus():
103 if not tfe.num_gpus():
142 if not tfe.num_gpus():
147 if not tfe.num_gpus():
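
The rnn_ptb example uses the same idiom throughout: run on GPU:0 when any GPU is visible, otherwise on the CPU, and bail out of GPU-only tests early. The device pick reduces to a one-liner (hypothetical helper; any GPU-count source works in place of tfe.num_gpus()):

def pick_device(num_gpus):
    return "/device:GPU:0" if num_gpus else "/device:CPU:0"

# pick_device(0) -> "/device:CPU:0";  pick_device(2) -> "/device:GPU:0"
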
/external/tensorflow/tensorflow/core/distributed_runtime/rpc/
grpc_testlib_server.cc
39 int num_cpus, int num_gpus, int task_index, in FillServerDef() argument
71 (*config->mutable_device_count())["GPU"] = num_gpus; in FillServerDef()
83 int num_gpus = 0; in main() local
90 tensorflow::Flag("num_gpus", &num_gpus, "number of GPUs"), in main()
101 num_gpus, task_index, &def); in main()
grpc_testlib.cc
43 int num_gpus = 0; in MakeTestCluster() local
50 num_gpus = iter->second; in MakeTestCluster()
65 strings::StrCat("--num_gpus=", num_gpus)}); in MakeTestCluster()
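
On the C++ side, --num_gpus is parsed as a flag and written into the session config's device_count map, and MakeTestCluster forwards it to each test server as "--num_gpus=N". A rough Python-side analogue of that config step, assuming tf.compat.v1.ConfigProto is available:

import tensorflow as tf

def server_config(num_gpus=0):
    # device_count caps how many GPU devices the test server will create.
    config = tf.compat.v1.ConfigProto()
    config.device_count["GPU"] = num_gpus
    return config
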
/external/tensorflow/tensorflow/python/eager/
core_test.py
126 if not context.context().num_gpus():
151 if not context.context().num_gpus():
178 ctx.num_gpus()
190 if not context.context().num_gpus():
194 self.assertEquals(0, ctx.num_gpus())
208 if not context.context().num_gpus():
221 if not context.context().num_gpus():
233 if not context.context().num_gpus():
244 x.gpu(context.context().num_gpus() + 1)
247 if not context.context().num_gpus():
[all …]
benchmarks_test.py
203 if not context.num_gpus():
208 if not context.num_gpus():
216 if not context.num_gpus():
222 if not context.num_gpus():
261 if not context.num_gpus():
273 if not context.num_gpus():
489 if not context.num_gpus():
497 if not context.num_gpus():
508 if not context.num_gpus():
516 if not context.num_gpus():
[all …]
ops_test.py
52 if not context.context().num_gpus():
101 if not context.context().num_gpus():
246 if not context.context().num_gpus():
264 if not context.context().num_gpus():
272 if not context.context().num_gpus():
289 if not context.context().num_gpus():
345 if not context.context().num_gpus():
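
All of these eager tests share one guard: check the context's GPU count up front and skip (or return early) when it is zero. A self-contained sketch of that guard in a plain unittest, again using the public device listing in place of context.context().num_gpus():

import unittest
import tensorflow as tf

class GpuOnlyCase(unittest.TestCase):
    def test_runs_only_with_a_gpu(self):
        if not tf.config.experimental.list_physical_devices("GPU"):
            self.skipTest("no GPU available")
        # GPU-only assertions would go here.
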
/external/tensorflow/tensorflow/contrib/distribute/python/examples/
mnist_eager_multigpu.py
90 num_gpus = FLAGS.num_gpus
91 if num_gpus is None:
93 elif num_gpus == 0:
96 devices = ["/device:GPU:{}".format(i) for i in range(num_gpus)]
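
mnist_eager_multigpu.py turns the --num_gpus flag into an explicit device list: None lets the strategy choose, 0 pins everything to the CPU, and a positive count enumerates GPUs. A sketch with argparse standing in for the absl FLAGS object:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--num_gpus", type=int, default=None)
flags = parser.parse_args([])  # [] -> use defaults; pass sys.argv[1:] normally

if flags.num_gpus is None:
    devices = None                                   # let the strategy decide
elif flags.num_gpus == 0:
    devices = ["/device:CPU:0"]
else:
    devices = ["/device:GPU:{}".format(i) for i in range(flags.num_gpus)]
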
/external/tensorflow/tensorflow/python/keras/utils/
multi_gpu_utils.py
161 num_gpus = len(gpus)
168 num_gpus = gpus
169 target_gpu_ids = range(num_gpus)
232 'parts': num_gpus
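
multi_gpu_utils.py accepts gpus either as an explicit list of GPU ids or as a bare count, and normalizes both to (num_gpus, target_gpu_ids) before slicing the model into num_gpus parts. A simplified sketch of that normalization (hypothetical function name):

def normalize_gpus(gpus):
    if isinstance(gpus, (list, tuple)):
        num_gpus = len(gpus)
        target_gpu_ids = list(gpus)
    else:
        num_gpus = gpus
        target_gpu_ids = list(range(num_gpus))
    return num_gpus, target_gpu_ids
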
/external/tensorflow/tensorflow/python/grappler/
graph_placer_test.py
62 def _buildCluster(num_cpus=1, num_gpus=1): argument
64 if num_gpus > 0:
80 for i in range(num_gpus):
126 gcluster = GraphPlacerTest._buildCluster(num_gpus=1)
/external/tensorflow/tensorflow/contrib/cudnn_rnn/python/kernel_tests/
cudnn_rnn_ops_test.py
389 if not context.context().num_gpus():
414 if not context.context().num_gpus():
440 if not context.context().num_gpus():
473 if not context.context().num_gpus():
511 if not context.context().num_gpus():
771 if not context.context().num_gpus():
796 if not context.context().num_gpus():
822 if not context.context().num_gpus():
851 if not context.context().num_gpus():
885 if not context.context().num_gpus():
[all …]
/external/tensorflow/tensorflow/core/distributed_runtime/
rpcbench_test.cc
57 int num_gpus = 0; in MakeGRPCCluster() local
64 num_gpus = iter->second; in MakeGRPCCluster()
69 worker_threads->Schedule([worker_idx, n, num_cpus, num_gpus, &port] { in MakeGRPCCluster()
84 (*config->mutable_device_count())["GPU"] = num_gpus; in MakeGRPCCluster()
/external/tensorflow/tensorflow/core/grappler/
devices.cc
36 int num_gpus = gpu_manager->VisibleDeviceCount(); in GetNumAvailableGPUs() local
37 for (int i = 0; i < num_gpus; i++) { in GetNumAvailableGPUs()
/external/mesa3d/src/amd/addrlib/inc/chip/r800/
si_gb_reg.h
64 unsigned int num_gpus : 3; member
82 unsigned int num_gpus : 3; member
/external/tensorflow/tensorflow/core/grappler/clusters/
single_machine.cc
41 SingleMachine::SingleMachine(int timeout_s, int num_cpu_cores, int num_gpus) in SingleMachine() argument
44 << " Number of GPUs: " << num_gpus; in SingleMachine()
49 if (num_gpus > 0) { in SingleMachine()
50 (*options_.config.mutable_device_count())["GPU"] = num_gpus; in SingleMachine()
/external/tensorflow/tensorflow/contrib/eager/python/examples/gan/
mnist_test.py
40 return '/gpu:0' if tfe.num_gpus() else '/cpu:0'
47 dev = 'gpu' if tfe.num_gpus() else 'cpu'
