
Searched refs:device_names (Results 1 – 25 of 44) sorted by relevance


/external/tensorflow/tensorflow/core/common_runtime/
collective_param_resolver_local_test.cc
54 cp.instance.device_names = shared_cp.instance.device_names; in RunCompleteDefaultRanking()
73 EXPECT_EQ(ir.shared.instance.device_names, expected_device_order); in RunCompleteDefaultRanking()
96 cp.instance.device_names.push_back(strings::StrCat( in TEST_F()
171 cp->instance.device_names.push_back( in TEST_F()
176 prl_->CompleteParamsAsync(cp->instance.device_names[0], cp, in TEST_F()
189 ASSERT_EQ(cps[i].instance.device_names.size(), 3); in TEST_F()
193 cps[i].instance.device_names[j]); in TEST_F()
214 cp->instance.device_names.push_back(strings::StrCat( in InitializeCollectiveParamsForBroadcast()
229 prl_->CompleteParamsAsync(cp->instance.device_names[0], cp, in TEST_F()
242 ASSERT_EQ(cps[i].instance.device_names.size(), 3); in TEST_F()
[all …]
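
The test above builds per-task device name strings with strings::StrCat. For reference, a standalone sketch of that naming pattern; plain std::string concatenation stands in for StrCat, and the job/task layout shown is made up:

// Illustrative sketch only: constructing device name strings of the form
// "/job:worker/replica:0/task:T/device:CPU:D", as the test above does with
// strings::StrCat. Not TensorFlow code.
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> device_names;
  const int num_tasks = 2;          // hypothetical cluster shape
  const int devices_per_task = 3;
  for (int task = 0; task < num_tasks; ++task) {
    for (int dev = 0; dev < devices_per_task; ++dev) {
      device_names.push_back("/job:worker/replica:0/task:" +
                             std::to_string(task) + "/device:CPU:" +
                             std::to_string(dev));
    }
  }
  for (const auto& name : device_names) std::cout << name << "\n";
}
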
collective_param_resolver_local.cc
162 CHECK_EQ(ip.device_names.size(), ip.task_names.size()); in BuildDevRecs()
163 CHECK_EQ(ip.device_names.size(), localities.size()); in BuildDevRecs()
164 for (int i = 0; i < ip.device_names.size(); ++i) { in BuildDevRecs()
166 DevRec* dr = &tdm[ip.device_names[i]]; in BuildDevRecs()
168 dr->device = ip.device_names[i]; in BuildDevRecs()
355 CHECK_EQ(cp->group.group_size, cp->instance.device_names.size()); in SortDevicesAndTasks()
364 return cp->instance.device_names[a] < cp->instance.device_names[b]; in SortDevicesAndTasks()
371 new_devs.push_back(cp->instance.device_names[pi]); in SortDevicesAndTasks()
374 cp->instance.device_names = std::move(new_devs); in SortDevicesAndTasks()
391 CHECK_EQ(cp->group.group_size, cp->instance.device_names.size()) << cp; in SetDefaultRank()
[all …]
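
SortDevicesAndTasks above orders the group by device name while keeping device_names and task_names aligned. A minimal self-contained sketch of that sort-by-permutation idea, with illustrative signatures rather than the real CollectiveParams structs:

// Sketch: sort an index permutation by device name, then rebuild both
// parallel vectors in that order so entries stay paired. Not TensorFlow code.
#include <algorithm>
#include <cassert>
#include <iostream>
#include <numeric>
#include <string>
#include <utility>
#include <vector>

void SortDevicesAndTasks(std::vector<std::string>* device_names,
                         std::vector<std::string>* task_names) {
  // Both vectors must describe the same participants, index for index.
  assert(device_names->size() == task_names->size());
  std::vector<size_t> perm(device_names->size());
  std::iota(perm.begin(), perm.end(), 0);
  std::sort(perm.begin(), perm.end(), [&](size_t a, size_t b) {
    return (*device_names)[a] < (*device_names)[b];
  });
  std::vector<std::string> new_devs, new_tasks;
  new_devs.reserve(perm.size());
  new_tasks.reserve(perm.size());
  for (size_t pi : perm) {
    new_devs.push_back((*device_names)[pi]);
    new_tasks.push_back((*task_names)[pi]);
  }
  *device_names = std::move(new_devs);
  *task_names = std::move(new_tasks);
}

int main() {
  std::vector<std::string> devices = {"/device:GPU:1", "/device:GPU:0"};
  std::vector<std::string> tasks = {"/job:worker/task:1", "/job:worker/task:0"};
  SortDevicesAndTasks(&devices, &tasks);
  for (size_t i = 0; i < devices.size(); ++i) {
    std::cout << devices[i] << " on " << tasks[i] << "\n";
  }
}
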
hierarchical_tree_broadcaster.cc
82 col_params->instance.device_names[col_params->default_rank]; in InitializeCollectiveParams()
138 col_params->instance.device_names[col_params->source_rank] == in InitializeCollectiveParams()
144 col_params->instance.device_names[device_count] == device_name; in InitializeCollectiveParams()
167 if (col_params->instance.device_names[abs_di] == device_name) { in InitializeCollectiveParams()
408 << col_params_->instance.device_names[dst_idx] << " subdiv=" << subdiv in DispatchSend()
410 col_ctx_->col_exec->PostToPeer(col_params_->instance.device_names[dst_idx], in DispatchSend()
426 << col_params_->instance.device_names[src_idx] << " to_device " in DispatchRecv()
430 col_params_->instance.device_names[src_idx], in DispatchRecv()
device_resolver_local_test.cc
51 cp.instance.device_names.push_back( in TEST_F()
53 cp.instance.device_names.push_back( in TEST_F()
71 cp.instance.device_names.push_back( in TEST_F()
device_mgr.cc
100 std::vector<StringPiece> device_names; in LookupDevice() local
102 device_names.push_back(itr.first); in LookupDevice()
105 << " all devices: " << str_util::Join(device_names, ", "); in LookupDevice()
collective_util.cc
63 CHECK_GT(col_params.instance.device_names.size(), idx); in SubdivPermDebugString()
64 strings::StrAppend(&buf, col_params.instance.device_names[idx], "\n"); in SubdivPermDebugString()
ring_alg.cc
166 col_params->instance.device_names[col_params->default_rank]; in InitializeCollectiveParams()
229 if (col_params->instance.device_names[permuted_di] == device_name) { in InitializeCollectiveParams()
391 col_params_->instance.device_names[send_to_dev_idx], in DispatchSend()
410 col_params_->instance.device_names[rf->recv_dev_idx], in DispatchRecv()
/external/tensorflow/tensorflow/core/grappler/inputs/
trivial_test_graph_input_yielder.cc
33 const std::vector<string>& device_names) { in CreateGraphDef() argument
52 device_names[use_multiple_devices ? j % device_names.size() in CreateGraphDef()
59 device_names[use_multiple_devices ? j % device_names.size() in CreateGraphDef()
91 const std::vector<string>& device_names) in TrivialTestGraphInputYielder() argument
96 device_names_(device_names) {} in TrivialTestGraphInputYielder()
/external/tensorflow/tensorflow/core/grappler/clusters/
cluster.cc
99 std::vector<string> device_names; in GetDeviceNames() local
100 device_names.reserve(devices_.size()); in GetDeviceNames()
102 device_names.push_back(device.first); in GetDeviceNames()
104 std::sort(device_names.begin(), device_names.end()); in GetDeviceNames()
105 return device_names; in GetDeviceNames()
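GetDeviceNames above copies the keys of the device map into a vector and sorts them so callers get a deterministic order. A small sketch of that pattern, under the assumption that the device container is an unordered map keyed by name; not the real Cluster class:

// Sketch: extract map keys into a vector and sort for a stable result.
#include <algorithm>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

std::vector<std::string> GetDeviceNames(
    const std::unordered_map<std::string, int>& devices) {
  std::vector<std::string> device_names;
  device_names.reserve(devices.size());
  for (const auto& device : devices) {
    device_names.push_back(device.first);
  }
  std::sort(device_names.begin(), device_names.end());
  return device_names;
}

int main() {
  std::unordered_map<std::string, int> devices = {{"/device:GPU:0", 1},
                                                  {"/device:CPU:0", 0}};
  for (const auto& name : GetDeviceNames(devices)) std::cout << name << "\n";
}
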
/external/tensorflow/tensorflow/compiler/jit/
xla_cluster_util.cc
251 Status PickDeviceForXlaImpl(absl::Span<const string> device_names, in PickDeviceForXlaImpl() argument
269 TF_RET_CHECK(!device_names.empty()) << "No devices to choose from"; in PickDeviceForXlaImpl()
273 for (absl::string_view device_name : device_names) { in PickDeviceForXlaImpl()
290 "Multiple GPU devices ", absl::StrJoin(device_names, ", "))); in PickDeviceForXlaImpl()
296 "Multiple CPU devices ", absl::StrJoin(device_names, ", "))); in PickDeviceForXlaImpl()
302 "Multiple unknown devices ", absl::StrJoin(device_names, ", "))); in PickDeviceForXlaImpl()
337 Status PickDeviceForXla(absl::Span<const string> device_names, in PickDeviceForXla() argument
340 return PickDeviceForXlaImpl(device_names, allow_mixing_unknown_and_cpu, in PickDeviceForXla()
345 Status CanPickDeviceForXla(absl::Span<const string> device_names, in CanPickDeviceForXla() argument
348 return PickDeviceForXlaImpl(device_names, allow_mixing_unknown_and_cpu, in CanPickDeviceForXla()
xla_cluster_util.h
118 Status PickDeviceForXla(absl::Span<const string> device_names,
125 Status CanPickDeviceForXla(absl::Span<const string> device_names,
build_xla_ops_pass.cc
284 std::set<string> device_names; in InferDeviceForCluster() local
288 device_names.insert(ndef.device()); in InferDeviceForCluster()
296 device_names.insert(n->assigned_device_name()); in InferDeviceForCluster()
300 absl::c_copy(device_names, std::back_inserter(device_names_vector)); in InferDeviceForCluster()
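InferDeviceForCluster above funnels the device strings of many nodes through a std::set so duplicates collapse before a device is picked. A standalone sketch of that dedupe step; the node list is a made-up stand-in for the cluster's NodeDefs, and std::copy replaces absl::c_copy:

// Sketch: deduplicate assigned device names via std::set, then copy the
// unique, ordered names into a vector. Not TensorFlow code.
#include <algorithm>
#include <iostream>
#include <iterator>
#include <set>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> assigned_devices = {
      "/job:worker/task:0/device:GPU:0", "/job:worker/task:0/device:GPU:0",
      "/job:worker/task:0/device:CPU:0"};
  std::set<std::string> device_names;
  for (const auto& d : assigned_devices) {
    if (!d.empty()) device_names.insert(d);  // skip unassigned nodes
  }
  std::vector<std::string> device_names_vector;
  std::copy(device_names.begin(), device_names.end(),
            std::back_inserter(device_names_vector));
  for (const auto& name : device_names_vector) std::cout << name << "\n";
}
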
/external/tensorflow/tensorflow/python/distribute/cluster_resolver/
cluster_resolver_test.py
52 device_names = [
60 for name in device_names
62 mock_eager_list_devices.return_value = device_names
72 device_names = [
84 for name in device_names
86 mock_eager_list_devices.return_value = device_names
96 device_names = [
108 for name in device_names
110 mock_eager_list_devices.return_value = device_names
tpu_cluster_resolver_test.py
566 device_names = [
578 name, 'TPU', 1024, 0) for name in device_names
591 device_names = [
603 name, 'XLA', 1024, 0) for name in device_names
629 device_names = [
641 name, 'TPU', 1024, 0) for name in device_names
643 mock_eager_list_devices.return_value = device_names
tfconfig_cluster_resolver_test.py
264 device_names = [
276 for name in device_names
278 mock_eager_list_devices.return_value = device_names
/external/tensorflow/tensorflow/compiler/tf2xla/
tf2xla_supported_ops.cc
70 std::vector<string> device_names = XlaOpRegistry::BackendNames(); in SupportedOpsMain() local
71 std::sort(device_names.begin(), device_names.end()); in SupportedOpsMain()
79 absl::StrJoin(device_names, ",")}, in SupportedOpsMain()
/external/tensorflow/tensorflow/core/framework/
collective.cc
64 device_names.clear(); in operator =()
65 device_names.assign(other.device_names.begin(), other.device_names.end()); in operator =()
91 for (const auto& d : device_names) { in ToString()
173 device_name(col_params.instance.device_names[col_params.default_rank]) {} in CollectiveContext()
/external/tensorflow/tensorflow/python/distribute/
all_reduce_test.py
95 device_names = []
99 device_names.append(dn)
102 return input_tensors, device_names
107 input_tensors, device_names = self._buildInput(1, 1)
109 output_tensors = ar._build_ring_gather(input_tensors, device_names, 1,
114 input_tensors, device_names = self._buildInput(1, 4)
117 input_tensors, device_names, 2, pred_by_c_d, rank_by_c_d, math_ops.add)
/external/autotest/client/common_lib/cros/cfm/
cras_node_collector_unittest.py
51 device_names = set([node.device_name for node in nodes])
92 device_names = set([node.device_name for node in nodes])
97 self.assertEquals(device_names, set(
/external/tensorflow/tensorflow/python/client/
session_list_devices_test.py
74 device_names = set(d.name for d in devices)
76 '/job:worker/replica:0/task:0/device:CPU:0' in device_names)
78 '/job:worker/replica:0/task:1/device:CPU:0' in device_names)
/external/tensorflow/tensorflow/core/kernels/
collective_nccl_reducer_test.cc
99 std::vector<string> device_names; in Init() local
107 device_names.push_back(device->name()); in Init()
129 col_params_.instance.device_names.push_back( in Init()
130 device_names[rank % num_gpus]); in Init()
135 rank, col_params_.instance.device_names[rank], this)); in Init()
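The NCCL reducer test above assigns device_names[rank % num_gpus] to each participant, wrapping around when ranks outnumber GPUs. A tiny illustrative sketch of that round-robin mapping with hypothetical device names and counts:

// Sketch: map each rank to a GPU device name, cycling through the available
// GPUs. Not TensorFlow code.
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> device_names = {"/device:GPU:0", "/device:GPU:1"};
  const int num_gpus = static_cast<int>(device_names.size());
  const int num_ranks = 4;  // more ranks than GPUs, so assignment wraps
  for (int rank = 0; rank < num_ranks; ++rank) {
    std::cout << "rank " << rank << " -> " << device_names[rank % num_gpus]
              << "\n";
  }
}
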
/external/tensorflow/tensorflow/core/distributed_runtime/
collective_param_resolver_distributed_test.cc
266 EXPECT_EQ(cp_[idx].instance.device_names.size(), dev_count); in ValidateCollectiveParams()
267 EXPECT_EQ(cp_[idx].instance.device_names[idx], device_name); in ValidateCollectiveParams()
273 EXPECT_EQ(cp_[0].instance.device_names[i], in ValidateCollectiveParams()
274 cp_[idx].instance.device_names[i]); in ValidateCollectiveParams()
device_resolver_distributed.cc
78 if (i < inst_params.device_names.size()) { in GetDeviceLocalitiesRecursive()
80 GetLocalityAsync(inst_params.device_names[i], inst_params.task_names[i], in GetDeviceLocalitiesRecursive()
/external/tensorflow/tensorflow/python/grappler/
cluster.i
289 std::vector<string> device_names; in TF_GetSupportedDevices() local
294 device_names.push_back(name); in TF_GetSupportedDevices()
298 PyObject* dev = PyList_New(device_names.size()); in TF_GetSupportedDevices()
299 for (int i = 0; i < device_names.size(); ++i) { in TF_GetSupportedDevices()
300 PyList_SetItem(dev, i, PyString_FromString(device_names[i].c_str())); in TF_GetSupportedDevices()
/external/tensorflow/tensorflow/core/grappler/optimizers/
pin_to_host_optimizer.cc
318 const std::vector<string> device_names = cluster->GetDeviceNames(); in Optimize() local
319 devices.insert(device_names.begin(), device_names.end()); in Optimize()
