
Searched refs: device_types (Results 1 – 25 of 43), sorted by relevance


/external/tensorflow/tensorflow/core/grappler/optimizers/
custom_graph_optimizer_registry.cc
109 const std::set<string>& device_types) { in CreateOptimizers() argument
113 if (device_types.find(it->first) == device_types.end()) continue; in CreateOptimizers()
134 const std::set<string>& device_types) { in PrintPluginConfigsIfConflict() argument
138 for (const auto& device_type : device_types) { in PrintPluginConfigsIfConflict()
156 for (const auto& device_type : device_types) { in PrintPluginConfigsIfConflict()
178 bool use_plugin_optimizers, const std::set<string>& device_types) { in GetPluginConfigs() argument
182 for (const auto& device_type : device_types) { in GetPluginConfigs()
meta_optimizer.h
59 const string& optimizer, const std::set<string>& device_types) const;
66 const std::set<string>& device_types,
70 const std::set<string>& device_types,
74 const std::set<string>& device_types,
78 const std::set<string>& device_types,
90 void PrintUserAndPluginConfigs(const std::set<string>& device_types) const;
custom_graph_optimizer_registry.h
85 const std::set<string>& device_types);
92 const std::set<string>& device_types);
103 const std::set<string>& device_types);
meta_optimizer.cc
207 const string& optimizer, const std::set<string>& device_types) const { in MakeNewOptimizer()
209 cfg_.use_plugin_optimizers() != RewriterConfig::OFF, device_types); in MakeNewOptimizer()
277 const std::set<string>& device_types, in InitializeOptimizers() argument
284 cfg_.use_plugin_optimizers() != RewriterConfig::OFF, device_types); in InitializeOptimizers()
499 return InitializeCustomGraphOptimizers(device_types, std::set<string>(), in InitializeOptimizers()
504 const std::set<string>& device_types, in InitializeOptimizersByName() argument
508 auto optimizer = MakeNewOptimizer(optimizer_name, device_types); in InitializeOptimizersByName()
529 device_types, initialized_custom_optimizers, optimizers); in InitializeOptimizersByName()
533 const std::set<string>& device_types, in InitializeCustomGraphOptimizers() argument
555 auto optimizer = MakeNewOptimizer(optimizer_config.name(), device_types); in InitializeCustomGraphOptimizers()
[all …]
/external/pytorch/torch/_library/
custom_ops.py
39 device_types: device_types_t = None,
152 result.register_kernel(device_types)(fn)
268 self, device_types: device_types_t, fn: Optional[Callable] = None, /
309 if device_types is None or isinstance(device_types, str):
310 dtypes: List[Union[str, None]] = [device_types]
312 dtypes = list(device_types)
372 if device_types is not None and not utils.has_tensor_arg(
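The torch/_library/custom_ops.py matches above belong to the torch.library.custom_op decorator and its register_kernel method, where device_types may be a single device-type string, an iterable of strings, or None. A minimal usage sketch, assuming PyTorch 2.4+; the op name "mylib::scale" and both kernels are hypothetical:

    import torch
    from torch.library import custom_op

    # device_types="cpu" restricts the decorated function to CPU tensors.
    @custom_op("mylib::scale", mutates_args=(), device_types="cpu")
    def scale(x: torch.Tensor, factor: float) -> torch.Tensor:
        return x * factor

    # register_kernel accepts a single device-type string or an iterable of
    # them, matching the str/list normalization at custom_ops.py:309-312 above.
    @scale.register_kernel("cuda")
    def _(x: torch.Tensor, factor: float) -> torch.Tensor:
        return x * factor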
/external/pytorch/torch/
_custom_ops.py
120 def impl(qualname, *, device_types=("cpu", "cuda"), func=None): argument
175 custom_op.impl(device_types, _stacklevel=3)(func)
library.py
622 device_types: device_types_t,
676 return opdef.register_kernel(device_types, func)
678 if device_types is None:
679 device_types = "CompositeExplicitAutograd"
681 return _impl(op, device_types, func, lib=lib, disable_dynamo=True)
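The library.py matches show the module-level registration path: the legacy torch._custom_ops.impl wrapper above defaults device_types to ("cpu", "cuda"), while in library.py a device_types of None falls back to the "CompositeExplicitAutograd" dispatch key, i.e. one kernel shared by every backend. A sketch of that path, assuming PyTorch 2.4+ and an op "mylib::scale" defined elsewhere:

    import torch

    # Register a CUDA-only kernel for an already-defined custom op.
    @torch.library.register_kernel("mylib::scale", "cuda")
    def _(x: torch.Tensor, factor: float) -> torch.Tensor:
        return x * factor

    # Passing device_types=None instead would fall back to the
    # "CompositeExplicitAutograd" key (library.py:679 above), registering a
    # single kernel used by every backend:
    #   torch.library.register_kernel("mylib::scale", None)(some_fn)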
/external/tensorflow/tensorflow/dtensor/python/
layout.py
171 device_types = set([device.device_type for device in local_devices])
172 if not device_types:
173 device_types = set([device.device_type for device in global_devices])
174 if None in device_types:
176 if len(device_types) > 1:
178 device_types)
181 self._device_type = device_types.pop()
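The DTensor layout.py matches implement device-type inference for a mesh: the device type is taken from the local (or, failing that, global) device specs and must be unique and non-None. A standalone sketch of that rule; infer_device_type is a hypothetical helper, not part of the DTensor API:

    def infer_device_type(local_devices, global_devices=()):
        """Derive the single device type backing a mesh from its DeviceSpecs."""
        device_types = {d.device_type for d in local_devices}
        if not device_types:
            # Fall back to the global device list when no local devices are given.
            device_types = {d.device_type for d in global_devices}
        if None in device_types:
            raise ValueError("Device specs must carry an explicit device type.")
        if len(device_types) > 1:
            raise ValueError(
                f"All mesh devices must share one device type, got {device_types}")
        return device_types.pop()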
/external/pytorch/test/jit/
test_device_analysis.py
34 cls.device_types = [cls.cpu, cls.cuda, cls.vulkan]
120 for in_device, fn in product(self.device_types, functions):
127 for in_device in self.device_types:
135 for in_device in self.device_types:
265 for in_device in self.device_types:
/external/tensorflow/tensorflow/python/grappler/
cluster_wrapper.cc
169 std::unordered_map<std::string, std::vector<std::string>> device_types; in PYBIND11_MODULE() local
171 device_types[dev.second.type()].push_back(dev.first); in PYBIND11_MODULE()
180 for (const auto& dev : device_types) { in PYBIND11_MODULE()
232 auto it = device_types.find(type); in PYBIND11_MODULE()
233 DCHECK(it != device_types.end()); in PYBIND11_MODULE()
/external/pytorch/torch/_custom_op/
impl.py
199 self, device_types: typing.Union[str, typing.Iterable[str]], _stacklevel=2,
204 if isinstance(device_types, str):
205 device_types = [device_types]
206 for device_type in device_types:
210 for device_type in set(device_types):
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/transforms/
tpu_cluster_formation.cc
140 std::set<llvm::StringRef> device_types; in CollectAndGroupClusterOps() local
149 if (device_type_attr) device_types.insert(device_type_attr); in CollectAndGroupClusterOps()
175 if (device_types.size() > 1) { in CollectAndGroupClusterOps()
178 << "' attribute values (" << llvm::join(device_types, ",") in CollectAndGroupClusterOps()
184 assert(device_types.size() == 1); in CollectAndGroupClusterOps()
185 device_type = device_types.begin()->str(); in CollectAndGroupClusterOps()
/external/pytorch/torch/_inductor/
graph.py
348 self.device_types: Set[str] = (
349 const_module.device_types if const_module else set()
679 self.device_types.add(device.type)
1515 self.cuda = "cuda" in self.device_types
1519 device_types = self.device_types.copy()
1520 device_types.discard("cpu")
1521 device_types.discard("meta")
1523 assert len(device_types) <= 1, "Does not support mixing {}".format(
1524 "+".join(device_types)
1526 only_cpu = len(device_types) == 0
[all …]
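The _inductor/graph.py matches track every device type seen while lowering a graph and then check that, ignoring "cpu" and "meta", at most one accelerator type remains. A standalone sketch of that check; the helper name is hypothetical:

    from typing import Optional, Set

    def sole_accelerator_type(device_types: Set[str]) -> Optional[str]:
        """Return the single non-CPU device type in use, or None if CPU-only."""
        accel = set(device_types)
        accel.discard("cpu")
        accel.discard("meta")
        assert len(accel) <= 1, "Does not support mixing {}".format("+".join(accel))
        return accel.pop() if accel else None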
/external/OpenCL-CTS/test_conformance/
run_conformance.py
302 device_types = ["CL_DEVICE_TYPE_DEFAULT", "CL_DEVICE_TYPE_CPU", "CL_DEVICE_TYPE_GPU", "CL_DEVICE_TY… variable
304 for device in device_types:
318 if arg in device_types:
/external/tensorflow/tensorflow/core/common_runtime/
inspecting_placer.h
47 PrioritizedDeviceTypeVector device_types; member
colocation_graph.cc
110 bool HasPriorities(const PrioritizedDeviceTypeVector& device_types) { in HasPriorities() argument
111 for (const auto& prioritized_device_type : device_types) { in HasPriorities()
244 possible_device->device_types = supported_device_types_; in FillPossibleDevices()
563 MergeSupportedDevices(devices.device_types); in LimitToPossibleDevices()
905 std::back_inserter(possible_devices.device_types), in AddHostOnlyDataTypesConstraints()
inspecting_placer.cc
59 devices.device_types, ", ", in DebugString()
/external/tensorflow/tensorflow/python/distribute/
one_device_strategy_test.py
133 device_types = (
138 self.assertAllEqual(device_types, expected_device_types)
collective_all_reduce_strategy_test.py
327 device_types = {
330 self.assertAllEqual(list(device_types), ['GPU'])
346 device_types = {
349 self.assertAllEqual(list(device_types), ['CPU'])
/external/pytorch/torch/utils/
checkpoint.py
133 device_types = []
136 nonlocal device_types
138 device_types.append(arg.device.type)
141 device_types_set = set(device_types)
151 if len(device_types) == 0:
156 return device_types[0]
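The torch/utils/checkpoint.py matches collect the device type of every tensor argument and settle on a single one for the checkpointed region. A standalone sketch of that pattern; the helper name and the "cuda" fallback for the tensor-less case are assumptions for illustration:

    import warnings
    import torch

    def infer_device_type(*args):
        device_types = [
            a.device.type for a in args if isinstance(a, torch.Tensor)
        ]
        if len(set(device_types)) > 1:
            warnings.warn(
                "Tensor arguments are spread across multiple device types; "
                "only the first one will be used for checkpoint state."
            )
        if len(device_types) == 0:
            return "cuda"  # assumed default when no tensor argument is present
        return device_types[0]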
/external/pytorch/test/inductor/
test_mmdecomp.py
191 device_types = ("cpu", GPU_TYPE) variable
192 instantiate_device_type_tests(TestDecomp, globals(), only_for=device_types)
/external/pytorch/torch/csrc/distributed/c10d/
ProcessGroupWrapper.cpp
95 auto device_types = std::vector<int8_t>(); in deserialize_fingerprint() local
110 device_types.reserve(num_tensors); in deserialize_fingerprint()
120 device_types.push_back(serialized_tensor[index].item<int8_t>()); in deserialize_fingerprint()
139 optype, num_tensors, dtypes, device_types, sizes, seq); in deserialize_fingerprint()
/external/pytorch/test/onnx/
test_fx_passes.py
62 "mylibrary::foo_op", device_types="cpu", mutates_args=()
68 "mylibrary::bar_op", device_types="cpu", mutates_args=()
/external/pytorch/test/
test_custom_ops.py
867 @custom_ops.impl(f"{TestCustomOp.test_ns}::foo", device_types=["cpu"])
901 @custom_ops.impl(f"{self.test_ns}::foo", device_types="cpu")
1088 @custom_ops.impl(f"{TestCustomOp.test_ns}::foo", device_types="cpu")
1109 custom_ops.impl(f"{TestCustomOp.test_ns}::foo", device_types=device_type)(
1119 f"{TestCustomOp.test_ns}::foo", device_types=invalid_type
1462 @custom_ops.impl(f"{TestCustomOp.test_ns}::foo", device_types="cpu")
1466 @custom_ops.impl(f"{TestCustomOp.test_ns}::foo", device_types="cuda")
2562 device_types="cpu",
2570 "_torch_testing::numpy_sin_inplace", mutates_args={"x"}, device_types="cpu"
2611 "_torch_testing::f", mutates_args="unknown", device_types="cpu"
[all …]
/external/tensorflow/tensorflow/core/graph/
graph_partition.cc
111 std::vector<DeviceType> device_types; member
136 if (info.device_types[src->id()] != DEVICE_CPU) { in NeedSameDeviceSendRecv()
151 if (info.device_types[dst->id()] != DEVICE_CPU) { in IsDstInputOnHost()
579 info->device_types.resize(g.num_node_ids(), DEVICE_CPU); in BuildMemoryDeviceInfo()
593 info->device_types[node_id] = DeviceType(parsed.type); in BuildMemoryDeviceInfo()
