/external/mesa3d/src/gbm/main/ |
D | backend.c |
    122  GBM_GET_BACKEND_PROC_PTR get_backend;                 in load_backend_by_name() local
    124  get_backend = dlsym(lib, GBM_GET_BACKEND_PROC_NAME);  in load_backend_by_name()
    126  if (!get_backend)                                      in load_backend_by_name()
    129  gbm_backend = get_backend(&gbm_core);                  in load_backend_by_name()
|
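The backend.c hits above show GBM's backend discovery: the backend library is dlopened, the GBM_GET_BACKEND_PROC_NAME symbol is resolved via dlsym, and the returned function pointer is called with the core interface. As a loose Python/ctypes sketch of that lookup shape (the library path and symbol name below are placeholders, not GBM's real values):

    import ctypes

    # Placeholder names for illustration; the real GBM backend library and the
    # value of GBM_GET_BACKEND_PROC_NAME are different.
    LIB_PATH = "./libexample_backend.so"
    ENTRY_POINT = "example_get_backend"

    def load_backend_by_name(path: str, symbol: str):
        """dlopen the library and resolve its backend entry point, if present."""
        lib = ctypes.CDLL(path)       # roughly dlopen()
        if not hasattr(lib, symbol):  # a missing symbol behaves like a failed dlsym()
            return None
        get_backend = getattr(lib, symbol)
        get_backend.restype = ctypes.c_void_p
        return get_backend            # the caller invokes it with the core interface

    # backend_fn = load_backend_by_name(LIB_PATH, ENTRY_POINT)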
/external/ComputeLibrary/src/graph/detail/ |
D | ExecutionHelpers.cpp |
    49   …ds::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(assigned_target…   in validate_all_nodes()
    65   … backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(target);     in configure_all_tensors()
    149  …ds::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(assigned_target…   in configure_all_nodes()
|
/external/scapy/scapy/libs/ |
D | matplot.py | 27 from matplotlib import get_backend as matplotlib_get_backend
|
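The scapy hit imports matplotlib's get_backend() so it can tell whether the active backend can actually display plots. A minimal sketch of that kind of check; the set of GUI backends below is an illustrative subset, not scapy's actual logic:

    import matplotlib

    def plots_can_be_shown() -> bool:
        """Return True if the active matplotlib backend can open GUI windows."""
        gui_backends = {"tkagg", "qtagg", "qt5agg", "macosx", "gtk3agg"}
        return matplotlib.get_backend().lower() in gui_backends

    if not plots_can_be_shown():
        # Fall back to a non-interactive backend, e.g. on a headless machine.
        matplotlib.use("Agg")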
/external/pytorch/torch/distributed/elastic/rendezvous/ |
D | api.py |
    143  def get_backend(self) -> str:                 member in RendezvousHandler
    368  if handler.get_backend() != params.backend:
|
D | static_tcp_rendezvous.py | 56 def get_backend(self) -> str: member in StaticTCPRendezvous
|
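In torch.distributed.elastic, every rendezvous handler reports the backend it implements through get_backend(), and api.py (line 368 above) rejects a handler whose backend does not match the one that was requested. A standalone sketch of that contract; the real RendezvousHandler is an abstract class with several more methods, and _FileRendezvousHandler here is hypothetical:

    class _FileRendezvousHandler:
        """Hypothetical handler; only the backend-name contract is shown."""

        def get_backend(self) -> str:
            return "file"

    def check_handler_backend(handler, requested_backend: str) -> None:
        # Mirrors the api.py check: the handler must implement the backend
        # the caller asked for.
        if handler.get_backend() != requested_backend:
            raise RuntimeError(
                f"handler implements '{handler.get_backend()}', "
                f"but '{requested_backend}' was requested"
            )

    check_handler_backend(_FileRendezvousHandler(), "file")    # passes
    # check_handler_backend(_FileRendezvousHandler(), "etcd")  # raises RuntimeError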
/external/ComputeLibrary/arm_compute/graph/backends/ |
D | BackendRegistry.h | 64 IDeviceBackend &get_backend(Target target);
|
/external/ComputeLibrary/src/graph/backends/ |
D | BackendRegistry.cpp | 51 IDeviceBackend &BackendRegistry::get_backend(Target target) in get_backend() function in arm_compute::graph::backends::BackendRegistry
|
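ComputeLibrary resolves the IDeviceBackend for a graph Target through a process-wide singleton, BackendRegistry::get().get_backend(target), as the header and source hits above show. A loose Python analogue of that singleton lookup shape follows; every name in it is made up for illustration:

    from enum import Enum

    class Target(Enum):
        # Illustrative targets only; the real Target enum is C++.
        NEON = 1
        CL = 2

    class _BackendRegistry:
        """Loose analogue of a process-wide, target-keyed backend registry."""

        _instance = None

        @classmethod
        def get(cls) -> "_BackendRegistry":
            # Singleton access, mirroring BackendRegistry::get().
            if cls._instance is None:
                cls._instance = cls()
            return cls._instance

        def __init__(self) -> None:
            self._backends = {}

        def add_backend(self, target: Target, backend) -> None:
            self._backends[target] = backend

        def get_backend(self, target: Target):
            # KeyError if nothing was registered for the target.
            return self._backends[target]

    _BackendRegistry.get().add_backend(Target.NEON, object())
    neon_backend = _BackendRegistry.get().get_backend(Target.NEON)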
/external/pytorch/torch/distributed/nn/ |
D | functional.py |
    334  if dist.get_backend(group=ctx.group) is dist.Backend.NCCL:
    356  if dist.get_backend(group=ctx.group) is dist.Backend.NCCL:
    384  if dist.get_backend(group=group) is dist.Backend.GLOO:
|
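torch/distributed/nn/functional.py branches on the process group's backend because NCCL collectives expect CUDA tensors while Gloo works on CPU tensors. A hedged sketch of that pattern; the function and the device moves are illustrative, not the file's actual code:

    import torch
    import torch.distributed as dist

    def all_reduce_on_group(tensor: torch.Tensor, group=None) -> torch.Tensor:
        """Place the tensor where the group's backend expects it, then reduce."""
        if dist.get_backend(group=group) == dist.Backend.NCCL:
            tensor = tensor.cuda()  # NCCL operates on CUDA tensors
        else:
            tensor = tensor.cpu()   # Gloo (and most others) operate on CPU tensors
        dist.all_reduce(tensor, group=group)
        return tensor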
/external/pytorch/torch/csrc/dynamo/ |
D | extra_state.h | 147 PyObject* get_backend(PyObject* callback);
|
D | cache_entry.cpp | 47 PyObject* get_backend(PyObject* callback) { in get_backend() function
|
/external/ComputeLibrary/src/graph/mutators/ |
D | NodeExecutionMethodMutator.cpp | 58 …backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(node->assigned_ta… in set_default_on_invalid_method()
|
D | DepthConcatSubTensorMutator.cpp | 97 …kends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(input_tensor->d… in mutate()
|
D | SplitLayerSubTensorMutator.cpp | 97 …kends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(output_tensor->… in mutate()
|
D | GroupedConvolutionMutator.cpp | 131 …backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(node->assigned_ta… in mutate()
|
/external/pytorch/test/distributed/elastic/rendezvous/ |
D | etcd_rendezvous_test.py | 83 self.assertEqual("etcd", etcd_rdzv.get_backend())
|
D | static_rendezvous_test.py | 75 self.assertEqual("static", static_rdzv.get_backend())
|
D | api_test.py | 196 def get_backend(self) -> str: member in _DummyRendezvousHandler
|
/external/pytorch/torch/distributed/algorithms/ddp_comm_hooks/ |
D | ddp_zero_hook.py |
    235  pg = dist.get_backend(ddp_ref().process_group)  # type: ignore[union-attr]
    397  pg = dist.get_backend(ddp_ref().process_group)  # type: ignore[union-attr]
|
/external/pytorch/torch/csrc/tensor/ |
D | python_tensor.cpp |
    44   Backend get_backend() const {                                             in get_backend() function
    444  set_default_tensor_type(type->get_backend(), type->get_scalar_type());    in py_set_default_tensor_type()
|
/external/pytorch/torch/distributed/ |
D | device_mesh.py |
    43   get_backend,
    454  if is_initialized() and get_backend() == "threaded":
    510  and get_backend(default_group) == "gloo"
|
D | distributed_c10d.py |
    1198  def get_backend(group: Optional[ProcessGroup] = None) -> Backend:    function
    2629  backend = get_backend(group)
    2641  backend = get_backend(group)
    4221  if get_backend(group) != Backend.GLOO:
    4225  timeout = _get_default_timeout(get_backend(group))
    4288  backend = get_backend(_get_default_group())
|
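distributed_c10d.py is where get_backend() itself is defined (line 1198): given a process group, or the default group when none is passed, it returns the Backend the group was initialized with. A minimal single-process usage sketch, assuming TCP port 29500 on localhost is free:

    import torch.distributed as dist

    # Single-process group purely to illustrate the query; real jobs set
    # rank and world_size per worker, usually via environment variables.
    dist.init_process_group(
        backend="gloo",
        init_method="tcp://127.0.0.1:29500",
        rank=0,
        world_size=1,
    )

    backend = dist.get_backend()          # queries the default group
    print(backend == dist.Backend.GLOO)   # True

    dist.destroy_process_group()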
/external/pytorch/torch/_inductor/ |
D | scheduler.py |
    846   group_fn = self.scheduler.get_backend(self.node.get_device()).group_fn
    2311  backend = self.get_backend(device)
    2564  node3 = self.get_backend(device).fuse(node1, node2)
    2989  return self.get_backend(device).can_fuse_vertical(node1, node2)
    2994  return self.get_backend(device).can_fuse_horizontal(node1, node2)
    3197  self.get_backend(device).get_fusion_pair_priority(node1, node2)
    3358  def get_backend(self, device: torch.device) -> BaseScheduling:    member in Scheduler
    3445  self.get_backend(device).codegen_template(node, epilogue)
    3451  backend_ = self.get_backend(device)
    3461  self.get_backend(device).codegen_node(node)
    [all …]
|
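The Inductor scheduler resolves a per-device code-generation backend with get_backend(device) and then delegates fusion and codegen decisions to it. The sketch below only shows the general device-keyed dispatch pattern; the classes are hypothetical stand-ins, not Inductor's BaseScheduling machinery:

    import torch

    class _ExampleScheduling:
        """Hypothetical per-device backend exposing a tiny slice of the interface."""

        def can_fuse_vertical(self, node1, node2) -> bool:
            return False

    class _ExampleScheduler:
        def __init__(self) -> None:
            self._backends = {}

        def get_backend(self, device: torch.device) -> _ExampleScheduling:
            # Create and cache one scheduling backend per device; every later
            # fusion or codegen query for that device goes through it.
            if device not in self._backends:
                self._backends[device] = _ExampleScheduling()
            return self._backends[device]

    sched = _ExampleScheduler()
    dev = torch.device("cpu")
    print(sched.get_backend(dev) is sched.get_backend(dev))  # True: cached per device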
/external/pytorch/c10/xpu/ |
D | XPUFunctions.cpp | 34 if (platform.get_backend() != sycl::backend::ext_oneapi_level_zero) { in enumDevices()
|
/external/ComputeLibrary/src/graph/ |
D | Utils.cpp | 222 … backends::IDeviceBackend &backend = backends::BackendRegistry::get().get_backend(target); in configure_tensor()
|
/external/pytorch/torch/distributed/elastic/agent/server/ |
D | local_elastic_agent.py | 261 "rdzv_backend": spec.rdzv_handler.get_backend(),
|