/external/pytorch/torch/cpu/amp/
  autocast_mode.py
    26: cache_enabled: bool = True,
    34: "cpu", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled
/external/pytorch/torch/cuda/amp/
  autocast_mode.py
    27: cache_enabled: bool = True,
    35: "cuda", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled
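Note: as the forwarded arguments above show, torch.cpu.amp.autocast and torch.cuda.amp.autocast are thin wrappers that pass their keyword arguments straight to the device-generic torch.autocast. A minimal sketch of the equivalence (both context bodies are illustrative):

    # Sketch: the per-device wrapper forwards to torch.autocast, so these
    # two context managers configure the same autocast state.
    import torch

    with torch.cuda.amp.autocast(enabled=True, dtype=torch.float16, cache_enabled=True):
        pass  # mixed-precision region

    with torch.autocast("cuda", dtype=torch.float16, enabled=True, cache_enabled=True):
        pass  # equivalent device-generic form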
/external/tensorflow/tensorflow/core/distributed_runtime/rpc/
  grpc_worker_service.cc
    449: bool cache_enabled = (response_cache_ != nullptr && request_id != 0);  [in GrpcRecvTensorAsync(), local]
    451: auto do_response = [response, done, cache_enabled](const Tensor& tensor,  [in GrpcRecvTensorAsync()]
    455: grpc::EncodeTensorToByteBuffer(is_dead, tensor, cache_enabled, response);  [in GrpcRecvTensorAsync()]
    464: if (cache_enabled &&  [in GrpcRecvTensorAsync()]
    469: auto rendezvous_done = [this, request_id, do_response, cache_enabled](  [in GrpcRecvTensorAsync()]
    472: if (cache_enabled) {  [in GrpcRecvTensorAsync()]
    595: bool cache_enabled = (response_cache_ != nullptr && request_id != 0);  [in RecvBufAsync(), local]
    597: auto do_response = [this, response, done, cache_enabled](  [in RecvBufAsync()]
    604: response->set_require_ack(cache_enabled);  [in RecvBufAsync()]
    612: if (cache_enabled &&  [in RecvBufAsync()]
    [all …]
/external/compiler-rt/lib/sanitizer_common/
  sanitizer_procmaps_common.cc
    67: MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {  [in MemoryMappingLayout(), argument]
    69: if (cache_enabled) {  [in MemoryMappingLayout()]
    79: if (cache_enabled)  [in MemoryMappingLayout()]
  sanitizer_procmaps.h
    36: explicit MemoryMappingLayout(bool cache_enabled);
  sanitizer_procmaps_mac.cc
    24: MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {  [in MemoryMappingLayout(), argument]
/external/python/mako/mako/
  template.py
    237: cache_enabled=True,  [argument]
    334: cache_enabled,
    351: cache_enabled,  [argument]
    358: self.cache_enabled = cache_enabled
    514: cache_enabled=True,  [argument]
    546: cache_enabled,
  lookup.py
    164: cache_enabled=True,  [argument]
    210: "cache_enabled": cache_enabled,
  cache.py
    89: if not self.template.cache_enabled:
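Note: the Template and TemplateLookup entries above both accept cache_enabled, and the cache.py check (line 89) bypasses the cache backend entirely when it is False. A minimal sketch of disabling caching; the template body is illustrative:

    # Sketch: with cache_enabled=False, even a template declaring
    # cached="True" skips the cache lookup (the cache.py check above),
    # so no cache backend needs to be installed.
    from mako.template import Template
    from mako.lookup import TemplateLookup

    t = Template('<%page cached="True"/>hello', cache_enabled=False)
    print(t.render())  # rendered fresh on every call

    # The lookup-level flag is passed down to every template it loads.
    lookup = TemplateLookup(directories=["."], cache_enabled=False)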
/external/pytorch/torch/_higher_order_ops/
  wrap.py
    70: cache_enabled: Optional[bool],
    82: with torch.autocast(device_type, dtype, enabled, cache_enabled):
/external/pytorch/torch/amp/
  autocast_mode.py
    223: cache_enabled: Optional[bool] = None,
    272: if cache_enabled is not None:
    273: self._cache_enabled = cache_enabled
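Note: in the device-generic torch.autocast shown above, cache_enabled defaults to None, and lines 272-273 only override the ambient cache setting when a bool is passed. A minimal sketch of the two cases:

    # Sketch: cache_enabled=None inherits the current autocast cache
    # setting; an explicit bool overrides it for this region only.
    import torch

    with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
        pass  # cache_enabled=None: keep the ambient setting

    with torch.autocast(device_type="cpu", dtype=torch.bfloat16, cache_enabled=False):
        pass  # disable the weight-cast cache inside this region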
/external/pytorch/aten/src/ATen/
  autocast_mode.cpp
    83: thread_local bool cache_enabled = true;  [variable]
    109: return cache_enabled;  [in is_autocast_cache_enabled()]
    113: cache_enabled = enabled;  [in set_autocast_cache_enabled()]
    126: arg.is_leaf() && !arg.is_view() && cache_enabled &&  [in cached_cast()]
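Note: the thread_local flag and the is_autocast_cache_enabled()/set_autocast_cache_enabled() accessors above are exposed in Python at the torch top level. A minimal sketch of toggling and clearing the cast cache:

    # Sketch: save, toggle, and restore the autocast cast-cache flag, then
    # drop any cached low-precision weight copies.
    import torch

    prev = torch.is_autocast_cache_enabled()
    torch.set_autocast_cache_enabled(False)  # cached_cast() stops caching
    # ... autocast work here ...
    torch.set_autocast_cache_enabled(prev)   # restore the previous setting
    torch.clear_autocast_cache()             # free cached fp16/bf16 copies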
/external/executorch/extension/llm/modules/
  attention.py
    161: self.cache_enabled = False
    189: self.cache_enabled = True
/external/pytorch/test/
  test_jit_autocast.py
    800: with torch.cpu.amp.autocast(cache_enabled=False), torch.no_grad():
    809: with torch.cpu.amp.autocast(cache_enabled=False), torch.no_grad():
    824: with torch.cpu.amp.autocast(cache_enabled=False), torch.no_grad():
    848: … with torch.cpu.amp.autocast(cache_enabled=False, dtype=torch.bfloat16), torch.no_grad():
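Note: the test lines above combine CPU autocast with the cast cache disabled and no_grad for inference. A minimal runnable sketch of that pattern; the model and input are placeholders:

    # Sketch: bfloat16 CPU autocast with the cast cache off, under no_grad.
    import torch

    model = torch.nn.Linear(8, 8)
    x = torch.randn(2, 8)

    with torch.cpu.amp.autocast(cache_enabled=False, dtype=torch.bfloat16), torch.no_grad():
        y = model(x)  # linear runs in bfloat16 under CPU autocast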
  test_cuda.py
    2628: self, with_amp, cache_enabled, allow_unused_input  [argument]
    2700: device_type="cuda", enabled=with_amp, cache_enabled=cache_enabled
    2742: device_type="cuda", enabled=with_amp, cache_enabled=cache_enabled
    2779: self, with_amp, cache_enabled, allow_unused_input  [argument]
    2813: device_type="cuda", enabled=with_amp, cache_enabled=cache_enabled
    2831: device_type="cuda", enabled=with_amp, cache_enabled=cache_enabled
  test_ops.py
    2409: cache_enabled = unittest.mock.patch(
    2412: cache_enabled.start()
    2413: self.addCleanup(cache_enabled.stop)
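Note: the test_ops.py lines above use the manual start()/addCleanup(stop) idiom from unittest.mock. The patch target is truncated in the listing, so the one below is purely illustrative:

    # Sketch of the start()/addCleanup(stop) idiom. "some.module.cache_enabled"
    # is a hypothetical target; the real one is truncated in the listing.
    import unittest
    import unittest.mock

    class ExampleTest(unittest.TestCase):
        def test_with_patch(self):
            cache_enabled = unittest.mock.patch(
                "some.module.cache_enabled", True  # hypothetical target
            )
            cache_enabled.start()
            self.addCleanup(cache_enabled.stop)  # undone even if the test fails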
  test_jit_llga_fuser.py
    71: with torch.cpu.amp.autocast(cache_enabled=False, dtype=torch.bfloat16):
/external/mesa3d/src/broadcom/vulkan/
  v3dv_pipeline_cache.c
    205: bool cache_enabled)  [in v3dv_pipeline_cache_init(), argument]
    210: if (cache_enabled) {  [in v3dv_pipeline_cache_init()]
  v3dv_private.h
    2483: bool cache_enabled);
/external/python/mako/test/
  test_cache.py
    132: cache_enabled=False,
/external/python/mako/doc/build/
  caching.rst
    87: * ``cache_enabled`` - Setting this
    96: cache_enabled = False
  changelog.rst
    1742: added "cache_enabled=True" flag to Template,
    1746: cache_enabled configuration setting.
/external/pytorch/torch/_subclasses/
  fake_tensor.py
    1139: self.cache_enabled = (
    1691: if self.cache_enabled:
/external/bcc/libbpf-tools/x86/
  vmlinux.h
    114836: bool (*cache_enabled)(struct mmc_host *);  [member]
  vmlinux_518.h
    114836: bool (*cache_enabled)(struct mmc_host *);  [member]