/external/tensorflow/tensorflow/core/common_runtime/pluggable_device/ |
D | pluggable_device_bfc_allocator.cc |
    23   const GPUOptions& gpu_options, bool force_memory_growth_requested) {  in GetAllowGrowthValue() argument
    27   if (force_memory_growth_requested && !gpu_options.allow_growth()) {  in GetAllowGrowthValue()
    33   return gpu_options.allow_growth();  in GetAllowGrowthValue()
    43   if (gpu_options.allow_growth()) {  in GetAllowGrowthValue()
    47   << " config value was " << gpu_options.allow_growth() << ".";  in GetAllowGrowthValue()
    51   if (!gpu_options.allow_growth()) {  in GetAllowGrowthValue()
    55   << " config value was " << gpu_options.allow_growth() << ".";  in GetAllowGrowthValue()
    64   << " of " << gpu_options.allow_growth() << ".";  in GetAllowGrowthValue()
    65   return gpu_options.allow_growth();  in GetAllowGrowthValue()
    97   const GPUOptions& gpu_options, const string& name,  in PluggableDeviceBFCAllocator() argument
    [all …]
|
D | pluggable_device_bfc_allocator.h |
    41   const GPUOptions& gpu_options, const string& name,
    48   static bool GetAllowGrowthValue(const GPUOptions& gpu_options,
|
D | pluggable_device.cc |
    154  force_gpu_compatible_ = options.config.gpu_options().force_gpu_compatible();  in PluggableDevice()
    176  options.config.gpu_options());  in Init()
    179  device_type(), tf_device_id_, 0, executor_, options.config.gpu_options());  in Init()
|
D | pluggable_device_factory.cc |
    176  const auto& device_options = options.config.gpu_options();  in CreateDevices()
    250  options.config.gpu_options(), tf_device_id, memory_limit);  in CreatePluggableDevice()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | gpu_executable_run_options.cc |
    58   const GpuExecutableRunOptions* gpu_options =  in NcclExecuteParams() local
    60   gpu_global_device_ids = gpu_options && gpu_options->gpu_global_device_ids()  in NcclExecuteParams()
    61   ? &*gpu_options->gpu_global_device_ids()  in NcclExecuteParams()
    64   gpu_options && gpu_options->nccl_unique_id_callback()  in NcclExecuteParams()
    65   ? &gpu_options->nccl_unique_id_callback()  in NcclExecuteParams()
|
/external/tensorflow/tensorflow/core/common_runtime/device/ |
D | device_event_mgr.cc |
    93   EventMgr::EventMgr(se::StreamExecutor* se, const GPUOptions& gpu_options)  in EventMgr() argument
    95   polling_active_delay_usecs_(gpu_options.polling_active_delay_usecs()  in EventMgr()
    96   ? gpu_options.polling_active_delay_usecs()  in EventMgr()
    252  const GPUOptions& gpu_options) {  in GetEventMgr() argument
    260  auto event_mgr = new EventMgr(se, gpu_options);  in GetEventMgr()
|
D | device_event_mgr.h |
    99   EventMgr(se::StreamExecutor* se, const GPUOptions& gpu_options);
    153  EventMgr* GetEventMgr(se::StreamExecutor* se, const GPUOptions& gpu_options);
|
/external/tensorflow/tensorflow/core/common_runtime/gpu/ |
D | gpu_event_mgr.cc |
    92   EventMgr::EventMgr(se::StreamExecutor* se, const GPUOptions& gpu_options)  in EventMgr() argument
    94   polling_active_delay_usecs_(gpu_options.polling_active_delay_usecs()  in EventMgr()
    95   ? gpu_options.polling_active_delay_usecs()  in EventMgr()
    251  const GPUOptions& gpu_options) {  in GetEventMgr() argument
    259  auto event_mgr = new EventMgr(se, gpu_options);  in GetEventMgr()
|
D | gpu_device.cc |
    447  tf_device_id_, 0, executor_, options.config.gpu_options());  in Init()
    464  options.config.gpu_options());  in Init()
    467  options.config.gpu_options().experimental().kernel_tracker_max_interval(),  in Init()
    468  options.config.gpu_options().experimental().kernel_tracker_max_bytes(),  in Init()
    469  options.config.gpu_options().experimental().kernel_tracker_max_pending());  in Init()
    471  options.config.gpu_options().experimental().timestamped_allocator();  in Init()
    932  const size_t num_gpus_to_use, const GPUOptions& gpu_options,  in VerifyVirtualDeviceSettings() argument
    936  const auto& virtual_devices = gpu_options.experimental().virtual_devices();  in VerifyVirtualDeviceSettings()
    938  if (gpu_options.per_process_gpu_memory_fraction() > 0) {  in VerifyVirtualDeviceSettings()
    949  if (!gpu_options.visible_device_list().empty() &&  in VerifyVirtualDeviceSettings()
    [all …]
|
D | gpu_device_factory.cc |
    40   options.config.gpu_options().force_gpu_compatible();  in GPUDevice()
    90   options.config.gpu_options().force_gpu_compatible();  in GPUCompatibleCPUDevice()
|
D | gpu_device_test.cc |
    81   GPUOptions* gpu_options = config->mutable_gpu_options();  local
    82   gpu_options->set_visible_device_list(visible_device_list);
    83   gpu_options->set_per_process_gpu_memory_fraction(
    85   gpu_options->mutable_experimental()->set_use_cuda_malloc_async(
    89   gpu_options->mutable_experimental()->add_virtual_devices();
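The gpu_device_test.cc matches above touch the same GPUOptions fields that are reachable from Python through config_pb2. A minimal sketch, assuming the experimental fields (use_cuda_malloc_async, virtual_devices) exist in the checked-out TensorFlow version; the device list, fraction, and memory limits are illustrative values only:

    from tensorflow.core.protobuf import config_pb2

    config = config_pb2.ConfigProto()
    gpu_options = config.gpu_options
    # Expose only GPU 0 to the process and cap its memory use.
    gpu_options.visible_device_list = '0'
    gpu_options.per_process_gpu_memory_fraction = 0.5
    # Experimental knobs mirrored from the C++ test above.
    gpu_options.experimental.use_cuda_malloc_async = True
    # Carve the physical GPU into two 1 GiB virtual devices.
    virtual = gpu_options.experimental.virtual_devices.add()
    virtual.memory_limit_mb.extend([1024, 1024])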
|
/external/tensorflow/tensorflow/compiler/mlir/tfrt/transforms/lmhlo_to_gpu/ |
D | kernel_ops_pattern.cc |
    219  const GpuBinaryOptions& gpu_options,  in Emit() argument
    234  gpu_options.platform_name, gpu_options.gpu_device_info,  in Emit()
    235  gpu_options.cuda_compute_capability, gpu_options.rocm_compute_capability,  in Emit()
    253  GpuBinaryOptions gpu_options) {  in Match() argument
    284  gpu_options, hlo_module_config, llvm_module.get());  in Match()
    319  gpu_options.cuda_compute_capability,  in Match()
|
/external/tensorflow/tensorflow/python/framework/ |
D | config_test.py |
    585  self.assertTrue(c.gpu_options.allow_growth)
    715  self.assertTrue(c.gpu_options.allow_growth)
    720  self.assertFalse(c.gpu_options.allow_growth)
    731  self.assertTrue(c.gpu_options.allow_growth)
    746  self.assertFalse(c.gpu_options.allow_growth)
    790  gpu_options=config_pb2.GPUOptions(visible_device_list='0'))
    792  self.assertEqual(new_config.gpu_options.visible_device_list, '0')
    811  gpu_options=config_pb2.GPUOptions(visible_device_list=''))
    815  self.assertEqual(new_config.gpu_options.visible_device_list,
    821  gpu_options=config_pb2.GPUOptions(visible_device_list=str(gpu_count)))
    [all …]
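The config_test.py matches only exercise two GPUOptions fields, allow_growth and visible_device_list. As a hedged illustration (not taken from that file), an equivalent ConfigProto can be built directly with config_pb2:

    from tensorflow.core.protobuf import config_pb2

    # Grow GPU memory on demand instead of reserving it all up front,
    # and expose only device 0 to this process.
    config = config_pb2.ConfigProto(
        gpu_options=config_pb2.GPUOptions(
            allow_growth=True,
            visible_device_list='0'))

    assert config.gpu_options.allow_growth
    assert config.gpu_options.visible_device_list == '0'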
|
/external/tensorflow/tensorflow/compiler/tf2xla/ |
D | xla_helpers.cc |
    151  xla::gpu::GpuExecutableRunOptions& gpu_options) {  in ResolveDeviceAssignment() argument
    245  gpu_options.set_gpu_global_device_ids(global_device_ids);  in ResolveDeviceAssignment()
    249  gpu_options.set_nccl_unique_id_callback(  in ResolveDeviceAssignment()
    252  run_options.set_gpu_executable_run_options(&gpu_options);  in ResolveDeviceAssignment()
|
D | xla_helpers.h | 198 xla::gpu::GpuExecutableRunOptions& gpu_options);
|
/external/tensorflow/tensorflow/compiler/tf2tensorrt/convert/ |
D | convert_graph_test.cc |
    80   GPUOptions* gpu_options = config->mutable_gpu_options();  in TEST() local
    82   gpu_options->mutable_experimental()->add_virtual_devices();  in TEST()
|
/external/tensorflow/tensorflow/compiler/jit/ |
D | xla_compile_on_demand_op.cc |
    82   xla::gpu::GpuExecutableRunOptions gpu_options;  in Run() local
    87   gpu_options));  in Run()
|
D | xla_gpu_device.cc | 118 session_options.config.gpu_options().visible_device_list(); in CreateDevices()
|
/external/tensorflow/tensorflow/compiler/xrt/kernels/ |
D | xrt_execute_op.cc |
    327  xla::gpu::GpuExecutableRunOptions gpu_options;  in RunExecutable() local
    334  gpu_options.set_gpu_global_device_ids(gpu_global_ids);  in RunExecutable()
    348  gpu_options.set_nccl_unique_id_callback(uid_callback);  in RunExecutable()
    350  run_options.set_gpu_executable_run_options(&gpu_options);  in RunExecutable()
|
/external/tensorflow/tensorflow/security/advisory/ |
D | tfsa-2022-059.md | 11 flr->config_proto()->gpu_options().visible_device_list();
|
/external/tensorflow/tensorflow/python/debug/lib/ |
D | grpc_tensorflow_server.py | 105 config = config_pb2.ConfigProto(gpu_options=config_pb2.GPUOptions(
|
/external/tensorflow/tensorflow/core/common_runtime/ |
D | collective_executor_mgr.cc | 38 config.gpu_options().experimental().collective_ring_order()), in CollectiveExecutorMgr()
|
/external/tensorflow/tensorflow/compiler/xrt/ |
D | xrt_device.cc | 117 GPUOptions gpu_options; in Acquire() local
|
/external/tensorflow/tensorflow/python/training/ |
D | server_lib_test.py |
    181  gpu_options=config_pb2.GPUOptions(per_process_gpu_memory_fraction=0.1))
    185  self.assertEqual(0.1, server.server_def.default_session_config.gpu_options.
    199  self.assertEqual(0.1, server.server_def.default_session_config.gpu_options.
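server_lib_test.py feeds a GPUOptions with per_process_gpu_memory_fraction=0.1 into a server's default session config. A minimal Python sketch of the same idea, assuming the tf.distribute.Server.create_local_server API; the 0.1 fraction simply mirrors the test above:

    import tensorflow as tf
    from tensorflow.core.protobuf import config_pb2

    # Cap sessions created against this server at 10% of each GPU's memory.
    config = config_pb2.ConfigProto(
        gpu_options=config_pb2.GPUOptions(per_process_gpu_memory_fraction=0.1))

    server = tf.distribute.Server.create_local_server(config=config, start=False)
    print(server.server_def.default_session_config.gpu_options
          .per_process_gpu_memory_fraction)  # 0.1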
|
/external/tensorflow/tensorflow/python/kernel_tests/sparse_ops/ |
D | sparse_xent_op_test.py | 74 config.gpu_options.per_process_gpu_memory_fraction = 0.3
|