Searched refs:max_split_size (Results 1 – 6 of 6) sorted by relevance

/external/pytorch/c10/core/
CachingDeviceAllocator.h:108   int64_t max_split_size = 0;   [member]
/external/pytorch/c10/cuda/
CUDAAllocatorConfig.h:18   static size_t max_split_size() {   [in max_split_size(), function]
CUDACachingAllocator.cpp:1091   stats.max_split_size =   [in DeviceCachingAllocator()]
    1092       static_cast<int64_t>(CUDAAllocatorConfig::max_split_size());   [in DeviceCachingAllocator()]
    1430   if (block->size >= CUDAAllocatorConfig::max_split_size())   [in alloc_found_block()]
    1480   if (block->size >= CUDAAllocatorConfig::max_split_size())   [in free()]
    2463   return (size < CUDAAllocatorConfig::max_split_size()) &&   [in should_split()]
    2525   if ((p.size() < CUDAAllocatorConfig::max_split_size()) &&   [in get_free_block()]
    2526       ((*it)->size >= CUDAAllocatorConfig::max_split_size()))   [in get_free_block()]
    2529   if ((p.size() >= CUDAAllocatorConfig::max_split_size()) &&   [in get_free_block()]
    2701   if (size >= CUDAAllocatorConfig::max_split_size())   [in alloc_block()]
    2729   if (CUDAAllocatorConfig::max_split_size() ==   [in release_available_cached_blocks()]
    [all …]
CUDACachingAllocator.h:156   size_t max_split_size;   [member]
/external/pytorch/torch/csrc/cuda/
memory_snapshot.cpp:375   max_split_size_s, int64_t(snapshot.config_metadata.max_split_size));   [in _memory_snapshot_pickled()]
Module.cpp:599   result["max_split_size"] = stats.max_split_size;   [in THCPModule_memoryStats()]
    825   int64_t(snapshot.config_metadata.max_split_size);   [in THCPModule_memorySnapshot()]
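
For context on what these hits are: max_split_size is the byte threshold the CUDA caching allocator compares request and block sizes against when deciding whether a cached block may be split or reused, and Module.cpp:599 above is where it surfaces in the Python-visible memory statistics. A minimal sketch of setting and reading it back, assuming a CUDA-enabled PyTorch build and using the documented PYTORCH_CUDA_ALLOC_CONF option max_split_size_mb (the exact stats key layout can vary between releases):

    import os

    # The allocator config is parsed when the CUDA caching allocator
    # initializes, so set the option before the first CUDA allocation
    # (setting it before importing torch is the safest place).
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"

    import torch

    assert torch.cuda.is_available()
    _ = torch.empty(1024, device="cuda")  # force allocator initialization

    stats = torch.cuda.memory_stats()
    # "max_split_size" is the key populated in THCPModule_memoryStats
    # (Module.cpp:599 above); per CUDACachingAllocator.cpp:1091-1092 it
    # mirrors CUDAAllocatorConfig::max_split_size() and is reported in bytes.
    print(stats.get("max_split_size"))  # expected: 512 * 1024 * 1024

When the option is left unset, the hits in get_free_block() and release_available_cached_blocks() above suggest the threshold effectively disables the oversize-block special casing; the truncated conditions in the search results do not show the default value, so consult CUDAAllocatorConfig.h for the authoritative behavior.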