/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkAMDMemoryAllocator.h"

#include "include/gpu/vk/GrVkExtensions.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/vk/GrVkInterface.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkUtil.h"

#ifndef SK_USE_VMA
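// When Skia is built without VMA support (SK_USE_VMA undefined), there is no allocator
// implementation to provide, so Make() returns nullptr and callers must handle that case.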
sk_sp<GrVkMemoryAllocator> GrVkAMDMemoryAllocator::Make(VkInstance instance,
                                                        VkPhysicalDevice physicalDevice,
                                                        VkDevice device,
                                                        uint32_t physicalDeviceVersion,
                                                        const GrVkExtensions* extensions,
                                                        sk_sp<const GrVkInterface> interface,
                                                        const GrVkCaps* caps) {
    return nullptr;
}
#else

sk_sp<GrVkMemoryAllocator> GrVkAMDMemoryAllocator::Make(VkInstance instance,
                                                        VkPhysicalDevice physicalDevice,
                                                        VkDevice device,
                                                        uint32_t physicalDeviceVersion,
                                                        const GrVkExtensions* extensions,
                                                        sk_sp<const GrVkInterface> interface,
                                                        const GrVkCaps* caps) {
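    // Hand VMA the Vulkan entry points Skia has already loaded on its GrVkInterface; VMA will
    // call Vulkan through this VmaVulkanFunctions table rather than resolving the functions
    // itself.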
#define GR_COPY_FUNCTION(NAME) functions.vk##NAME = interface->fFunctions.f##NAME
#define GR_COPY_FUNCTION_KHR(NAME) functions.vk##NAME##KHR = interface->fFunctions.f##NAME

    VmaVulkanFunctions functions;
    GR_COPY_FUNCTION(GetPhysicalDeviceProperties);
    GR_COPY_FUNCTION(GetPhysicalDeviceMemoryProperties);
    GR_COPY_FUNCTION(AllocateMemory);
    GR_COPY_FUNCTION(FreeMemory);
    GR_COPY_FUNCTION(MapMemory);
    GR_COPY_FUNCTION(UnmapMemory);
    GR_COPY_FUNCTION(FlushMappedMemoryRanges);
    GR_COPY_FUNCTION(InvalidateMappedMemoryRanges);
    GR_COPY_FUNCTION(BindBufferMemory);
    GR_COPY_FUNCTION(BindImageMemory);
    GR_COPY_FUNCTION(GetBufferMemoryRequirements);
    GR_COPY_FUNCTION(GetImageMemoryRequirements);
    GR_COPY_FUNCTION(CreateBuffer);
    GR_COPY_FUNCTION(DestroyBuffer);
    GR_COPY_FUNCTION(CreateImage);
    GR_COPY_FUNCTION(DestroyImage);
    GR_COPY_FUNCTION(CmdCopyBuffer);
    GR_COPY_FUNCTION_KHR(GetBufferMemoryRequirements2);
    GR_COPY_FUNCTION_KHR(GetImageMemoryRequirements2);
    GR_COPY_FUNCTION_KHR(BindBufferMemory2);
    GR_COPY_FUNCTION_KHR(BindImageMemory2);
    GR_COPY_FUNCTION_KHR(GetPhysicalDeviceMemoryProperties2);

    VmaAllocatorCreateInfo info;
    info.flags = VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT;
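    // Dedicated allocations need either Vulkan 1.1 or both the VK_KHR_dedicated_allocation and
    // VK_KHR_get_memory_requirements2 extensions.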
    if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        (extensions->hasExtension(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME, 1) &&
         extensions->hasExtension(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME, 1))) {
        info.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
    }

    info.physicalDevice = physicalDevice;
    info.device = device;
    // 4MB was picked for the size here by looking at memory usage of Android apps and runs of DM.
    // It seems to be a good compromise between not wasting unused allocated space and not making
    // too many small allocations. The AMD allocator starts making blocks at 1/8 the max size and
    // builds up the block size as needed before capping at the max set here.
    info.preferredLargeHeapBlockSize = 4*1024*1024;
    info.pAllocationCallbacks = nullptr;
    info.pDeviceMemoryCallbacks = nullptr;
    info.frameInUseCount = 0;
    info.pHeapSizeLimit = nullptr;
    info.pVulkanFunctions = &functions;
    info.pRecordSettings = nullptr;
    info.instance = instance;
    info.vulkanApiVersion = physicalDeviceVersion;

    VmaAllocator allocator;
    vmaCreateAllocator(&info, &allocator);

    return sk_sp<GrVkAMDMemoryAllocator>(new GrVkAMDMemoryAllocator(
            allocator, std::move(interface), caps->mustUseCoherentHostVisibleMemory()));
}
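
// A minimal usage sketch (hypothetical caller code with made-up variable names; the real setup
// path in GrVkGpu differs):
//
//     sk_sp<GrVkMemoryAllocator> allocator = GrVkAMDMemoryAllocator::Make(
//             instance, physicalDevice, device, physicalDeviceVersion, &extensions,
//             grVkInterface, &caps);
//     if (!allocator) {
//         // Make() returns nullptr when SK_USE_VMA is not defined; handle that here.
//     }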

GrVkAMDMemoryAllocator::GrVkAMDMemoryAllocator(VmaAllocator allocator,
                                               sk_sp<const GrVkInterface> interface,
                                               bool mustUseCoherentHostVisibleMemory)
        : fAllocator(allocator)
        , fInterface(std::move(interface))
        , fMustUseCoherentHostVisibleMemory(mustUseCoherentHostVisibleMemory) {}

GrVkAMDMemoryAllocator::~GrVkAMDMemoryAllocator() {
    vmaDestroyAllocator(fAllocator);
    fAllocator = VK_NULL_HANDLE;
}

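// Allocates device-local memory for an image through VMA, translating Skia's
// AllocationPropertyFlags into the corresponding VMA/Vulkan flags (dedicated, lazily allocated,
// protected). On success the VmaAllocation handle is returned through backendMemory.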
VkResult GrVkAMDMemoryAllocator::allocateImageMemory(VkImage image, AllocationPropertyFlags flags,
                                                     GrVkBackendMemory* backendMemory) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    info.preferredFlags = 0;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;

    if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
        info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }

    if (AllocationPropertyFlags::kLazyAllocation & flags) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }

    if (AllocationPropertyFlags::kProtected & flags) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vmaAllocateMemoryForImage(fAllocator, image, &info, &allocation, nullptr);
    if (VK_SUCCESS == result) {
        *backendMemory = (GrVkBackendMemory)allocation;
    }
    return result;
}

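// Allocates memory for a buffer, choosing required/preferred memory property flags based on how
// the buffer will be used (GPU-only, CPU writes with GPU reads, or CPU<->GPU transfers).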
VkResult GrVkAMDMemoryAllocator::allocateBufferMemory(VkBuffer buffer, BufferUsage usage,
                                                      AllocationPropertyFlags flags,
                                                      GrVkBackendMemory* backendMemory) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;

    switch (usage) {
        case BufferUsage::kGpuOnly:
            info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            info.preferredFlags = 0;
            break;
        case BufferUsage::kCpuWritesGpuReads:
            // When doing cpu writes and gpu reads the general rule of thumb is to use coherent
            // memory. This relies on the fact that we are not doing any cpu reads and that the
            // cpu writes are sequential. For sparse writes we'd want cpu cached memory; however,
            // we don't do those types of writes in Skia.
            //
            // TODO: In the future there may be times where specific types of memory could benefit
            // from memory that is both coherent and cached. Typically such memory lets the gpu
            // read cpu writes from the cache without the writes needing to be flushed out of the
            // cache first. The reverse is not true, and gpu writes tend to invalidate the cache
            // regardless. Also, these cached gpu reads are typically lower bandwidth than reads
            // from non-cached memory. For now Skia doesn't really have a need or want for this
            // type of memory. But if we ever do, we could pass in an AllocationPropertyFlag that
            // requests the cached property.
            info.requiredFlags =
                    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            break;
        case BufferUsage::kTransfersFromCpuToGpu:
            info.requiredFlags =
                    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
            info.preferredFlags = 0;
            break;
        case BufferUsage::kTransfersFromGpuToCpu:
            info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
            break;
    }

    if (fMustUseCoherentHostVisibleMemory &&
        (info.requiredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    }

    if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
        info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }

    if ((AllocationPropertyFlags::kLazyAllocation & flags) && BufferUsage::kGpuOnly == usage) {
        info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }

    if (AllocationPropertyFlags::kPersistentlyMapped & flags) {
        SkASSERT(BufferUsage::kGpuOnly != usage);
        info.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vmaAllocateMemoryForBuffer(fAllocator, buffer, &info, &allocation, nullptr);
    if (VK_SUCCESS == result) {
        *backendMemory = (GrVkBackendMemory)allocation;
    }

    return result;
}

void GrVkAMDMemoryAllocator::freeMemory(const GrVkBackendMemory& memoryHandle) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    vmaFreeMemory(fAllocator, allocation);
}

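// Fills out a GrVkAlloc from VMA's allocation info, exposing whether the underlying memory type
// is host visible (mappable), non-coherent, and lazily allocated via GrVkAlloc flags.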
void GrVkAMDMemoryAllocator::getAllocInfo(const GrVkBackendMemory& memoryHandle,
                                          GrVkAlloc* alloc) const {
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    VmaAllocationInfo vmaInfo;
    vmaGetAllocationInfo(fAllocator, allocation, &vmaInfo);

    VkMemoryPropertyFlags memFlags;
    vmaGetMemoryTypeProperties(fAllocator, vmaInfo.memoryType, &memFlags);

    uint32_t flags = 0;
    if (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT & memFlags) {
        flags |= GrVkAlloc::kMappable_Flag;
    }
    if (!SkToBool(VK_MEMORY_PROPERTY_HOST_COHERENT_BIT & memFlags)) {
        flags |= GrVkAlloc::kNoncoherent_Flag;
    }
    if (VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT & memFlags) {
        flags |= GrVkAlloc::kLazilyAllocated_Flag;
    }

    alloc->fMemory = vmaInfo.deviceMemory;
    alloc->fOffset = vmaInfo.offset;
    alloc->fSize = vmaInfo.size;
    alloc->fFlags = flags;
    alloc->fBackendMemory = memoryHandle;
}

VkResult GrVkAMDMemoryAllocator::mapMemory(const GrVkBackendMemory& memoryHandle, void** data) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    return vmaMapMemory(fAllocator, allocation, data);
}

void GrVkAMDMemoryAllocator::unmapMemory(const GrVkBackendMemory& memoryHandle) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    vmaUnmapMemory(fAllocator, allocation);
}

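// Flush and invalidate are only required for writes/reads on non-coherent host-visible memory;
// both map directly onto VMA's flush/invalidate calls for the given range of the allocation.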
VkResult GrVkAMDMemoryAllocator::flushMemory(const GrVkBackendMemory& memoryHandle,
                                             VkDeviceSize offset, VkDeviceSize size) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    return vmaFlushAllocation(fAllocator, allocation, offset, size);
}

VkResult GrVkAMDMemoryAllocator::invalidateMemory(const GrVkBackendMemory& memoryHandle,
                                                  VkDeviceSize offset, VkDeviceSize size) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    return vmaInvalidateAllocation(fAllocator, allocation, offset, size);
}

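// Aggregate statistics computed by vmaCalculateStats over all of VMA's heaps and blocks:
// usedBytes counts bytes in live allocations, while unusedBytes counts space reserved in
// VkDeviceMemory blocks that is not currently suballocated.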
uint64_t GrVkAMDMemoryAllocator::totalUsedMemory() const {
    VmaStats stats;
    vmaCalculateStats(fAllocator, &stats);
    return stats.total.usedBytes;
}

uint64_t GrVkAMDMemoryAllocator::totalAllocatedMemory() const {
    VmaStats stats;
    vmaCalculateStats(fAllocator, &stats);
    return stats.total.usedBytes + stats.total.unusedBytes;
}

#endif // SK_USE_VMA