/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkAMDMemoryAllocator.h"

#include <chrono>
#include <mutex>
#include <thread>

#include "include/core/SkExecutor.h"
#include "include/core/SkLog.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "src/core/SkTraceEvent.h"
#include "src/core/SkUtils.h"
#include "src/gpu/vk/GrVkInterface.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkUtil.h"

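// Single-thread FIFO executor used to run the pre-allocation work off the calling thread.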
static SkExecutor& GetThreadPool() {
    static std::unique_ptr<SkExecutor> executor = SkExecutor::MakeFIFOThreadPool(1, false);
    return *executor;
}

#ifndef SK_USE_VMA
sk_sp<GrVkMemoryAllocator> GrVkAMDMemoryAllocator::Make(VkInstance instance,
                                                        VkPhysicalDevice physicalDevice,
                                                        VkDevice device,
                                                        uint32_t physicalDeviceVersion,
                                                        const GrVkExtensions* extensions,
                                                        sk_sp<const GrVkInterface> interface,
                                                        const GrVkCaps* caps,
                                                        bool cacheFlag,
                                                        size_t maxBlockCount) {
    return nullptr;
}
#else

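// A minimal caller sketch (variable names hypothetical; assumes a fully initialized Vulkan
// context and a loaded GrVkInterface):
//     sk_sp<GrVkMemoryAllocator> allocator = GrVkAMDMemoryAllocator::Make(
//             instance, physicalDevice, device, physicalDeviceVersion, &extensions,
//             std::move(grVkInterface), &caps, /*cacheFlag=*/true, /*maxBlockCount=*/16);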
sk_sp<GrVkMemoryAllocator> GrVkAMDMemoryAllocator::Make(VkInstance instance,
                                                        VkPhysicalDevice physicalDevice,
                                                        VkDevice device,
                                                        uint32_t physicalDeviceVersion,
                                                        const GrVkExtensions* extensions,
                                                        sk_sp<const GrVkInterface> interface,
                                                        const GrVkCaps* caps,
                                                        bool cacheFlag,
                                                        size_t maxBlockCount) {
#define GR_COPY_FUNCTION(NAME) functions.vk##NAME = interface->fFunctions.f##NAME
#define GR_COPY_FUNCTION_KHR(NAME) functions.vk##NAME##KHR = interface->fFunctions.f##NAME
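// For example, GR_COPY_FUNCTION(AllocateMemory) expands to:
//     functions.vkAllocateMemory = interface->fFunctions.fAllocateMemory;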

    VmaVulkanFunctions functions;
    GR_COPY_FUNCTION(GetPhysicalDeviceProperties);
    GR_COPY_FUNCTION(GetPhysicalDeviceMemoryProperties);
    GR_COPY_FUNCTION(AllocateMemory);
    GR_COPY_FUNCTION(FreeMemory);
    GR_COPY_FUNCTION(MapMemory);
    GR_COPY_FUNCTION(UnmapMemory);
    GR_COPY_FUNCTION(FlushMappedMemoryRanges);
    GR_COPY_FUNCTION(InvalidateMappedMemoryRanges);
    GR_COPY_FUNCTION(BindBufferMemory);
    GR_COPY_FUNCTION(BindImageMemory);
    GR_COPY_FUNCTION(GetBufferMemoryRequirements);
    GR_COPY_FUNCTION(GetImageMemoryRequirements);
    GR_COPY_FUNCTION(CreateBuffer);
    GR_COPY_FUNCTION(DestroyBuffer);
    GR_COPY_FUNCTION(CreateImage);
    GR_COPY_FUNCTION(DestroyImage);
    GR_COPY_FUNCTION(CmdCopyBuffer);
    GR_COPY_FUNCTION_KHR(GetBufferMemoryRequirements2);
    GR_COPY_FUNCTION_KHR(GetImageMemoryRequirements2);
    GR_COPY_FUNCTION_KHR(BindBufferMemory2);
    GR_COPY_FUNCTION_KHR(BindImageMemory2);
    GR_COPY_FUNCTION_KHR(GetPhysicalDeviceMemoryProperties2);

    VmaAllocatorCreateInfo info;
    // OH ISSUE: leave out VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT so VMA's internal
    // mutexes protect the allocator (vma lock protect).
    info.flags = 0;
    if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        (extensions->hasExtension(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME, 1) &&
         extensions->hasExtension(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME, 1))) {
        info.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
    }

    info.physicalDevice = physicalDevice;
    info.device = device;
    // 4MB was picked for the default size here by looking at memory usage of Android apps and
    // runs of DM. It seems to be a good compromise of not wasting unused allocated space and not
    // making too many small allocations. The AMD allocator will start making blocks at 1/8 the
    // max size and builds up block size as needed before capping at the max set here.
    if (cacheFlag) {
        info.preferredLargeHeapBlockSize = SkGetVmaBlockSizeMB() * 1024 * 1024; // MB -> bytes
    } else {
        info.preferredLargeHeapBlockSize = 4 * 1024 * 1024;
    }
    info.maxBlockCount = maxBlockCount;
    info.pAllocationCallbacks = nullptr;
    info.pDeviceMemoryCallbacks = nullptr;
    info.frameInUseCount = 0;
    info.pHeapSizeLimit = nullptr;
    info.pVulkanFunctions = &functions;
    info.pRecordSettings = nullptr;
    info.instance = instance;
    info.vulkanApiVersion = physicalDeviceVersion;

    VmaAllocator allocator;
    vmaCreateAllocator(&info, &allocator);

    return sk_sp<GrVkAMDMemoryAllocator>(new GrVkAMDMemoryAllocator(
#ifdef NOT_USE_PRE_ALLOC
            allocator, std::move(interface), caps->mustUseCoherentHostVisibleMemory()));
#else
            allocator, std::move(interface), caps->mustUseCoherentHostVisibleMemory(), cacheFlag));
#endif
}

GrVkAMDMemoryAllocator::GrVkAMDMemoryAllocator(VmaAllocator allocator,
                                               sk_sp<const GrVkInterface> interface,
#ifdef NOT_USE_PRE_ALLOC
                                               bool mustUseCoherentHostVisibleMemory)
#else
                                               bool mustUseCoherentHostVisibleMemory,
                                               bool cacheFlag)
#endif
        : fAllocator(allocator)
        , fInterface(std::move(interface))
#ifdef NOT_USE_PRE_ALLOC
        , fMustUseCoherentHostVisibleMemory(mustUseCoherentHostVisibleMemory) {}
#else
        , fMustUseCoherentHostVisibleMemory(mustUseCoherentHostVisibleMemory)
        , fCacheFlag(cacheFlag) {}
#endif

GrVkAMDMemoryAllocator::~GrVkAMDMemoryAllocator() {
    vmaDestroyAllocator(fAllocator);
    fAllocator = VK_NULL_HANDLE;
}

// OH ISSUE: VMA preAlloc
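// First-time warm-up of the reserve: create a throwaway VkImage, allocate reserved memory for
// it, and bind the two so the block is committed up front; the fake image is then destroyed and
// the reserved allocation is handed back via vmaFreeReservedMemory().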
static void FirstPreAllocMemory(VmaAllocator allocator, VmaAllocationCreateInfo info) {
    if (allocator == nullptr) {
        return;
    }
    VkImage fakeImage;
    VmaAllocation reservedAllocation;
    VkResult result = vmaCreateFakeImage(allocator, &fakeImage);
    if (result != VK_SUCCESS) {
        SK_LOGE("FirstPreAllocMemory: CreateFakeImage Failed!! VkResult %d", result);
        return;
    }
    {
        HITRACE_OHOS_NAME_FMT_ALWAYS("vmaAllocateReservedMemoryForImage");
        result = vmaAllocateReservedMemoryForImage(allocator, fakeImage, &info,
                                                   &reservedAllocation, nullptr);
    }
    if (result != VK_SUCCESS) {
        SK_LOGE("FirstPreAllocMemory: AllocateReservedMemory Failed!! VkResult %d", result);
        vmaDestroyFakeImage(allocator, fakeImage);
        return;
    }
    {
        HITRACE_OHOS_NAME_FMT_ALWAYS("vmaBindImageMemory");
        result = vmaBindImageMemory(allocator, reservedAllocation, fakeImage);
    }
    if (result != VK_SUCCESS) {
        SK_LOGE("FirstPreAllocMemory: BindImageMemory Failed!! VkResult %d", result);
    }
    vmaDestroyFakeImage(allocator, fakeImage);
    vmaFreeReservedMemory(allocator, reservedAllocation);
}

// OH ISSUE: VMA preAlloc
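// Replenish the reserve: bind an existing reserved allocation to a fresh throwaway VkImage so
// the block stays committed, then destroy the image and hand the allocation back via
// vmaFreeReservedMemory().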
static void PreAllocMemory(VmaAllocator allocator, VmaAllocation reservedAllocation) {
    if (allocator == nullptr) {
        return;
    }
    VkImage fakeImage;
    VkResult result = vmaCreateFakeImage(allocator, &fakeImage);
    if (result != VK_SUCCESS) {
        SK_LOGE("PreAllocMemory: CreateFakeImage Failed!! VkResult %d", result);
        return;
    }
    {
        HITRACE_OHOS_NAME_FMT_ALWAYS("vmaBindImageMemory");
        result = vmaBindImageMemory(allocator, reservedAllocation, fakeImage);
    }
    if (result != VK_SUCCESS) {
        SK_LOGE("PreAllocMemory: BindImageMemory Failed!! VkResult %d", result);
    }
    vmaDestroyFakeImage(allocator, fakeImage);
    vmaFreeReservedMemory(allocator, reservedAllocation);
}

VkResult GrVkAMDMemoryAllocator::allocateImageMemory(VkImage image, AllocationPropertyFlags flags,
                                                     GrVkBackendMemory* backendMemory) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    info.preferredFlags = 0;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;

    if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
        info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }

    if (AllocationPropertyFlags::kLazyAllocation & flags) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }

    if (AllocationPropertyFlags::kProtected & flags) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vmaAllocateMemoryForImage(fAllocator, image, &info, &allocation, nullptr);
    if (VK_SUCCESS != result) {
        return result;
    }
    *backendMemory = (GrVkBackendMemory)allocation;

    // OH ISSUE: VMA preAlloc
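    // If this allocation just made VMA create a brand-new block, try to swap the allocation into
    // the reserved (pre-allocated) block. VK_NOT_READY means no reserved block is available yet,
    // so the first one is warmed up on the worker thread. VK_SUCCESS means the swap succeeded:
    // the reserve is replenished on the worker thread after a configurable delay, and a fresh
    // allocation is made for this image.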
    bool newBlockFlag = false;
    vmaGetNewBlockStats(allocation, &newBlockFlag);
    if (newBlockFlag && fCacheFlag && SkGetPreAllocFlag()) {
        HITRACE_OHOS_NAME_FMT_ALWAYS("GrVkAMDMemoryAllocator trigger preAlloc");
        vmaClearNewBlockStats(allocation);
        std::lock_guard<std::mutex> lock(mPreAllocMutex);
        // After the swap, the allocation belongs to the VMA reserved block.
        VkResult result2 = vmaSwapReservedBlock(fAllocator, image, &info, &allocation, nullptr);
        if (result2 == VK_NOT_READY) {
            GetThreadPool().add([=] {
                std::lock_guard<std::mutex> lock(mPreAllocMutex);
                HITRACE_OHOS_NAME_FMT_ALWAYS("FirstPreAllocMemory");
                FirstPreAllocMemory(fAllocator, info);
            });
            return result;
        }
        if (result2 == VK_SUCCESS) {
            GetThreadPool().add([=] {
                std::this_thread::sleep_for(std::chrono::microseconds(SkGetPreAllocDelay()));
                std::lock_guard<std::mutex> lock(mPreAllocMutex);
                HITRACE_OHOS_NAME_FMT_ALWAYS("PreAllocMemory");
                PreAllocMemory(fAllocator, allocation);
            });
            VmaAllocation newAllocation;
            VkResult result3 =
                    vmaAllocateMemoryForImage(fAllocator, image, &info, &newAllocation, nullptr);
            if (result3 == VK_SUCCESS) {
                *backendMemory = (GrVkBackendMemory)newAllocation;
            }
            return result3;
        }
    }
    return result;
}

VkResult GrVkAMDMemoryAllocator::allocateBufferMemory(VkBuffer buffer, BufferUsage usage,
                                                      AllocationPropertyFlags flags,
                                                      GrVkBackendMemory* backendMemory) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;

    switch (usage) {
        case BufferUsage::kGpuOnly:
            info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            info.preferredFlags = 0;
            break;
        case BufferUsage::kCpuWritesGpuReads:
            // When doing CPU writes and GPU reads, the general rule of thumb is to use coherent
            // memory. This relies on the fact that we are not doing any CPU reads and the CPU
            // writes are sequential. For sparse writes we'd want CPU-cached memory; however, we
            // don't do those types of writes in Skia.
            //
            // TODO: In the future there may be times where specific types of memory could benefit
            // from coherent and cached memory. Typically these allow the GPU to read CPU writes
            // from the cache without needing to flush the writes throughout the cache. The
            // reverse is not true, and GPU writes tend to invalidate the cache regardless. Also,
            // these GPU cache read accesses are typically lower bandwidth than non-cached memory.
            // For now Skia doesn't really have a need or want of this type of memory. But if we
            // ever do, we could pass in an AllocationPropertyFlag that requests the cached
            // property.
            info.requiredFlags =
                    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            break;
        case BufferUsage::kTransfersFromCpuToGpu:
            info.requiredFlags =
                    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
            break;
        case BufferUsage::kTransfersFromGpuToCpu:
            info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
            break;
    }

    if (fMustUseCoherentHostVisibleMemory &&
        (info.requiredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    }

    if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
        info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }

    if ((AllocationPropertyFlags::kLazyAllocation & flags) && BufferUsage::kGpuOnly == usage) {
        info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }

    if (AllocationPropertyFlags::kPersistentlyMapped & flags) {
        SkASSERT(BufferUsage::kGpuOnly != usage);
        info.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vmaAllocateMemoryForBuffer(fAllocator, buffer, &info, &allocation, nullptr);
    if (VK_SUCCESS == result) {
        *backendMemory = (GrVkBackendMemory)allocation;
    }

    return result;
}

void GrVkAMDMemoryAllocator::freeMemory(const GrVkBackendMemory& memoryHandle) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    vmaFreeMemory(fAllocator, allocation);
}

void GrVkAMDMemoryAllocator::getAllocInfo(const GrVkBackendMemory& memoryHandle,
                                          GrVkAlloc* alloc) const {
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    VmaAllocationInfo vmaInfo;
    vmaGetAllocationInfo(fAllocator, allocation, &vmaInfo);

    VkMemoryPropertyFlags memFlags;
    vmaGetMemoryTypeProperties(fAllocator, vmaInfo.memoryType, &memFlags);

    uint32_t flags = 0;
    if (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT & memFlags) {
        flags |= GrVkAlloc::kMappable_Flag;
    }
    if (!SkToBool(VK_MEMORY_PROPERTY_HOST_COHERENT_BIT & memFlags)) {
        flags |= GrVkAlloc::kNoncoherent_Flag;
    }
    if (VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT & memFlags) {
        flags |= GrVkAlloc::kLazilyAllocated_Flag;
    }

    alloc->fMemory = vmaInfo.deviceMemory;
    alloc->fOffset = vmaInfo.offset;
    alloc->fSize = vmaInfo.size;
    alloc->fFlags = flags;
    alloc->fBackendMemory = memoryHandle;
    alloc->fAllocator = (GrVkMemoryAllocator*)this;
}

VkResult GrVkAMDMemoryAllocator::mapMemory(const GrVkBackendMemory& memoryHandle, void** data) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    return vmaMapMemory(fAllocator, allocation, data);
}

void GrVkAMDMemoryAllocator::unmapMemory(const GrVkBackendMemory& memoryHandle) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    vmaUnmapMemory(fAllocator, allocation);
}

VkResult GrVkAMDMemoryAllocator::flushMemory(const GrVkBackendMemory& memoryHandle,
                                             VkDeviceSize offset, VkDeviceSize size) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    return vmaFlushAllocation(fAllocator, allocation, offset, size);
}

VkResult GrVkAMDMemoryAllocator::invalidateMemory(const GrVkBackendMemory& memoryHandle,
                                                  VkDeviceSize offset, VkDeviceSize size) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    return vmaInvalidateAllocation(fAllocator, allocation, offset, size);
}

uint64_t GrVkAMDMemoryAllocator::totalUsedMemory() const {
    VmaStats stats;
    vmaCalculateStats(fAllocator, &stats);
    return stats.total.usedBytes;
}

uint64_t GrVkAMDMemoryAllocator::totalAllocatedMemory() const {
    VmaStats stats;
    vmaCalculateStats(fAllocator, &stats);
    return stats.total.usedBytes + stats.total.unusedBytes;
}

void GrVkAMDMemoryAllocator::dumpVmaStats(SkString* out, const char* sep) const {
    constexpr int MB = 1024 * 1024;
    if (out == nullptr || sep == nullptr) {
        return;
    }
    bool flag = SkGetMemoryOptimizedFlag();
    out->appendf("vma_flag: %d %s", flag, sep);
    if (!flag) {
        return;
    }
    VmaStats stats;
    vmaCalculateStats(fAllocator, &stats);
    uint64_t free = stats.total.unusedBytes;
    uint64_t used = stats.total.usedBytes;
    uint64_t total = free + used;
    auto maxBlockCount = SkGetVmaBlockCountMax();
    out->appendf("vma_free: %llu (%llu MB)%s", free, free / MB, sep);
    out->appendf("vma_used: %llu (%llu MB)%s", used, used / MB, sep);
    out->appendf("vma_total: %llu (%llu MB)%s", total, total / MB, sep);
    out->appendf("vma_cacheBlockSize: %d MB%s", SkGetVmaBlockSizeMB(), sep);
    out->appendf("vma_cacheBlockCount: %llu / %llu%s",
                 stats.total.blockCount <= maxBlockCount ? stats.total.blockCount : maxBlockCount,
                 maxBlockCount, sep);
    out->appendf("vma_dedicatedBlockCount: %llu%s",
                 stats.total.blockCount <= maxBlockCount
                         ? 0 : stats.total.blockCount - maxBlockCount,
                 sep);
    out->appendf("vma_allocationCount: %u%s", stats.total.allocationCount, sep);
    out->appendf("vma_unusedRangeCount: %u%s", stats.total.unusedRangeCount, sep);
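    // Allocation and unused-range sizes below are reported as min / avg / max.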
    out->appendf("vma_allocationSize: %llu / %llu / %llu%s",
                 stats.total.allocationSizeMin, stats.total.allocationSizeAvg,
                 stats.total.allocationSizeMax, sep);
    out->appendf("vma_unusedRangeSize: %llu / %llu / %llu%s",
                 stats.total.unusedRangeSizeMin, stats.total.unusedRangeSizeAvg,
                 stats.total.unusedRangeSizeMax, sep);
    uint32_t blockSize = 0;
    vmaGetPreAllocBlockSize(fAllocator, &blockSize);
    out->appendf("vma_preAllocBlockSize: %u / 1%s", blockSize, sep);
}

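// Despite the name, this only releases fully empty VMA blocks back to the driver (with optional
// stat dumps when VMA debugging is enabled); it does not move live allocations between blocks.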
void GrVkAMDMemoryAllocator::vmaDefragment() {
    if (!fCacheFlag) {
        return;
    }
    bool flag = SkGetVmaDefragmentOn();
    if (!flag) {
        return;
    }
    bool debugFlag = SkGetVmaDebugFlag();
    if (!debugFlag) {
        std::lock_guard<std::mutex> lock(mPreAllocMutex);
        vmaFreeEmptyBlock(fAllocator);
        return;
    }

    // dfx: dump stats before freeing empty blocks
    SkString debugInfo;
    dumpVmaStats(&debugInfo);
    SkDebugf("GrVkAMDMemoryAllocator::vmaDefragment() before: %s", debugInfo.c_str());
    HITRACE_OHOS_NAME_FMT_ALWAYS("GrVkAMDMemoryAllocator::vmaDefragment() before: %s",
                                 debugInfo.c_str());

    {
        std::lock_guard<std::mutex> lock(mPreAllocMutex);
        vmaFreeEmptyBlock(fAllocator);
    }

    // dfx: dump stats after freeing empty blocks
    debugInfo.reset();
    dumpVmaStats(&debugInfo);
    SkDebugf("GrVkAMDMemoryAllocator::vmaDefragment() after: %s", debugInfo.c_str());
    HITRACE_OHOS_NAME_FMT_ALWAYS("GrVkAMDMemoryAllocator::vmaDefragment() after: %s",
                                 debugInfo.c_str());
}

#endif // SK_USE_VMA