/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkAMDMemoryAllocator.h"

#include <chrono>
#include <memory>
#include <mutex>
#include <semaphore>
#include <thread>

#include "include/core/SkExecutor.h"
#include "include/core/SkLog.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "src/core/SkTraceEvent.h"
#include "src/core/SkUtils.h"
#include "src/gpu/vk/GrVkInterface.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkUtil.h"

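// OH ISSUE: lazily constructed single-thread FIFO executor used to run the
// asynchronous pre-allocation tasks scheduled from allocateImageMemory().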
static SkExecutor& GetThreadPool() {
    static std::unique_ptr<SkExecutor> executor = SkExecutor::MakeFIFOThreadPool(1, false);
    return *executor;
}

#ifndef SK_USE_VMA
sk_sp<GrVkMemoryAllocator> GrVkAMDMemoryAllocator::Make(VkInstance instance,
                                                        VkPhysicalDevice physicalDevice,
                                                        VkDevice device,
                                                        uint32_t physicalDeviceVersion,
                                                        const GrVkExtensions* extensions,
                                                        sk_sp<const GrVkInterface> interface,
                                                        const GrVkCaps* caps,
                                                        bool cacheFlag,
                                                        size_t maxBlockCount) {
    return nullptr;
}
#else

sk_sp<GrVkMemoryAllocator> GrVkAMDMemoryAllocator::Make(VkInstance instance,
                                                        VkPhysicalDevice physicalDevice,
                                                        VkDevice device,
                                                        uint32_t physicalDeviceVersion,
                                                        const GrVkExtensions* extensions,
                                                        sk_sp<const GrVkInterface> interface,
                                                        const GrVkCaps* caps,
                                                        bool cacheFlag,
                                                        size_t maxBlockCount) {
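    // Copy the Vulkan entry points Skia already loaded into GrVkInterface over
    // to the VmaVulkanFunctions table so VMA calls through the same pointers.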
#define GR_COPY_FUNCTION(NAME) functions.vk##NAME = interface->fFunctions.f##NAME
#define GR_COPY_FUNCTION_KHR(NAME) functions.vk##NAME##KHR = interface->fFunctions.f##NAME

    VmaVulkanFunctions functions;
    GR_COPY_FUNCTION(GetPhysicalDeviceProperties);
    GR_COPY_FUNCTION(GetPhysicalDeviceMemoryProperties);
    GR_COPY_FUNCTION(AllocateMemory);
    GR_COPY_FUNCTION(FreeMemory);
    GR_COPY_FUNCTION(MapMemory);
    GR_COPY_FUNCTION(UnmapMemory);
    GR_COPY_FUNCTION(FlushMappedMemoryRanges);
    GR_COPY_FUNCTION(InvalidateMappedMemoryRanges);
    GR_COPY_FUNCTION(BindBufferMemory);
    GR_COPY_FUNCTION(BindImageMemory);
    GR_COPY_FUNCTION(GetBufferMemoryRequirements);
    GR_COPY_FUNCTION(GetImageMemoryRequirements);
    GR_COPY_FUNCTION(CreateBuffer);
    GR_COPY_FUNCTION(DestroyBuffer);
    GR_COPY_FUNCTION(CreateImage);
    GR_COPY_FUNCTION(DestroyImage);
    GR_COPY_FUNCTION(CmdCopyBuffer);
    GR_COPY_FUNCTION_KHR(GetBufferMemoryRequirements2);
    GR_COPY_FUNCTION_KHR(GetImageMemoryRequirements2);
    GR_COPY_FUNCTION_KHR(BindBufferMemory2);
    GR_COPY_FUNCTION_KHR(BindImageMemory2);
    GR_COPY_FUNCTION_KHR(GetPhysicalDeviceMemoryProperties2);

    VmaAllocatorCreateInfo info;
    // OH ISSUE: keep VMA's internal lock protection enabled by leaving the
    // externally-synchronized bit out of the flags, so the allocator is safe
    // to use from the pre-allocation thread as well.
    info.flags = 0;
    // Use VMA's dedicated-allocation support when VK_KHR_dedicated_allocation
    // is usable: core as of Vulkan 1.1, or via the two extensions checked here.
    if (physicalDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        (extensions->hasExtension(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME, 1) &&
         extensions->hasExtension(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME, 1))) {
        info.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
    }

    info.physicalDevice = physicalDevice;
    info.device = device;
    // 4MB was picked for the size here by looking at memory usage of Android apps and runs of DM.
    // It seems to be a good compromise of not wasting unused allocated space and not making too
    // many small allocations. The AMD allocator will start making blocks at 1/8 the max size and
    // builds up block size as needed before capping at the max set here.
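    // OH ISSUE: cache-backed allocators take their block size from
    // SkGetVmaBlockSizeMB() instead of the fixed 4MB default.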
    if (cacheFlag) {
        info.preferredLargeHeapBlockSize = SkGetVmaBlockSizeMB() * 1024 * 1024; // MB -> bytes
    } else {
        info.preferredLargeHeapBlockSize = 4 * 1024 * 1024;
    }
    info.maxBlockCount = maxBlockCount;
    info.pAllocationCallbacks = nullptr;
    info.pDeviceMemoryCallbacks = nullptr;
    info.frameInUseCount = 0;
    info.pHeapSizeLimit = nullptr;
    info.pVulkanFunctions = &functions;
    info.pRecordSettings = nullptr;
    info.instance = instance;
    info.vulkanApiVersion = physicalDeviceVersion;

    VmaAllocator allocator;
    vmaCreateAllocator(&info, &allocator);

    return sk_sp<GrVkAMDMemoryAllocator>(new GrVkAMDMemoryAllocator(
#ifdef NOT_USE_PRE_ALLOC
            allocator, std::move(interface), caps->mustUseCoherentHostVisibleMemory()));
#else
            allocator, std::move(interface), caps->mustUseCoherentHostVisibleMemory(), cacheFlag));
#endif
}

GrVkAMDMemoryAllocator::GrVkAMDMemoryAllocator(VmaAllocator allocator,
                                               sk_sp<const GrVkInterface> interface,
#ifdef NOT_USE_PRE_ALLOC
                                               bool mustUseCoherentHostVisibleMemory)
#else
                                               bool mustUseCoherentHostVisibleMemory,
                                               bool cacheFlag)
#endif
        : fAllocator(allocator)
        , fInterface(std::move(interface))
#ifdef NOT_USE_PRE_ALLOC
        , fMustUseCoherentHostVisibleMemory(mustUseCoherentHostVisibleMemory) {}
#else
        , fMustUseCoherentHostVisibleMemory(mustUseCoherentHostVisibleMemory)
        , fCacheFlag(cacheFlag) {}
#endif

GrVkAMDMemoryAllocator::~GrVkAMDMemoryAllocator() {
    vmaDestroyAllocator(fAllocator);
    fAllocator = VK_NULL_HANDLE;
}

// OH ISSUE: VMA preAlloc
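// Creates the reserved block for the first time: a throwaway "fake" image is
// created so VMA can allocate and bind reserved memory shaped like an image
// allocation; the fake image is then destroyed and the reserved memory handed
// back, presumably leaving the block resident for later swaps. (The vma*Fake*
// and vma*Reserved* helpers are OH-specific extensions to VMA.)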
static void FirstPreAllocMemory(VmaAllocator allocator, VmaAllocationCreateInfo info) {
    VkImage fakeImage;
    VmaAllocation reservedAllocation;
    if (allocator == nullptr) {
        return;
    }
    VkResult result = vmaCreateFakeImage(allocator, &fakeImage);
    if (result != VK_SUCCESS) {
        SK_LOGE("FirstPreAllocMemory: CreateFakeImage Failed!! VkResult %d", result);
        return;
    }
    {
        HITRACE_OHOS_NAME_FMT_ALWAYS("vmaAllocateReservedMemoryForImage");
        result = vmaAllocateReservedMemoryForImage(allocator, fakeImage, &info, &reservedAllocation, nullptr);
    }
    if (result != VK_SUCCESS) {
        SK_LOGE("FirstPreAllocMemory: AllocateReservedMemory Failed!! VkResult %d", result);
        vmaDestroyFakeImage(allocator, fakeImage);
        return;
    }
    {
        HITRACE_OHOS_NAME_FMT_ALWAYS("vmaBindImageMemory");
        result = vmaBindImageMemory(allocator, reservedAllocation, fakeImage);
    }
    if (result != VK_SUCCESS) {
        SK_LOGE("FirstPreAllocMemory: BindImageMemory Failed!! VkResult %d", result);
    }
    vmaDestroyFakeImage(allocator, fakeImage);
    vmaFreeReservedMemory(allocator, reservedAllocation);
}

// OH ISSUE: VMA preAlloc
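// Refills the reserve after a successful swap: binds the already-allocated
// reserved memory to a fresh fake image, then releases both back to the
// allocator (see the VK_SUCCESS branch of allocateImageMemory below).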
static void PreAllocMemory(VmaAllocator allocator, VmaAllocation reservedAllocation) {
    VkImage fakeImage;
    if (allocator == nullptr) {
        return;
    }
    VkResult result = vmaCreateFakeImage(allocator, &fakeImage);
    if (result != VK_SUCCESS) {
        SK_LOGE("PreAllocMemory: CreateFakeImage Failed!! VkResult %d", result);
        return;
    }
    {
        HITRACE_OHOS_NAME_FMT_ALWAYS("vmaBindImageMemory");
        result = vmaBindImageMemory(allocator, reservedAllocation, fakeImage);
    }
    if (result != VK_SUCCESS) {
        SK_LOGE("PreAllocMemory: BindImageMemory Failed!! VkResult %d", result);
    }
    vmaDestroyFakeImage(allocator, fakeImage);
    vmaFreeReservedMemory(allocator, reservedAllocation);
}

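// Allocates device-local memory for 'image', honoring the dedicated, lazy, and
// protected allocation property flags.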
VkResult GrVkAMDMemoryAllocator::allocateImageMemory(VkImage image, AllocationPropertyFlags flags,
                                                     GrVkBackendMemory* backendMemory) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    info.preferredFlags = 0;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;

    if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
        info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }

    if (AllocationPropertyFlags::kLazyAllocation & flags) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }

    if (AllocationPropertyFlags::kProtected & flags) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vmaAllocateMemoryForImage(fAllocator, image, &info, &allocation, nullptr);
    if (VK_SUCCESS != result) {
        return result;
    }
    *backendMemory = (GrVkBackendMemory)allocation;

    // OH ISSUE: VMA preAlloc
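    // If this allocation just created a brand-new VMA block, try to trade it for
    // the pre-allocated reserved block so the cost of populating a fresh block is
    // paid off the hot path. Judging by the handling below, vmaSwapReservedBlock
    // reports:
    //   VK_NOT_READY  - no reserved block exists yet; create one asynchronously.
    //   VK_SUCCESS    - swapped; allocate again from the swapped-in block and
    //                   schedule an asynchronous refill of the reserve.
    //   VK_INCOMPLETE - no swap performed; keep the original allocation.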
    bool newBlockflag = false;
    vmaGetNewBlockStats(allocation, &newBlockflag);
    if (newBlockflag && fCacheFlag && SkGetPreAllocFlag()) {
        HITRACE_OHOS_NAME_FMT_ALWAYS("GrVkAMDMemoryAllocator trigger preAlloc");
        vmaClearNewBlockStats(allocation);
        std::lock_guard<std::mutex> lock(mPreAllocMutex);
        // After the swap, the allocation belongs to the VMA reserved block.
        VkResult result2 = vmaSwapReservedBlock(fAllocator, image, &info, &allocation, nullptr);
        if (result2 == VK_INCOMPLETE) {
            return result;
        }
        if (result2 == VK_NOT_READY) {
            GetThreadPool().add([=] {
                std::lock_guard<std::mutex> lock(mPreAllocMutex);
                HITRACE_OHOS_NAME_FMT_ALWAYS("FirstPreAllocMemory");
                FirstPreAllocMemory(fAllocator, info);
            });
            return result;
        }
        if (result2 == VK_SUCCESS) {
            GetThreadPool().add([=] {
                std::this_thread::sleep_for(std::chrono::microseconds(SkGetPreAllocDelay()));
                std::lock_guard<std::mutex> lock(mPreAllocMutex);
                HITRACE_OHOS_NAME_FMT_ALWAYS("PreAllocMemory");
                PreAllocMemory(fAllocator, allocation);
            });
            VmaAllocation newAllocation;
            VkResult result3 = vmaAllocateMemoryForImage(fAllocator, image, &info, &newAllocation, nullptr);
            if (result3 == VK_SUCCESS) {
                *backendMemory = (GrVkBackendMemory)newAllocation;
            }
            return result3;
        }
    }
    return result;
}

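// Allocates memory for 'buffer', deriving required/preferred memory property
// flags from the declared BufferUsage.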
VkResult GrVkAMDMemoryAllocator::allocateBufferMemory(VkBuffer buffer, BufferUsage usage,
                                                      AllocationPropertyFlags flags,
                                                      GrVkBackendMemory* backendMemory) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    VmaAllocationCreateInfo info;
    info.flags = 0;
    info.usage = VMA_MEMORY_USAGE_UNKNOWN;
    info.memoryTypeBits = 0;
    info.pool = VK_NULL_HANDLE;
    info.pUserData = nullptr;

    switch (usage) {
        case BufferUsage::kGpuOnly:
            info.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            info.preferredFlags = 0;
            break;
        case BufferUsage::kCpuWritesGpuReads:
            // When doing cpu writes and gpu reads the general rule of thumb is to use coherent
            // memory. This relies on the fact that we are not doing any cpu reads and the cpu
            // writes are sequential. For sparse writes we'd want cpu cached memory; however, we
            // don't do these types of writes in Skia.
            //
            // TODO: In the future there may be times where specific types of memory could benefit
            // from memory that is both coherent and cached. Typically such memory allows the gpu
            // to read cpu writes from the cache without the writes needing to be flushed
            // throughout the cache. The reverse is not true, and gpu writes tend to invalidate
            // the cache regardless. Also, these cached gpu reads are typically lower bandwidth
            // than reads of non-cached memory. For now Skia doesn't really have a need or want of
            // this type of memory. But if we ever do we could pass in an AllocationPropertyFlag
            // that requests the cached property.
            info.requiredFlags =
                    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
            break;
        case BufferUsage::kTransfersFromCpuToGpu:
            info.requiredFlags =
                    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
            break;
        case BufferUsage::kTransfersFromGpuToCpu:
            info.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
            info.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
            break;
    }

    if (fMustUseCoherentHostVisibleMemory &&
        (info.requiredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
        info.requiredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    }

    if (AllocationPropertyFlags::kDedicatedAllocation & flags) {
        info.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    }

    if ((AllocationPropertyFlags::kLazyAllocation & flags) && BufferUsage::kGpuOnly == usage) {
        info.preferredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
    }

    if (AllocationPropertyFlags::kPersistentlyMapped & flags) {
        SkASSERT(BufferUsage::kGpuOnly != usage);
        info.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }

    VmaAllocation allocation;
    VkResult result = vmaAllocateMemoryForBuffer(fAllocator, buffer, &info, &allocation, nullptr);
    if (VK_SUCCESS == result) {
        *backendMemory = (GrVkBackendMemory)allocation;
    }

    return result;
}

void GrVkAMDMemoryAllocator::freeMemory(const GrVkBackendMemory& memoryHandle) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    vmaFreeMemory(fAllocator, allocation);
}

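// Fills 'alloc' with the VkDeviceMemory, offset, size, and mappability flags
// backing the given VMA allocation.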
void GrVkAMDMemoryAllocator::getAllocInfo(const GrVkBackendMemory& memoryHandle,
                                          GrVkAlloc* alloc) const {
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    VmaAllocationInfo vmaInfo;
    vmaGetAllocationInfo(fAllocator, allocation, &vmaInfo);

    VkMemoryPropertyFlags memFlags;
    vmaGetMemoryTypeProperties(fAllocator, vmaInfo.memoryType, &memFlags);

    uint32_t flags = 0;
    if (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT & memFlags) {
        flags |= GrVkAlloc::kMappable_Flag;
    }
    if (!SkToBool(VK_MEMORY_PROPERTY_HOST_COHERENT_BIT & memFlags)) {
        flags |= GrVkAlloc::kNoncoherent_Flag;
    }
    if (VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT & memFlags) {
        flags |= GrVkAlloc::kLazilyAllocated_Flag;
    }

    alloc->fMemory        = vmaInfo.deviceMemory;
    alloc->fOffset        = vmaInfo.offset;
    alloc->fSize          = vmaInfo.size;
    alloc->fFlags         = flags;
    alloc->fBackendMemory = memoryHandle;
    alloc->fAllocator     = (GrVkMemoryAllocator*)this;
}

VkResult GrVkAMDMemoryAllocator::mapMemory(const GrVkBackendMemory& memoryHandle, void** data) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    return vmaMapMemory(fAllocator, allocation, data);
}

void GrVkAMDMemoryAllocator::unmapMemory(const GrVkBackendMemory& memoryHandle) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    vmaUnmapMemory(fAllocator, allocation);
}

VkResult GrVkAMDMemoryAllocator::flushMemory(const GrVkBackendMemory& memoryHandle,
                                             VkDeviceSize offset, VkDeviceSize size) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    return vmaFlushAllocation(fAllocator, allocation, offset, size);
}

VkResult GrVkAMDMemoryAllocator::invalidateMemory(const GrVkBackendMemory& memoryHandle,
                                                  VkDeviceSize offset, VkDeviceSize size) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    const VmaAllocation allocation = (const VmaAllocation)memoryHandle;
    return vmaInvalidateAllocation(fAllocator, allocation, offset, size);
}

uint64_t GrVkAMDMemoryAllocator::totalUsedMemory() const {
    VmaStats stats;
    vmaCalculateStats(fAllocator, &stats);
    return stats.total.usedBytes;
}

uint64_t GrVkAMDMemoryAllocator::totalAllocatedMemory() const {
    VmaStats stats;
    vmaCalculateStats(fAllocator, &stats);
    return stats.total.usedBytes + stats.total.unusedBytes;
}

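// OH ISSUE: appends a human-readable summary of VMA usage to 'out', one
// "key: value" field per 'sep'; emitted only when the memory-optimized flag
// is set.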
void GrVkAMDMemoryAllocator::dumpVmaStats(SkString* out, const char* sep) const
{
    constexpr int MB = 1024 * 1024;
    if (out == nullptr || sep == nullptr) {
        return;
    }
    bool flag = SkGetMemoryOptimizedFlag();
    out->appendf("vma_flag: %d %s", flag, sep);
    if (!flag) {
        return;
    }
    VmaStats stats;
    vmaCalculateStats(fAllocator, &stats);
    uint64_t free = stats.total.unusedBytes;
    uint64_t used = stats.total.usedBytes;
    uint64_t total = free + used;
    auto maxBlockCount = SkGetVmaBlockCountMax();
    out->appendf("vma_free: %llu (%llu MB)%s", free, free / MB, sep);
    out->appendf("vma_used: %llu (%llu MB)%s", used, used / MB, sep);
    out->appendf("vma_total: %llu (%llu MB)%s", total, total / MB, sep);
    out->appendf("vma_cacheBlockSize: %d MB%s", SkGetVmaBlockSizeMB(), sep);
    out->appendf("vma_cacheBlockCount: %llu / %llu%s",
        stats.total.blockCount <= maxBlockCount ? stats.total.blockCount : maxBlockCount, maxBlockCount, sep);
    out->appendf("vma_dedicatedBlockCount: %llu%s",
        stats.total.blockCount <= maxBlockCount ? 0 : stats.total.blockCount - maxBlockCount, sep);
    out->appendf("vma_allocationCount: %u%s", stats.total.allocationCount, sep);
    out->appendf("vma_unusedRangeCount: %u%s", stats.total.unusedRangeCount, sep);
    out->appendf("vma_allocationSize: %llu / %llu / %llu%s",
        stats.total.allocationSizeMin, stats.total.allocationSizeAvg, stats.total.allocationSizeMax, sep);
    out->appendf("vma_unusedRangeSize: %llu / %llu / %llu%s",
        stats.total.unusedRangeSizeMin, stats.total.unusedRangeSizeAvg, stats.total.unusedRangeSizeMax, sep);
    uint32_t blockSize = 0;
    vmaGetPreAllocBlockSize(fAllocator, &blockSize);
    out->appendf("vma_preAllocBlockSize: %u / 1%s", blockSize, sep);
}

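// OH ISSUE: despite the name, this only releases VMA blocks that have become
// empty (no compaction of live allocations); with the VMA debug flag set, the
// stats are dumped before and after the free.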
void GrVkAMDMemoryAllocator::vmaDefragment()
{
    if (!fCacheFlag) {
        return;
    }
    bool flag = SkGetVmaDefragmentOn();
    if (!flag) {
        return;
    }
    bool debugFlag = SkGetVmaDebugFlag();
    if (!debugFlag) {
        std::lock_guard<std::mutex> lock(mPreAllocMutex);
        vmaFreeEmptyBlock(fAllocator);
        return;
    }

    // dfx: dump VMA stats around the free for debugging
    SkString debugInfo;
    dumpVmaStats(&debugInfo);
    SkDebugf("GrVkAMDMemoryAllocator::vmaDefragment() before: %s", debugInfo.c_str());
    HITRACE_OHOS_NAME_FMT_ALWAYS("GrVkAMDMemoryAllocator::vmaDefragment() before: %s", debugInfo.c_str());

    {
        std::lock_guard<std::mutex> lock(mPreAllocMutex);
        vmaFreeEmptyBlock(fAllocator);
    }

    debugInfo = "";
    dumpVmaStats(&debugInfo);
    SkDebugf("GrVkAMDMemoryAllocator::vmaDefragment() after: %s", debugInfo.c_str());
    HITRACE_OHOS_NAME_FMT_ALWAYS("GrVkAMDMemoryAllocator::vmaDefragment() after: %s", debugInfo.c_str());
}

#endif // SK_USE_VMA