/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkTypes.h"
#include "include/gpu/GpuTypes.h"
#include "include/gpu/vk/VulkanMemoryAllocator.h"
#include "include/gpu/vk/VulkanTypes.h"
#include "src/base/SkUtils.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"
#include "src/gpu/vk/VulkanMemory.h"

#include <cstdint>
#include <cstring>
#include <functional>

#define VK_CALL(GPU, X) GR_VK_CALL((GPU)->vkInterface(), X)

namespace skgpu {

using BufferUsage = VulkanMemoryAllocator::BufferUsage;

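// Standard Vulkan memory-type search: walk the physical device's memory types and pick the
// first one whose bit is set in typeFilter and whose property flags include everything in
// properties. Returns false and leaves typeIndex untouched if no suitable type exists.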
static bool FindMemoryType(GrVkGpu* gpu,
                           uint32_t typeFilter,
                           VkMemoryPropertyFlags properties,
                           uint32_t& typeIndex) {
    VkPhysicalDevice physicalDevice = gpu->physicalDevice();
    VkPhysicalDeviceMemoryProperties memProperties{};
    VK_CALL(gpu, GetPhysicalDeviceMemoryProperties(physicalDevice, &memProperties));

    bool hasFound = false;
    for (uint32_t i = 0; i < memProperties.memoryTypeCount && !hasFound; ++i) {
        if (typeFilter & (1 << i)) {
            uint32_t supportedFlags = memProperties.memoryTypes[i].propertyFlags & properties;
            if (supportedFlags == properties) {
                typeIndex = i;
                hasFound = true;
            }
        }
    }

    return hasFound;
}

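// Allocates memory for a buffer through the VulkanMemoryAllocator: the usage and protection
// settings are translated into allocation property flags, and on success the VulkanAlloc is
// filled out from the resulting backend memory handle.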
bool VulkanMemory::AllocBufferMemory(VulkanMemoryAllocator* allocator,
                                     VkBuffer buffer,
                                     skgpu::Protected isProtected,
                                     BufferUsage usage,
                                     bool shouldPersistentlyMapCpuToGpu,
                                     const std::function<CheckResult>& checkResult,
#ifdef SKIA_DFX_FOR_OHOS
                                     VulkanAlloc* alloc,
                                     size_t size) {
#else
                                     VulkanAlloc* alloc) {
#endif
    VulkanBackendMemory memory = 0;
    uint32_t propFlags;
    if (usage == BufferUsage::kTransfersFromCpuToGpu ||
        (usage == BufferUsage::kCpuWritesGpuReads && shouldPersistentlyMapCpuToGpu)) {
        // In general it is fine (and often better) to keep buffers that the CPU writes to
        // persistently mapped.
        propFlags = VulkanMemoryAllocator::kPersistentlyMapped_AllocationPropertyFlag;
    } else {
        propFlags = VulkanMemoryAllocator::kNone_AllocationPropertyFlag;
    }

    if (isProtected == Protected::kYes) {
        propFlags = propFlags | VulkanMemoryAllocator::kProtected_AllocationPropertyFlag;
    }

    VkResult result = allocator->allocateBufferMemory(buffer, usage, propFlags, &memory);
    if (!checkResult(result)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);
    return true;
}

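// OH ISSUE: imports an OH_NativeBuffer as dedicated device-local memory and binds it to the
// given buffer. The pNext chain is VkMemoryAllocateInfo -> VkMemoryDedicatedAllocateInfo ->
// VkImportNativeBufferInfoOHOS, so the allocation both imports the native buffer and is
// dedicated to this VkBuffer, as external-memory imports in Vulkan typically require.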
bool VulkanMemory::ImportAndBindBufferMemory(GrVkGpu* gpu,
                                             OH_NativeBuffer* nativeBuffer,
                                             VkBuffer buffer,
                                             VulkanAlloc* alloc) {
#ifdef SKIA_OHOS_FOR_OHOS_TRACE
    HITRACE_METER_FMT(HITRACE_TAG_GRAPHIC_AGP, "ImportAndBindBufferMemory");
#endif
    VkDevice device = gpu->device();
    VkMemoryRequirements memReqs{};
    VK_CALL(gpu, GetBufferMemoryRequirements(device, buffer, &memReqs));

    uint32_t typeIndex = 0;
    bool hasFound = FindMemoryType(gpu, memReqs.memoryTypeBits,
                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, typeIndex);
    if (!hasFound) {
        return false;
    }

    // Import external memory
    VkImportNativeBufferInfoOHOS importInfo{};
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_NATIVE_BUFFER_INFO_OHOS;
    importInfo.pNext = nullptr;
    importInfo.buffer = nativeBuffer;

    VkMemoryDedicatedAllocateInfo dedicatedAllocInfo{};
    dedicatedAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
    dedicatedAllocInfo.pNext = &importInfo;
    dedicatedAllocInfo.image = VK_NULL_HANDLE;
    dedicatedAllocInfo.buffer = buffer;

    VkMemoryAllocateInfo allocateInfo{};
    allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    allocateInfo.pNext = &dedicatedAllocInfo;
    allocateInfo.allocationSize = memReqs.size;
    allocateInfo.memoryTypeIndex = typeIndex;

    VkResult err;
    VkDeviceMemory memory;
    GR_VK_CALL_RESULT(gpu, err, AllocateMemory(device, &allocateInfo, nullptr, &memory));
    if (err) {
        return false;
    }

    // Bind buffer
    GR_VK_CALL_RESULT(gpu, err, BindBufferMemory(device, buffer, memory, 0));
    if (err) {
        VK_CALL(gpu, FreeMemory(device, memory, nullptr));
        return false;
    }

    alloc->fMemory = memory;
    alloc->fOffset = 0;
    alloc->fSize = memReqs.size;
    alloc->fFlags = 0;
    alloc->fIsExternalMemory = true;

    return true;
}

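// Two overloads of FreeBufferMemory: the allocator-based one releases memory owned by a
// VulkanMemoryAllocator, while the GrVkGpu-based one below also handles externally imported
// memory, which must be returned directly with vkFreeMemory.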
void VulkanMemory::FreeBufferMemory(VulkanMemoryAllocator* allocator, const VulkanAlloc& alloc) {
    SkASSERT(alloc.fBackendMemory);
    allocator->freeMemory(alloc.fBackendMemory);
}

void VulkanMemory::FreeBufferMemory(const GrVkGpu* gpu, const VulkanAlloc& alloc) {
    if (alloc.fIsExternalMemory) {
        VK_CALL(gpu, FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    } else {
        SkASSERT(alloc.fBackendMemory);
        VulkanMemoryAllocator* allocator = gpu->memoryAllocator();
        if (alloc.fAllocator != nullptr) {
            allocator = alloc.fAllocator;
        }
        allocator->freeMemory(alloc.fBackendMemory);
    }
}

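// Allocates memory for an image through the VulkanMemoryAllocator. OH ISSUE: when the VMA
// cache flag is enabled and the image exceeds the cached-memory size threshold, the dedicated
// cache-image allocator is used in place of the default one.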
bool VulkanMemory::AllocImageMemory(VulkanMemoryAllocator* allocator,
                                    VulkanMemoryAllocator* allocatorCacheImage,
                                    VkImage image,
                                    Protected isProtected,
                                    bool forceDedicatedMemory,
                                    bool useLazyAllocation,
                                    const std::function<CheckResult>& checkResult,
                                    VulkanAlloc* alloc,
                                    int memorySize) {
    VulkanBackendMemory memory = 0;

    bool vmaFlag = SkGetVmaCacheFlag();
    bool vmaCacheFlag = vmaFlag && memorySize > SkGetNeedCachedMemroySize();
    if (vmaCacheFlag && allocatorCacheImage) {
        allocator = allocatorCacheImage;
    }
    uint32_t propFlags;
    // If we ever find that our allocator is not aggressive enough in using dedicated image
    // memory, we can add a size check here to force the use of dedicated memory. For now,
    // we let the allocators decide. The allocator can query the GPU for each image to see if
    // the GPU recommends or requires the use of dedicated memory.
    if (vmaCacheFlag) {
        propFlags = VulkanMemoryAllocator::kNone_AllocationPropertyFlag;
    } else if (forceDedicatedMemory) {
        propFlags = VulkanMemoryAllocator::kDedicatedAllocation_AllocationPropertyFlag;
    } else {
        propFlags = VulkanMemoryAllocator::kNone_AllocationPropertyFlag;
    }

    if (isProtected == Protected::kYes) {
        propFlags = propFlags | VulkanMemoryAllocator::kProtected_AllocationPropertyFlag;
    }

    if (useLazyAllocation) {
        propFlags = propFlags | VulkanMemoryAllocator::kLazyAllocation_AllocationPropertyFlag;
    }

    { // OH ISSUE: add trace for vulkan interface
#ifdef SKIA_OHOS_FOR_OHOS_TRACE
        HITRACE_METER_FMT(HITRACE_TAG_GRAPHIC_AGP, "allocateImageMemory");
#endif
        VkResult result = allocator->allocateImageMemory(image, propFlags, &memory);
        if (!checkResult(result)) {
            return false;
        }
    }

    allocator->getAllocInfo(memory, alloc);
    return true;
}

void VulkanMemory::FreeImageMemory(VulkanMemoryAllocator* allocator,
                                   const VulkanAlloc& alloc) {
    SkASSERT(alloc.fBackendMemory);
    if (alloc.fAllocator != nullptr) {
        allocator = alloc.fAllocator;
    }
    allocator->freeMemory(alloc.fBackendMemory);
}

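// Maps the allocation through its owning allocator (falling back to the one passed in) and
// returns the CPU-visible pointer, or nullptr on failure. Only allocations carrying
// kMappable_Flag may be mapped; UnmapAlloc below is the counterpart.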
void* VulkanMemory::MapAlloc(VulkanMemoryAllocator* allocator,
                             const VulkanAlloc& alloc,
                             const std::function<CheckResult>& checkResult) {
    SkASSERT(VulkanAlloc::kMappable_Flag & alloc.fFlags);
    SkASSERT(alloc.fBackendMemory);
    if (alloc.fAllocator != nullptr) {
        allocator = alloc.fAllocator;
    }
    void* mapPtr;
    VkResult result = allocator->mapMemory(alloc.fBackendMemory, &mapPtr);
    if (!checkResult(result)) {
        return nullptr;
    }
    return mapPtr;
}

void VulkanMemory::UnmapAlloc(VulkanMemoryAllocator* allocator,
                              const VulkanAlloc& alloc) {
    SkASSERT(alloc.fBackendMemory);
    if (alloc.fAllocator != nullptr) {
        allocator = alloc.fAllocator;
    }
    allocator->unmapMemory(alloc.fBackendMemory);
}

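// Builds the VkMappedMemoryRange used to flush or invalidate non-coherent memory. Vulkan
// requires such ranges to be aligned to the device's nonCoherentAtomSize, which is what the
// alignment parameter is expected to carry.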
void VulkanMemory::GetNonCoherentMappedMemoryRange(const VulkanAlloc& alloc,
                                                   VkDeviceSize offset,
                                                   VkDeviceSize size,
                                                   VkDeviceSize alignment,
                                                   VkMappedMemoryRange* range) {
    SkASSERT(alloc.fFlags & VulkanAlloc::kNoncoherent_Flag);
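    // Round the range out to the alignment: offset is rounded down, size rounded up. For
    // example, with alloc.fOffset = 0, offset = 0, size = 100, and alignment = 64, offsetDiff
    // is 0 and size rounds up to (100 + 63) & ~63 = 128, yielding the aligned range [0, 128).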
    offset = offset + alloc.fOffset;
    VkDeviceSize offsetDiff = offset & (alignment - 1);
    offset = offset - offsetDiff;
    size = (size + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
    SkASSERT(offset >= alloc.fOffset);
    SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
    SkASSERT(0 == (offset & (alignment - 1)));
    SkASSERT(size > 0);
    SkASSERT(0 == (size & (alignment - 1)));
#endif

    std::memset(range, 0, sizeof(VkMappedMemoryRange));
    range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range->memory = alloc.fMemory;
    range->offset = offset;
    range->size = size;
}

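// For non-coherent memory, CPU writes must be flushed before the GPU can see them, and a
// mapped range must be invalidated before the CPU reads GPU writes; both calls are no-ops
// for coherent allocations. InvalidateMappedAlloc below is the read-side counterpart.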
void VulkanMemory::FlushMappedAlloc(VulkanMemoryAllocator* allocator,
                                    const VulkanAlloc& alloc,
                                    VkDeviceSize offset,
                                    VkDeviceSize size,
                                    const std::function<CheckResult>& checkResult) {
    if (alloc.fFlags & VulkanAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        SkASSERT(alloc.fBackendMemory);
        if (alloc.fAllocator != nullptr) {
            allocator = alloc.fAllocator;
        }
        VkResult result = allocator->flushMemory(alloc.fBackendMemory, offset, size);
        checkResult(result);
    }
}

void VulkanMemory::InvalidateMappedAlloc(VulkanMemoryAllocator* allocator,
                                         const VulkanAlloc& alloc,
                                         VkDeviceSize offset,
                                         VkDeviceSize size,
                                         const std::function<CheckResult>& checkResult) {
    if (alloc.fFlags & VulkanAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        SkASSERT(alloc.fBackendMemory);
        if (alloc.fAllocator != nullptr) {
            allocator = alloc.fAllocator;
        }
        VkResult result = allocator->invalidateMemory(alloc.fBackendMemory, offset, size);
        checkResult(result);
    }
}

} // namespace skgpu