/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkMemory.h"

#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkUtil.h"

#ifdef SKIA_OHOS_FOR_OHOS_TRACE
#include "hitrace_meter.h"
#endif

using AllocationPropertyFlags = GrVkMemoryAllocator::AllocationPropertyFlags;
using BufferUsage = GrVkMemoryAllocator::BufferUsage;

bool GrVkMemory::AllocAndBindBufferMemory(GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          BufferUsage usage,
                                          GrVkAlloc* alloc) {
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    AllocationPropertyFlags propFlags;
    bool shouldPersistentlyMapCpuToGpu = gpu->vkCaps().shouldPersistentlyMapCpuToGpuBuffers();
    if (usage == BufferUsage::kTransfersFromCpuToGpu ||
        (usage == BufferUsage::kCpuWritesGpuReads && shouldPersistentlyMapCpuToGpu)) {
        // In general it is fine (and often better) to keep buffers that we are writing to on the
        // CPU persistently mapped.
        propFlags = AllocationPropertyFlags::kPersistentlyMapped;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    VkResult result = allocator->allocateBufferMemory(buffer, usage, propFlags, &memory);
    if (!gpu->checkVkResult(result)) {
        return false;
    }
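    // Query the backing VkDeviceMemory handle, offset, size, and flags for this allocation.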
    allocator->getAllocInfo(memory, alloc);

    // Bind buffer
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, BindBufferMemory(gpu->device(), buffer, alloc->fMemory,
                                                 alloc->fOffset));
    if (err) {
        FreeBufferMemory(gpu, *alloc);
        return false;
    }

    return true;
}

void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(alloc.fBackendMemory);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    allocator->freeMemory(alloc.fBackendMemory);
}

bool GrVkMemory::AllocAndBindImageMemory(GrVkGpu* gpu,
                                         VkImage image,
                                         GrMemoryless memoryless,
                                         GrVkAlloc* alloc) {
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    VkMemoryRequirements memReqs;
#ifdef SKIA_OHOS_FOR_OHOS_TRACE
    HITRACE_METER_FMT(HITRACE_TAG_GRAPHIC_AGP, "AllocAndBindImageMemory");
#endif
    GR_VK_CALL(gpu->vkInterface(), GetImageMemoryRequirements(gpu->device(), image, &memReqs));

    AllocationPropertyFlags propFlags;
    // If we ever find that our allocator is not aggressive enough in using dedicated image
    // memory we can add a size check here to force the use of dedicated memory. However, for now,
    // we let the allocators decide. The allocator can query the GPU for each image to see if the
    // GPU recommends or requires the use of dedicated memory.
    if (gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory()) {
        propFlags = AllocationPropertyFlags::kDedicatedAllocation;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (gpu->protectedContext()) {
        propFlags |= AllocationPropertyFlags::kProtected;
    }

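    // Memoryless images (e.g. transient MSAA attachments) can live in lazily allocated memory,
    // which on tile-based GPUs may never be backed by physical memory.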
    if (memoryless == GrMemoryless::kYes) {
        propFlags |= AllocationPropertyFlags::kLazyAllocation;
    }

    VkResult result = allocator->allocateImageMemory(image, propFlags, &memory);
    if (!gpu->checkVkResult(result)) {
        return false;
    }

    allocator->getAllocInfo(memory, alloc);

    // Bind image
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, BindImageMemory(gpu->device(), image, alloc->fMemory,
                                                alloc->fOffset));
    if (err) {
        FreeImageMemory(gpu, *alloc);
        return false;
    }

    return true;
}

void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(alloc.fBackendMemory);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    allocator->freeMemory(alloc.fBackendMemory);
}

void* GrVkMemory::MapAlloc(GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(GrVkAlloc::kMappable_Flag & alloc.fFlags);
    SkASSERT(alloc.fBackendMemory);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    void* mapPtr;
    VkResult result = allocator->mapMemory(alloc.fBackendMemory, &mapPtr);
    if (!gpu->checkVkResult(result)) {
        return nullptr;
    }
    return mapPtr;
}

void GrVkMemory::UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(alloc.fBackendMemory);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    allocator->unmapMemory(alloc.fBackendMemory);
}

void GrVkMemory::GetNonCoherentMappedMemoryRange(const GrVkAlloc& alloc, VkDeviceSize offset,
                                                 VkDeviceSize size, VkDeviceSize alignment,
                                                 VkMappedMemoryRange* range) {
    SkASSERT(alloc.fFlags & GrVkAlloc::kNoncoherent_Flag);
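    // Flush/invalidate ranges for non-coherent memory must be aligned (typically to the device's
    // nonCoherentAtomSize), so round the offset down and the size up to the given alignment.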
    offset = offset + alloc.fOffset;
    VkDeviceSize offsetDiff = offset & (alignment - 1);
    offset = offset - offsetDiff;
    size = (size + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
    SkASSERT(offset >= alloc.fOffset);
    SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
    SkASSERT(0 == (offset & (alignment-1)));
    SkASSERT(size > 0);
    SkASSERT(0 == (size & (alignment-1)));
#endif

    memset(range, 0, sizeof(VkMappedMemoryRange));
    range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range->memory = alloc.fMemory;
    range->offset = offset;
    range->size = size;
}

void GrVkMemory::FlushMappedAlloc(GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
                                  VkDeviceSize size) {
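    // Host-coherent allocations do not need explicit flushes; only non-coherent memory must be
    // flushed to make CPU writes visible to the GPU.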
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        SkASSERT(alloc.fBackendMemory);
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        VkResult result = allocator->flushMemory(alloc.fBackendMemory, offset, size);
        gpu->checkVkResult(result);
    }
}

void GrVkMemory::InvalidateMappedAlloc(GrVkGpu* gpu, const GrVkAlloc& alloc,
                                       VkDeviceSize offset, VkDeviceSize size) {
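    // The inverse of FlushMappedAlloc: for non-coherent memory, invalidate the range so that
    // GPU writes become visible to the CPU before it reads the mapped pointer.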
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        SkASSERT(alloc.fBackendMemory);
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        VkResult result = allocator->invalidateMemory(alloc.fBackendMemory, offset, size);
        gpu->checkVkResult(result);
    }
}