/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkMemory.h"

#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkUtil.h"

using AllocationPropertyFlags = GrVkMemoryAllocator::AllocationPropertyFlags;
using BufferUsage = GrVkMemoryAllocator::BufferUsage;

bool GrVkMemory::AllocAndBindBufferMemory(GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          BufferUsage usage,
                                          GrVkAlloc* alloc) {
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    AllocationPropertyFlags propFlags;
    bool shouldPersistentlyMapCpuToGpu = gpu->vkCaps().shouldPersistentlyMapCpuToGpuBuffers();
    if (usage == BufferUsage::kTransfersFromCpuToGpu ||
        (usage == BufferUsage::kCpuWritesGpuReads && shouldPersistentlyMapCpuToGpu)) {
        // It is generally fine (and often better) to keep buffers that we write to on the CPU
        // persistently mapped.
        propFlags = AllocationPropertyFlags::kPersistentlyMapped;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

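    // Delegate the actual allocation to the GrVkMemoryAllocator, which picks a memory type
    // compatible with the buffer and the requested usage.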
    VkResult result = allocator->allocateBufferMemory(buffer, usage, propFlags, &memory);
    if (!gpu->checkVkResult(result)) {
        return false;
    }
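    // Unpack the opaque backend allocation into the GrVkAlloc (VkDeviceMemory handle, offset,
    // size, and flags).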
    allocator->getAllocInfo(memory, alloc);

    // Bind buffer
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, BindBufferMemory(gpu->device(), buffer, alloc->fMemory,
                                                 alloc->fOffset));
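    // If the bind fails, release the allocation before bailing so it does not leak.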
    if (err) {
        FreeBufferMemory(gpu, *alloc);
        return false;
    }

    return true;
}

void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(alloc.fBackendMemory);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    allocator->freeMemory(alloc.fBackendMemory);
}

bool GrVkMemory::AllocAndBindImageMemory(GrVkGpu* gpu,
                                         VkImage image,
                                         GrMemoryless memoryless,
                                         GrVkAlloc* alloc) {
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    VkMemoryRequirements memReqs;
    GR_VK_CALL(gpu->vkInterface(), GetImageMemoryRequirements(gpu->device(), image, &memReqs));

    AllocationPropertyFlags propFlags;
    // If we ever find that our allocator is not aggressive enough in using dedicated image
    // memory, we can add a size check here to force the use of dedicated memory. For now,
    // however, we let the allocator decide. The allocator can query the GPU for each image to
    // see whether the GPU recommends or requires the use of dedicated memory.
    if (gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory()) {
        propFlags = AllocationPropertyFlags::kDedicatedAllocation;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

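    // Protected contexts must allocate from protected memory heaps
    // (VK_MEMORY_PROPERTY_PROTECTED_BIT).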
    if (gpu->protectedContext()) {
        propFlags |= AllocationPropertyFlags::kProtected;
    }

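    // Lazily allocated memory (VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) backs memoryless
    // attachments, e.g. transient MSAA attachments on tile-based GPUs.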
    if (memoryless == GrMemoryless::kYes) {
        propFlags |= AllocationPropertyFlags::kLazyAllocation;
    }

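    // As with buffers, let the allocator choose a suitable memory type for the image.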
    VkResult result = allocator->allocateImageMemory(image, propFlags, &memory);
    if (!gpu->checkVkResult(result)) {
        return false;
    }

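    // As above, unpack the backend allocation into the GrVkAlloc.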
    allocator->getAllocInfo(memory, alloc);

    // Bind image
    VkResult err;
    GR_VK_CALL_RESULT(gpu, err, BindImageMemory(gpu->device(), image, alloc->fMemory,
                                                alloc->fOffset));
    if (err) {
        FreeImageMemory(gpu, *alloc);
        return false;
    }

    return true;
}

void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(alloc.fBackendMemory);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    allocator->freeMemory(alloc.fBackendMemory);
}

void* GrVkMemory::MapAlloc(GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(GrVkAlloc::kMappable_Flag & alloc.fFlags);
    SkASSERT(alloc.fBackendMemory);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    void* mapPtr;
    VkResult result = allocator->mapMemory(alloc.fBackendMemory, &mapPtr);
    if (!gpu->checkVkResult(result)) {
        return nullptr;
    }
    return mapPtr;
}

void GrVkMemory::UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(alloc.fBackendMemory);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    allocator->unmapMemory(alloc.fBackendMemory);
}

void GrVkMemory::GetNonCoherentMappedMemoryRange(const GrVkAlloc& alloc, VkDeviceSize offset,
                                                 VkDeviceSize size, VkDeviceSize alignment,
                                                 VkMappedMemoryRange* range) {
    SkASSERT(alloc.fFlags & GrVkAlloc::kNoncoherent_Flag);
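    // Vulkan requires flush/invalidate ranges to be aligned to nonCoherentAtomSize: round the
    // start of the range down and its size up to the given alignment.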
    offset = offset + alloc.fOffset;
    VkDeviceSize offsetDiff = offset & (alignment - 1);
    offset = offset - offsetDiff;
    size = (size + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
    SkASSERT(offset >= alloc.fOffset);
    SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
    SkASSERT(0 == (offset & (alignment - 1)));
    SkASSERT(size > 0);
    SkASSERT(0 == (size & (alignment - 1)));
#endif

    memset(range, 0, sizeof(VkMappedMemoryRange));
    range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range->memory = alloc.fMemory;
    range->offset = offset;
    range->size = size;
}

void GrVkMemory::FlushMappedAlloc(GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
                                  VkDeviceSize size) {
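    // Coherent allocations are kept in sync automatically; only noncoherent memory needs an
    // explicit flush to make CPU writes visible to the GPU.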
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        SkASSERT(alloc.fBackendMemory);
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        VkResult result = allocator->flushMemory(alloc.fBackendMemory, offset, size);
        gpu->checkVkResult(result);
    }
}

void GrVkMemory::InvalidateMappedAlloc(GrVkGpu* gpu, const GrVkAlloc& alloc,
                                       VkDeviceSize offset, VkDeviceSize size) {
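    // The inverse of flushing: invalidate noncoherent memory so the CPU sees GPU writes before
    // reading from the mapped pointer.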
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        SkASSERT(alloc.fBackendMemory);
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        VkResult result = allocator->invalidateMemory(alloc.fBackendMemory, offset, size);
        gpu->checkVkResult(result);
    }
}