/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkMemory.h"

#include "GrVkGpu.h"
#include "GrVkUtil.h"
#include "vk/GrVkMemoryAllocator.h"

using AllocationPropertyFlags = GrVkMemoryAllocator::AllocationPropertyFlags;
using BufferUsage = GrVkMemoryAllocator::BufferUsage;

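// Maps a GrVkBuffer type (plus whether it is updated dynamically) to the allocator's BufferUsage
// hint: GPU-read-only data goes to device-local memory, dynamic data to host-visible memory that
// the CPU writes and the GPU reads, and copy staging buffers to CPU-only memory.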
static BufferUsage get_buffer_usage(GrVkBuffer::Type type, bool dynamic) {
    switch (type) {
        case GrVkBuffer::kVertex_Type: // fall through
        case GrVkBuffer::kIndex_Type: // fall through
        case GrVkBuffer::kTexel_Type:
            return dynamic ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
        case GrVkBuffer::kUniform_Type:
            SkASSERT(dynamic);
            return BufferUsage::kCpuWritesGpuReads;
        case GrVkBuffer::kCopyRead_Type: // fall through
        case GrVkBuffer::kCopyWrite_Type:
            return BufferUsage::kCpuOnly;
    }
    SK_ABORT("Invalid GrVkBuffer::Type");
    return BufferUsage::kCpuOnly; // Unreachable; arbitrary value to satisfy the compiler.
}

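// Allocates memory for 'buffer' via the gpu's GrVkMemoryAllocator and binds the buffer to it.
// On success the allocation is described in 'alloc'; returns false if allocation or binding fails.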
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          bool dynamic,
                                          GrVkAlloc* alloc) {
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    GrVkMemoryAllocator::BufferUsage usage = get_buffer_usage(type, dynamic);

    AllocationPropertyFlags propFlags;
    if (usage == GrVkMemoryAllocator::BufferUsage::kCpuWritesGpuReads) {
        // In general it is always fine (and often better) to keep buffers persistently mapped.
        // TODO: According to AMD's guide for the VulkanMemoryAllocator, there are two cases where
        // keeping a buffer mapped can hurt. The first is when running on Windows 7 or 8 (Windows
        // 10 is fine); by now a Vulkan app is unlikely to be running on anything older than
        // Windows 10. The second is when using the special GPU-local and host-mappable memory on
        // AMD cards. However, we generally don't pick that memory, as we've found it slower than
        // the cached host-visible memory. If we ever need to special-case either of these issues,
        // we can add checks for them here.
        propFlags = AllocationPropertyFlags::kPersistentlyMapped;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (!allocator->allocateMemoryForBuffer(buffer, usage, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind buffer
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindBufferMemory(gpu->device(), buffer,
                                                                   alloc->fMemory,
                                                                   alloc->fOffset));
    if (err) {
        FreeBufferMemory(gpu, type, *alloc);
        return false;
    }

    return true;
}

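// Releases memory previously allocated by AllocAndBindBufferMemory. Allocations that came from
// the GrVkMemoryAllocator are returned to it; otherwise the raw VkDeviceMemory is freed directly.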
void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}

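// Images requiring more than this get a dedicated VkDeviceMemory allocation rather than being
// suballocated from a larger block; small images are cheaper to pack together.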
const VkDeviceSize kMaxSmallImageSize = 16 * 1024;

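// Allocates memory for 'image' and binds the image to it. Large images, and drivers whose caps
// request it, get a dedicated allocation. Only optimally tiled images are supported here.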
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    SkASSERT(!linearTiling);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    VkMemoryRequirements memReqs;
    GR_VK_CALL(gpu->vkInterface(), GetImageMemoryRequirements(gpu->device(), image, &memReqs));

    AllocationPropertyFlags propFlags;
    if (memReqs.size > kMaxSmallImageSize || gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory()) {
        propFlags = AllocationPropertyFlags::kDedicatedAllocation;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (!allocator->allocateMemoryForImage(image, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind image
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindImageMemory(gpu->device(), image,
                                                                  alloc->fMemory, alloc->fOffset));
    if (err) {
        FreeImageMemory(gpu, linearTiling, *alloc);
        return false;
    }

    return true;
}

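// Releases memory previously allocated by AllocAndBindImageMemory; mirrors FreeBufferMemory above.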
void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}

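// Maps the allocation into CPU address space and returns the pointer, or nullptr on failure.
// The allocation must be host-mappable; for non-coherent memory its offset and size must already
// be aligned to the device's nonCoherentAtomSize (asserted in debug builds).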
void* GrVkMemory::MapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(GrVkAlloc::kMappable_Flag & alloc.fFlags);
#ifdef SK_DEBUG
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
        SkASSERT(0 == (alloc.fOffset & (alignment - 1)));
        SkASSERT(0 == (alloc.fSize & (alignment - 1)));
    }
#endif
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        return allocator->mapMemory(alloc.fBackendMemory);
    }

    void* mapPtr;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(), alloc.fMemory,
                                                            alloc.fOffset,
                                                            alloc.fSize, 0, &mapPtr));
    if (err) {
        mapPtr = nullptr;
    }
    return mapPtr;
}

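// Releases a mapping created by MapAlloc.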
void GrVkMemory::UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->unmapMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
    }
}

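// Builds the VkMappedMemoryRange for a flush/invalidate of non-coherent memory: the start is
// rounded down and the size rounded up to 'alignment' (the device's nonCoherentAtomSize), as
// Vulkan requires. For example (illustrative numbers only): with alignment = 64, alloc.fOffset
// = 0, offset = 100, and size = 60, the byte range [100, 160) becomes the atom-aligned [64, 192).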
void GrVkMemory::GetNonCoherentMappedMemoryRange(const GrVkAlloc& alloc, VkDeviceSize offset,
                                                 VkDeviceSize size, VkDeviceSize alignment,
                                                 VkMappedMemoryRange* range) {
    SkASSERT(alloc.fFlags & GrVkAlloc::kNoncoherent_Flag);
    offset = offset + alloc.fOffset;
    VkDeviceSize offsetDiff = offset & (alignment - 1);
    offset = offset - offsetDiff;
    // Rounding the offset down extends the range at the front by offsetDiff bytes, so the size
    // must grow by the same amount before being rounded up to a multiple of the alignment.
    size = (size + offsetDiff + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
    SkASSERT(offset >= alloc.fOffset);
    SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
    SkASSERT(0 == (offset & (alignment - 1)));
    SkASSERT(size > 0);
    SkASSERT(0 == (size & (alignment - 1)));
#endif

    memset(range, 0, sizeof(VkMappedMemoryRange));
    range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range->memory = alloc.fMemory;
    range->offset = offset;
    range->size = size;
}

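// Flushes CPU writes to a mapped allocation so they become visible to the GPU. This is a no-op
// for host-coherent memory; non-coherent memory is flushed in nonCoherentAtomSize units.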
void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
                                  VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->flushMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(), 1,
                                                                   &mappedMemoryRange));
        }
    }
}

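// The counterpart to FlushMappedAlloc: invalidates the CPU's view of a mapped allocation so that
// GPU writes become visible to subsequent CPU reads. Also a no-op for host-coherent memory.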
void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                                       VkDeviceSize offset, VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->invalidateMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(), 1,
                                                                        &mappedMemoryRange));
        }
    }
}
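
// A minimal usage sketch (hypothetical caller, not part of this file's API): uploading vertex
// data through a dynamic buffer with the helpers above. 'gpu', 'buffer', 'srcData', and
// 'srcSize' are assumed to be provided by the caller.
//
//   GrVkAlloc alloc;
//   if (GrVkMemory::AllocAndBindBufferMemory(gpu, buffer, GrVkBuffer::kVertex_Type,
//                                            /*dynamic=*/true, &alloc)) {
//       if (void* mapPtr = GrVkMemory::MapAlloc(gpu, alloc)) {
//           memcpy(mapPtr, srcData, srcSize);
//           GrVkMemory::FlushMappedAlloc(gpu, alloc, 0, srcSize);
//           GrVkMemory::UnmapAlloc(gpu, alloc);
//       }
//       // Use the buffer on the GPU, then eventually:
//       GrVkMemory::FreeBufferMemory(gpu, GrVkBuffer::kVertex_Type, alloc);
//   }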