/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/GrVkMemory.h"

#include "include/gpu/vk/GrVkMemoryAllocator.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkUtil.h"

using AllocationPropertyFlags = GrVkMemoryAllocator::AllocationPropertyFlags;
using BufferUsage = GrVkMemoryAllocator::BufferUsage;

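// Maps a GrVkBuffer::Type (and whether the buffer is dynamic) onto the allocator's BufferUsage
// categories: vertex/index/texel buffers are GPU-only unless dynamic, uniform buffers are always
// CPU-written and GPU-read, and copy staging buffers stay CPU-only.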
static BufferUsage get_buffer_usage(GrVkBuffer::Type type, bool dynamic) {
    switch (type) {
        case GrVkBuffer::kVertex_Type: // fall through
        case GrVkBuffer::kIndex_Type: // fall through
        case GrVkBuffer::kTexel_Type:
            return dynamic ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
        case GrVkBuffer::kUniform_Type:
            SkASSERT(dynamic);
            return BufferUsage::kCpuWritesGpuReads;
        case GrVkBuffer::kCopyRead_Type: // fall through
        case GrVkBuffer::kCopyWrite_Type:
            return BufferUsage::kCpuOnly;
    }
    SK_ABORT("Invalid GrVkBuffer::Type");
}

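// Requests memory for the buffer from the GrVkMemoryAllocator based on its usage, binds the
// resulting allocation to the VkBuffer, and fills out |alloc|. If binding fails, the allocation
// is released again and false is returned.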
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          bool dynamic,
                                          GrVkAlloc* alloc) {
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    GrVkMemoryAllocator::BufferUsage usage = get_buffer_usage(type, dynamic);

    AllocationPropertyFlags propFlags;
    if (usage == GrVkMemoryAllocator::BufferUsage::kCpuWritesGpuReads) {
        // It is generally fine (and often better) to keep buffers persistently mapped.
        // TODO: AMD's guide for the VulkanMemoryAllocator notes two cases where keeping a buffer
        // mapped can hurt. The first is when running on Windows 7 or 8 (Windows 10 is fine); by
        // the time Vulkan support ships, machines older than Windows 10 should be increasingly
        // rare. The second is when running on an AMD card and using the special device-local,
        // host-mappable memory. However, we generally don't pick that memory since we've found it
        // slower than the cached host-visible memory. If we ever need to special-case either of
        // these issues, we can add checks for them here.
        propFlags = AllocationPropertyFlags::kPersistentlyMapped;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (!allocator->allocateMemoryForBuffer(buffer, usage, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind buffer
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindBufferMemory(gpu->device(), buffer,
                                                                   alloc->fMemory,
                                                                   alloc->fOffset));
    if (err) {
        FreeBufferMemory(gpu, type, *alloc);
        return false;
    }

    return true;
}

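// Releases buffer memory: allocations that carry a GrVkBackendMemory handle are returned to the
// GrVkMemoryAllocator, while a bare VkDeviceMemory (no backend handle) is freed directly.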
void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}

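// Images whose memory requirements exceed this size are given their own dedicated allocation
// rather than being sub-allocated by the memory allocator.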
const VkDeviceSize kMaxSmallImageSize = 256 * 1024;

bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    SkASSERT(!linearTiling);
    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
    GrVkBackendMemory memory = 0;

    VkMemoryRequirements memReqs;
    GR_VK_CALL(gpu->vkInterface(), GetImageMemoryRequirements(gpu->device(), image, &memReqs));

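    // Pick allocation properties: protected contexts require protected memory; large images (or
    // devices whose caps mandate it) get a dedicated allocation; everything else can be
    // sub-allocated normally.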
    AllocationPropertyFlags propFlags;
    if (gpu->protectedContext()) {
        propFlags = AllocationPropertyFlags::kProtected;
    } else if (memReqs.size > kMaxSmallImageSize ||
               gpu->vkCaps().shouldAlwaysUseDedicatedImageMemory()) {
        propFlags = AllocationPropertyFlags::kDedicatedAllocation;
    } else {
        propFlags = AllocationPropertyFlags::kNone;
    }

    if (!allocator->allocateMemoryForImage(image, propFlags, &memory)) {
        return false;
    }
    allocator->getAllocInfo(memory, alloc);

    // Bind image
    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindImageMemory(gpu->device(), image,
                                                                  alloc->fMemory, alloc->fOffset));
    if (err) {
        FreeImageMemory(gpu, linearTiling, *alloc);
        return false;
    }

    return true;
}

void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                 const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->freeMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
    }
}

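// Maps the allocation for CPU access. Allocations owned by the GrVkMemoryAllocator are mapped
// through it; otherwise vkMapMemory is called on the raw VkDeviceMemory and nullptr is returned
// on failure. For non-coherent memory the allocation's offset and size must be aligned to
// nonCoherentAtomSize so that flush/invalidate ranges stay within the allocation.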
void* GrVkMemory::MapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    SkASSERT(GrVkAlloc::kMappable_Flag & alloc.fFlags);
#ifdef SK_DEBUG
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
        SkASSERT(0 == (alloc.fOffset & (alignment - 1)));
        SkASSERT(0 == (alloc.fSize & (alignment - 1)));
    }
#endif
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        return allocator->mapMemory(alloc.fBackendMemory);
    }

    void* mapPtr;
    VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(), alloc.fMemory,
                                                            alloc.fOffset,
                                                            alloc.fSize, 0, &mapPtr));
    if (err) {
        mapPtr = nullptr;
    }
    return mapPtr;
}

void GrVkMemory::UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
    if (alloc.fBackendMemory) {
        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
        allocator->unmapMemory(alloc.fBackendMemory);
    } else {
        GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
    }
}

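// Builds a VkMappedMemoryRange for a flush/invalidate of [offset, offset + size) within the
// allocation. The start is rounded down and the size rounded up to the given alignment
// (nonCoherentAtomSize), as Vulkan requires for mapped memory ranges of non-coherent memory.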
void GrVkMemory::GetNonCoherentMappedMemoryRange(const GrVkAlloc& alloc, VkDeviceSize offset,
                                                 VkDeviceSize size, VkDeviceSize alignment,
                                                 VkMappedMemoryRange* range) {
    SkASSERT(alloc.fFlags & GrVkAlloc::kNoncoherent_Flag);
    offset = offset + alloc.fOffset;
    VkDeviceSize offsetDiff = offset & (alignment - 1);
    offset = offset - offsetDiff;
    size = (size + alignment - 1) & ~(alignment - 1);
#ifdef SK_DEBUG
    SkASSERT(offset >= alloc.fOffset);
    SkASSERT(offset + size <= alloc.fOffset + alloc.fSize);
    SkASSERT(0 == (offset & (alignment - 1)));
    SkASSERT(size > 0);
    SkASSERT(0 == (size & (alignment - 1)));
#endif

    memset(range, 0, sizeof(VkMappedMemoryRange));
    range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range->memory = alloc.fMemory;
    range->offset = offset;
    range->size = size;
}

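// Flushes CPU writes to a mapped, non-coherent allocation so they become visible to the GPU.
// Coherent allocations need no explicit flush, so this is a no-op for them.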
void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
                                  VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->flushMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(), 1,
                                                                   &mappedMemoryRange));
        }
    }
}

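// Invalidates a mapped, non-coherent allocation so that GPU writes become visible to the CPU.
// As with flushing, this is a no-op for coherent allocations.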
void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc,
                                       VkDeviceSize offset, VkDeviceSize size) {
    if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
        SkASSERT(offset == 0);
        SkASSERT(size <= alloc.fSize);
        if (alloc.fBackendMemory) {
            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
            allocator->invalidateMappedMemory(alloc.fBackendMemory, offset, size);
        } else {
            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
            VkMappedMemoryRange mappedMemoryRange;
            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
                                                        &mappedMemoryRange);
            GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(), 1,
                                                                        &mappedMemoryRange));
        }
    }
}