/*
 * Copyright 2022 Google LLC.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef skgpu_VulkanMemoryAllocator_DEFINED
#define skgpu_VulkanMemoryAllocator_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/core/SkString.h"
#include "include/gpu/vk/VulkanTypes.h"
#include "include/private/gpu/vk/SkiaVulkan.h"

#include <cstdint>
#include <utility>

namespace skgpu {

class VulkanMemoryAllocator : public SkRefCnt {
public:
    enum AllocationPropertyFlags {
        kNone_AllocationPropertyFlag                = 0b0000,
        // Allocation will be placed in its own VkDeviceMemory and not suballocated from some larger
        // block.
        kDedicatedAllocation_AllocationPropertyFlag = 0b0001,
        // Says that the backing memory can only be accessed by the device. Additionally the device
        // may lazily allocate the memory. This cannot be used with buffers that will be host
        // visible. Setting this flag does not guarantee that we will allocate memory that respects
        // it, but we will try to prefer memory that can respect it.
        kLazyAllocation_AllocationPropertyFlag      = 0b0010,
        // The allocation will be mapped immediately and stay mapped until it is destroyed. This
        // flag is only valid for buffers which are host visible (i.e. must have a usage other than
        // BufferUsage::kGpuOnly).
        kPersistentlyMapped_AllocationPropertyFlag  = 0b0100,
        // Allocation can only be accessed by the device using a protected context.
        kProtected_AllocationPropertyFlag           = 0b1000,
    };
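
    // Illustrative comment only (not part of the interface): since these property flags are plain
    // bit masks, a caller would combine them with bitwise OR and pass the result through the
    // uint32_t allocationPropertyFlags parameters declared below. A minimal sketch, assuming a
    // concrete allocator implementation:
    //
    //   uint32_t propertyFlags =
    //           VulkanMemoryAllocator::kDedicatedAllocation_AllocationPropertyFlag |
    //           VulkanMemoryAllocator::kProtected_AllocationPropertyFlag;
    //   // propertyFlags can then be handed to allocateImageMemory()/allocateBufferMemory().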

    enum class BufferUsage {
        // Buffers that will only be accessed from the device (large const buffers) will always be
        // in device local memory.
        kGpuOnly,
        // Buffers that typically will be updated multiple times by the host and read on the gpu
        // (e.g. uniform or vertex buffers). CPU writes will generally be sequential in the buffer
        // and will try to take advantage of the write-combined nature of the gpu buffers. Thus this
        // will always be mappable and coherent memory, and it will prefer to be in device local
        // memory.
        kCpuWritesGpuReads,
        // Buffers that will be accessed on the host and copied to another GPU resource (transfer
        // buffers). Will always be mappable and coherent memory.
        kTransfersFromCpuToGpu,
        // Buffers which are typically written to by the GPU and then read on the host. Will always
        // be mappable memory, and will prefer cached memory.
        kTransfersFromGpuToCpu,
    };

    virtual VkResult allocateImageMemory(VkImage image,
                                         uint32_t allocationPropertyFlags,
                                         skgpu::VulkanBackendMemory* memory) = 0;

    virtual VkResult allocateBufferMemory(VkBuffer buffer,
                                          BufferUsage usage,
                                          uint32_t allocationPropertyFlags,
                                          skgpu::VulkanBackendMemory* memory) = 0;
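
    // A minimal usage sketch (illustrative comment only; `allocator` is assumed to be a concrete
    // VulkanMemoryAllocator implementation and `buffer` a valid VkBuffer): allocate backing memory
    // for a host-written buffer, then fetch the resulting skgpu::VulkanAlloc via getAllocInfo()
    // (declared below).
    //
    //   skgpu::VulkanBackendMemory backendMemory;
    //   VkResult result = allocator->allocateBufferMemory(
    //           buffer,
    //           VulkanMemoryAllocator::BufferUsage::kCpuWritesGpuReads,
    //           VulkanMemoryAllocator::kNone_AllocationPropertyFlag,
    //           &backendMemory);
    //   if (result == VK_SUCCESS) {
    //       skgpu::VulkanAlloc alloc;
    //       allocator->getAllocInfo(backendMemory, &alloc);
    //       // alloc now describes the backing memory for this allocation.
    //   }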

    // Fills out the passed in skgpu::VulkanAlloc struct for the passed in
    // skgpu::VulkanBackendMemory.
    virtual void getAllocInfo(const skgpu::VulkanBackendMemory&, skgpu::VulkanAlloc*) const = 0;

    // Maps the entire allocation and returns a pointer to the start of the allocation. The
    // implementation may map more memory than just the allocation, but the returned pointer must
    // point at the start of the memory for the requested allocation.
    virtual void* mapMemory(const skgpu::VulkanBackendMemory&) { return nullptr; }
    virtual VkResult mapMemory(const skgpu::VulkanBackendMemory& memory, void** data) {
        *data = this->mapMemory(memory);
        // VK_ERROR_INITIALIZATION_FAILED is a bogus result to return from this function, but it is
        // just something to return that is not VK_SUCCESS and can't be interpreted by a caller to
        // mean something specific happened like device lost or oom. This will be removed once we
        // update clients to implement this virtual.
        return *data ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
    }
    virtual void unmapMemory(const skgpu::VulkanBackendMemory&) = 0;
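
    // A minimal sketch of the mapping flow (illustrative comment only; `allocator` and
    // `backendMemory` are assumed from the allocateBufferMemory() sketch above, the buffer is
    // assumed host visible, and `srcData`/`srcSize` are hypothetical client data):
    //
    //   void* mapped = nullptr;
    //   if (allocator->mapMemory(backendMemory, &mapped) == VK_SUCCESS) {
    //       memcpy(mapped, srcData, srcSize);  // write through the mapped pointer
    //       allocator->unmapMemory(backendMemory);
    //   }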

    // The following two calls are used for managing non-coherent memory. The offset is relative to
    // the start of the allocation and not the underlying VkDeviceMemory. Additionally the client
    // must make sure that the offset + size passed in is less than or equal to the allocation
    // size. It is the responsibility of the implementation to make sure all alignment requirements
    // are followed. The client should not have to deal with any sort of alignment issues.
    virtual void flushMappedMemory(const skgpu::VulkanBackendMemory&, VkDeviceSize, VkDeviceSize) {}
    virtual VkResult flushMemory(const skgpu::VulkanBackendMemory& memory,
                                 VkDeviceSize offset,
                                 VkDeviceSize size) {
        this->flushMappedMemory(memory, offset, size);
        return VK_SUCCESS;
    }
    virtual void invalidateMappedMemory(const skgpu::VulkanBackendMemory&,
                                        VkDeviceSize,
                                        VkDeviceSize) {}
    virtual VkResult invalidateMemory(const skgpu::VulkanBackendMemory& memory,
                                      VkDeviceSize offset,
                                      VkDeviceSize size) {
        this->invalidateMappedMemory(memory, offset, size);
        return VK_SUCCESS;
    }
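
    // For non-coherent memory, a CPU write through the mapped pointer would be followed by a
    // flush so the device sees it (hedged sketch, reusing the hypothetical `allocator`,
    // `backendMemory`, `mapped`, `srcData`, and `srcSize` names from the comments above):
    //
    //   memcpy(mapped, srcData, srcSize);
    //   allocator->flushMemory(backendMemory, /*offset=*/0, /*size=*/srcSize);
    //
    // Conversely, invalidateMemory() would be called before the CPU reads data the GPU wrote.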

    virtual void freeMemory(const skgpu::VulkanBackendMemory&) = 0;

    // Returns both the total amount of memory allocated by this allocator and the total amount of
    // that memory currently in use by allocations. The first element of the returned pair is the
    // total allocated memory; the second is the total used memory.
    virtual std::pair<uint64_t, uint64_t> totalAllocatedAndUsedMemory() const = 0;
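
    // One-line usage sketch (illustrative comment only, assuming `allocator` as above):
    //
    //   auto [allocatedBytes, usedBytes] = allocator->totalAllocatedAndUsedMemory();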

    virtual void vmaDefragment() {}
    virtual void dumpVmaStats(SkString *out, const char *sep = ", ") const {}
};

} // namespace skgpu

#endif // skgpu_VulkanMemoryAllocator_DEFINED