/*
 * Copyright 2021 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef skgpu_graphite_BufferManager_DEFINED
#define skgpu_graphite_BufferManager_DEFINED

#include "include/core/SkRefCnt.h"
#include "src/gpu/BufferWriter.h"
#include "src/gpu/graphite/DrawTypes.h"
#include "src/gpu/graphite/ResourceTypes.h"

#include <array>
#include <vector>

namespace skgpu::graphite {

class Buffer;
class Caps;
class Context;
class GlobalCache;
class QueueManager;
class Recording;
class ResourceProvider;

/**
 * DrawBufferManager controls writing to buffer data ranges within larger, cacheable Buffers and
 * automatically handles either mapping or copying via transfer buffer depending on what the GPU
 * hardware supports for the requested buffer type and use case. It is intended for repeatedly
 * uploading dynamic data to the GPU.
 */
class DrawBufferManager {
public:
    DrawBufferManager(ResourceProvider*, const Caps*);
    ~DrawBufferManager();

    std::tuple<VertexWriter, BindBufferInfo> getVertexWriter(size_t requiredBytes);
    std::tuple<IndexWriter, BindBufferInfo> getIndexWriter(size_t requiredBytes);
    std::tuple<UniformWriter, BindBufferInfo> getUniformWriter(size_t requiredBytes);
    std::tuple<UniformWriter, BindBufferInfo> getSsboWriter(size_t requiredBytes);

    // Returns a pointer to a mapped storage buffer slice without a specific data writer.
    std::tuple<void*, BindBufferInfo> getMappedStorage(size_t requiredBytes);

    // Utilities that return an unmapped buffer slice with a particular usage. These slices are
    // intended to be accessed only by the GPU and are configured to prioritize GPU reads.
    BindBufferInfo getStorage(size_t requiredBytes);
    BindBufferInfo getVertexStorage(size_t requiredBytes);
    BindBufferInfo getIndexStorage(size_t requiredBytes);
    BindBufferInfo getIndirectStorage(size_t requiredBytes);

    // Returns the last 'unusedBytes' of the allocation made by the most recent call to
    // getVertexWriter(). Assumes that 'unusedBytes' is less than the 'requiredBytes' of that
    // original allocation.
    void returnVertexBytes(size_t unusedBytes);

    size_t alignUniformBlockSize(size_t dataSize) {
        return SkAlignTo(dataSize, fCurrentBuffers[kUniformBufferIndex].fStartAlignment);
    }

    // Finalizes all buffers and transfers ownership of them to a Recording.
    void transferToRecording(Recording*);

private:
    struct BufferInfo {
        BufferInfo(BufferType type, size_t blockSize, const Caps* caps);

        const BufferType fType;
        const size_t fStartAlignment;
        const size_t fBlockSize;
        sk_sp<Buffer> fBuffer{};
        // fTransferBuffer is null when the draw buffer itself can be mapped; see
        // Caps::drawBufferCanBeMapped() for details.
        sk_sp<Buffer> fTransferBuffer{};
        size_t fOffset = 0;

        Buffer* getMappableBuffer() {
            return fTransferBuffer ? fTransferBuffer.get() : fBuffer.get();
        }
    };
    std::pair<void*, BindBufferInfo> prepareMappedBindBuffer(BufferInfo* info,
                                                             size_t requiredBytes);
    BindBufferInfo prepareBindBuffer(BufferInfo* info, size_t requiredBytes, bool mappable = false);

    ResourceProvider* const fResourceProvider;
    const Caps* const fCaps;

    static constexpr size_t kVertexBufferIndex = 0;
    static constexpr size_t kIndexBufferIndex = 1;
    static constexpr size_t kUniformBufferIndex = 2;
    static constexpr size_t kStorageBufferIndex = 3;
    static constexpr size_t kGpuOnlyStorageBufferIndex = 4;
    static constexpr size_t kVertexStorageBufferIndex = 5;
    static constexpr size_t kIndexStorageBufferIndex = 6;
    static constexpr size_t kIndirectStorageBufferIndex = 7;
    std::array<BufferInfo, 8> fCurrentBuffers;

    // Vector of buffer and transfer buffer pairs.
    std::vector<std::pair<sk_sp<Buffer>, sk_sp<Buffer>>> fUsedBuffers;
};
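
// A minimal usage sketch (illustrative only, not part of the original header): a RenderStep-style
// caller reserves a vertex slice, writes through the returned writer, and later hands the filled
// buffers off to a Recording. The SkPoint payload and the 'recording' variable are hypothetical
// stand-ins for whatever the caller is actually drawing.
//
//     DrawBufferManager* bufferMgr = ...;  // e.g. provided by the active Recorder
//
//     auto [writer, bindInfo] = bufferMgr->getVertexWriter(4 * sizeof(SkPoint));
//     if (writer) {
//         writer << SkPoint{0, 0} << SkPoint{1, 0} << SkPoint{0, 1} << SkPoint{1, 1};
//         // 'bindInfo' names the Buffer and byte offset to bind when recording this draw.
//     }
//
//     // Once all draws for the Recording have been written, move ownership of the buffers so
//     // the pending upload/copy work travels with the Recording.
//     bufferMgr->transferToRecording(recording);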

/**
 * The StaticBufferManager is the one-time-only analog to DrawBufferManager and provides "static"
 * Buffers to RenderSteps and other Context-lifetime-tied objects, where the Buffers' contents will
 * not change and can benefit from prioritizing GPU reads. The assumed use case is that they remain
 * read-only on the GPU as well, so a single static buffer can be shared by all Recorders.
 *
 * Unlike DrawBufferManager's getXWriter() functions, which return both a Writer and a
 * BindBufferInfo, StaticBufferManager returns only a Writer and accepts a BindBufferInfo* as an
 * argument. That BindBufferInfo is rewritten with the final binding info for the GPU-private data
 * once it can be determined, i.e. after *all* static buffers have been requested.
 */
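
// A minimal usage sketch (illustrative only, not part of the original header): the caller keeps
// the BindBufferInfo alive until finalize() patches it with the packed, GPU-private location.
// 'kStaticDataSize', the written payload, and the surrounding objects are hypothetical.
//
//     StaticBufferManager staticMgr(resourceProvider, caps);
//
//     BindBufferInfo staticVertices;  // must outlive finalize()
//     VertexWriter writer = staticMgr.getVertexWriter(kStaticDataSize, &staticVertices);
//     // ... write the immutable vertex data through 'writer' ...
//
//     // Packs all requested data into GPU-private buffers, records the copy task on the queue,
//     // and rewrites 'staticVertices' with the final buffer + offset.
//     auto result = staticMgr.finalize(context, queueManager, globalCache);
//     if (result == StaticBufferManager::FinishResult::kFailure) {
//         // could not create or copy the static buffers
//     }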

class StaticBufferManager {
public:
    StaticBufferManager(ResourceProvider*, const Caps*);
    ~StaticBufferManager();

    // The passed-in BindBufferInfos are updated when finalize() is later called, to point to the
    // packed, GPU-private buffer at the appropriate offset. The data written to the returned
    // Writer is copied to the private buffer at that offset. 'binding' must live until finalize()
    // returns.
    VertexWriter getVertexWriter(size_t size, BindBufferInfo* binding);
    // TODO: Update the tessellation index buffer generation functions to use an IndexWriter so
    // this can return an IndexWriter vs. a VertexWriter that happens to just write uint16s...
    VertexWriter getIndexWriter(size_t size, BindBufferInfo* binding);

    enum class FinishResult : int {
        kFailure, // Unable to create or copy static buffers
        kSuccess, // Successfully created static buffers and added GPU tasks to the queue
        kNoWork   // No static buffers required, no GPU tasks added to the queue
    };

    // Finalizes all buffers and records a copy task to compact and privatize static data. The
    // final static buffers will become owned by the Context's GlobalCache.
    FinishResult finalize(Context*, QueueManager*, GlobalCache*);

private:
    struct CopyRange {
        BindBufferInfo fSource;  // The CPU-to-GPU buffer and offset for the source of the copy
        BindBufferInfo* fTarget; // The late-assigned destination of the copy
        size_t fSize;            // The number of bytes to copy
    };
    struct BufferInfo {
        BufferInfo(BufferType type, const Caps* caps);

        bool createAndUpdateBindings(ResourceProvider*,
                                     Context*,
                                     QueueManager*,
                                     GlobalCache*) const;
        void reset() {
            fData.clear();
            fTotalRequiredBytes = 0;
        }

        const BufferType fBufferType;
        const size_t fAlignment;

        std::vector<CopyRange> fData;
        size_t fTotalRequiredBytes;
    };

    void* prepareStaticData(BufferInfo* info, size_t requiredBytes, BindBufferInfo* target);

    ResourceProvider* const fResourceProvider;

    // The source data that's copied into a final GPU-private buffer
    BufferInfo fVertexBufferInfo;
    BufferInfo fIndexBufferInfo;

    std::vector<sk_sp<Buffer>> fUsedBuffers;

    sk_sp<Buffer> fCurrentTransferBuffer;
    size_t fCurrentOffset;
};

} // namespace skgpu::graphite

#endif // skgpu_graphite_BufferManager_DEFINED