//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// mtl_buffer_pool.h:
//    Defines class interface for BufferPool, managing a pool of mtl::Buffer
//

#ifndef LIBANGLE_RENDERER_METAL_MTL_BUFFER_POOL_H_
#define LIBANGLE_RENDERER_METAL_MTL_BUFFER_POOL_H_

#include "libANGLE/renderer/metal/mtl_resources.h"

#include <deque>

namespace rx
{

class ContextMtl;

namespace mtl
{

// Policy selecting which kind of memory BufferPool uses for the mtl::Buffers it allocates.
enum class BufferPoolMemPolicy
{
    // Always allocate buffer in shared memory, useful for dynamic small buffer.
    // This translates to MTLResourceStorageModeShared.
    AlwaysSharedMem,
    // Always allocate buffer in GPU dedicated memory. Note: a CPU side copy is also allocated so
    // that buffer can still be mapped on CPU side.
    // This translates to MTLResourceStorageModeManaged on macOS or MTLResourceStorageModeShared on
    // iOS.
    AlwaysGPUMem,
    // Auto allocate buffer in shared memory if it is small. GPU otherwise.
    Auto,
};

// A buffer pool is conceptually an infinitely long buffer. Each time you write to the buffer,
// you will always write to a previously unused portion. After a series of writes, you must flush
// the buffer data to the device. Buffer lifetime currently assumes that each new allocation will
// last as long or longer than each prior allocation.
//
// Buffer pool is used to implement a variety of data streaming operations in Metal, such
// as for immediate vertex array and element array data, and other dynamic data.
//
// Internally buffer pool keeps a collection of mtl::Buffer. When we write past the end of a
// currently active mtl::Buffer we keep it until it is no longer in use. We then mark it available
// for future allocations in a free list.
50 class BufferPool 51 { 52 public: 53 BufferPool(); 54 // - alwaysAllocNewBuffer=true will always allocate new buffer or reuse free buffer on 55 // allocate(), regardless of whether current buffer still has unused portion or not. 56 // - memPolicy: indicate the allocated buffers should be in shared memory or not. 57 // See BufferPoolMemPolicy. 58 BufferPool(bool alwaysAllocNewBuffer); 59 BufferPool(bool alwaysAllocNewBuffer, BufferPoolMemPolicy memPolicy); 60 ~BufferPool(); 61 62 // Init is called after the buffer creation so that the alignment can be specified later. 63 void initialize(Context *context, size_t initialSize, size_t alignment, size_t maxBuffers); 64 // Calling this without initialize() will have same effect as calling initialize(). 65 // If called after initialize(), the old pending buffers will be flushed and might be re-used if 66 // their size are big enough for the requested initialSize parameter. 67 angle::Result reset(ContextMtl *contextMtl, 68 size_t initialSize, 69 size_t alignment, 70 size_t maxBuffers); 71 72 // This call will allocate a new region at the end of the buffer. It internally may trigger 73 // a new buffer to be created (which is returned in the optional parameter 74 // `newBufferAllocatedOut`). The new region will be in the returned buffer at given offset. If 75 // a memory pointer is given, the buffer will be automatically map()ed. 76 angle::Result allocate(ContextMtl *contextMtl, 77 size_t sizeInBytes, 78 uint8_t **ptrOut = nullptr, 79 BufferRef *bufferOut = nullptr, 80 size_t *offsetOut = nullptr, 81 bool *newBufferAllocatedOut = nullptr); 82 83 // After a sequence of CPU writes, call commit to ensure the data is visible to the device. 84 angle::Result commit(ContextMtl *contextMtl, bool flushEntireBuffer = false); 85 86 // This releases all the buffers that have been allocated since this was last called. 87 void releaseInFlightBuffers(ContextMtl *contextMtl); 88 89 // This frees resources immediately. 
90 void destroy(ContextMtl *contextMtl); 91 getCurrentBuffer()92 const BufferRef &getCurrentBuffer() { return mBuffer; } 93 getAlignment()94 size_t getAlignment() { return mAlignment; } 95 void updateAlignment(Context *context, size_t alignment); 96 getMaxBuffers()97 size_t getMaxBuffers() const { return mMaxBuffers; } 98 99 // Set whether allocate() will always allocate new buffer or attempting to append to previous 100 // buffer or not. Default is false. setAlwaysAllocateNewBuffer(bool e)101 void setAlwaysAllocateNewBuffer(bool e) { mAlwaysAllocateNewBuffer = e; } 102 setMemoryPolicy(BufferPoolMemPolicy policy)103 void setMemoryPolicy(BufferPoolMemPolicy policy) { mMemPolicy = policy; } 104 105 // Set all subsequent allocated buffers should always use shared memory setAlwaysUseSharedMem()106 void setAlwaysUseSharedMem() { setMemoryPolicy(BufferPoolMemPolicy::AlwaysSharedMem); } 107 108 // Set all subsequent allocated buffers should always use GPU memory setAlwaysUseGPUMem()109 void setAlwaysUseGPUMem() { setMemoryPolicy(BufferPoolMemPolicy::AlwaysGPUMem); } 110 111 private: 112 bool shouldAllocateInSharedMem(ContextMtl *contextMtl) const; 113 void reset(); 114 angle::Result allocateNewBuffer(ContextMtl *contextMtl); 115 void destroyBufferList(ContextMtl *contextMtl, std::deque<BufferRef> *buffers); 116 angle::Result finalizePendingBuffer(ContextMtl *contextMtl); 117 size_t mInitialSize; 118 BufferRef mBuffer; 119 uint32_t mNextAllocationOffset; 120 uint32_t mLastFlushOffset; 121 size_t mSize; 122 size_t mAlignment; 123 124 std::deque<BufferRef> mInFlightBuffers; 125 std::deque<BufferRef> mBufferFreeList; 126 127 size_t mBuffersAllocated; 128 size_t mMaxBuffers; 129 BufferPoolMemPolicy mMemPolicy; 130 bool mAlwaysAllocateNewBuffer; 131 }; 132 133 } // namespace mtl 134 } // namespace rx 135 136 #endif /* LIBANGLE_RENDERER_METAL_MTL_BUFFER_POOL_H_ */ 137