1 /* 2 * Copyright 2020 Google LLC 3 * 4 * Use of this source code is governed by a BSD-style license that can be 5 * found in the LICENSE file. 6 */ 7 8 #ifndef GrRingBuffer_DEFINED 9 #define GrRingBuffer_DEFINED 10 11 #include "src/gpu/GrGpuBuffer.h" 12 13 #include <vector> 14 15 class GrGpu; 16 17 /** 18 * A wrapper for a GPU buffer that allocates slices in a continuous ring. 19 * 20 * It's assumed that suballocate and startSubmit are always called in the same thread, 21 * and that finishSubmit could be called in a separate thread. 22 */ 23 class GrRingBuffer { 24 public: GrRingBuffer(GrGpu * gpu,size_t size,size_t alignment,GrGpuBufferType intendedType)25 GrRingBuffer(GrGpu* gpu, size_t size, size_t alignment, GrGpuBufferType intendedType) 26 : fGpu(gpu) 27 , fTotalSize(size) 28 , fAlignment(alignment) 29 , fType(intendedType) 30 , fNewAllocation(false) 31 , fHead(0) 32 , fTail(0) 33 , fGenID(0) { 34 // We increment fHead and fTail without bound and let overflow handle any wrapping. 35 // Because of this, size needs to be a power of two. 36 SkASSERT(SkIsPow2(size)); 37 } 38 39 struct Slice { 40 GrGpuBuffer* fBuffer; 41 size_t fOffset; 42 }; 43 Slice suballocate(size_t size); 44 45 // Backends should call startSubmit() at submit time 46 void startSubmit(GrGpu*); 47 size()48 size_t size() const { return fTotalSize; } 49 50 private: 51 size_t getAllocationOffset(size_t size); 52 struct SubmitData { 53 GrRingBuffer* fOwner; 54 size_t fLastHead; 55 size_t fGenID; 56 }; 57 static void FinishSubmit(void*); 58 59 GrGpu* fGpu; 60 sk_sp<GrGpuBuffer> fCurrentBuffer; 61 std::vector<sk_sp<GrGpuBuffer>> fPreviousBuffers; // previous buffers we've used in this submit 62 size_t fTotalSize; 63 size_t fAlignment; 64 GrGpuBufferType fType; 65 bool fNewAllocation; // true if there's been a new allocation in this submit 66 size_t fHead; // where we start allocating 67 size_t fTail; // where we start deallocating 68 uint64_t fGenID; // incremented when createBuffer is called 69 }; 70 71 #endif 72