1 /*
2 * Copyright 2020 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/GrRingBuffer.h"
9
10 #include "src/gpu/GrDirectContextPriv.h"
11 #include "src/gpu/GrGpu.h"
12 #include "src/gpu/GrResourceProvider.h"
13
// Get offset into buffer that has enough space for size.
// Returns fTotalSize (a sentinel one past the last valid offset) if no space.
size_t GrRingBuffer::getAllocationOffset(size_t size) {
    // capture current state locally (because fTail could be overwritten by the completion handler)
    size_t head, tail;
    head = fHead;
    tail = fTail;

    // The head and tail indices increment without bound, wrapping with overflow,
    // so we need to mod them down to the actual bounds of the allocation to determine
    // which blocks are available.
    // NOTE(review): the mask arithmetic assumes fTotalSize is a power of two --
    // confirm wherever fTotalSize is set (it is only doubled in this file).
    size_t modHead = head & (fTotalSize - 1);
    size_t modTail = tail & (fTotalSize - 1);

    // Because head and tail wrap without bound, equal mod-positions are ambiguous:
    // equal raw indices mean empty, unequal raw indices at the same mod-position
    // mean head has lapped tail, i.e. the buffer is completely full.
    bool full = (head != tail && modHead == modTail);

    if (full) {
        return fTotalSize;  // sentinel: no space anywhere
    }

    // case 1: free space lies at the beginning and/or the end of the buffer
    if (modHead >= modTail) {
        // check for room at the end
        if (fTotalSize - modHead < size) {
            // no room at the end, check the beginning
            if (modTail < size) {
                // no room at the beginning
                return fTotalSize;
            }
            // we are going to allocate from the beginning, adjust head to '0' position
            // (the skipped tail-end bytes are wasted; tail will catch up past them)
            head += fTotalSize - modHead;
            modHead = 0;
        }
    // case 2: free space lies in the middle of the buffer, check for room there
    } else if (modTail - modHead < size) {
        // no room in the middle
        return fTotalSize;
    }

    // Commit: advance the head past this allocation, padded to the required alignment
    // so the next suballocation starts on an aligned boundary.
    fHead = SkAlignTo(head + size, fAlignment);
    return modHead;
}
56
suballocate(size_t size)57 GrRingBuffer::Slice GrRingBuffer::suballocate(size_t size) {
58 fNewAllocation = true;
59 if (fCurrentBuffer) {
60 size_t offset = this->getAllocationOffset(size);
61 if (offset < fTotalSize) {
62 return { fCurrentBuffer.get(), offset };
63 }
64
65 // Try to grow allocation (old allocation will age out).
66 fTotalSize *= 2;
67 // Add current buffer to be tracked for next submit.
68 fPreviousBuffers.push_back(std::move(fCurrentBuffer));
69 }
70
71 GrResourceProvider* resourceProvider = fGpu->getContext()->priv().resourceProvider();
72 fCurrentBuffer = resourceProvider->createBuffer(fTotalSize, fType, kDynamic_GrAccessPattern);
73
74 SkASSERT(fCurrentBuffer);
75 fHead = 0;
76 fTail = 0;
77 fGenID++;
78 size_t offset = this->getAllocationOffset(size);
79 SkASSERT(offset < fTotalSize);
80 return { fCurrentBuffer.get(), offset };
81 }
82
83 // used when current command buffer/command list is submitted
startSubmit(GrGpu * gpu)84 void GrRingBuffer::startSubmit(GrGpu* gpu) {
85 for (unsigned int i = 0; i < fPreviousBuffers.size(); ++i) {
86 fPreviousBuffers[i]->unmap();
87 gpu->takeOwnershipOfBuffer(std::move(fPreviousBuffers[i]));
88 }
89 fPreviousBuffers.clear();
90
91 if (fNewAllocation) {
92 #ifdef SK_BUILD_FOR_MAC
93 // Since we're using a Managed buffer on MacOS we need to unmap to write back to GPU
94 // TODO: once we set up persistently mapped UPLOAD buffers on D3D, we can remove the
95 // platform restriction.
96 fCurrentBuffer->unmap();
97 #endif
98 SubmitData* submitData = new SubmitData();
99 submitData->fOwner = this;
100 submitData->fLastHead = fHead;
101 submitData->fGenID = fGenID;
102 gpu->addFinishedProc(FinishSubmit, submitData);
103 fNewAllocation = false;
104 }
105 }
106
107 // used when current command buffer/command list is completed
FinishSubmit(void * finishedContext)108 void GrRingBuffer::FinishSubmit(void* finishedContext) {
109 GrRingBuffer::SubmitData* submitData = (GrRingBuffer::SubmitData*)finishedContext;
110 if (submitData && submitData->fOwner && submitData->fGenID == submitData->fOwner->fGenID) {
111 submitData->fOwner->fTail = submitData->fLastHead;
112 submitData->fOwner = nullptr;
113 }
114 delete submitData;
115 }
116