/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkTraceMemoryDump.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/gl/GrGLBuffer.h"
#include "src/gpu/gl/GrGLGpu.h"

#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X)

#if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
#define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface)
#define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call)
#define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface)
#else
#define CLEAR_ERROR_BEFORE_ALLOC(iface)
#define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call)
#define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR
#endif

#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif
sk_sp<GrGLBuffer> GrGLBuffer::Make(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
                                   GrAccessPattern accessPattern, const void* data) {
    if (gpu->glCaps().transferBufferType() == GrGLCaps::kNone_TransferBufferType &&
        (GrGpuBufferType::kXferCpuToGpu == intendedType ||
         GrGpuBufferType::kXferGpuToCpu == intendedType)) {
        return nullptr;
    }

    sk_sp<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern, data));
    if (0 == buffer->bufferID()) {
        return nullptr;
    }
    return buffer;
}

// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW

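// Maps the buffer's intended type and access pattern to a GL usage hint: draw-style usages for
// buffers the GPU reads during draws, read-style usages for GPU-to-CPU transfer buffers.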
inline static GrGLenum gr_to_gl_access_pattern(GrGpuBufferType bufferType,
                                               GrAccessPattern accessPattern) {
    auto drawUsage = [](GrAccessPattern pattern) {
        switch (pattern) {
            case kDynamic_GrAccessPattern:
                // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
                return DYNAMIC_DRAW_PARAM;
            case kStatic_GrAccessPattern:
                return GR_GL_STATIC_DRAW;
            case kStream_GrAccessPattern:
                return GR_GL_STREAM_DRAW;
        }
        SK_ABORT("Unexpected access pattern");
    };

    auto readUsage = [](GrAccessPattern pattern) {
        switch (pattern) {
            case kDynamic_GrAccessPattern:
                return GR_GL_DYNAMIC_READ;
            case kStatic_GrAccessPattern:
                return GR_GL_STATIC_READ;
            case kStream_GrAccessPattern:
                return GR_GL_STREAM_READ;
        }
        SK_ABORT("Unexpected access pattern");
    };

    auto usageType = [&drawUsage, &readUsage](GrGpuBufferType type, GrAccessPattern pattern) {
        switch (type) {
            case GrGpuBufferType::kVertex:
            case GrGpuBufferType::kIndex:
            case GrGpuBufferType::kXferCpuToGpu:
                return drawUsage(pattern);
            case GrGpuBufferType::kXferGpuToCpu:
                return readUsage(pattern);
        }
        SK_ABORT("Unexpected gpu buffer type.");
    };

    return usageType(bufferType, accessPattern);
}

GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
                       GrAccessPattern accessPattern, const void* data)
        : INHERITED(gpu, size, intendedType, accessPattern)
        , fIntendedType(intendedType)
        , fBufferID(0)
        , fUsage(gr_to_gl_access_pattern(intendedType, accessPattern))
        , fGLSizeInBytes(0)
        , fHasAttachedToTexture(false) {
    GL_CALL(GenBuffers(1, &fBufferID));
    if (fBufferID) {
        GrGLenum target = gpu->bindBuffer(fIntendedType, this);
        CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface());
        // make sure driver can allocate memory for this buffer
        GL_ALLOC_CALL(gpu->glInterface(), BufferData(target,
                                                     (GrGLsizeiptr) size,
                                                     data,
                                                     fUsage));
        if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) {
            GL_CALL(DeleteBuffers(1, &fBufferID));
            fBufferID = 0;
        } else {
            fGLSizeInBytes = size;
        }
    }
    VALIDATE();
    this->registerWithCache(SkBudgeted::kYes);
    if (!fBufferID) {
        this->resourcePriv().removeScratchKey();
    }
}

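// Accessors for the owning GrGpu downcast to the GL backend; only valid before the resource has
// been destroyed.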
inline GrGLGpu* GrGLBuffer::glGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrGLGpu*>(this->getGpu());
}

inline const GrGLCaps& GrGLBuffer::glCaps() const {
    return this->glGpu()->glCaps();
}

void GrGLBuffer::onRelease() {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    if (!this->wasDestroyed()) {
        VALIDATE();
        // make sure we've not been abandoned or already released
        if (fBufferID) {
            GL_CALL(DeleteBuffers(1, &fBufferID));
            fBufferID = 0;
            fGLSizeInBytes = 0;
        }
        fMapPtr = nullptr;
        VALIDATE();
    }

    INHERITED::onRelease();
}

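// The GL context was abandoned, so the buffer object cannot (and need not) be deleted; just drop
// our bookkeeping for it.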
void GrGLBuffer::onAbandon() {
    fBufferID = 0;
    fGLSizeInBytes = 0;
    fMapPtr = nullptr;
    VALIDATE();
    INHERITED::onAbandon();
}

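// Maps the buffer with whichever mechanism the GL implementation supports: glMapBuffer,
// glMapBufferRange, or Chromium's MapBufferSubData extension. If mapping is unsupported,
// fMapPtr is left null.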
void GrGLBuffer::onMap() {
    SkASSERT(fBufferID);
    SkASSERT(!this->wasDestroyed());
    VALIDATE();
    SkASSERT(!this->isMapped());

    // TODO: Make this a function parameter.
    bool readOnly = (GrGpuBufferType::kXferGpuToCpu == fIntendedType);

    // Handling dirty context is done in the bindBuffer call
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            if (!readOnly) {
                // Let driver know it can discard the old data
                if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->size()) {
                    GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
                }
            }
            GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        }
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (fGLSizeInBytes != this->size()) {
                GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
            }
            GrGLbitfield access;
            if (readOnly) {
                access = GR_GL_MAP_READ_BIT;
            } else {
                access = GR_GL_MAP_WRITE_BIT;
                if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) {
                    // TODO: Make this a function parameter.
                    access |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
                }
            }
            GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(), access));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (fGLSizeInBytes != this->size()) {
                GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
            }
            GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(),
                                                  readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        }
    }
    fGLSizeInBytes = this->size();
    VALIDATE();
}

void GrGLBuffer::onUnmap() {
    SkASSERT(fBufferID);
    VALIDATE();
    SkASSERT(this->isMapped());
    if (0 == fBufferID) {
        fMapPtr = nullptr;
        return;
    }
    // bind buffer handles the dirty context
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            SkDEBUGFAIL("Shouldn't get here.");
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: // fall through
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            GL_CALL(UnmapBuffer(target));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType:
            this->glGpu()->bindBuffer(fIntendedType, this); // TODO: Is this needed?
            GL_CALL(UnmapBufferSubData(fMapPtr));
            break;
    }
    fMapPtr = nullptr;
}

bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
    SkASSERT(fBufferID);
    if (this->wasDestroyed()) {
        return false;
    }

    SkASSERT(!this->isMapped());
    VALIDATE();
    if (srcSizeInBytes > this->size()) {
        return false;
    }
    SkASSERT(srcSizeInBytes <= this->size());
    // bindbuffer handles dirty context
    GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);

    if (this->glCaps().useBufferDataNullHint()) {
        if (this->size() == srcSizeInBytes) {
            GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage));
        } else {
            // Before we call glBufferSubData we give the driver a hint using
            // glBufferData with nullptr. This makes the old buffer contents
            // inaccessible to future draws. The GPU may still be processing
            // draws that reference the old contents. With this hint it can
            // assign a different allocation for the new contents to avoid
            // flushing the gpu past draws consuming the old contents.
            // TODO I think we actually want to try calling bufferData here
            GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
            GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src));
        }
        fGLSizeInBytes = this->size();
    } else {
        // Note that we're cheating on the size here. Currently no methods
        // allow a partial update that preserves contents of non-updated
        // portions of the buffer (map() does a glBufferData(..size, nullptr..))
        GL_CALL(BufferData(target, srcSizeInBytes, src, fUsage));
        fGLSizeInBytes = srcSizeInBytes;
    }
    VALIDATE();
    return true;
}

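// Reports the GL buffer object ID as the memory backing of this resource in memory dumps.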
void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
                                  const SkString& dumpName) const {
    SkString buffer_id;
    buffer_id.appendU32(this->bufferID());
    traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
                                      buffer_id.c_str());
}

#ifdef SK_DEBUG

void GrGLBuffer::validate() const {
    SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes);
    SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->size());
}

#endif