/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8 #include "include/core/SkTraceMemoryDump.h"
9 #include "src/gpu/GrGpuResourcePriv.h"
10 #include "src/gpu/gl/GrGLBuffer.h"
11 #include "src/gpu/gl/GrGLCaps.h"
12 #include "src/gpu/gl/GrGLGpu.h"
13
// Convenience wrappers that route GL calls through this buffer's owning GrGLGpu.
#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X)

// Issues an allocating GL call (e.g. BufferData) and yields the resulting GL error so the
// caller can detect OOM. When the caps say error checks may be skipped, the call is made
// without checking and GR_GL_NO_ERROR is returned unconditionally.
#define GL_ALLOC_CALL(call)                                            \
    [&] {                                                              \
        if (this->glGpu()->glCaps().skipErrorChecks()) {               \
            GR_GL_CALL(this->glGpu()->glInterface(), call);            \
            return static_cast<GrGLenum>(GR_GL_NO_ERROR);              \
        } else {                                                       \
            this->glGpu()->clearErrorsAndCheckForOOM();                \
            GR_GL_CALL_NOERRCHECK(this->glGpu()->glInterface(), call); \
            return this->glGpu()->getErrorAndCheckForOOM();            \
        }                                                              \
    }()

// Debug-only invariant check; compiles away to nothing in release builds.
#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while (false)
#endif
34
Make(GrGLGpu * gpu,size_t size,GrGpuBufferType intendedType,GrAccessPattern accessPattern,const void * data)35 sk_sp<GrGLBuffer> GrGLBuffer::Make(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
36 GrAccessPattern accessPattern, const void* data) {
37 if (gpu->glCaps().transferBufferType() == GrGLCaps::TransferBufferType::kNone &&
38 (GrGpuBufferType::kXferCpuToGpu == intendedType ||
39 GrGpuBufferType::kXferGpuToCpu == intendedType)) {
40 return nullptr;
41 }
42
43 sk_sp<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern, data));
44 if (0 == buffer->bufferID()) {
45 return nullptr;
46 }
47 return buffer;
48 }
49
50 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
51 // objects are implemented as client-side-arrays on tile-deferred architectures.
52 #define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW
53
gr_to_gl_access_pattern(GrGpuBufferType bufferType,GrAccessPattern accessPattern,const GrGLCaps & caps)54 inline static GrGLenum gr_to_gl_access_pattern(GrGpuBufferType bufferType,
55 GrAccessPattern accessPattern,
56 const GrGLCaps& caps) {
57 auto drawUsage = [](GrAccessPattern pattern) {
58 switch (pattern) {
59 case kDynamic_GrAccessPattern:
60 // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
61 return DYNAMIC_DRAW_PARAM;
62 case kStatic_GrAccessPattern:
63 return GR_GL_STATIC_DRAW;
64 case kStream_GrAccessPattern:
65 return GR_GL_STREAM_DRAW;
66 }
67 SkUNREACHABLE;
68 };
69
70 auto readUsage = [](GrAccessPattern pattern) {
71 switch (pattern) {
72 case kDynamic_GrAccessPattern:
73 return GR_GL_DYNAMIC_READ;
74 case kStatic_GrAccessPattern:
75 return GR_GL_STATIC_READ;
76 case kStream_GrAccessPattern:
77 return GR_GL_STREAM_READ;
78 }
79 SkUNREACHABLE;
80 };
81
82 auto usageType = [&drawUsage, &readUsage, &caps](GrGpuBufferType type,
83 GrAccessPattern pattern) {
84 // GL_NV_pixel_buffer_object adds transfer buffers but not the related <usage> values.
85 if (caps.transferBufferType() == GrGLCaps::TransferBufferType::kNV_PBO) {
86 return drawUsage(pattern);
87 }
88 switch (type) {
89 case GrGpuBufferType::kVertex:
90 case GrGpuBufferType::kIndex:
91 case GrGpuBufferType::kDrawIndirect:
92 case GrGpuBufferType::kXferCpuToGpu:
93 case GrGpuBufferType::kUniform:
94 return drawUsage(pattern);
95 case GrGpuBufferType::kXferGpuToCpu:
96 return readUsage(pattern);
97 }
98 SkUNREACHABLE;
99 };
100
101 return usageType(bufferType, accessPattern);
102 }
103
GrGLBuffer(GrGLGpu * gpu,size_t size,GrGpuBufferType intendedType,GrAccessPattern accessPattern,const void * data)104 GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
105 GrAccessPattern accessPattern, const void* data)
106 : INHERITED(gpu, size, intendedType, accessPattern)
107 , fIntendedType(intendedType)
108 , fBufferID(0)
109 , fUsage(gr_to_gl_access_pattern(intendedType, accessPattern, gpu->glCaps()))
110 , fGLSizeInBytes(0)
111 , fHasAttachedToTexture(false) {
112 GL_CALL(GenBuffers(1, &fBufferID));
113 if (fBufferID) {
114 GrGLenum target = gpu->bindBuffer(fIntendedType, this);
115 GrGLenum error = GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)size, data, fUsage));
116 if (error != GR_GL_NO_ERROR) {
117 GL_CALL(DeleteBuffers(1, &fBufferID));
118 fBufferID = 0;
119 } else {
120 fGLSizeInBytes = size;
121 }
122 }
123 VALIDATE();
124 this->registerWithCache(SkBudgeted::kYes);
125 if (!fBufferID) {
126 this->resourcePriv().removeScratchKey();
127 }
128 }
129
glGpu() const130 inline GrGLGpu* GrGLBuffer::glGpu() const {
131 SkASSERT(!this->wasDestroyed());
132 return static_cast<GrGLGpu*>(this->getGpu());
133 }
134
glCaps() const135 inline const GrGLCaps& GrGLBuffer::glCaps() const {
136 return this->glGpu()->glCaps();
137 }
138
onRelease()139 void GrGLBuffer::onRelease() {
140 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
141
142 if (!this->wasDestroyed()) {
143 VALIDATE();
144 // make sure we've not been abandoned or already released
145 if (fBufferID) {
146 GL_CALL(DeleteBuffers(1, &fBufferID));
147 fBufferID = 0;
148 fGLSizeInBytes = 0;
149 }
150 fMapPtr = nullptr;
151 VALIDATE();
152 }
153
154 INHERITED::onRelease();
155 }
156
onAbandon()157 void GrGLBuffer::onAbandon() {
158 fBufferID = 0;
159 fGLSizeInBytes = 0;
160 fMapPtr = nullptr;
161 VALIDATE();
162 INHERITED::onAbandon();
163 }
164
onMap()165 void GrGLBuffer::onMap() {
166 SkASSERT(fBufferID);
167 SkASSERT(!this->wasDestroyed());
168 VALIDATE();
169 SkASSERT(!this->isMapped());
170
171 // TODO: Make this a function parameter.
172 bool readOnly = (GrGpuBufferType::kXferGpuToCpu == fIntendedType);
173
174 // Handling dirty context is done in the bindBuffer call
175 switch (this->glCaps().mapBufferType()) {
176 case GrGLCaps::kNone_MapBufferType:
177 return;
178 case GrGLCaps::kMapBuffer_MapBufferType: {
179 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
180 if (!readOnly) {
181 // Let driver know it can discard the old data
182 if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->size()) {
183 GrGLenum error =
184 GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
185 if (error != GR_GL_NO_ERROR) {
186 return;
187 }
188 }
189 }
190 GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
191 break;
192 }
193 case GrGLCaps::kMapBufferRange_MapBufferType: {
194 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
195 // Make sure the GL buffer size agrees with fDesc before mapping.
196 if (fGLSizeInBytes != this->size()) {
197 GrGLenum error = GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
198 if (error != GR_GL_NO_ERROR) {
199 return;
200 }
201 }
202 GrGLbitfield access;
203 if (readOnly) {
204 access = GR_GL_MAP_READ_BIT;
205 } else {
206 access = GR_GL_MAP_WRITE_BIT;
207 if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) {
208 // TODO: Make this a function parameter.
209 access |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
210 }
211 }
212 GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(), access));
213 break;
214 }
215 case GrGLCaps::kChromium_MapBufferType: {
216 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
217 // Make sure the GL buffer size agrees with fDesc before mapping.
218 if (fGLSizeInBytes != this->size()) {
219 GrGLenum error = GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
220 if (error != GR_GL_NO_ERROR) {
221 return;
222 }
223 }
224 GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(),
225 readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
226 break;
227 }
228 }
229 fGLSizeInBytes = this->size();
230 VALIDATE();
231 }
232
onUnmap()233 void GrGLBuffer::onUnmap() {
234 SkASSERT(fBufferID);
235 VALIDATE();
236 SkASSERT(this->isMapped());
237 if (0 == fBufferID) {
238 fMapPtr = nullptr;
239 return;
240 }
241 // bind buffer handles the dirty context
242 switch (this->glCaps().mapBufferType()) {
243 case GrGLCaps::kNone_MapBufferType:
244 SkDEBUGFAIL("Shouldn't get here.");
245 return;
246 case GrGLCaps::kMapBuffer_MapBufferType: // fall through
247 case GrGLCaps::kMapBufferRange_MapBufferType: {
248 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
249 GL_CALL(UnmapBuffer(target));
250 break;
251 }
252 case GrGLCaps::kChromium_MapBufferType:
253 this->glGpu()->bindBuffer(fIntendedType, this); // TODO: Is this needed?
254 GL_CALL(UnmapBufferSubData(fMapPtr));
255 break;
256 }
257 fMapPtr = nullptr;
258 }
259
onUpdateData(const void * src,size_t srcSizeInBytes)260 bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
261 SkASSERT(fBufferID);
262 if (this->wasDestroyed()) {
263 return false;
264 }
265
266 SkASSERT(!this->isMapped());
267 VALIDATE();
268 if (srcSizeInBytes > this->size()) {
269 return false;
270 }
271 SkASSERT(srcSizeInBytes <= this->size());
272 // bindbuffer handles dirty context
273 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
274
275 if (this->glCaps().useBufferDataNullHint()) {
276 if (this->size() == srcSizeInBytes) {
277 GrGLenum error =
278 GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)srcSizeInBytes, src, fUsage));
279 if (error != GR_GL_NO_ERROR) {
280 return false;
281 }
282 } else {
283 // Before we call glBufferSubData we give the driver a hint using
284 // glBufferData with nullptr. This makes the old buffer contents
285 // inaccessible to future draws. The GPU may still be processing
286 // draws that reference the old contents. With this hint it can
287 // assign a different allocation for the new contents to avoid
288 // flushing the gpu past draws consuming the old contents.
289 // TODO I think we actually want to try calling bufferData here
290 GrGLenum error =
291 GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)this->size(), nullptr, fUsage));
292 if (error != GR_GL_NO_ERROR) {
293 return false;
294 }
295 GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src));
296 }
297 fGLSizeInBytes = this->size();
298 } else {
299 // Note that we're cheating on the size here. Currently no methods
300 // allow a partial update that preserves contents of non-updated
301 // portions of the buffer (map() does a glBufferData(..size, nullptr..))
302 GrGLenum error =
303 GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)srcSizeInBytes, src, fUsage));
304 if (error != GR_GL_NO_ERROR) {
305 return false;
306 }
307 fGLSizeInBytes = srcSizeInBytes;
308 }
309 VALIDATE();
310 return true;
311 }
312
setMemoryBacking(SkTraceMemoryDump * traceMemoryDump,const SkString & dumpName) const313 void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
314 const SkString& dumpName) const {
315 SkString buffer_id;
316 buffer_id.appendU32(this->bufferID());
317 traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
318 buffer_id.c_str());
319 }
320
321 #ifdef SK_DEBUG
322
validate() const323 void GrGLBuffer::validate() const {
324 SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes);
325 SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->size());
326 }
327
328 #endif
329