1 /*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/gl/GrGLBuffer.h"
9
10 #include "include/core/SkTraceMemoryDump.h"
11 #include "src/core/SkTraceEvent.h"
12 #include "src/gpu/ganesh/GrGpuResourcePriv.h"
13 #include "src/gpu/ganesh/gl/GrGLCaps.h"
14 #include "src/gpu/ganesh/gl/GrGLGpu.h"
15
// Convenience wrappers that route GL calls through this buffer's owning GrGLGpu's interface.
#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X)

// Issues a GL allocation call and returns the resulting GL error (GR_GL_NO_ERROR on success).
// When the caps say error checks may be skipped we assume success; otherwise pending errors
// are cleared before the call and re-queried afterwards so out-of-memory can be detected.
#define GL_ALLOC_CALL(gpu, call)                                   \
    [&] {                                                          \
        if (gpu->glCaps().skipErrorChecks()) {                     \
            GR_GL_CALL(gpu->glInterface(), call);                  \
            return static_cast<GrGLenum>(GR_GL_NO_ERROR);          \
        } else {                                                   \
            gpu->clearErrorsAndCheckForOOM();                      \
            GR_GL_CALL_NOERRCHECK(gpu->glInterface(), call);       \
            return gpu->getErrorAndCheckForOOM();                  \
        }                                                          \
    }()
30
Make(GrGLGpu * gpu,size_t size,GrGpuBufferType intendedType,GrAccessPattern accessPattern)31 sk_sp<GrGLBuffer> GrGLBuffer::Make(GrGLGpu* gpu,
32 size_t size,
33 GrGpuBufferType intendedType,
34 GrAccessPattern accessPattern) {
35 if (gpu->glCaps().transferBufferType() == GrGLCaps::TransferBufferType::kNone &&
36 (GrGpuBufferType::kXferCpuToGpu == intendedType ||
37 GrGpuBufferType::kXferGpuToCpu == intendedType)) {
38 return nullptr;
39 }
40
41 sk_sp<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern,
42 /*label=*/"MakeGlBuffer"));
43 if (0 == buffer->bufferID()) {
44 return nullptr;
45 }
46 return buffer;
47 }
48
49 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
50 // objects are implemented as client-side-arrays on tile-deferred architectures.
51 #define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW
52
gr_to_gl_access_pattern(GrGpuBufferType bufferType,GrAccessPattern accessPattern,const GrGLCaps & caps)53 inline static GrGLenum gr_to_gl_access_pattern(GrGpuBufferType bufferType,
54 GrAccessPattern accessPattern,
55 const GrGLCaps& caps) {
56 auto drawUsage = [](GrAccessPattern pattern) {
57 switch (pattern) {
58 case kDynamic_GrAccessPattern:
59 // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
60 return DYNAMIC_DRAW_PARAM;
61 case kStatic_GrAccessPattern:
62 return GR_GL_STATIC_DRAW;
63 case kStream_GrAccessPattern:
64 return GR_GL_STREAM_DRAW;
65 }
66 SkUNREACHABLE;
67 };
68
69 auto readUsage = [](GrAccessPattern pattern) {
70 switch (pattern) {
71 case kDynamic_GrAccessPattern:
72 return GR_GL_DYNAMIC_READ;
73 case kStatic_GrAccessPattern:
74 return GR_GL_STATIC_READ;
75 case kStream_GrAccessPattern:
76 return GR_GL_STREAM_READ;
77 }
78 SkUNREACHABLE;
79 };
80
81 auto usageType = [&drawUsage, &readUsage, &caps](GrGpuBufferType type,
82 GrAccessPattern pattern) {
83 // GL_NV_pixel_buffer_object adds transfer buffers but not the related <usage> values.
84 if (caps.transferBufferType() == GrGLCaps::TransferBufferType::kNV_PBO) {
85 return drawUsage(pattern);
86 }
87 switch (type) {
88 case GrGpuBufferType::kVertex:
89 case GrGpuBufferType::kIndex:
90 case GrGpuBufferType::kDrawIndirect:
91 case GrGpuBufferType::kXferCpuToGpu:
92 case GrGpuBufferType::kUniform:
93 return drawUsage(pattern);
94 case GrGpuBufferType::kXferGpuToCpu:
95 return readUsage(pattern);
96 }
97 SkUNREACHABLE;
98 };
99
100 return usageType(bufferType, accessPattern);
101 }
102
GrGLBuffer(GrGLGpu * gpu,size_t size,GrGpuBufferType intendedType,GrAccessPattern accessPattern,std::string_view label)103 GrGLBuffer::GrGLBuffer(GrGLGpu* gpu,
104 size_t size,
105 GrGpuBufferType intendedType,
106 GrAccessPattern accessPattern,
107 std::string_view label)
108 : INHERITED(gpu, size, intendedType, accessPattern, label)
109 , fIntendedType(intendedType)
110 , fBufferID(0)
111 , fUsage(gr_to_gl_access_pattern(intendedType, accessPattern, gpu->glCaps()))
112 , fHasAttachedToTexture(false) {
113 GL_CALL(GenBuffers(1, &fBufferID));
114 if (fBufferID) {
115 GrGLenum target = gpu->bindBuffer(fIntendedType, this);
116 GrGLenum error = GL_ALLOC_CALL(this->glGpu(), BufferData(target,
117 (GrGLsizeiptr)size,
118 nullptr,
119 fUsage));
120 if (error != GR_GL_NO_ERROR) {
121 GL_CALL(DeleteBuffers(1, &fBufferID));
122 fBufferID = 0;
123 }
124 }
125 this->registerWithCache(skgpu::Budgeted::kYes);
126 if (!fBufferID) {
127 this->resourcePriv().removeScratchKey();
128 }
129 }
130
// Returns the owning gpu downcast to GrGLGpu; only valid while the resource is alive.
inline GrGLGpu* GrGLBuffer::glGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrGLGpu*>(this->getGpu());
}
135
// Convenience accessor for the GL capabilities of the owning gpu.
inline const GrGLCaps& GrGLBuffer::glCaps() const {
    return this->glGpu()->glCaps();
}
139
onRelease()140 void GrGLBuffer::onRelease() {
141 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
142
143 if (!this->wasDestroyed()) {
144 // make sure we've not been abandoned or already released
145 if (fBufferID) {
146 GL_CALL(DeleteBuffers(1, &fBufferID));
147 fBufferID = 0;
148 }
149 fMapPtr = nullptr;
150 }
151
152 INHERITED::onRelease();
153 }
154
// The GL context is gone; forget the GL object without making any GL calls.
void GrGLBuffer::onAbandon() {
    fBufferID = 0;
    fMapPtr = nullptr;
    INHERITED::onAbandon();
}
160
invalidate_buffer(GrGLGpu * gpu,GrGLenum target,GrGLenum usage,GrGLuint bufferID,size_t bufferSize)161 static inline GrGLenum SK_WARN_UNUSED_RESULT invalidate_buffer(GrGLGpu* gpu,
162 GrGLenum target,
163 GrGLenum usage,
164 GrGLuint bufferID,
165 size_t bufferSize) {
166 switch (gpu->glCaps().invalidateBufferType()) {
167 case GrGLCaps::InvalidateBufferType::kNone:
168 return GR_GL_NO_ERROR;
169 case GrGLCaps::InvalidateBufferType::kNullData:
170 return GL_ALLOC_CALL(gpu, BufferData(target, bufferSize, nullptr, usage));
171 case GrGLCaps::InvalidateBufferType::kInvalidate:
172 GR_GL_CALL(gpu->glInterface(), InvalidateBufferData(bufferID));
173 return GR_GL_NO_ERROR;
174 }
175 SkUNREACHABLE;
176 }
177
onMap(MapType type)178 void GrGLBuffer::onMap(MapType type) {
179 SkASSERT(fBufferID);
180 SkASSERT(!this->wasDestroyed());
181 SkASSERT(!this->isMapped());
182
183 // Handling dirty context is done in the bindBuffer call
184 switch (this->glCaps().mapBufferType()) {
185 case GrGLCaps::kNone_MapBufferType:
186 return;
187 case GrGLCaps::kMapBuffer_MapBufferType: {
188 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
189 if (type == MapType::kWriteDiscard) {
190 GrGLenum error = invalidate_buffer(this->glGpu(),
191 target,
192 fUsage,
193 fBufferID,
194 this->size());
195 if (error != GR_GL_NO_ERROR) {
196 return;
197 }
198 }
199 GrGLenum access = type == MapType::kRead ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY;
200 GL_CALL_RET(fMapPtr, MapBuffer(target, access));
201 break;
202 }
203 case GrGLCaps::kMapBufferRange_MapBufferType: {
204 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
205 GrGLbitfield access;
206 switch (type) {
207 case MapType::kRead:
208 access = GR_GL_MAP_READ_BIT;
209 break;
210 case MapType::kWriteDiscard:
211 access = GR_GL_MAP_WRITE_BIT | GR_GL_MAP_INVALIDATE_BUFFER_BIT;
212 break;
213 }
214 GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(), access));
215 break;
216 }
217 case GrGLCaps::kChromium_MapBufferType: {
218 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
219 GrGLenum access = type == MapType::kRead ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY;
220 GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(), access));
221 break;
222 }
223 }
224 }
225
onUnmap(MapType)226 void GrGLBuffer::onUnmap(MapType) {
227 SkASSERT(fBufferID);
228 // bind buffer handles the dirty context
229 switch (this->glCaps().mapBufferType()) {
230 case GrGLCaps::kNone_MapBufferType:
231 SkUNREACHABLE;
232 case GrGLCaps::kMapBuffer_MapBufferType: // fall through
233 case GrGLCaps::kMapBufferRange_MapBufferType: {
234 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
235 GL_CALL(UnmapBuffer(target));
236 break;
237 }
238 case GrGLCaps::kChromium_MapBufferType:
239 this->glGpu()->bindBuffer(fIntendedType, this); // TODO: Is this needed?
240 GL_CALL(UnmapBufferSubData(fMapPtr));
241 break;
242 }
243 fMapPtr = nullptr;
244 }
245
onClearToZero()246 bool GrGLBuffer::onClearToZero() {
247 SkASSERT(fBufferID);
248
249 // We could improve this on GL 4.3+ with glClearBufferData (also GL_ARB_clear_buffer_object).
250 this->onMap(GrGpuBuffer::MapType::kWriteDiscard);
251 if (fMapPtr) {
252 std::memset(fMapPtr, 0, this->size());
253 this->onUnmap(GrGpuBuffer::MapType::kWriteDiscard);
254 return true;
255 }
256
257 void* zeros = sk_calloc_throw(this->size());
258 bool result = this->updateData(zeros, 0, this->size(), /*preserve=*/false);
259 sk_free(zeros);
260 return result;
261 }
262
onUpdateData(const void * src,size_t offset,size_t size,bool preserve)263 bool GrGLBuffer::onUpdateData(const void* src, size_t offset, size_t size, bool preserve) {
264 SkASSERT(fBufferID);
265
266 // bindbuffer handles dirty context
267 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
268 if (!preserve) {
269 GrGLenum error = invalidate_buffer(this->glGpu(), target, fUsage, fBufferID, this->size());
270 if (error != GR_GL_NO_ERROR) {
271 return false;
272 }
273 }
274 GL_CALL(BufferSubData(target, offset, size, src));
275 return true;
276 }
277
onSetLabel()278 void GrGLBuffer::onSetLabel() {
279 SkASSERT(fBufferID);
280 if (!this->getLabel().empty()) {
281 const std::string label = "_Skia_" + this->getLabel();
282 if (this->glGpu()->glCaps().debugSupport()) {
283 GL_CALL(ObjectLabel(GR_GL_BUFFER, fBufferID, -1, label.c_str()));
284 }
285 }
286 }
287
setMemoryBacking(SkTraceMemoryDump * traceMemoryDump,const SkString & dumpName) const288 void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
289 const SkString& dumpName) const {
290 SkString buffer_id;
291 buffer_id.appendU32(this->bufferID());
292 traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer", buffer_id.c_str());
293 }
294