/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpu_DEFINED
#define GrGpu_DEFINED

#include "include/core/SkPath.h"
#include "include/core/SkSpan.h"
#include "include/core/SkSurface.h"
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/GrTypes.h"
#include "include/private/SkTArray.h"
#include "src/core/SkTInternalLList.h"
#include "src/gpu/GrAttachment.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrOpsRenderPass.h"
#include "src/gpu/GrPixmap.h"
#include "src/gpu/GrSwizzle.h"
#include "src/gpu/GrXferProcessor.h"

class GrAttachment;
class GrBackendRenderTarget;
class GrBackendSemaphore;
struct GrContextOptions;
class GrDirectContext;
class GrGpuBuffer;
class GrGLContext;
class GrPath;
class GrPathRenderer;
class GrPathRendererChain;
class GrPipeline;
class GrGeometryProcessor;
class GrRenderTarget;
class GrRingBuffer;
class GrSemaphore;
class GrStagingBufferManager;
class GrStencilSettings;
class GrSurface;
class GrTexture;
class GrThreadSafePipelineBuilder;
struct GrVkDrawableInfo;
class SkJSONWriter;

namespace SkSL {
class Compiler;
}

52 class SK_API GrGpu : public SkRefCnt {
53 public:
54 GrGpu(GrDirectContext* direct);
55 ~GrGpu() override;
56
getContext()57 GrDirectContext* getContext() { return fContext; }
getContext()58 const GrDirectContext* getContext() const { return fContext; }
59
setCurrentGrResourceTag(const GrGpuResourceTag & tag)60 void setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
61 if (fContext) {
62 fContext->setCurrentGrResourceTag(tag);
63 }
64 }
65
popGrResourceTag()66 void popGrResourceTag()
67 {
68 if (fContext) {
69 fContext->popGrResourceTag();
70 }
71 }
72
73 /**
74 * Gets the capabilities of the draw target.
75 */
caps()76 const GrCaps* caps() const { return fCaps.get(); }
refCaps()77 sk_sp<const GrCaps> refCaps() const { return fCaps; }
78
stagingBufferManager()79 virtual GrStagingBufferManager* stagingBufferManager() { return nullptr; }
80
uniformsRingBuffer()81 virtual GrRingBuffer* uniformsRingBuffer() { return nullptr; }
82
shaderCompiler()83 SkSL::Compiler* shaderCompiler() const { return fCompiler.get(); }
84
85 enum class DisconnectType {
86 // No cleanup should be attempted, immediately cease making backend API calls
87 kAbandon,
88 // Free allocated resources (not known by GrResourceCache) before returning and
89 // ensure no backend backend 3D API calls will be made after disconnect() returns.
90 kCleanup,
91 };
92
93 // Called by context when the underlying backend context is already or will be destroyed
94 // before GrDirectContext.
95 virtual void disconnect(DisconnectType);
96
97 virtual GrThreadSafePipelineBuilder* pipelineBuilder() = 0;
98 virtual sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() = 0;
99
100 // Called by GrDirectContext::isContextLost. Returns true if the backend Gpu object has gotten
101 // into an unrecoverable, lost state.
isDeviceLost()102 virtual bool isDeviceLost() const { return false; }
103
104 /**
105 * The GrGpu object normally assumes that no outsider is setting state
106 * within the underlying 3D API's context/device/whatever. This call informs
107 * the GrGpu that the state was modified and it shouldn't make assumptions
108 * about the state.
109 */
110 void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }
111
112 /**
113 * Creates a texture object. If renderable is kYes then the returned texture can
114 * be used as a render target by calling GrTexture::asRenderTarget(). Not all
115 * pixel configs can be used as render targets. Support for configs as textures
116 * or render targets can be checked using GrCaps.
117 *
118 * @param dimensions dimensions of the texture to be created.
119 * @param format the format for the texture (not currently used).
120 * @param renderable should the resulting texture be renderable
121 * @param renderTargetSampleCnt The number of samples to use for rendering if renderable is
122 * kYes. If renderable is kNo then this must be 1.
123 * @param budgeted does this texture count against the resource cache budget?
124 * @param isProtected should the texture be created as protected.
125 * @param texels array of mipmap levels containing texel data to load.
126 * If level i has pixels then it is assumed that its dimensions are
127 * max(1, floor(dimensions.fWidth / 2)) by
128 * max(1, floor(dimensions.fHeight / 2)).
129 * If texels[i].fPixels == nullptr for all i <= mipLevelCount or
130 * mipLevelCount is 0 then the texture's contents are uninitialized.
131 * If a level has non-null pixels, its row bytes must be a multiple of the
132 * config's bytes-per-pixel. The row bytes must be tight to the
133 * level width if !caps->writePixelsRowBytesSupport().
134 * If mipLevelCount > 1 and texels[i].fPixels != nullptr for any i > 0
135 * then all levels must have non-null pixels. All levels must have
136 * non-null pixels if GrCaps::createTextureMustSpecifyAllLevels() is true.
137 * @param textureColorType The color type interpretation of the texture for the purpose of
138 * of uploading texel data.
139 * @param srcColorType The color type of data in texels[].
140 * @param texelLevelCount the number of levels in 'texels'. May be 0, 1, or
141 * floor(max((log2(dimensions.fWidth), log2(dimensions.fHeight)))). It
142 * must be the latter if GrCaps::createTextureMustSpecifyAllLevels() is
143 * true.
144 * @return The texture object if successful, otherwise nullptr.
145 */
146 sk_sp<GrTexture> createTexture(SkISize dimensions,
147 const GrBackendFormat& format,
148 GrTextureType textureType,
149 GrRenderable renderable,
150 int renderTargetSampleCnt,
151 SkBudgeted budgeted,
152 GrProtected isProtected,
153 GrColorType textureColorType,
154 GrColorType srcColorType,
155 const GrMipLevel texels[],
156 int texelLevelCount);
157
158 /**
159 * Simplified createTexture() interface for when there is no initial texel data to upload.
160 */
161 sk_sp<GrTexture> createTexture(SkISize dimensions,
162 const GrBackendFormat& format,
163 GrTextureType textureType,
164 GrRenderable renderable,
165 int renderTargetSampleCnt,
166 GrMipmapped mipMapped,
167 SkBudgeted budgeted,
168 GrProtected isProtected);
169
170 sk_sp<GrTexture> createCompressedTexture(SkISize dimensions,
171 const GrBackendFormat& format,
172 SkBudgeted budgeted,
173 GrMipmapped mipMapped,
174 GrProtected isProtected,
175 const void* data, size_t dataSize);
176
177 /**
178 * Implements GrResourceProvider::wrapBackendTexture
179 */
180 sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture&,
181 GrWrapOwnership,
182 GrWrapCacheable,
183 GrIOType);
184
185 sk_sp<GrTexture> wrapCompressedBackendTexture(const GrBackendTexture&,
186 GrWrapOwnership,
187 GrWrapCacheable);
188
189 /**
190 * Implements GrResourceProvider::wrapRenderableBackendTexture
191 */
192 sk_sp<GrTexture> wrapRenderableBackendTexture(const GrBackendTexture&,
193 int sampleCnt,
194 GrWrapOwnership,
195 GrWrapCacheable);
196
197 /**
198 * Implements GrResourceProvider::wrapBackendRenderTarget
199 */
200 sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&);
201
202 /**
203 * Implements GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget
204 */
205 sk_sp<GrRenderTarget> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
206 const GrVkDrawableInfo&);
207
208 /**
209 * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
210 *
211 * @param size size of buffer to create.
212 * @param intendedType hint to the graphics subsystem about what the buffer will be used for.
213 * @param accessPattern hint to the graphics subsystem about how the data will be accessed.
214 * @param data optional data with which to initialize the buffer.
215 *
216 * @return the buffer if successful, otherwise nullptr.
217 */
218 sk_sp<GrGpuBuffer> createBuffer(size_t size, GrGpuBufferType intendedType,
219 GrAccessPattern accessPattern, const void* data = nullptr);
220
221 /**
222 * Resolves MSAA. The resolveRect must already be in the native destination space.
223 */
224 void resolveRenderTarget(GrRenderTarget*, const SkIRect& resolveRect);
225
226 /**
227 * Uses the base of the texture to recompute the contents of the other levels.
228 */
229 bool regenerateMipMapLevels(GrTexture*);
230
231 /**
232 * If the backend API has stateful texture bindings, this resets them back to defaults.
233 */
234 void resetTextureBindings();
235
236 /**
237 * Reads a rectangle of pixels from a render target. No sRGB/linear conversions are performed.
238 *
239 * @param surface the surface to read from
240 * @param rect the rectangle of pixels to read
241 * @param surfaceColorType the color type for this use of the surface.
242 * @param dstColorType the color type of the destination buffer.
243 * @param buffer memory to read the rectangle into.
244 * @param rowBytes the number of bytes between consecutive rows. Must be a multiple of
245 * dstColorType's bytes-per-pixel. Must be tight to width if
246 * !caps->readPixelsRowBytesSupport().
247 *
248 * @return true if the read succeeded, false if not. The read can fail
249 * because of the surface doesn't support reading, the color type
250 * is not allowed for the format of the surface or if the rectangle
251 * read is not contained in the surface.
252 */
253 bool readPixels(GrSurface* surface,
254 SkIRect rect,
255 GrColorType surfaceColorType,
256 GrColorType dstColorType,
257 void* buffer,
258 size_t rowBytes);
259
260 /**
261 * Updates the pixels in a rectangle of a surface. No sRGB/linear conversions are performed.
262 *
263 * @param surface the surface to write to.
264 * @param rect the rectangle of pixels to overwrite
265 * @param surfaceColorType the color type for this use of the surface.
266 * @param srcColorType the color type of the source buffer.
267 * @param texels array of mipmap levels containing texture data. Row bytes must be a
268 * multiple of srcColorType's bytes-per-pixel. Must be tight to level
269 * width if !caps->writePixelsRowBytesSupport().
270 * @param mipLevelCount number of levels in 'texels'
271 * @param prepForTexSampling After doing write pixels should the surface be prepared for texture
272 * sampling. This is currently only used by Vulkan for inline uploads
273 * to set that layout back to sampled after doing the upload. Inline
274 * uploads currently can happen between draws in a single op so it is
275 * not trivial to break up the OpsTask into two tasks when we see
276 * an inline upload. However, once we are able to support doing that
277 * we can remove this parameter.
278 *
279 * @return true if the write succeeded, false if not. The read can fail
280 * because of the surface doesn't support writing (e.g. read only),
281 * the color type is not allowed for the format of the surface or
282 * if the rectangle written is not contained in the surface.
283 */
284 bool writePixels(GrSurface* surface,
285 SkIRect rect,
286 GrColorType surfaceColorType,
287 GrColorType srcColorType,
288 const GrMipLevel texels[],
289 int mipLevelCount,
290 bool prepForTexSampling = false);
291
292 /**
293 * Helper for the case of a single level.
294 */
295 bool writePixels(GrSurface* surface,
296 SkIRect rect,
297 GrColorType surfaceColorType,
298 GrColorType srcColorType,
299 const void* buffer,
300 size_t rowBytes,
301 bool prepForTexSampling = false) {
302 GrMipLevel mipLevel = {buffer, rowBytes, nullptr};
303 return this->writePixels(surface,
304 rect,
305 surfaceColorType,
306 srcColorType,
307 &mipLevel,
308 1,
309 prepForTexSampling);
310 }
311
312 /**
313 * Updates the pixels in a rectangle of a texture using a buffer. If the texture is MIP mapped,
314 * the base level is written to.
315 *
316 * @param texture the texture to write to.
317 * @param rect the rectangle of pixels in the texture to overwrite
318 * @param textureColorType the color type for this use of the surface.
319 * @param bufferColorType the color type of the transfer buffer's pixel data
320 * @param transferBuffer GrBuffer to read pixels from (type must be "kXferCpuToGpu")
321 * @param offset offset from the start of the buffer
322 * @param rowBytes number of bytes between consecutive rows in the buffer. Must be a
323 * multiple of bufferColorType's bytes-per-pixel. Must be tight to
324 * rect.width() if !caps->writePixelsRowBytesSupport().
325 */
326 bool transferPixelsTo(GrTexture* texture,
327 SkIRect rect,
328 GrColorType textureColorType,
329 GrColorType bufferColorType,
330 sk_sp<GrGpuBuffer> transferBuffer,
331 size_t offset,
332 size_t rowBytes);
333
334 /**
335 * Reads the pixels from a rectangle of a surface into a buffer. Use
336 * GrCaps::SupportedRead::fOffsetAlignmentForTransferBuffer to determine the requirements for
337 * the buffer offset alignment. If the surface is a MIP mapped texture, the base level is read.
338 *
339 * If successful the row bytes in the buffer is always:
340 * GrColorTypeBytesPerPixel(bufferColorType) * rect.width()
341 *
342 * Asserts that the caller has passed a properly aligned offset and that the buffer is
343 * large enough to hold the result
344 *
345 * @param surface the surface to read from.
346 * @param rect the rectangle of pixels to read
347 * @param surfaceColorType the color type for this use of the surface.
348 * @param bufferColorType the color type of the transfer buffer's pixel data
349 * @param transferBuffer GrBuffer to write pixels to (type must be "kXferGpuToCpu")
350 * @param offset offset from the start of the buffer
351 */
352 bool transferPixelsFrom(GrSurface* surface,
353 SkIRect rect,
354 GrColorType surfaceColorType,
355 GrColorType bufferColorType,
356 sk_sp<GrGpuBuffer> transferBuffer,
357 size_t offset);
358
359 // Called to perform a surface to surface copy. Fallbacks to issuing a draw from the src to dst
360 // take place at higher levels and this function implement faster copy paths. The rect
361 // and point are pre-clipped. The src rect and implied dst rect are guaranteed to be within the
362 // src/dst bounds and non-empty. They must also be in their exact device space coords, including
363 // already being transformed for origin if need be. If canDiscardOutsideDstRect is set to true
364 // then we don't need to preserve any data on the dst surface outside of the copy.
365 bool copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
366 const SkIPoint& dstPoint);
367
368 // Returns a GrOpsRenderPass which OpsTasks send draw commands to instead of directly
369 // to the Gpu object. The 'bounds' rect is the content rect of the renderTarget.
370 // If a 'stencil' is provided it will be the one bound to 'renderTarget'. If one is not
371 // provided but 'renderTarget' has a stencil buffer then that is a signal that the
372 // render target's stencil buffer should be ignored.
373 GrOpsRenderPass* getOpsRenderPass(GrRenderTarget* renderTarget,
374 bool useMSAASurface,
375 GrAttachment* stencil,
376 GrSurfaceOrigin,
377 const SkIRect& bounds,
378 const GrOpsRenderPass::LoadAndStoreInfo&,
379 const GrOpsRenderPass::StencilLoadAndStoreInfo&,
380 const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
381 GrXferBarrierFlags renderPassXferBarriers);
382
383 // Called by GrDrawingManager when flushing.
384 // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
385 // insert any numSemaphore semaphores on the gpu and set the backendSemaphores to match the
386 // inserted semaphores.
387 void executeFlushInfo(SkSpan<GrSurfaceProxy*>,
388 SkSurface::BackendSurfaceAccess access,
389 const GrFlushInfo&,
390 const GrBackendSurfaceMutableState* newState);
391
392 bool submitToGpu(bool syncCpu);
393
394 virtual void submit(GrOpsRenderPass*) = 0;
395
396 virtual GrFence SK_WARN_UNUSED_RESULT insertFence() = 0;
397 virtual bool waitFence(GrFence) = 0;
398 virtual void deleteFence(GrFence) const = 0;
399
400 virtual std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(
401 bool isOwned = true) = 0;
402 virtual std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
403 GrSemaphoreWrapType,
404 GrWrapOwnership) = 0;
405 virtual void insertSemaphore(GrSemaphore* semaphore) = 0;
406 virtual void waitSemaphore(GrSemaphore* semaphore) = 0;
407
408 virtual void addFinishedProc(GrGpuFinishedProc finishedProc,
409 GrGpuFinishedContext finishedContext) = 0;
410 virtual void checkFinishProcs() = 0;
411 virtual void finishOutstandingGpuWork() = 0;
412
takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>)413 virtual void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) {}
414
415 /**
416 * Checks if we detected an OOM from the underlying 3D API and if so returns true and resets
417 * the internal OOM state to false. Otherwise, returns false.
418 */
419 bool checkAndResetOOMed();
420
421 /**
422 * Put this texture in a safe and known state for use across multiple contexts. Depending on
423 * the backend, this may return a GrSemaphore. If so, other contexts should wait on that
424 * semaphore before using this texture.
425 */
426 virtual std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;
427
428 /**
429 * Frees any backend specific objects that are not currently in use by the GPU. This is called
430 * when the client is trying to free up as much GPU memory as possible. We will not release
431 * resources connected to programs/pipelines since the cost to recreate those is significantly
432 * higher that other resources.
433 */
releaseUnlockedBackendObjects()434 virtual void releaseUnlockedBackendObjects() {}
435
436 ///////////////////////////////////////////////////////////////////////////
437 // Debugging and Stats
438
439 class Stats {
440 public:
441 #if GR_GPU_STATS
442 Stats() = default;
443
reset()444 void reset() { *this = {}; }
445
textureCreates()446 int textureCreates() const { return fTextureCreates; }
incTextureCreates()447 void incTextureCreates() { fTextureCreates++; }
448
textureUploads()449 int textureUploads() const { return fTextureUploads; }
incTextureUploads()450 void incTextureUploads() { fTextureUploads++; }
451
transfersToTexture()452 int transfersToTexture() const { return fTransfersToTexture; }
incTransfersToTexture()453 void incTransfersToTexture() { fTransfersToTexture++; }
454
transfersFromSurface()455 int transfersFromSurface() const { return fTransfersFromSurface; }
incTransfersFromSurface()456 void incTransfersFromSurface() { fTransfersFromSurface++; }
457
stencilAttachmentCreates()458 int stencilAttachmentCreates() const { return fStencilAttachmentCreates; }
incStencilAttachmentCreates()459 void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }
460
msaaAttachmentCreates()461 int msaaAttachmentCreates() const { return fMSAAAttachmentCreates; }
incMSAAAttachmentCreates()462 void incMSAAAttachmentCreates() { fMSAAAttachmentCreates++; }
463
numDraws()464 int numDraws() const { return fNumDraws; }
incNumDraws()465 void incNumDraws() { fNumDraws++; }
466
numFailedDraws()467 int numFailedDraws() const { return fNumFailedDraws; }
incNumFailedDraws()468 void incNumFailedDraws() { ++fNumFailedDraws; }
469
numSubmitToGpus()470 int numSubmitToGpus() const { return fNumSubmitToGpus; }
incNumSubmitToGpus()471 void incNumSubmitToGpus() { ++fNumSubmitToGpus; }
472
numScratchTexturesReused()473 int numScratchTexturesReused() const { return fNumScratchTexturesReused; }
incNumScratchTexturesReused()474 void incNumScratchTexturesReused() { ++fNumScratchTexturesReused; }
475
numScratchMSAAAttachmentsReused()476 int numScratchMSAAAttachmentsReused() const { return fNumScratchMSAAAttachmentsReused; }
incNumScratchMSAAAttachmentsReused()477 void incNumScratchMSAAAttachmentsReused() { ++fNumScratchMSAAAttachmentsReused; }
478
renderPasses()479 int renderPasses() const { return fRenderPasses; }
incRenderPasses()480 void incRenderPasses() { fRenderPasses++; }
481
numReorderedDAGsOverBudget()482 int numReorderedDAGsOverBudget() const { return fNumReorderedDAGsOverBudget; }
incNumReorderedDAGsOverBudget()483 void incNumReorderedDAGsOverBudget() { fNumReorderedDAGsOverBudget++; }
484
485 #if GR_TEST_UTILS
486 void dump(SkString*);
487 void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values);
488 #endif
489 private:
490 int fTextureCreates = 0;
491 int fTextureUploads = 0;
492 int fTransfersToTexture = 0;
493 int fTransfersFromSurface = 0;
494 int fStencilAttachmentCreates = 0;
495 int fMSAAAttachmentCreates = 0;
496 int fNumDraws = 0;
497 int fNumFailedDraws = 0;
498 int fNumSubmitToGpus = 0;
499 int fNumScratchTexturesReused = 0;
500 int fNumScratchMSAAAttachmentsReused = 0;
501 int fRenderPasses = 0;
502 int fNumReorderedDAGsOverBudget = 0;
503
504 #else // !GR_GPU_STATS
505
506 #if GR_TEST_UTILS
507 void dump(SkString*) {}
508 void dumpKeyValuePairs(SkTArray<SkString>*, SkTArray<double>*) {}
509 #endif
510 void incTextureCreates() {}
511 void incTextureUploads() {}
512 void incTransfersToTexture() {}
513 void incTransfersFromSurface() {}
514 void incStencilAttachmentCreates() {}
515 void incMSAAAttachmentCreates() {}
516 void incNumDraws() {}
517 void incNumFailedDraws() {}
518 void incNumSubmitToGpus() {}
519 void incNumScratchTexturesReused() {}
520 void incNumScratchMSAAAttachmentsReused() {}
521 void incRenderPasses() {}
522 void incNumReorderedDAGsOverBudget() {}
523 #endif
524 };
525
stats()526 Stats* stats() { return &fStats; }
527 void dumpJSON(SkJSONWriter*) const;
528
529
530 /**
531 * Creates a texture directly in the backend API without wrapping it in a GrTexture.
532 * Must be matched with a call to deleteBackendTexture().
533 *
534 * If data is null the texture is uninitialized.
535 *
536 * If data represents a color then all texture levels are cleared to that color.
537 *
538 * If data represents pixmaps then it must have a either one pixmap or, if mipmapping
539 * is specified, a complete MIP hierarchy of pixmaps. Additionally, if provided, the mip
540 * levels must be sized correctly according to the MIP sizes implied by dimensions. They
541 * must all have the same color type and that color type must be compatible with the
542 * texture format.
543 */
544 GrBackendTexture createBackendTexture(SkISize dimensions,
545 const GrBackendFormat&,
546 GrRenderable,
547 GrMipmapped,
548 GrProtected);
549
550 bool clearBackendTexture(const GrBackendTexture&,
551 sk_sp<GrRefCntedCallback> finishedCallback,
552 std::array<float, 4> color);
553
554 /**
555 * Same as the createBackendTexture case except compressed backend textures can
556 * never be renderable.
557 */
558 GrBackendTexture createCompressedBackendTexture(SkISize dimensions,
559 const GrBackendFormat&,
560 GrMipmapped,
561 GrProtected);
562
563 bool updateCompressedBackendTexture(const GrBackendTexture&,
564 sk_sp<GrRefCntedCallback> finishedCallback,
565 const void* data,
566 size_t length);
567
setBackendTextureState(const GrBackendTexture &,const GrBackendSurfaceMutableState &,GrBackendSurfaceMutableState * previousState,sk_sp<GrRefCntedCallback> finishedCallback)568 virtual bool setBackendTextureState(const GrBackendTexture&,
569 const GrBackendSurfaceMutableState&,
570 GrBackendSurfaceMutableState* previousState,
571 sk_sp<GrRefCntedCallback> finishedCallback) {
572 return false;
573 }
574
setBackendRenderTargetState(const GrBackendRenderTarget &,const GrBackendSurfaceMutableState &,GrBackendSurfaceMutableState * previousState,sk_sp<GrRefCntedCallback> finishedCallback)575 virtual bool setBackendRenderTargetState(const GrBackendRenderTarget&,
576 const GrBackendSurfaceMutableState&,
577 GrBackendSurfaceMutableState* previousState,
578 sk_sp<GrRefCntedCallback> finishedCallback) {
579 return false;
580 }
581
582 /**
583 * Frees a texture created by createBackendTexture(). If ownership of the backend
584 * texture has been transferred to a context using adopt semantics this should not be called.
585 */
586 virtual void deleteBackendTexture(const GrBackendTexture&) = 0;
587
588 /**
589 * In this case we have a program descriptor and a program info but no render target.
590 */
591 virtual bool compile(const GrProgramDesc&, const GrProgramInfo&) = 0;
592
precompileShader(const SkData & key,const SkData & data)593 virtual bool precompileShader(const SkData& key, const SkData& data) { return false; }
594
595 #if GR_TEST_UTILS
596 /** Check a handle represents an actual texture in the backend API that has not been freed. */
597 virtual bool isTestingOnlyBackendTexture(const GrBackendTexture&) const = 0;
598
599 /**
600 * Creates a GrBackendRenderTarget that can be wrapped using
601 * SkSurface::MakeFromBackendRenderTarget. Ideally this is a non-textureable allocation to
602 * differentiate from testing with SkSurface::MakeFromBackendTexture. When sampleCnt > 1 this
603 * is used to test client wrapped allocations with MSAA where Skia does not allocate a separate
604 * buffer for resolving. If the color is non-null the backing store should be cleared to the
605 * passed in color.
606 */
607 virtual GrBackendRenderTarget createTestingOnlyBackendRenderTarget(
608 SkISize dimensions,
609 GrColorType,
610 int sampleCount = 1,
611 GrProtected = GrProtected::kNo) = 0;
612
613 /**
614 * Deletes a GrBackendRenderTarget allocated with the above. Synchronization to make this safe
615 * is up to the caller.
616 */
617 virtual void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) = 0;
618
619 // This is only to be used in GL-specific tests.
glContextForTesting()620 virtual const GrGLContext* glContextForTesting() const { return nullptr; }
621
622 // This is only to be used by testing code
resetShaderCacheForTesting()623 virtual void resetShaderCacheForTesting() const {}
624
625 /**
626 * Inserted as a pair around a block of code to do a GPU frame capture.
627 * Currently only works with the Metal backend.
628 */
testingOnly_startCapture()629 virtual void testingOnly_startCapture() {}
testingOnly_endCapture()630 virtual void testingOnly_endCapture() {}
631 #endif
632
633 // width and height may be larger than rt (if underlying API allows it).
634 // Returns nullptr if compatible sb could not be created, otherwise the caller owns the ref on
635 // the GrAttachment.
636 virtual sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& colorFormat,
637 SkISize dimensions,
638 int numStencilSamples) = 0;
639
640 virtual GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) = 0;
641
642 // Creates an MSAA surface to be used as an MSAA attachment on a framebuffer.
643 virtual sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
644 const GrBackendFormat& format,
645 int numSamples,
646 GrProtected isProtected,
647 GrMemoryless isMemoryless) = 0;
648
handleDirtyContext()649 void handleDirtyContext() {
650 if (fResetBits) {
651 this->resetContext();
652 }
653 }
654
storeVkPipelineCacheData()655 virtual void storeVkPipelineCacheData() {}
656
657 // http://skbug.com/9739
insertManualFramebufferBarrier()658 virtual void insertManualFramebufferBarrier() {
659 SkASSERT(!this->caps()->requiresManualFBBarrierAfterTessellatedStencilDraw());
660 SK_ABORT("Manual framebuffer barrier not supported.");
661 }
662
663 // Called before certain draws in order to guarantee coherent results from dst reads.
664 virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
665
666 protected:
667 static bool CompressedDataIsCorrect(SkISize dimensions,
668 SkImage::CompressionType,
669 GrMipmapped,
670 const void* data,
671 size_t length);
672
673 // Handles cases where a surface will be updated without a call to flushRenderTarget.
674 void didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
675 uint32_t mipLevels = 1) const;
676
setOOMed()677 void setOOMed() { fOOMed = true; }
678
679 Stats fStats;
680
681 // Subclass must call this to initialize caps & compiler in its constructor.
682 void initCapsAndCompiler(sk_sp<const GrCaps> caps);
683
684 private:
685 virtual GrBackendTexture onCreateBackendTexture(SkISize dimensions,
686 const GrBackendFormat&,
687 GrRenderable,
688 GrMipmapped,
689 GrProtected) = 0;
690
691 virtual GrBackendTexture onCreateCompressedBackendTexture(
692 SkISize dimensions, const GrBackendFormat&, GrMipmapped, GrProtected) = 0;
693
694 virtual bool onClearBackendTexture(const GrBackendTexture&,
695 sk_sp<GrRefCntedCallback> finishedCallback,
696 std::array<float, 4> color) = 0;
697
698 virtual bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
699 sk_sp<GrRefCntedCallback> finishedCallback,
700 const void* data,
701 size_t length) = 0;
702
703 // called when the 3D context state is unknown. Subclass should emit any
704 // assumed 3D context state and dirty any state cache.
onResetContext(uint32_t resetBits)705 virtual void onResetContext(uint32_t resetBits) {}
706
707 // Implementation of resetTextureBindings.
onResetTextureBindings()708 virtual void onResetTextureBindings() {}
709
710 // overridden by backend-specific derived class to create objects.
711 // Texture size, renderablility, format support, sample count will have already been validated
712 // in base class before onCreateTexture is called.
713 // If the ith bit is set in levelClearMask then the ith MIP level should be cleared.
714 virtual sk_sp<GrTexture> onCreateTexture(SkISize dimensions,
715 const GrBackendFormat&,
716 GrRenderable,
717 int renderTargetSampleCnt,
718 SkBudgeted,
719 GrProtected,
720 int mipLevelCoont,
721 uint32_t levelClearMask) = 0;
722 virtual sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
723 const GrBackendFormat&,
724 SkBudgeted,
725 GrMipmapped,
726 GrProtected,
727 const void* data, size_t dataSize) = 0;
728 virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
729 GrWrapOwnership,
730 GrWrapCacheable,
731 GrIOType) = 0;
732
733 virtual sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
734 GrWrapOwnership,
735 GrWrapCacheable) = 0;
736
737 virtual sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
738 int sampleCnt,
739 GrWrapOwnership,
740 GrWrapCacheable) = 0;
741 virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) = 0;
742 virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
743 const GrVkDrawableInfo&);
744
745 virtual sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType,
746 GrAccessPattern, const void* data) = 0;
747
748 // overridden by backend-specific derived class to perform the surface read
749 virtual bool onReadPixels(GrSurface*,
750 SkIRect,
751 GrColorType surfaceColorType,
752 GrColorType dstColorType,
753 void*,
754 size_t rowBytes) = 0;
755
756 // overridden by backend-specific derived class to perform the surface write
757 virtual bool onWritePixels(GrSurface*,
758 SkIRect,
759 GrColorType surfaceColorType,
760 GrColorType srcColorType,
761 const GrMipLevel[],
762 int mipLevelCount,
763 bool prepForTexSampling) = 0;
764
765 // overridden by backend-specific derived class to perform the texture transfer
766 virtual bool onTransferPixelsTo(GrTexture*,
767 SkIRect,
768 GrColorType textiueColorType,
769 GrColorType bufferColorType,
770 sk_sp<GrGpuBuffer> transferBuffer,
771 size_t offset,
772 size_t rowBytes) = 0;
773
774 // overridden by backend-specific derived class to perform the surface transfer
775 virtual bool onTransferPixelsFrom(GrSurface*,
776 SkIRect,
777 GrColorType surfaceColorType,
778 GrColorType bufferColorType,
779 sk_sp<GrGpuBuffer> transferBuffer,
780 size_t offset) = 0;
781
782 // overridden by backend-specific derived class to perform the resolve
783 virtual void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) = 0;
784
785 // overridden by backend specific derived class to perform mip map level regeneration.
786 virtual bool onRegenerateMipMapLevels(GrTexture*) = 0;
787
788 // overridden by backend specific derived class to perform the copy surface
789 virtual bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
790 const SkIPoint& dstPoint) = 0;
791
792 virtual GrOpsRenderPass* onGetOpsRenderPass(
793 GrRenderTarget* renderTarget,
794 bool useMSAASurface,
795 GrAttachment* stencil,
796 GrSurfaceOrigin,
797 const SkIRect& bounds,
798 const GrOpsRenderPass::LoadAndStoreInfo&,
799 const GrOpsRenderPass::StencilLoadAndStoreInfo&,
800 const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
801 GrXferBarrierFlags renderPassXferBarriers) = 0;
802
prepareSurfacesForBackendAccessAndStateUpdates(SkSpan<GrSurfaceProxy * > proxies,SkSurface::BackendSurfaceAccess access,const GrBackendSurfaceMutableState * newState)803 virtual void prepareSurfacesForBackendAccessAndStateUpdates(
804 SkSpan<GrSurfaceProxy*> proxies,
805 SkSurface::BackendSurfaceAccess access,
806 const GrBackendSurfaceMutableState* newState) {}
807
808 virtual bool onSubmitToGpu(bool syncCpu) = 0;
809
810 void reportSubmitHistograms();
onReportSubmitHistograms()811 virtual void onReportSubmitHistograms() {}
812
813 #ifdef SK_ENABLE_DUMP_GPU
onDumpJSON(SkJSONWriter *)814 virtual void onDumpJSON(SkJSONWriter*) const {}
815 #endif
816
817 sk_sp<GrTexture> createTextureCommon(SkISize,
818 const GrBackendFormat&,
819 GrTextureType textureType,
820 GrRenderable,
821 int renderTargetSampleCnt,
822 SkBudgeted,
823 GrProtected,
824 int mipLevelCnt,
825 uint32_t levelClearMask);
826
resetContext()827 void resetContext() {
828 this->onResetContext(fResetBits);
829 fResetBits = 0;
830 }
831
832 void callSubmittedProcs(bool success);
833
834 sk_sp<const GrCaps> fCaps;
835 // Compiler used for compiling SkSL into backend shader code. We only want to create the
836 // compiler once, as there is significant overhead to the first compile.
837 std::unique_ptr<SkSL::Compiler> fCompiler;
838
839 uint32_t fResetBits;
840 // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
841 GrDirectContext* fContext;
842
843 struct SubmittedProc {
SubmittedProcSubmittedProc844 SubmittedProc(GrGpuSubmittedProc proc, GrGpuSubmittedContext context)
845 : fProc(proc), fContext(context) {}
846
847 GrGpuSubmittedProc fProc;
848 GrGpuSubmittedContext fContext;
849 };
850 SkSTArray<4, SubmittedProc> fSubmittedProcs;
851
852 bool fOOMed = false;
853
854 #if SK_HISTOGRAMS_ENABLED
855 int fCurrentSubmitRenderPassCount = 0;
856 #endif
857
858 friend class GrPathRendering;
859 using INHERITED = SkRefCnt;
860 };
861
862 #endif
863