/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
8 #ifndef GrGpu_DEFINED
9 #define GrGpu_DEFINED
10
11 #include "include/core/SkPath.h"
12 #include "include/core/SkSpan.h"
13 #include "include/core/SkSurface.h"
14 #include "include/gpu/GrDirectContext.h"
15 #include "include/gpu/GrTypes.h"
16 #include "include/private/SkTArray.h"
17 #include "src/core/SkTInternalLList.h"
18 #include "src/gpu/GrAttachment.h"
19 #include "src/gpu/GrCaps.h"
20 #include "src/gpu/GrOpsRenderPass.h"
21 #include "src/gpu/GrPixmap.h"
22 #include "src/gpu/GrSwizzle.h"
23 #include "src/gpu/GrXferProcessor.h"
24
25 class GrAttachment;
26 class GrBackendRenderTarget;
27 class GrBackendSemaphore;
28 struct GrContextOptions;
29 class GrDirectContext;
30 class GrGpuBuffer;
31 class GrGLContext;
32 class GrPath;
33 class GrPathRenderer;
34 class GrPathRendererChain;
35 class GrPipeline;
36 class GrGeometryProcessor;
37 class GrRenderTarget;
38 class GrRingBuffer;
39 class GrSemaphore;
40 class GrStagingBufferManager;
41 class GrStencilSettings;
42 class GrSurface;
43 class GrTexture;
44 class GrThreadSafePipelineBuilder;
45 struct GrVkDrawableInfo;
46 class SkJSONWriter;
47
48 namespace SkSL {
49 class Compiler;
50 }
51
52 class SK_API GrGpu : public SkRefCnt {
53 public:
54 GrGpu(GrDirectContext* direct);
55 ~GrGpu() override;
56
getContext()57 GrDirectContext* getContext() { return fContext; }
getContext()58 const GrDirectContext* getContext() const { return fContext; }
59
setCurrentGrResourceTag(const GrGpuResourceTag & tag)60 void setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
61 if (fContext) {
62 fContext->setCurrentGrResourceTag(tag);
63 }
64 }
65
popGrResourceTag()66 void popGrResourceTag()
67 {
68 if (fContext) {
69 fContext->popGrResourceTag();
70 }
71 }
72
73 /**
74 * Gets the capabilities of the draw target.
75 */
caps()76 const GrCaps* caps() const { return fCaps.get(); }
refCaps()77 sk_sp<const GrCaps> refCaps() const { return fCaps; }
78
stagingBufferManager()79 virtual GrStagingBufferManager* stagingBufferManager() { return nullptr; }
80
uniformsRingBuffer()81 virtual GrRingBuffer* uniformsRingBuffer() { return nullptr; }
82
shaderCompiler()83 SkSL::Compiler* shaderCompiler() const { return fCompiler.get(); }
84
85 enum class DisconnectType {
86 // No cleanup should be attempted, immediately cease making backend API calls
87 kAbandon,
88 // Free allocated resources (not known by GrResourceCache) before returning and
89 // ensure no backend backend 3D API calls will be made after disconnect() returns.
90 kCleanup,
91 };
92
93 // Called by context when the underlying backend context is already or will be destroyed
94 // before GrDirectContext.
95 virtual void disconnect(DisconnectType);
96
97 virtual GrThreadSafePipelineBuilder* pipelineBuilder() = 0;
98 virtual sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() = 0;
99
100 // Called by GrDirectContext::isContextLost. Returns true if the backend Gpu object has gotten
101 // into an unrecoverable, lost state.
isDeviceLost()102 virtual bool isDeviceLost() const { return false; }
103
104 /**
105 * The GrGpu object normally assumes that no outsider is setting state
106 * within the underlying 3D API's context/device/whatever. This call informs
107 * the GrGpu that the state was modified and it shouldn't make assumptions
108 * about the state.
109 */
110 void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }
111
112 /**
113 * Creates a texture object. If renderable is kYes then the returned texture can
114 * be used as a render target by calling GrTexture::asRenderTarget(). Not all
115 * pixel configs can be used as render targets. Support for configs as textures
116 * or render targets can be checked using GrCaps.
117 *
118 * @param dimensions dimensions of the texture to be created.
119 * @param format the format for the texture (not currently used).
120 * @param renderable should the resulting texture be renderable
121 * @param renderTargetSampleCnt The number of samples to use for rendering if renderable is
122 * kYes. If renderable is kNo then this must be 1.
123 * @param budgeted does this texture count against the resource cache budget?
124 * @param isProtected should the texture be created as protected.
125 * @param texels array of mipmap levels containing texel data to load.
126 * If level i has pixels then it is assumed that its dimensions are
127 * max(1, floor(dimensions.fWidth / 2)) by
128 * max(1, floor(dimensions.fHeight / 2)).
129 * If texels[i].fPixels == nullptr for all i <= mipLevelCount or
130 * mipLevelCount is 0 then the texture's contents are uninitialized.
131 * If a level has non-null pixels, its row bytes must be a multiple of the
132 * config's bytes-per-pixel. The row bytes must be tight to the
133 * level width if !caps->writePixelsRowBytesSupport().
134 * If mipLevelCount > 1 and texels[i].fPixels != nullptr for any i > 0
135 * then all levels must have non-null pixels. All levels must have
136 * non-null pixels if GrCaps::createTextureMustSpecifyAllLevels() is true.
137 * @param textureColorType The color type interpretation of the texture for the purpose of
138 * of uploading texel data.
139 * @param srcColorType The color type of data in texels[].
140 * @param texelLevelCount the number of levels in 'texels'. May be 0, 1, or
141 * floor(max((log2(dimensions.fWidth), log2(dimensions.fHeight)))). It
142 * must be the latter if GrCaps::createTextureMustSpecifyAllLevels() is
143 * true.
144 * @return The texture object if successful, otherwise nullptr.
145 */
146 sk_sp<GrTexture> createTexture(SkISize dimensions,
147 const GrBackendFormat& format,
148 GrTextureType textureType,
149 GrRenderable renderable,
150 int renderTargetSampleCnt,
151 SkBudgeted budgeted,
152 GrProtected isProtected,
153 GrColorType textureColorType,
154 GrColorType srcColorType,
155 const GrMipLevel texels[],
156 int texelLevelCount);
157
158 /**
159 * Simplified createTexture() interface for when there is no initial texel data to upload.
160 */
161 sk_sp<GrTexture> createTexture(SkISize dimensions,
162 const GrBackendFormat& format,
163 GrTextureType textureType,
164 GrRenderable renderable,
165 int renderTargetSampleCnt,
166 GrMipmapped mipMapped,
167 SkBudgeted budgeted,
168 GrProtected isProtected);
169
170 sk_sp<GrTexture> createCompressedTexture(SkISize dimensions,
171 const GrBackendFormat& format,
172 SkBudgeted budgeted,
173 GrMipmapped mipMapped,
174 GrProtected isProtected,
175 const void* data, size_t dataSize);
176
177 sk_sp<GrTexture> createCompressedTexture(SkISize dimensions,
178 const GrBackendFormat& format,
179 SkBudgeted budgeted,
180 GrMipmapped mipMapped,
181 GrProtected isProtected,
182 OH_NativeBuffer* nativeBuffer,
183 size_t bufferSize);
184
185 /**
186 * Implements GrResourceProvider::wrapBackendTexture
187 */
188 sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture&,
189 GrWrapOwnership,
190 GrWrapCacheable,
191 GrIOType);
192
193 sk_sp<GrTexture> wrapCompressedBackendTexture(const GrBackendTexture&,
194 GrWrapOwnership,
195 GrWrapCacheable);
196
197 /**
198 * Implements GrResourceProvider::wrapRenderableBackendTexture
199 */
200 sk_sp<GrTexture> wrapRenderableBackendTexture(const GrBackendTexture&,
201 int sampleCnt,
202 GrWrapOwnership,
203 GrWrapCacheable);
204
205 /**
206 * Implements GrResourceProvider::wrapBackendRenderTarget
207 */
208 sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&);
209
210 /**
211 * Implements GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget
212 */
213 sk_sp<GrRenderTarget> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
214 const GrVkDrawableInfo&);
215
216 /**
217 * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
218 *
219 * @param size size of buffer to create.
220 * @param intendedType hint to the graphics subsystem about what the buffer will be used for.
221 * @param accessPattern hint to the graphics subsystem about how the data will be accessed.
222 * @param data optional data with which to initialize the buffer.
223 *
224 * @return the buffer if successful, otherwise nullptr.
225 */
226 sk_sp<GrGpuBuffer> createBuffer(size_t size, GrGpuBufferType intendedType,
227 GrAccessPattern accessPattern, const void* data = nullptr);
228
229 /**
230 * Resolves MSAA. The resolveRect must already be in the native destination space.
231 */
232 void resolveRenderTarget(GrRenderTarget*, const SkIRect& resolveRect);
233
234 /**
235 * Uses the base of the texture to recompute the contents of the other levels.
236 */
237 bool regenerateMipMapLevels(GrTexture*);
238
239 /**
240 * If the backend API has stateful texture bindings, this resets them back to defaults.
241 */
242 void resetTextureBindings();
243
244 /**
245 * Reads a rectangle of pixels from a render target. No sRGB/linear conversions are performed.
246 *
247 * @param surface the surface to read from
248 * @param rect the rectangle of pixels to read
249 * @param surfaceColorType the color type for this use of the surface.
250 * @param dstColorType the color type of the destination buffer.
251 * @param buffer memory to read the rectangle into.
252 * @param rowBytes the number of bytes between consecutive rows. Must be a multiple of
253 * dstColorType's bytes-per-pixel. Must be tight to width if
254 * !caps->readPixelsRowBytesSupport().
255 *
256 * @return true if the read succeeded, false if not. The read can fail
257 * because of the surface doesn't support reading, the color type
258 * is not allowed for the format of the surface or if the rectangle
259 * read is not contained in the surface.
260 */
261 bool readPixels(GrSurface* surface,
262 SkIRect rect,
263 GrColorType surfaceColorType,
264 GrColorType dstColorType,
265 void* buffer,
266 size_t rowBytes);
267
268 /**
269 * Updates the pixels in a rectangle of a surface. No sRGB/linear conversions are performed.
270 *
271 * @param surface the surface to write to.
272 * @param rect the rectangle of pixels to overwrite
273 * @param surfaceColorType the color type for this use of the surface.
274 * @param srcColorType the color type of the source buffer.
275 * @param texels array of mipmap levels containing texture data. Row bytes must be a
276 * multiple of srcColorType's bytes-per-pixel. Must be tight to level
277 * width if !caps->writePixelsRowBytesSupport().
278 * @param mipLevelCount number of levels in 'texels'
279 * @param prepForTexSampling After doing write pixels should the surface be prepared for texture
280 * sampling. This is currently only used by Vulkan for inline uploads
281 * to set that layout back to sampled after doing the upload. Inline
282 * uploads currently can happen between draws in a single op so it is
283 * not trivial to break up the OpsTask into two tasks when we see
284 * an inline upload. However, once we are able to support doing that
285 * we can remove this parameter.
286 *
287 * @return true if the write succeeded, false if not. The read can fail
288 * because of the surface doesn't support writing (e.g. read only),
289 * the color type is not allowed for the format of the surface or
290 * if the rectangle written is not contained in the surface.
291 */
292 bool writePixels(GrSurface* surface,
293 SkIRect rect,
294 GrColorType surfaceColorType,
295 GrColorType srcColorType,
296 const GrMipLevel texels[],
297 int mipLevelCount,
298 bool prepForTexSampling = false);
299
300 /**
301 * Helper for the case of a single level.
302 */
303 bool writePixels(GrSurface* surface,
304 SkIRect rect,
305 GrColorType surfaceColorType,
306 GrColorType srcColorType,
307 const void* buffer,
308 size_t rowBytes,
309 bool prepForTexSampling = false) {
310 GrMipLevel mipLevel = {buffer, rowBytes, nullptr};
311 return this->writePixels(surface,
312 rect,
313 surfaceColorType,
314 srcColorType,
315 &mipLevel,
316 1,
317 prepForTexSampling);
318 }
319
320 /**
321 * Updates the pixels in a rectangle of a texture using a buffer. If the texture is MIP mapped,
322 * the base level is written to.
323 *
324 * @param texture the texture to write to.
325 * @param rect the rectangle of pixels in the texture to overwrite
326 * @param textureColorType the color type for this use of the surface.
327 * @param bufferColorType the color type of the transfer buffer's pixel data
328 * @param transferBuffer GrBuffer to read pixels from (type must be "kXferCpuToGpu")
329 * @param offset offset from the start of the buffer
330 * @param rowBytes number of bytes between consecutive rows in the buffer. Must be a
331 * multiple of bufferColorType's bytes-per-pixel. Must be tight to
332 * rect.width() if !caps->writePixelsRowBytesSupport().
333 */
334 bool transferPixelsTo(GrTexture* texture,
335 SkIRect rect,
336 GrColorType textureColorType,
337 GrColorType bufferColorType,
338 sk_sp<GrGpuBuffer> transferBuffer,
339 size_t offset,
340 size_t rowBytes);
341
342 /**
343 * Reads the pixels from a rectangle of a surface into a buffer. Use
344 * GrCaps::SupportedRead::fOffsetAlignmentForTransferBuffer to determine the requirements for
345 * the buffer offset alignment. If the surface is a MIP mapped texture, the base level is read.
346 *
347 * If successful the row bytes in the buffer is always:
348 * GrColorTypeBytesPerPixel(bufferColorType) * rect.width()
349 *
350 * Asserts that the caller has passed a properly aligned offset and that the buffer is
351 * large enough to hold the result
352 *
353 * @param surface the surface to read from.
354 * @param rect the rectangle of pixels to read
355 * @param surfaceColorType the color type for this use of the surface.
356 * @param bufferColorType the color type of the transfer buffer's pixel data
357 * @param transferBuffer GrBuffer to write pixels to (type must be "kXferGpuToCpu")
358 * @param offset offset from the start of the buffer
359 */
360 bool transferPixelsFrom(GrSurface* surface,
361 SkIRect rect,
362 GrColorType surfaceColorType,
363 GrColorType bufferColorType,
364 sk_sp<GrGpuBuffer> transferBuffer,
365 size_t offset);
366
367 // Called to perform a surface to surface copy. Fallbacks to issuing a draw from the src to dst
368 // take place at higher levels and this function implement faster copy paths. The rect
369 // and point are pre-clipped. The src rect and implied dst rect are guaranteed to be within the
370 // src/dst bounds and non-empty. They must also be in their exact device space coords, including
371 // already being transformed for origin if need be. If canDiscardOutsideDstRect is set to true
372 // then we don't need to preserve any data on the dst surface outside of the copy.
373 bool copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
374 const SkIPoint& dstPoint);
375
376 // Returns a GrOpsRenderPass which OpsTasks send draw commands to instead of directly
377 // to the Gpu object. The 'bounds' rect is the content rect of the renderTarget.
378 // If a 'stencil' is provided it will be the one bound to 'renderTarget'. If one is not
379 // provided but 'renderTarget' has a stencil buffer then that is a signal that the
380 // render target's stencil buffer should be ignored.
381 GrOpsRenderPass* getOpsRenderPass(GrRenderTarget* renderTarget,
382 bool useMSAASurface,
383 GrAttachment* stencil,
384 GrSurfaceOrigin,
385 const SkIRect& bounds,
386 const GrOpsRenderPass::LoadAndStoreInfo&,
387 const GrOpsRenderPass::StencilLoadAndStoreInfo&,
388 const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
389 GrXferBarrierFlags renderPassXferBarriers);
390
391 // Called by GrDrawingManager when flushing.
392 // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
393 // insert any numSemaphore semaphores on the gpu and set the backendSemaphores to match the
394 // inserted semaphores.
395 void executeFlushInfo(SkSpan<GrSurfaceProxy*>,
396 SkSurface::BackendSurfaceAccess access,
397 const GrFlushInfo&,
398 const GrBackendSurfaceMutableState* newState);
399
400 bool submitToGpu(bool syncCpu);
401
402 virtual void submit(GrOpsRenderPass*) = 0;
403
404 virtual GrFence SK_WARN_UNUSED_RESULT insertFence() = 0;
405 virtual bool waitFence(GrFence) = 0;
406 virtual void deleteFence(GrFence) const = 0;
407
408 virtual std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(
409 bool isOwned = true) = 0;
410 virtual std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
411 GrSemaphoreWrapType,
412 GrWrapOwnership) = 0;
413 virtual void insertSemaphore(GrSemaphore* semaphore) = 0;
414 virtual void waitSemaphore(GrSemaphore* semaphore) = 0;
415
416 virtual void addFinishedProc(GrGpuFinishedProc finishedProc,
417 GrGpuFinishedContext finishedContext) = 0;
418 virtual void checkFinishProcs() = 0;
419 virtual void finishOutstandingGpuWork() = 0;
420
takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>)421 virtual void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) {}
422
423 /**
424 * Checks if we detected an OOM from the underlying 3D API and if so returns true and resets
425 * the internal OOM state to false. Otherwise, returns false.
426 */
427 bool checkAndResetOOMed();
428
429 /**
430 * Put this texture in a safe and known state for use across multiple contexts. Depending on
431 * the backend, this may return a GrSemaphore. If so, other contexts should wait on that
432 * semaphore before using this texture.
433 */
434 virtual std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;
435
436 /**
437 * Frees any backend specific objects that are not currently in use by the GPU. This is called
438 * when the client is trying to free up as much GPU memory as possible. We will not release
439 * resources connected to programs/pipelines since the cost to recreate those is significantly
440 * higher that other resources.
441 */
releaseUnlockedBackendObjects()442 virtual void releaseUnlockedBackendObjects() {}
443
GetHpsDimension(const SkBlurArg & blurArg)444 virtual std::array<int, 2> GetHpsDimension(const SkBlurArg& blurArg) const { return {0, 0}; }
445
446 ///////////////////////////////////////////////////////////////////////////
447 // Debugging and Stats
448
449 class Stats {
450 public:
451 #if GR_GPU_STATS
452 Stats() = default;
453
reset()454 void reset() { *this = {}; }
455
textureCreates()456 int textureCreates() const { return fTextureCreates; }
incTextureCreates()457 void incTextureCreates() { fTextureCreates++; }
458
textureUploads()459 int textureUploads() const { return fTextureUploads; }
incTextureUploads()460 void incTextureUploads() { fTextureUploads++; }
461
transfersToTexture()462 int transfersToTexture() const { return fTransfersToTexture; }
incTransfersToTexture()463 void incTransfersToTexture() { fTransfersToTexture++; }
464
transfersFromSurface()465 int transfersFromSurface() const { return fTransfersFromSurface; }
incTransfersFromSurface()466 void incTransfersFromSurface() { fTransfersFromSurface++; }
467
stencilAttachmentCreates()468 int stencilAttachmentCreates() const { return fStencilAttachmentCreates; }
incStencilAttachmentCreates()469 void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }
470
msaaAttachmentCreates()471 int msaaAttachmentCreates() const { return fMSAAAttachmentCreates; }
incMSAAAttachmentCreates()472 void incMSAAAttachmentCreates() { fMSAAAttachmentCreates++; }
473
numDraws()474 int numDraws() const { return fNumDraws; }
incNumDraws()475 void incNumDraws() { fNumDraws++; }
476
numFailedDraws()477 int numFailedDraws() const { return fNumFailedDraws; }
incNumFailedDraws()478 void incNumFailedDraws() { ++fNumFailedDraws; }
479
numSubmitToGpus()480 int numSubmitToGpus() const { return fNumSubmitToGpus; }
incNumSubmitToGpus()481 void incNumSubmitToGpus() { ++fNumSubmitToGpus; }
482
numScratchTexturesReused()483 int numScratchTexturesReused() const { return fNumScratchTexturesReused; }
incNumScratchTexturesReused()484 void incNumScratchTexturesReused() { ++fNumScratchTexturesReused; }
485
numScratchMSAAAttachmentsReused()486 int numScratchMSAAAttachmentsReused() const { return fNumScratchMSAAAttachmentsReused; }
incNumScratchMSAAAttachmentsReused()487 void incNumScratchMSAAAttachmentsReused() { ++fNumScratchMSAAAttachmentsReused; }
488
renderPasses()489 int renderPasses() const { return fRenderPasses; }
incRenderPasses()490 void incRenderPasses() { fRenderPasses++; }
491
numReorderedDAGsOverBudget()492 int numReorderedDAGsOverBudget() const { return fNumReorderedDAGsOverBudget; }
incNumReorderedDAGsOverBudget()493 void incNumReorderedDAGsOverBudget() { fNumReorderedDAGsOverBudget++; }
494
495 #if GR_TEST_UTILS
496 void dump(SkString*);
497 void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values);
498 #endif
499 private:
500 int fTextureCreates = 0;
501 int fTextureUploads = 0;
502 int fTransfersToTexture = 0;
503 int fTransfersFromSurface = 0;
504 int fStencilAttachmentCreates = 0;
505 int fMSAAAttachmentCreates = 0;
506 int fNumDraws = 0;
507 int fNumFailedDraws = 0;
508 int fNumSubmitToGpus = 0;
509 int fNumScratchTexturesReused = 0;
510 int fNumScratchMSAAAttachmentsReused = 0;
511 int fRenderPasses = 0;
512 int fNumReorderedDAGsOverBudget = 0;
513
514 #else // !GR_GPU_STATS
515
516 #if GR_TEST_UTILS
517 void dump(SkString*) {}
518 void dumpKeyValuePairs(SkTArray<SkString>*, SkTArray<double>*) {}
519 #endif
520 void incTextureCreates() {}
521 void incTextureUploads() {}
522 void incTransfersToTexture() {}
523 void incTransfersFromSurface() {}
524 void incStencilAttachmentCreates() {}
525 void incMSAAAttachmentCreates() {}
526 void incNumDraws() {}
527 void incNumFailedDraws() {}
528 void incNumSubmitToGpus() {}
529 void incNumScratchTexturesReused() {}
530 void incNumScratchMSAAAttachmentsReused() {}
531 void incRenderPasses() {}
532 void incNumReorderedDAGsOverBudget() {}
533 #endif
534 };
535
stats()536 Stats* stats() { return &fStats; }
537 void dumpJSON(SkJSONWriter*) const;
538
539
540 /**
541 * Creates a texture directly in the backend API without wrapping it in a GrTexture.
542 * Must be matched with a call to deleteBackendTexture().
543 *
544 * If data is null the texture is uninitialized.
545 *
546 * If data represents a color then all texture levels are cleared to that color.
547 *
548 * If data represents pixmaps then it must have a either one pixmap or, if mipmapping
549 * is specified, a complete MIP hierarchy of pixmaps. Additionally, if provided, the mip
550 * levels must be sized correctly according to the MIP sizes implied by dimensions. They
551 * must all have the same color type and that color type must be compatible with the
552 * texture format.
553 */
554 GrBackendTexture createBackendTexture(SkISize dimensions,
555 const GrBackendFormat&,
556 GrRenderable,
557 GrMipmapped,
558 GrProtected);
559
560 bool clearBackendTexture(const GrBackendTexture&,
561 sk_sp<GrRefCntedCallback> finishedCallback,
562 std::array<float, 4> color);
563
564 /**
565 * Same as the createBackendTexture case except compressed backend textures can
566 * never be renderable.
567 */
568 GrBackendTexture createCompressedBackendTexture(SkISize dimensions,
569 const GrBackendFormat&,
570 GrMipmapped,
571 GrProtected);
572
573 bool updateCompressedBackendTexture(const GrBackendTexture&,
574 sk_sp<GrRefCntedCallback> finishedCallback,
575 const void* data,
576 size_t length);
577
setBackendTextureState(const GrBackendTexture &,const GrBackendSurfaceMutableState &,GrBackendSurfaceMutableState * previousState,sk_sp<GrRefCntedCallback> finishedCallback)578 virtual bool setBackendTextureState(const GrBackendTexture&,
579 const GrBackendSurfaceMutableState&,
580 GrBackendSurfaceMutableState* previousState,
581 sk_sp<GrRefCntedCallback> finishedCallback) {
582 return false;
583 }
584
setBackendRenderTargetState(const GrBackendRenderTarget &,const GrBackendSurfaceMutableState &,GrBackendSurfaceMutableState * previousState,sk_sp<GrRefCntedCallback> finishedCallback)585 virtual bool setBackendRenderTargetState(const GrBackendRenderTarget&,
586 const GrBackendSurfaceMutableState&,
587 GrBackendSurfaceMutableState* previousState,
588 sk_sp<GrRefCntedCallback> finishedCallback) {
589 return false;
590 }
591
592 /**
593 * Frees a texture created by createBackendTexture(). If ownership of the backend
594 * texture has been transferred to a context using adopt semantics this should not be called.
595 */
596 virtual void deleteBackendTexture(const GrBackendTexture&) = 0;
597
598 /**
599 * In this case we have a program descriptor and a program info but no render target.
600 */
601 virtual bool compile(const GrProgramDesc&, const GrProgramInfo&) = 0;
602
precompileShader(const SkData & key,const SkData & data)603 virtual bool precompileShader(const SkData& key, const SkData& data) { return false; }
604
605 #if GR_TEST_UTILS
606 /** Check a handle represents an actual texture in the backend API that has not been freed. */
607 virtual bool isTestingOnlyBackendTexture(const GrBackendTexture&) const = 0;
608
609 /**
610 * Creates a GrBackendRenderTarget that can be wrapped using
611 * SkSurface::MakeFromBackendRenderTarget. Ideally this is a non-textureable allocation to
612 * differentiate from testing with SkSurface::MakeFromBackendTexture. When sampleCnt > 1 this
613 * is used to test client wrapped allocations with MSAA where Skia does not allocate a separate
614 * buffer for resolving. If the color is non-null the backing store should be cleared to the
615 * passed in color.
616 */
617 virtual GrBackendRenderTarget createTestingOnlyBackendRenderTarget(
618 SkISize dimensions,
619 GrColorType,
620 int sampleCount = 1,
621 GrProtected = GrProtected::kNo) = 0;
622
623 /**
624 * Deletes a GrBackendRenderTarget allocated with the above. Synchronization to make this safe
625 * is up to the caller.
626 */
627 virtual void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) = 0;
628
629 // This is only to be used in GL-specific tests.
glContextForTesting()630 virtual const GrGLContext* glContextForTesting() const { return nullptr; }
631
632 // This is only to be used by testing code
resetShaderCacheForTesting()633 virtual void resetShaderCacheForTesting() const {}
634
635 /**
636 * Inserted as a pair around a block of code to do a GPU frame capture.
637 * Currently only works with the Metal backend.
638 */
testingOnly_startCapture()639 virtual void testingOnly_startCapture() {}
testingOnly_endCapture()640 virtual void testingOnly_endCapture() {}
641 #endif
642
643 // width and height may be larger than rt (if underlying API allows it).
644 // Returns nullptr if compatible sb could not be created, otherwise the caller owns the ref on
645 // the GrAttachment.
646 virtual sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& colorFormat,
647 SkISize dimensions,
648 int numStencilSamples) = 0;
649
650 virtual GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) = 0;
651
652 // Creates an MSAA surface to be used as an MSAA attachment on a framebuffer.
653 virtual sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
654 const GrBackendFormat& format,
655 int numSamples,
656 GrProtected isProtected,
657 GrMemoryless isMemoryless) = 0;
658
handleDirtyContext()659 void handleDirtyContext() {
660 if (fResetBits) {
661 this->resetContext();
662 }
663 }
664
storeVkPipelineCacheData()665 virtual void storeVkPipelineCacheData() {}
666
667 // http://skbug.com/9739
insertManualFramebufferBarrier()668 virtual void insertManualFramebufferBarrier() {
669 SkASSERT(!this->caps()->requiresManualFBBarrierAfterTessellatedStencilDraw());
670 SK_ABORT("Manual framebuffer barrier not supported.");
671 }
672
673 // Called before certain draws in order to guarantee coherent results from dst reads.
674 virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
675
vmaDefragment()676 virtual void vmaDefragment() {}
dumpVmaStats(SkString * out)677 virtual void dumpVmaStats(SkString *out) {}
678
679 // OH ISSUE: asyn memory reclaimer
setGpuMemoryAsyncReclaimerSwitch(bool enabled,const std::function<void ()> & setThreadPriority)680 virtual void setGpuMemoryAsyncReclaimerSwitch(bool enabled, const std::function<void()>& setThreadPriority) {}
flushGpuMemoryInWaitQueue()681 virtual void flushGpuMemoryInWaitQueue() {}
682
683 protected:
684 static bool CompressedDataIsCorrect(SkISize dimensions,
685 SkImage::CompressionType,
686 GrMipmapped,
687 const void* data,
688 size_t length);
689
690 // Handles cases where a surface will be updated without a call to flushRenderTarget.
691 void didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
692 uint32_t mipLevels = 1) const;
693
setOOMed()694 void setOOMed() { fOOMed = true; }
695
696 Stats fStats;
697
698 // Subclass must call this to initialize caps & compiler in its constructor.
699 void initCapsAndCompiler(sk_sp<const GrCaps> caps);
700
701 private:
702 virtual GrBackendTexture onCreateBackendTexture(SkISize dimensions,
703 const GrBackendFormat&,
704 GrRenderable,
705 GrMipmapped,
706 GrProtected) = 0;
707
708 virtual GrBackendTexture onCreateCompressedBackendTexture(
709 SkISize dimensions, const GrBackendFormat&, GrMipmapped, GrProtected) = 0;
710
711 virtual bool onClearBackendTexture(const GrBackendTexture&,
712 sk_sp<GrRefCntedCallback> finishedCallback,
713 std::array<float, 4> color) = 0;
714
715 virtual bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
716 sk_sp<GrRefCntedCallback> finishedCallback,
717 const void* data,
718 size_t length) = 0;
719
720 // called when the 3D context state is unknown. Subclass should emit any
721 // assumed 3D context state and dirty any state cache.
onResetContext(uint32_t resetBits)722 virtual void onResetContext(uint32_t resetBits) {}
723
724 // Implementation of resetTextureBindings.
onResetTextureBindings()725 virtual void onResetTextureBindings() {}
726
727 // overridden by backend-specific derived class to create objects.
728 // Texture size, renderablility, format support, sample count will have already been validated
729 // in base class before onCreateTexture is called.
730 // If the ith bit is set in levelClearMask then the ith MIP level should be cleared.
731 virtual sk_sp<GrTexture> onCreateTexture(SkISize dimensions,
732 const GrBackendFormat&,
733 GrRenderable,
734 int renderTargetSampleCnt,
735 SkBudgeted,
736 GrProtected,
737 int mipLevelCoont,
738 uint32_t levelClearMask) = 0;
739 virtual sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
740 const GrBackendFormat&,
741 SkBudgeted,
742 GrMipmapped,
743 GrProtected,
744 const void* data, size_t dataSize) = 0;
745 virtual sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
746 const GrBackendFormat&,
747 SkBudgeted,
748 GrMipmapped,
749 GrProtected,
750 OH_NativeBuffer* nativeBuffer,
751 size_t bufferSize) = 0;
752 virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
753 GrWrapOwnership,
754 GrWrapCacheable,
755 GrIOType) = 0;
756
757 virtual sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
758 GrWrapOwnership,
759 GrWrapCacheable) = 0;
760
761 virtual sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
762 int sampleCnt,
763 GrWrapOwnership,
764 GrWrapCacheable) = 0;
765 virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) = 0;
766 virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
767 const GrVkDrawableInfo&);
768
769 virtual sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType,
770 GrAccessPattern, const void* data) = 0;
771
772 // overridden by backend-specific derived class to perform the surface read
773 virtual bool onReadPixels(GrSurface*,
774 SkIRect,
775 GrColorType surfaceColorType,
776 GrColorType dstColorType,
777 void*,
778 size_t rowBytes) = 0;
779
780 // overridden by backend-specific derived class to perform the surface write
781 virtual bool onWritePixels(GrSurface*,
782 SkIRect,
783 GrColorType surfaceColorType,
784 GrColorType srcColorType,
785 const GrMipLevel[],
786 int mipLevelCount,
787 bool prepForTexSampling) = 0;
788
789 // overridden by backend-specific derived class to perform the texture transfer
790 virtual bool onTransferPixelsTo(GrTexture*,
791 SkIRect,
792 GrColorType textiueColorType,
793 GrColorType bufferColorType,
794 sk_sp<GrGpuBuffer> transferBuffer,
795 size_t offset,
796 size_t rowBytes) = 0;
797
798 // overridden by backend-specific derived class to perform the surface transfer
799 virtual bool onTransferPixelsFrom(GrSurface*,
800 SkIRect,
801 GrColorType surfaceColorType,
802 GrColorType bufferColorType,
803 sk_sp<GrGpuBuffer> transferBuffer,
804 size_t offset) = 0;
805
806 // overridden by backend-specific derived class to perform the resolve
807 virtual void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) = 0;
808
809 // overridden by backend specific derived class to perform mip map level regeneration.
810 virtual bool onRegenerateMipMapLevels(GrTexture*) = 0;
811
812 // overridden by backend specific derived class to perform the copy surface
813 virtual bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
814 const SkIPoint& dstPoint) = 0;
815
816 virtual GrOpsRenderPass* onGetOpsRenderPass(
817 GrRenderTarget* renderTarget,
818 bool useMSAASurface,
819 GrAttachment* stencil,
820 GrSurfaceOrigin,
821 const SkIRect& bounds,
822 const GrOpsRenderPass::LoadAndStoreInfo&,
823 const GrOpsRenderPass::StencilLoadAndStoreInfo&,
824 const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
825 GrXferBarrierFlags renderPassXferBarriers) = 0;
826
prepareSurfacesForBackendAccessAndStateUpdates(SkSpan<GrSurfaceProxy * > proxies,SkSurface::BackendSurfaceAccess access,const GrBackendSurfaceMutableState * newState)827 virtual void prepareSurfacesForBackendAccessAndStateUpdates(
828 SkSpan<GrSurfaceProxy*> proxies,
829 SkSurface::BackendSurfaceAccess access,
830 const GrBackendSurfaceMutableState* newState) {}
831
832 virtual bool onSubmitToGpu(bool syncCpu) = 0;
833
834 void reportSubmitHistograms();
onReportSubmitHistograms()835 virtual void onReportSubmitHistograms() {}
836
837 #ifdef SK_ENABLE_DUMP_GPU
onDumpJSON(SkJSONWriter *)838 virtual void onDumpJSON(SkJSONWriter*) const {}
839 #endif
840
841 sk_sp<GrTexture> createTextureCommon(SkISize,
842 const GrBackendFormat&,
843 GrTextureType textureType,
844 GrRenderable,
845 int renderTargetSampleCnt,
846 SkBudgeted,
847 GrProtected,
848 int mipLevelCnt,
849 uint32_t levelClearMask);
850
resetContext()851 void resetContext() {
852 this->onResetContext(fResetBits);
853 fResetBits = 0;
854 }
855
856 void callSubmittedProcs(bool success);
857
858 sk_sp<const GrCaps> fCaps;
859 // Compiler used for compiling SkSL into backend shader code. We only want to create the
860 // compiler once, as there is significant overhead to the first compile.
861 std::unique_ptr<SkSL::Compiler> fCompiler;
862
863 uint32_t fResetBits;
864 // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
865 GrDirectContext* fContext;
866
867 struct SubmittedProc {
SubmittedProcSubmittedProc868 SubmittedProc(GrGpuSubmittedProc proc, GrGpuSubmittedContext context)
869 : fProc(proc), fContext(context) {}
870
871 GrGpuSubmittedProc fProc;
872 GrGpuSubmittedContext fContext;
873 };
874 SkSTArray<4, SubmittedProc> fSubmittedProcs;
875
876 bool fOOMed = false;
877
878 #if SK_HISTOGRAMS_ENABLED
879 int fCurrentSubmitRenderPassCount = 0;
880 #endif
881
882 friend class GrPathRendering;
883 using INHERITED = SkRefCnt;
884 };
885
886 #endif
887