/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpu_DEFINED
#define GrGpu_DEFINED

#include "include/core/SkPath.h"
#include "include/core/SkSpan.h"
#include "include/core/SkSurface.h"
#include "include/core/SkTypes.h"
#include "include/gpu/GrTypes.h"
#include "include/private/base/SkTArray.h"
#include "src/base/SkTInternalLList.h"
#include "src/gpu/RefCntedCallback.h"
#include "src/gpu/Swizzle.h"
#include "src/gpu/ganesh/GrAttachment.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrGpuBuffer.h"
#include "src/gpu/ganesh/GrOpsRenderPass.h"
#include "src/gpu/ganesh/GrPixmap.h"
#include "src/gpu/ganesh/GrXferProcessor.h"

class GrAttachment;
class GrBackendRenderTarget;
class GrBackendSemaphore;
struct GrContextOptions;
class GrDirectContext;
class GrGLContext;
class GrPath;
class GrPathRenderer;
class GrPathRendererChain;
class GrPipeline;
class GrGeometryProcessor;
class GrRenderTarget;
class GrRingBuffer;
class GrSemaphore;
class GrStagingBufferManager;
class GrStencilSettings;
class GrSurface;
class GrTexture;
class GrThreadSafePipelineBuilder;
struct GrVkDrawableInfo;
class SkJSONWriter;

namespace SkSL {
class Compiler;
}

class GrGpu : public SkRefCnt {
public:
    GrGpu(GrDirectContext* direct);
    ~GrGpu() override;

    GrDirectContext* getContext() { return fContext; }
    const GrDirectContext* getContext() const { return fContext; }

    /**
     * Gets the capabilities of the draw target.
     */
    const GrCaps* caps() const { return fCaps.get(); }
    sk_sp<const GrCaps> refCaps() const { return fCaps; }

    virtual GrStagingBufferManager* stagingBufferManager() { return nullptr; }

    virtual GrRingBuffer* uniformsRingBuffer() { return nullptr; }

    SkSL::Compiler* shaderCompiler() const { return fCompiler.get(); }

    enum class DisconnectType {
        // No cleanup should be attempted, immediately cease making backend API calls.
        kAbandon,
        // Free allocated resources (not known by GrResourceCache) before returning and
        // ensure no backend 3D API calls will be made after disconnect() returns.
        kCleanup,
    };

    // Called by the context when the underlying backend context is already or will be destroyed
    // before the GrDirectContext.
    virtual void disconnect(DisconnectType);

    virtual GrThreadSafePipelineBuilder* pipelineBuilder() = 0;
    virtual sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() = 0;

    // Called by GrDirectContext::isContextLost. Returns true if the backend GPU object has gotten
    // into an unrecoverable, lost state.
    virtual bool isDeviceLost() const { return false; }

    /**
     * The GrGpu object normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the GrGpu that the state was modified and it shouldn't make assumptions
     * about the state.
     */
    void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }
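
    // Illustrative sketch (not part of this API; 'gpu' and the raw-call helper are
    // assumed locals): a client that issues backend 3D API calls outside of Ganesh
    // would dirty the cached state afterwards so GrGpu re-emits any assumed state
    // the next time handleDirtyContext() runs.
    //
    //     makeRawBackendCallsOutsideSkia();                // hypothetical helper
    //     gpu->markContextDirty(kAll_GrBackendState);      // resetContext() runs lazily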

    /**
     * Creates a texture object. If renderable is kYes then the returned texture can
     * be used as a render target by calling GrTexture::asRenderTarget(). Not all
     * pixel configs can be used as render targets. Support for configs as textures
     * or render targets can be checked using GrCaps.
     *
     * @param dimensions             dimensions of the texture to be created.
     * @param format                 the format for the texture (not currently used).
     * @param renderable             should the resulting texture be renderable.
     * @param renderTargetSampleCnt  The number of samples to use for rendering if renderable is
     *                               kYes. If renderable is kNo then this must be 1.
     * @param budgeted               does this texture count against the resource cache budget?
     * @param isProtected            should the texture be created as protected.
     * @param texels                 array of mipmap levels containing texel data to load.
     *                               If level i has pixels then it is assumed that its dimensions
     *                               are max(1, floor(dimensions.fWidth / 2^i)) by
     *                               max(1, floor(dimensions.fHeight / 2^i)).
     *                               If texels[i].fPixels == nullptr for all i < mipLevelCount or
     *                               mipLevelCount is 0 then the texture's contents are
     *                               uninitialized.
     *                               If a level has non-null pixels, its row bytes must be a
     *                               multiple of the config's bytes-per-pixel. The row bytes must
     *                               be tight to the level width if
     *                               !caps->writePixelsRowBytesSupport().
     *                               If mipLevelCount > 1 and texels[i].fPixels != nullptr for any
     *                               i > 0 then all levels must have non-null pixels. All levels
     *                               must have non-null pixels if
     *                               GrCaps::createTextureMustSpecifyAllLevels() is true.
     * @param textureColorType       The color type interpretation of the texture for the purpose
     *                               of uploading texel data.
     * @param srcColorType           The color type of data in texels[].
     * @param texelLevelCount        the number of levels in 'texels'. May be 0, 1, or
     *                               floor(log2(max(dimensions.fWidth, dimensions.fHeight))) + 1.
     *                               It must be the latter if
     *                               GrCaps::createTextureMustSpecifyAllLevels() is true.
     * @return The texture object if successful, otherwise nullptr.
     */
    sk_sp<GrTexture> createTexture(SkISize dimensions,
                                   const GrBackendFormat& format,
                                   GrTextureType textureType,
                                   GrRenderable renderable,
                                   int renderTargetSampleCnt,
                                   skgpu::Budgeted budgeted,
                                   GrProtected isProtected,
                                   GrColorType textureColorType,
                                   GrColorType srcColorType,
                                   const GrMipLevel texels[],
                                   int texelLevelCount,
                                   std::string_view label);
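
    // A minimal usage sketch (hypothetical; 'gpu', 'format', and 'pixels' are assumed
    // locals, not part of this API). For a 40x30 texture the full mip chain has
    // floor(log2(40)) + 1 = 6 levels, so texelLevelCount must be 0, 1, or 6 here.
    // Uploading just the base level with tight row bytes:
    //
    //     GrMipLevel baseLevel = {pixels,
    //                             40 * GrColorTypeBytesPerPixel(GrColorType::kRGBA_8888),
    //                             nullptr};
    //     sk_sp<GrTexture> tex = gpu->createTexture({40, 30}, format, GrTextureType::k2D,
    //                                               GrRenderable::kNo,
    //                                               /*renderTargetSampleCnt=*/1,
    //                                               skgpu::Budgeted::kYes, GrProtected::kNo,
    //                                               GrColorType::kRGBA_8888,
    //                                               GrColorType::kRGBA_8888,
    //                                               &baseLevel, /*texelLevelCount=*/1,
    //                                               /*label=*/"ExampleTexture");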

    /**
     * Simplified createTexture() interface for when there is no initial texel data to upload.
     */
    sk_sp<GrTexture> createTexture(SkISize dimensions,
                                   const GrBackendFormat& format,
                                   GrTextureType textureType,
                                   GrRenderable renderable,
                                   int renderTargetSampleCnt,
                                   GrMipmapped mipmapped,
                                   skgpu::Budgeted budgeted,
                                   GrProtected isProtected,
                                   std::string_view label);

    sk_sp<GrTexture> createCompressedTexture(SkISize dimensions,
                                             const GrBackendFormat& format,
                                             skgpu::Budgeted budgeted,
                                             GrMipmapped mipmapped,
                                             GrProtected isProtected,
                                             const void* data,
                                             size_t dataSize);

    /**
     * Implements GrResourceProvider::wrapBackendTexture
     */
    sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture&,
                                        GrWrapOwnership,
                                        GrWrapCacheable,
                                        GrIOType);

    sk_sp<GrTexture> wrapCompressedBackendTexture(const GrBackendTexture&,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable);

    /**
     * Implements GrResourceProvider::wrapRenderableBackendTexture
     */
    sk_sp<GrTexture> wrapRenderableBackendTexture(const GrBackendTexture&,
                                                  int sampleCnt,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable);

    /**
     * Implements GrResourceProvider::wrapBackendRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&);

    /**
     * Implements GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget
     */
    sk_sp<GrRenderTarget> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                              const GrVkDrawableInfo&);

    /**
     * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
     *
     * @param size           size of buffer to create.
     * @param intendedType   hint to the graphics subsystem about what the buffer will be used for.
     * @param accessPattern  hint to the graphics subsystem about how the data will be accessed.
     *
     * @return the buffer if successful, otherwise nullptr.
     */
    sk_sp<GrGpuBuffer> createBuffer(size_t size,
                                    GrGpuBufferType intendedType,
                                    GrAccessPattern accessPattern);

    /**
     * Resolves MSAA. The resolveRect must already be in the native destination space.
     */
    void resolveRenderTarget(GrRenderTarget*, const SkIRect& resolveRect);

    /**
     * Uses the base level of the texture to recompute the contents of the other levels.
     */
    bool regenerateMipMapLevels(GrTexture*);

    /**
     * If the backend API has stateful texture bindings, this resets them back to defaults.
     */
    void resetTextureBindings();

    /**
     * Reads a rectangle of pixels from a render target. No sRGB/linear conversions are performed.
     *
     * @param surface           the surface to read from
     * @param rect              the rectangle of pixels to read
     * @param surfaceColorType  the color type for this use of the surface.
     * @param dstColorType      the color type of the destination buffer.
     * @param buffer            memory to read the rectangle into.
     * @param rowBytes          the number of bytes between consecutive rows. Must be a multiple
     *                          of dstColorType's bytes-per-pixel. Must be tight to width if
     *                          !caps->readPixelsRowBytesSupport().
     *
     * @return true if the read succeeded, false if not. The read can fail
     *         because the surface doesn't support reading, the color type
     *         is not allowed for the format of the surface, or if the rectangle
     *         read is not contained in the surface.
     */
    bool readPixels(GrSurface* surface,
                    SkIRect rect,
                    GrColorType surfaceColorType,
                    GrColorType dstColorType,
                    void* buffer,
                    size_t rowBytes);
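
    // Illustrative sketch (assumes a valid 'gpu' and 'surface'; not part of this
    // API): reading a 16x16 region into a tightly packed RGBA buffer.
    //
    //     SkIRect rect = SkIRect::MakeWH(16, 16);
    //     size_t bpp = GrColorTypeBytesPerPixel(GrColorType::kRGBA_8888);
    //     SkAutoMalloc storage(rect.width() * rect.height() * bpp);
    //     bool ok = gpu->readPixels(surface, rect,
    //                               GrColorType::kRGBA_8888,   // surfaceColorType
    //                               GrColorType::kRGBA_8888,   // dstColorType
    //                               storage.get(),
    //                               /*rowBytes=*/rect.width() * bpp);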

    /**
     * Updates the pixels in a rectangle of a surface. No sRGB/linear conversions are performed.
     *
     * @param surface            the surface to write to.
     * @param rect               the rectangle of pixels to overwrite
     * @param surfaceColorType   the color type for this use of the surface.
     * @param srcColorType       the color type of the source buffer.
     * @param texels             array of mipmap levels containing texture data. Row bytes must
     *                           be a multiple of srcColorType's bytes-per-pixel. Must be tight
     *                           to level width if !caps->writePixelsRowBytesSupport().
     * @param mipLevelCount      number of levels in 'texels'
     * @param prepForTexSampling After doing write pixels should the surface be prepared for
     *                           texture sampling. This is currently only used by Vulkan for
     *                           inline uploads to set the layout back to sampled after doing the
     *                           upload. Inline uploads currently can happen between draws in a
     *                           single op so it is not trivial to break up the OpsTask into two
     *                           tasks when we see an inline upload. However, once we are able to
     *                           support doing that we can remove this parameter.
     *
     * @return true if the write succeeded, false if not. The write can fail
     *         because the surface doesn't support writing (e.g. read only),
     *         the color type is not allowed for the format of the surface, or
     *         if the rectangle written is not contained in the surface.
     */
    bool writePixels(GrSurface* surface,
                     SkIRect rect,
                     GrColorType surfaceColorType,
                     GrColorType srcColorType,
                     const GrMipLevel texels[],
                     int mipLevelCount,
                     bool prepForTexSampling = false);

    /**
     * Helper for the case of a single level.
     */
    bool writePixels(GrSurface* surface,
                     SkIRect rect,
                     GrColorType surfaceColorType,
                     GrColorType srcColorType,
                     const void* buffer,
                     size_t rowBytes,
                     bool prepForTexSampling = false) {
        GrMipLevel mipLevel = {buffer, rowBytes, nullptr};
        return this->writePixels(surface,
                                 rect,
                                 surfaceColorType,
                                 srcColorType,
                                 &mipLevel,
                                 1,
                                 prepForTexSampling);
    }

    /**
     * Transfer bytes from one GPU buffer to another. The src buffer must have type kXferCpuToGpu
     * and the dst buffer must not. Neither buffer may currently be mapped. The offsets and size
     * must be aligned to GrCaps::transferFromBufferToBufferAlignment.
     *
     * @param src        the buffer to read from
     * @param srcOffset  the aligned offset in the src at which the transfer begins.
     * @param dst        the buffer to write to
     * @param dstOffset  the aligned offset in the dst at which the transfer begins
     * @param size       the aligned number of bytes to transfer.
     */
    bool transferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                    size_t srcOffset,
                                    sk_sp<GrGpuBuffer> dst,
                                    size_t dstOffset,
                                    size_t size);
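
    // Illustrative sketch (assumes a valid 'gpu' plus 'src'/'dst' buffers of the
    // required types and assumed locals 'size', 'srcOffset', 'dstOffset'; not part
    // of this API): rounding a transfer size up to the backend's required alignment
    // before issuing the copy.
    //
    //     size_t align = gpu->caps()->transferFromBufferToBufferAlignment();
    //     size_t alignedSize = SkAlignTo(size, align);
    //     SkASSERT(srcOffset % align == 0 && dstOffset % align == 0);
    //     bool ok = gpu->transferFromBufferToBuffer(src, srcOffset,
    //                                               dst, dstOffset, alignedSize);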

    /**
     * Updates the pixels in a rectangle of a texture using a buffer. If the texture is MIP
     * mapped, the base level is written to.
     *
     * @param texture           the texture to write to.
     * @param rect              the rectangle of pixels in the texture to overwrite
     * @param textureColorType  the color type for this use of the surface.
     * @param bufferColorType   the color type of the transfer buffer's pixel data
     * @param transferBuffer    GrBuffer to read pixels from (type must be "kXferCpuToGpu")
     * @param offset            offset from the start of the buffer
     * @param rowBytes          number of bytes between consecutive rows in the buffer. Must be a
     *                          multiple of bufferColorType's bytes-per-pixel. Must be tight to
     *                          rect.width() if !caps->writePixelsRowBytesSupport().
     */
    bool transferPixelsTo(GrTexture* texture,
                          SkIRect rect,
                          GrColorType textureColorType,
                          GrColorType bufferColorType,
                          sk_sp<GrGpuBuffer> transferBuffer,
                          size_t offset,
                          size_t rowBytes);

    /**
     * Reads the pixels from a rectangle of a surface into a buffer. Use
     * GrCaps::SupportedRead::fOffsetAlignmentForTransferBuffer to determine the requirements for
     * the buffer offset alignment. If the surface is a MIP mapped texture, the base level is
     * read.
     *
     * If successful the row bytes in the buffer are always:
     *   GrColorTypeBytesPerPixel(bufferColorType) * rect.width()
     *
     * Asserts that the caller has passed a properly aligned offset and that the buffer is
     * large enough to hold the result.
     *
     * @param surface           the surface to read from.
     * @param rect              the rectangle of pixels to read
     * @param surfaceColorType  the color type for this use of the surface.
     * @param bufferColorType   the color type of the transfer buffer's pixel data
     * @param transferBuffer    GrBuffer to write pixels to (type must be "kXferGpuToCpu")
     * @param offset            offset from the start of the buffer
     */
    bool transferPixelsFrom(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer> transferBuffer,
                            size_t offset);

    // Called to perform a surface-to-surface copy. Fallbacks to issuing a draw from the src to
    // the dst take place at higher levels; this function implements faster copy paths. The src
    // and dst rects are pre-clipped. The src rect and dst rect are guaranteed to be within the
    // src/dst bounds and non-empty. They must also be in their exact device space coords,
    // including already being transformed for origin if need be. If canDiscardOutsideDstRect is
    // set to true then we don't need to preserve any data on the dst surface outside of the
    // copy.
    //
    // Backends may or may not support src and dst rects with differing dimensions. This can
    // assume that GrCaps.canCopySurface() returned true for these surfaces and rects.
    bool copySurface(GrSurface* dst, const SkIRect& dstRect,
                     GrSurface* src, const SkIRect& srcRect,
                     GrSamplerState::Filter filter);

    // Returns a GrOpsRenderPass which OpsTasks send draw commands to instead of directly
    // to the Gpu object. The 'bounds' rect is the content rect of the renderTarget.
    // If a 'stencil' is provided it will be the one bound to 'renderTarget'. If one is not
    // provided but 'renderTarget' has a stencil buffer then that is a signal that the
    // render target's stencil buffer should be ignored.
    GrOpsRenderPass* getOpsRenderPass(GrRenderTarget* renderTarget,
                                      bool useMSAASurface,
                                      GrAttachment* stencil,
                                      GrSurfaceOrigin,
                                      const SkIRect& bounds,
                                      const GrOpsRenderPass::LoadAndStoreInfo&,
                                      const GrOpsRenderPass::StencilLoadAndStoreInfo&,
                                      const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
                                      GrXferBarrierFlags renderPassXferBarriers);
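
    // Illustrative sketch (assumes a valid 'gpu', 'surface', and 'rect'; not part of
    // this API): sizing the transfer buffer for a GPU-to-CPU read. Row bytes in the
    // buffer are always tight, so the needed size is offset + bpp * width * height.
    //
    //     auto supported = gpu->caps()->supportedReadPixelsColorType(
    //             GrColorType::kRGBA_8888, surface->backendFormat(),
    //             GrColorType::kRGBA_8888);
    //     size_t offset = 0;  // must satisfy supported.fOffsetAlignmentForTransferBuffer
    //     size_t bpp = GrColorTypeBytesPerPixel(supported.fColorType);
    //     sk_sp<GrGpuBuffer> xfer = gpu->createBuffer(
    //             offset + bpp * rect.width() * rect.height(),
    //             GrGpuBufferType::kXferGpuToCpu, kDynamic_GrAccessPattern);
    //     bool ok = gpu->transferPixelsFrom(surface, rect, GrColorType::kRGBA_8888,
    //                                       supported.fColorType, xfer, offset);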

    // Called by GrDrawingManager when flushing.
    // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will
    // also insert any numSemaphore semaphores on the gpu and set the backendSemaphores to match
    // the inserted semaphores.
    void executeFlushInfo(SkSpan<GrSurfaceProxy*>,
                          SkSurface::BackendSurfaceAccess access,
                          const GrFlushInfo&,
                          const skgpu::MutableTextureState* newState);

    // Called before render tasks are executed during a flush.
    virtual void willExecute() {}

    bool submitToGpu(bool syncCpu);

    virtual void submit(GrOpsRenderPass*) = 0;

    virtual GrFence SK_WARN_UNUSED_RESULT insertFence() = 0;
    virtual bool waitFence(GrFence) = 0;
    virtual void deleteFence(GrFence) = 0;

    virtual std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(
            bool isOwned = true) = 0;
    virtual std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                              GrSemaphoreWrapType,
                                                              GrWrapOwnership) = 0;
    virtual void insertSemaphore(GrSemaphore* semaphore) = 0;
    virtual void waitSemaphore(GrSemaphore* semaphore) = 0;

    virtual void addFinishedProc(GrGpuFinishedProc finishedProc,
                                 GrGpuFinishedContext finishedContext) = 0;
    virtual void checkFinishProcs() = 0;
    virtual void finishOutstandingGpuWork() = 0;

    virtual void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) {}

    /**
     * Checks if we detected an OOM from the underlying 3D API and if so returns true and resets
     * the internal OOM state to false. Otherwise, returns false.
     */
    bool checkAndResetOOMed();
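
    // Illustrative end-of-flush sketch (assumes a valid 'gpu'; the exact sequence is
    // driven by GrDrawingManager, not by clients):
    //
    //     gpu->submitToGpu(/*syncCpu=*/false);  // hand recorded work to the backend
    //     gpu->checkFinishProcs();              // fire any finished-procs that are done
    //     if (gpu->checkAndResetOOMed()) {
    //         // the backend reported out-of-memory during the preceding work
    //     }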

    /**
     * Puts this texture in a safe and known state for use across multiple contexts. Depending
     * on the backend, this may return a GrSemaphore. If so, other contexts should wait on that
     * semaphore before using this texture.
     */
    virtual std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;

    /**
     * Frees any backend-specific objects that are not currently in use by the GPU. This is
     * called when the client is trying to free up as much GPU memory as possible. We will not
     * release resources connected to programs/pipelines since the cost to recreate those is
     * significantly higher than that of other resources.
     */
    virtual void releaseUnlockedBackendObjects() {}

    ///////////////////////////////////////////////////////////////////////////
    // Debugging and Stats

    class Stats {
    public:
#if GR_GPU_STATS
        Stats() = default;

        void reset() { *this = {}; }

        int textureCreates() const { return fTextureCreates; }
        void incTextureCreates() { fTextureCreates++; }

        int textureUploads() const { return fTextureUploads; }
        void incTextureUploads() { fTextureUploads++; }

        int transfersToTexture() const { return fTransfersToTexture; }
        void incTransfersToTexture() { fTransfersToTexture++; }

        int transfersFromSurface() const { return fTransfersFromSurface; }
        void incTransfersFromSurface() { fTransfersFromSurface++; }

        void incBufferTransfers() { fBufferTransfers++; }
        int bufferTransfers() const { return fBufferTransfers; }

        int stencilAttachmentCreates() const { return fStencilAttachmentCreates; }
        void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }

        int msaaAttachmentCreates() const { return fMSAAAttachmentCreates; }
        void incMSAAAttachmentCreates() { fMSAAAttachmentCreates++; }

        int numDraws() const { return fNumDraws; }
        void incNumDraws() { fNumDraws++; }

        int numFailedDraws() const { return fNumFailedDraws; }
        void incNumFailedDraws() { ++fNumFailedDraws; }

        int numSubmitToGpus() const { return fNumSubmitToGpus; }
        void incNumSubmitToGpus() { ++fNumSubmitToGpus; }

        int numScratchTexturesReused() const { return fNumScratchTexturesReused; }
        void incNumScratchTexturesReused() { ++fNumScratchTexturesReused; }

        int numScratchMSAAAttachmentsReused() const { return fNumScratchMSAAAttachmentsReused; }
        void incNumScratchMSAAAttachmentsReused() { ++fNumScratchMSAAAttachmentsReused; }

        int renderPasses() const { return fRenderPasses; }
        void incRenderPasses() { fRenderPasses++; }

        int numReorderedDAGsOverBudget() const { return fNumReorderedDAGsOverBudget; }
        void incNumReorderedDAGsOverBudget() { fNumReorderedDAGsOverBudget++; }

#if GR_TEST_UTILS
        void dump(SkString*);
        void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values);
#endif
    private:
        int fTextureCreates = 0;
        int fTextureUploads = 0;
        int fTransfersToTexture = 0;
        int fTransfersFromSurface = 0;
        int fBufferTransfers = 0;
        int fStencilAttachmentCreates = 0;
        int fMSAAAttachmentCreates = 0;
        int fNumDraws = 0;
        int fNumFailedDraws = 0;
        int fNumSubmitToGpus = 0;
        int fNumScratchTexturesReused = 0;
        int fNumScratchMSAAAttachmentsReused = 0;
        int fRenderPasses = 0;
        int fNumReorderedDAGsOverBudget = 0;

#else  // !GR_GPU_STATS

#if GR_TEST_UTILS
        void dump(SkString*) {}
        void dumpKeyValuePairs(SkTArray<SkString>*, SkTArray<double>*) {}
#endif
        void incTextureCreates() {}
        void incTextureUploads() {}
        void incTransfersToTexture() {}
        void incBufferTransfers() {}
        void incTransfersFromSurface() {}
        void incStencilAttachmentCreates() {}
        void incMSAAAttachmentCreates() {}
        void incNumDraws() {}
        void incNumFailedDraws() {}
        void incNumSubmitToGpus() {}
        void incNumScratchTexturesReused() {}
        void incNumScratchMSAAAttachmentsReused() {}
        void incRenderPasses() {}
        void incNumReorderedDAGsOverBudget() {}
#endif
    };

    Stats* stats() { return &fStats; }
    void dumpJSON(SkJSONWriter*) const;
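
    // Illustrative sketch (only meaningful when GR_GPU_STATS is enabled; assumes a
    // valid 'gpu'): counters accumulate until reset() is called.
    //
    //     GrGpu::Stats* stats = gpu->stats();
    //     SkDebugf("draws=%d renderPasses=%d\n", stats->numDraws(), stats->renderPasses());
    //     stats->reset();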

    /**
     * Creates a texture directly in the backend API without wrapping it in a GrTexture.
     * Must be matched with a call to deleteBackendTexture().
     *
     * If data is null the texture is uninitialized.
     *
     * If data represents a color then all texture levels are cleared to that color.
     *
     * If data represents pixmaps then it must have either one pixmap or, if mipmapping
     * is specified, a complete MIP hierarchy of pixmaps. Additionally, if provided, the mip
     * levels must be sized correctly according to the MIP sizes implied by dimensions. They
     * must all have the same color type and that color type must be compatible with the
     * texture format.
     */
    GrBackendTexture createBackendTexture(SkISize dimensions,
                                          const GrBackendFormat&,
                                          GrRenderable,
                                          GrMipmapped,
                                          GrProtected,
                                          std::string_view label);

    bool clearBackendTexture(const GrBackendTexture&,
                             sk_sp<skgpu::RefCntedCallback> finishedCallback,
                             std::array<float, 4> color);

    /**
     * Same as the createBackendTexture case except compressed backend textures can
     * never be renderable.
     */
    GrBackendTexture createCompressedBackendTexture(SkISize dimensions,
                                                    const GrBackendFormat&,
                                                    GrMipmapped,
                                                    GrProtected);

    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                        const void* data,
                                        size_t length);

    virtual bool setBackendTextureState(const GrBackendTexture&,
                                        const skgpu::MutableTextureState&,
                                        skgpu::MutableTextureState* previousState,
                                        sk_sp<skgpu::RefCntedCallback> finishedCallback) {
        return false;
    }

    virtual bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                             const skgpu::MutableTextureState&,
                                             skgpu::MutableTextureState* previousState,
                                             sk_sp<skgpu::RefCntedCallback> finishedCallback) {
        return false;
    }

    /**
     * Frees a texture created by createBackendTexture(). If ownership of the backend
     * texture has been transferred to a context using adopt semantics this should not be called.
     */
    virtual void deleteBackendTexture(const GrBackendTexture&) = 0;
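
    // Illustrative lifetime sketch (assumes a valid 'gpu' and 'format'; not part of
    // this API): every createBackendTexture() must eventually be paired with a
    // deleteBackendTexture() unless the texture was adopted by a context.
    //
    //     GrBackendTexture betex = gpu->createBackendTexture(
    //             {256, 256}, format, GrRenderable::kNo, GrMipmapped::kNo,
    //             GrProtected::kNo, /*label=*/"ExampleBackendTexture");
    //     if (betex.isValid()) {
    //         // ... hand to another context, wrap it, etc. ...
    //         gpu->deleteBackendTexture(betex);
    //     }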

    /**
     * In this case we have a program descriptor and a program info but no render target.
     */
    virtual bool compile(const GrProgramDesc&, const GrProgramInfo&) = 0;

    virtual bool precompileShader(const SkData& key, const SkData& data) { return false; }

#if GR_TEST_UTILS
    /** Check a handle represents an actual texture in the backend API that has not been freed. */
    virtual bool isTestingOnlyBackendTexture(const GrBackendTexture&) const = 0;

    /**
     * Creates a GrBackendRenderTarget that can be wrapped using
     * SkSurface::MakeFromBackendRenderTarget. Ideally this is a non-textureable allocation to
     * differentiate from testing with SkSurface::MakeFromBackendTexture. When sampleCnt > 1 this
     * is used to test client-wrapped allocations with MSAA where Skia does not allocate a
     * separate buffer for resolving. If the color is non-null the backing store should be
     * cleared to the passed in color.
     */
    virtual GrBackendRenderTarget createTestingOnlyBackendRenderTarget(
            SkISize dimensions,
            GrColorType,
            int sampleCount = 1,
            GrProtected = GrProtected::kNo) = 0;

    /**
     * Deletes a GrBackendRenderTarget allocated with the above. Synchronization to make this
     * safe is up to the caller.
     */
    virtual void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) = 0;

    // This is only to be used in GL-specific tests.
    virtual const GrGLContext* glContextForTesting() const { return nullptr; }

    // This is only to be used by testing code.
    virtual void resetShaderCacheForTesting() const {}

    /**
     * Inserted as a pair around a block of code to do a GPU frame capture.
     * Currently only works with the Metal backend.
     */
    virtual void testingOnly_startCapture() {}
    virtual void testingOnly_stopCapture() {}
#endif

    // width and height may be larger than rt (if underlying API allows it).
    // Returns nullptr if compatible sb could not be created, otherwise the caller owns the ref
    // on the GrAttachment.
    virtual sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& colorFormat,
                                                      SkISize dimensions,
                                                      int numStencilSamples) = 0;

    virtual GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) = 0;

    // Creates an MSAA surface to be used as an MSAA attachment on a framebuffer.
    virtual sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                                   const GrBackendFormat& format,
                                                   int numSamples,
                                                   GrProtected isProtected,
                                                   GrMemoryless isMemoryless) = 0;

    void handleDirtyContext() {
        if (fResetBits) {
            this->resetContext();
        }
    }

    virtual void storeVkPipelineCacheData() {}

    // Called before certain draws in order to guarantee coherent results from dst reads.
    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;

protected:
    static bool CompressedDataIsCorrect(SkISize dimensions,
                                        SkImage::CompressionType,
                                        GrMipmapped,
                                        const void* data,
                                        size_t length);

    // If the surface is a texture this marks its mipmaps as dirty.
    void didWriteToSurface(GrSurface* surface,
                           GrSurfaceOrigin origin,
                           const SkIRect* bounds,
                           uint32_t mipLevels = 1) const;

    void setOOMed() { fOOMed = true; }

    Stats fStats;

    // Subclass must call this to initialize caps & compiler in its constructor.
    void initCapsAndCompiler(sk_sp<const GrCaps> caps);
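
    // Illustrative subclass sketch ('GrMockGpuExample' and its caps are hypothetical;
    // only the initCapsAndCompiler() call is prescribed by this class):
    //
    //     GrMockGpuExample::GrMockGpuExample(GrDirectContext* direct,
    //                                        sk_sp<const GrCaps> caps)
    //             : GrGpu(direct) {
    //         this->initCapsAndCompiler(std::move(caps));
    //     }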

private:
    virtual GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                                    const GrBackendFormat&,
                                                    GrRenderable,
                                                    GrMipmapped,
                                                    GrProtected,
                                                    std::string_view label) = 0;

    virtual GrBackendTexture onCreateCompressedBackendTexture(
            SkISize dimensions, const GrBackendFormat&, GrMipmapped, GrProtected) = 0;

    virtual bool onClearBackendTexture(const GrBackendTexture&,
                                       sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                       std::array<float, 4> color) = 0;

    virtual bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                                  sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                                  const void* data,
                                                  size_t length) = 0;

    // Called when the 3D context state is unknown. Subclass should emit any
    // assumed 3D context state and dirty any state cache.
    virtual void onResetContext(uint32_t resetBits) {}

    // Implementation of resetTextureBindings.
    virtual void onResetTextureBindings() {}

    // Overridden by the backend-specific derived class to create objects.
    // Texture size, renderability, format support, and sample count will have already been
    // validated in the base class before onCreateTexture is called.
    // If the ith bit is set in levelClearMask then the ith MIP level should be cleared.
    virtual sk_sp<GrTexture> onCreateTexture(SkISize dimensions,
                                             const GrBackendFormat&,
                                             GrRenderable,
                                             int renderTargetSampleCnt,
                                             skgpu::Budgeted,
                                             GrProtected,
                                             int mipLevelCount,
                                             uint32_t levelClearMask,
                                             std::string_view label) = 0;
    virtual sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                                       const GrBackendFormat&,
                                                       skgpu::Budgeted,
                                                       GrMipmapped,
                                                       GrProtected,
                                                       const void* data,
                                                       size_t dataSize) = 0;
    virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable,
                                                  GrIOType) = 0;

    virtual sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;

    virtual sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                            int sampleCnt,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) = 0;
    virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                        const GrVkDrawableInfo&);

    virtual sk_sp<GrGpuBuffer> onCreateBuffer(size_t size,
                                              GrGpuBufferType intendedType,
                                              GrAccessPattern) = 0;

    // Overridden by the backend-specific derived class to perform the surface read.
    virtual bool onReadPixels(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType dstColorType,
                              void*,
                              size_t rowBytes) = 0;

    // Overridden by the backend-specific derived class to perform the surface write.
    virtual bool onWritePixels(GrSurface*,
                               SkIRect,
                               GrColorType surfaceColorType,
                               GrColorType srcColorType,
                               const GrMipLevel[],
                               int mipLevelCount,
                               bool prepForTexSampling) = 0;

    // Overridden by the backend-specific derived class to perform the buffer transfer.
    virtual bool onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                              size_t srcOffset,
                                              sk_sp<GrGpuBuffer> dst,
                                              size_t dstOffset,
                                              size_t size) = 0;

    // Overridden by the backend-specific derived class to perform the texture transfer.
    virtual bool onTransferPixelsTo(GrTexture*,
                                    SkIRect,
                                    GrColorType textureColorType,
                                    GrColorType bufferColorType,
                                    sk_sp<GrGpuBuffer> transferBuffer,
                                    size_t offset,
                                    size_t rowBytes) = 0;

    // Overridden by the backend-specific derived class to perform the surface transfer.
    virtual bool onTransferPixelsFrom(GrSurface*,
                                      SkIRect,
                                      GrColorType surfaceColorType,
                                      GrColorType bufferColorType,
                                      sk_sp<GrGpuBuffer> transferBuffer,
                                      size_t offset) = 0;

    // Overridden by the backend-specific derived class to perform the resolve.
    virtual void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) = 0;

    // Overridden by the backend-specific derived class to perform mip map level regeneration.
    virtual bool onRegenerateMipMapLevels(GrTexture*) = 0;

    // Overridden by the backend-specific derived class to perform the copy surface.
    virtual bool onCopySurface(GrSurface* dst, const SkIRect& dstRect,
                               GrSurface* src, const SkIRect& srcRect,
                               GrSamplerState::Filter) = 0;

    virtual GrOpsRenderPass* onGetOpsRenderPass(
            GrRenderTarget* renderTarget,
            bool useMSAASurface,
            GrAttachment* stencil,
            GrSurfaceOrigin,
            const SkIRect& bounds,
            const GrOpsRenderPass::LoadAndStoreInfo&,
            const GrOpsRenderPass::StencilLoadAndStoreInfo&,
            const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
            GrXferBarrierFlags renderPassXferBarriers) = 0;

    virtual void prepareSurfacesForBackendAccessAndStateUpdates(
            SkSpan<GrSurfaceProxy*> proxies,
            SkSurface::BackendSurfaceAccess access,
            const skgpu::MutableTextureState* newState) {}

    virtual bool onSubmitToGpu(bool syncCpu) = 0;

    void reportSubmitHistograms();
    virtual void onReportSubmitHistograms() {}

#ifdef SK_ENABLE_DUMP_GPU
    virtual void onDumpJSON(SkJSONWriter*) const {}
#endif

    sk_sp<GrTexture> createTextureCommon(SkISize,
                                         const GrBackendFormat&,
                                         GrTextureType textureType,
                                         GrRenderable,
                                         int renderTargetSampleCnt,
                                         skgpu::Budgeted,
                                         GrProtected,
                                         int mipLevelCnt,
                                         uint32_t levelClearMask,
                                         std::string_view label);

    void resetContext() {
        this->onResetContext(fResetBits);
        fResetBits = 0;
    }

    void callSubmittedProcs(bool success);

    sk_sp<const GrCaps> fCaps;
    // Compiler used for compiling SkSL into backend shader code. We only want to create the
    // compiler once, as there is significant overhead to the first compile.
    std::unique_ptr<SkSL::Compiler> fCompiler;

    uint32_t fResetBits;
    // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
    GrDirectContext* fContext;

    struct SubmittedProc {
        SubmittedProc(GrGpuSubmittedProc proc, GrGpuSubmittedContext context)
                : fProc(proc), fContext(context) {}

        GrGpuSubmittedProc fProc;
        GrGpuSubmittedContext fContext;
    };
    SkSTArray<4, SubmittedProc> fSubmittedProcs;

    bool fOOMed = false;

#if SK_HISTOGRAMS_ENABLED
    int fCurrentSubmitRenderPassCount = 0;
#endif

    friend class GrPathRendering;
    using INHERITED = SkRefCnt;
};

#endif