/*
 * Copyright 2020 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDirectContext_DEFINED
#define GrDirectContext_DEFINED

#include <set>

#include "include/gpu/GrRecordingContext.h"

#include "include/gpu/GrBackendSurface.h"

#include "src/gpu/GrGpuResource.h"

// We shouldn't need this, but currently Android is relying on this being included transitively.
#include "include/core/SkUnPreMultiply.h"

class GrAtlasManager;
class GrBackendSemaphore;
class GrClientMappedBufferManager;
class GrDirectContextPriv;
class GrContextThreadSafeProxy;
struct GrD3DBackendContext;
class GrFragmentProcessor;
class GrGpu;
struct GrGLInterface;
struct GrMtlBackendContext;
struct GrMockOptions;
class GrPath;
class GrResourceCache;
class GrResourceProvider;
class GrStrikeCache;
class GrSurfaceProxy;
class GrSwizzle;
class GrTextureProxy;
struct GrVkBackendContext;

class SkImage;
class SkString;
class SkSurfaceCharacterization;
class SkSurfaceProps;
class SkTaskGroup;
class SkTraceMemoryDump;

namespace skgpu { namespace v1 { class SmallPathAtlasMgr; }}

class SK_API GrDirectContext : public GrRecordingContext {
public:
#ifdef SK_GL
    /**
     * Creates a GrDirectContext for a backend context. If no GrGLInterface is provided then the
     * result of GrGLMakeNativeInterface() is used if it succeeds.
     */
    static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>);
    static sk_sp<GrDirectContext> MakeGL(const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeGL();
#endif

#ifdef SK_VULKAN
    /**
     * The Vulkan context (VkQueue, VkDevice, VkInstance) must be kept alive until the returned
     * GrDirectContext is destroyed. This also means that any objects created with this
     * GrDirectContext (e.g. SkSurfaces, SkImages, etc.) must also be released, as they may hold
     * refs on the GrDirectContext. Once all these objects and the GrDirectContext are released,
     * it is safe to delete the Vulkan objects.
     */
    static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&);
#endif

#ifdef SK_METAL
    /**
     * Makes a GrDirectContext which uses Metal as the backend. The GrMtlBackendContext contains a
     * MTLDevice and MTLCommandQueue which should be used by the backend. These objects must
     * have their own ref which will be released when the GrMtlBackendContext is destroyed.
     * Ganesh will take its own ref on the objects, which will be released when the
     * GrDirectContext is destroyed.
     */
    static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&);
    /**
     * Deprecated.
     *
     * Makes a GrDirectContext which uses Metal as the backend. The device parameter is an
     * MTLDevice and queue is an MTLCommandQueue which should be used by the backend. These objects
     * must have a ref on them that can be transferred to Ganesh, which will release the ref
     * when the GrDirectContext is destroyed.
     */
    static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue);
#endif
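
    // An illustrative creation sketch (comment only, not part of the API). It assumes a native
    // GL context is already current on the calling thread, so passing a null interface lets
    // Skia fall back to GrGLMakeNativeInterface():
    //
    //     GrContextOptions options;
    //     sk_sp<GrDirectContext> context = GrDirectContext::MakeGL(nullptr, options);
    //     if (!context) {
    //         // The native interface could not be created or initialization failed.
    //     }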

#ifdef SK_DIRECT3D
    /**
     * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context
     * must be kept alive until the returned GrDirectContext is first destroyed or abandoned.
     */
    static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&);
#endif

#ifdef SK_DAWN
    static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&,
                                           const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&);
#endif

    static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*);

    ~GrDirectContext() override;

    /**
     * The context normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the context that the state was modified and it should resend. Shouldn't
     * be called frequently for good performance.
     * The flag bits, state, is dependent on which backend is used by the
     * context, either GL or D3D (possible in future).
     */
    void resetContext(uint32_t state = kAll_GrBackendState);

    /**
     * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which
     * the context has modified the bound texture will have texture id 0 bound. This does not
     * flush the context. Calling resetContext() does not change the set that will be bound
     * to texture id 0 on the next call to resetGLTextureBindings(). After this is called
     * all unit/target combinations are considered to have unmodified bindings until the context
     * subsequently modifies them (meaning if this is called twice in a row with no intervening
     * context usage then the second call is a no-op).
     */
    void resetGLTextureBindings();

    /**
     * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
     * usable. Call this if you have lost the associated GPU context, and thus internal texture,
     * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of
     * the context and any of its created resource objects will not make backend 3D API calls.
     * Content rendered but not previously flushed may be lost. After this function is called all
     * subsequent calls on the context will fail or be no-ops.
     *
     * The typical use case for this function is that the underlying 3D context was lost and
     * further API calls may crash.
     *
     * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
     * create the context must be kept alive even after abandoning the context. Those objects must
     * live for the lifetime of the context object itself. The reason for this is so that
     * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be
     * cleaned up even in a device lost state.
     */
    void abandonContext() override;

    /**
     * Returns true if the context was abandoned or if the backend-specific context has
     * gotten into an unrecoverable, lost state (e.g. in the Vulkan backend if we've gotten a
     * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this
     * context.
     */
    bool abandoned() override;
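
    // A hedged usage sketch (illustrative only; `context` is a caller-owned
    // sk_sp<GrDirectContext>): check for device loss before recording more work.
    //
    //     if (context->abandoned()) {
    //         // The device was lost (or abandonContext() was called); release all Skia
    //         // objects created from this context and recreate the GrDirectContext.
    //     }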

    // TODO: Remove this from public after migrating Chrome.
    sk_sp<GrContextThreadSafeProxy> threadSafeProxy();

    /**
     * Checks if the underlying 3D API reported an out-of-memory error. If this returns true it is
     * reset and will return false until another out-of-memory error is reported by the 3D API. If
     * the context is abandoned then this will report false.
     *
     * Currently this is implemented for:
     *
     * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors and
     * therefore hide the error from Skia. Also, it is not advised to use this in combination with
     * enabling GrContextOptions::fSkipGLErrorChecks. That option may prevent the context from ever
     * checking the GL context for OOM.
     *
     * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has
     * occurred.
     */
    bool oomed();

    /**
     * This is similar to abandonContext(); however, the underlying 3D context is not yet lost and
     * the context will clean up all allocated resources before returning. After returning it will
     * assume that the underlying context may no longer be valid.
     *
     * The typical use case for this function is that the client is going to destroy the 3D context
     * but can't guarantee that the context will be destroyed first (perhaps because it may be
     * ref'ed elsewhere by either the client or Skia objects).
     *
     * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
     * create the context must be alive before calling releaseResourcesAndAbandonContext.
     */
    void releaseResourcesAndAbandonContext();

    ///////////////////////////////////////////////////////////////////////////
    // Resource Cache

    /** DEPRECATED
     * Return the current GPU resource cache limits.
     *
     * @param maxResources      If non-null, will be set to -1.
     * @param maxResourceBytes  If non-null, returns maximum number of bytes of
     *                          video memory that can be held in the cache.
     */
    void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;

    /**
     * Return the current GPU resource cache limit in bytes.
     */
    size_t getResourceCacheLimit() const;

    /**
     * Gets the current GPU resource cache usage.
     *
     * @param resourceCount  If non-null, returns the number of resources that are held in the
     *                       cache.
     * @param resourceBytes  If non-null, returns the total number of bytes of video memory held
     *                       in the cache.
     */
    void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    /**
     * Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
     */
    size_t getResourceCachePurgeableBytes() const;

    /** DEPRECATED
     * Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes
     * limit, it will be purged (LRU) to keep the cache within the limit.
     *
     * @param maxResources      Unused.
     * @param maxResourceBytes  The maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
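
    // An illustrative sketch of migrating to the non-deprecated limit API declared just below
    // (the 96 MiB budget is an arbitrary example, not a recommendation):
    //
    //     size_t currentBudget = context->getResourceCacheLimit();
    //     if (currentBudget > 96 * 1024 * 1024) {
    //         context->setResourceCacheLimit(96 * 1024 * 1024);  // purges LRU resources if over
    //     }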

    /**
     * Specify the GPU resource cache limit. If the cache currently exceeds this limit,
     * it will be purged (LRU) to keep the cache within the limit.
     *
     * @param maxResourceBytes  The maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void setResourceCacheLimit(size_t maxResourceBytes);

    /**
     * Frees GPU resources created by the context. Can be called to reduce GPU memory
     * pressure.
     */
    void freeGpuResources();

    /**
     * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
     * otherwise marked for deletion, regardless of whether the context is under budget.
     *
     * If 'scratchResourcesOnly' is true all unlocked scratch resources older than 'msNotUsed' will
     * be purged but the unlocked resources with persistent data will remain. If
     * 'scratchResourcesOnly' is false then all unlocked resources older than 'msNotUsed' will be
     * purged.
     *
     * @param msNotUsed            Only unlocked resources not used in these last milliseconds
     *                             will be cleaned up.
     * @param scratchResourcesOnly If true only unlocked scratch resources will be purged.
     */
    void performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                bool scratchResourcesOnly=false);

    // Temporary compatibility API for Android.
    void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
        this->performDeferredCleanup(msNotUsed);
    }

    /**
     * Purge unlocked resources from the cache until the provided byte count has been reached
     * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
     * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
     * resource types.
     *
     * @param bytesToPurge           the desired number of bytes to be purged.
     * @param preferScratchResources If true scratch resources will be purged prior to other
     *                               resource types.
     */
    void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);
    void purgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GrGpuResourceTag& tag);
    void purgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<int>& exitedPidSet);
    void purgeUnlockAndSafeCacheGpuResources();

    /**
     * This entry point is intended for instances where an app has been backgrounded or
     * suspended.
     * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the
     * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
     * then all unlocked resources will be purged.
     * In either case, after the unlocked resources are purged a separate pass will be made to
     * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
     * some resources with persistent data may be purged to be under budget).
     *
     * @param scratchResourcesOnly If true only unlocked scratch resources will be purged prior
     *                             to enforcing the budget requirements.
     */
    void purgeUnlockedResources(bool scratchResourcesOnly);
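
    // An illustrative backgrounding sketch (onAppBackgrounded is a hypothetical app callback,
    // not part of this API):
    //
    //     void onAppBackgrounded(GrDirectContext* context) {
    //         // Drop all scratch resources (plus anything needed to get under budget),
    //         // then purge anything unused in the last five seconds.
    //         context->purgeUnlockedResources(/*scratchResourcesOnly=*/true);
    //         context->performDeferredCleanup(std::chrono::seconds(5));
    //     }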

    /**
     * Gets the maximum supported texture size.
     */
    using GrRecordingContext::maxTextureSize;

    /**
     * Gets the maximum supported render target size.
     */
    using GrRecordingContext::maxRenderTargetSize;

    /**
     * Can a SkImage be created with the given color type.
     */
    using GrRecordingContext::colorTypeSupportedAsImage;

    /**
     * Can a SkSurface be created with the given color type. To check whether MSAA is supported
     * use maxSurfaceSampleCountForColorType().
     */
    using GrRecordingContext::colorTypeSupportedAsSurface;

    /**
     * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
     * rendering is supported for the color type. 0 is returned if rendering to this color type
     * is not supported at all.
     */
    using GrRecordingContext::maxSurfaceSampleCountForColorType;

    ///////////////////////////////////////////////////////////////////////////
    // Misc.

    /**
     * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
     * executing any more commands on the GPU. If this call returns false, then the GPU back-end
     * will not wait on any passed in semaphores, and the client will still own the semaphores,
     * regardless of the value of deleteSemaphoresAfterWait.
     *
     * If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case
     * it is the client's responsibility to not destroy or attempt to reuse the semaphores until it
     * knows that Skia has finished waiting on them. This can be done by using finishedProcs on
     * flush calls.
     */
    bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
              bool deleteSemaphoresAfterWait = true);

    /**
     * Call to ensure all drawing to the context has been flushed and submitted to the underlying
     * 3D API. This is equivalent to calling flush() with a default GrFlushInfo followed by
     * submit(syncCpu).
     */
    void flushAndSubmit(bool syncCpu = false) {
        this->flush(GrFlushInfo());
        this->submit(syncCpu);
    }

    /**
     * Call to ensure all drawing to the context has been flushed to underlying 3D API specific
     * objects. A call to `submit` is always required to ensure work is actually sent to
     * the gpu. Some specific API details:
     *     GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
     *         sync objects from the flush will not be valid until a submission occurs.
     *
     *     Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
     *         buffer or encoder objects. However, these objects are not sent to the gpu until a
     *         submission occurs.
     *
     * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
     * submitted to the gpu during the next submit call (it is possible Skia failed to create a
     * subset of the semaphores). The client should not wait on these semaphores until after submit
     * has been called, and must keep them alive until then. If this call returns
     * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
     * the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in
     * with the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
     * client is still responsible for deleting any initialized semaphores.
     * Regardless of semaphore submission the context will still be flushed. It should be
     * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
     * happen. It simply means there were no semaphores submitted to the GPU. A caller should only
     * take this as a failure if they passed in semaphores to be submitted.
     */
    GrSemaphoresSubmitted flush(const GrFlushInfo& info);

    void flush() { this->flush({}); }
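
    // An illustrative flush-and-submit sketch (`context` is hypothetical; a capture-less
    // lambda converts to the GrGpuFinishedProc function pointer):
    //
    //     GrFlushInfo info;
    //     info.fFinishedProc = [](GrGpuFinishedContext) { /* GPU finished this work */ };
    //     context->flush(info);
    //     context->submit();  // nothing reaches the GPU until submit() is called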

    /**
     * Submit outstanding work to the gpu from all previously un-submitted flushes. The return
     * value of the submit will indicate whether or not the submission to the GPU was successful.
     *
     * If the call returns true, all previously passed in semaphores in flush calls will have been
     * submitted to the GPU and they can safely be waited on. The caller should wait on those
     * semaphores or perform some other global synchronization before deleting the semaphores.
     *
     * If it returns false, then those same semaphores will not have been submitted and we will
     * not try to submit them again. The caller is free to delete the semaphores at any time.
     *
     * If the syncCpu flag is true this function will return once the gpu has finished with all
     * submitted work.
     */
    bool submit(bool syncCpu = false);

    /**
     * Checks whether any asynchronous work is complete and if so calls related callbacks.
     */
    void checkAsyncWorkCompletion();

    /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
    // Chrome is using this!
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
    void dumpMemoryStatisticsByTag(SkTraceMemoryDump* traceMemoryDump,
                                   const GrGpuResourceTag& tag) const;

    bool supportsDistanceFieldText() const;

    void storeVkPipelineCacheData();

    /**
     * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
     * It is guaranteed that this backend format will be the one used by the following
     * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
     *
     * The caller should check that the returned format is valid.
     */
    using GrRecordingContext::defaultBackendFormat;

    /**
     * The explicitly allocated backend texture API allows clients to use Skia to create backend
     * objects outside of Skia proper (i.e., Skia's caching system will not know about them).
     *
     * It is the client's responsibility to delete all these objects (using deleteBackendTexture)
     * before deleting the context used to create them. If the backend is Vulkan, the textures must
     * be deleted before abandoning the context as well. Additionally, clients should only delete
     * these objects on the thread for which that context is active.
     *
     * The client is responsible for ensuring synchronization between different uses
     * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the
     * surface, rewrapping it in an image and drawing the image will require explicit
     * synchronization on the client's part).
     */

    /**
     * If possible, create an uninitialized backend texture. The client should ensure that the
     * returned backend texture is valid.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_UNDEFINED.
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          const GrBackendFormat&,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo);
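
    // An illustrative lifetime sketch using the SkColorType variant declared below (size,
    // color type, and usage are arbitrary examples):
    //
    //     GrBackendTexture tex = context->createBackendTexture(
    //             256, 256, kRGBA_8888_SkColorType, GrMipmapped::kNo, GrRenderable::kYes);
    //     if (tex.isValid()) {
    //         // ... wrap it in an SkSurface or SkImage and draw ...
    //         context->deleteBackendTexture(tex);  // client-owned: must be deleted explicitly
    //     }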

    /**
     * If possible, create an uninitialized backend texture. The client should ensure that the
     * returned backend texture is valid.
     * If successful, the created backend texture will be compatible with the provided
     * SkColorType.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_UNDEFINED.
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          SkColorType,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo);

    /**
     * If possible, create a backend texture initialized to a particular color. The client should
     * ensure that the returned backend texture is valid. The client can pass in a finishedProc
     * to be notified when the data has been uploaded by the gpu and the texture can be deleted.
     * The client is required to call `submit` to send the upload work to the gpu. The
     * finishedProc will always get called even if we failed to create the GrBackendTexture.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          const GrBackendFormat&,
                                          const SkColor4f& color,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a backend texture initialized to a particular color. The client should
     * ensure that the returned backend texture is valid. The client can pass in a finishedProc
     * to be notified when the data has been uploaded by the gpu and the texture can be deleted.
     * The client is required to call `submit` to send the upload work to the gpu. The
     * finishedProc will always get called even if we failed to create the GrBackendTexture.
     * If successful, the created backend texture will be compatible with the provided
     * SkColorType.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          SkColorType,
                                          const SkColor4f& color,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);
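
    // An illustrative color-initialized creation sketch (dimensions and color are arbitrary;
    // note the upload only reaches the GPU once submit() is called):
    //
    //     GrBackendTexture tex = context->createBackendTexture(
    //             64, 64, kRGBA_8888_SkColorType, SkColors::kRed,
    //             GrMipmapped::kNo, GrRenderable::kNo);
    //     context->submit();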

    /**
     * If possible, create a backend texture initialized with the provided pixmap data. The client
     * should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can
     * be deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * If successful, the created backend texture will be compatible with the provided
     * pixmap(s). Compatible, in this case, means that the backend format will be the result
     * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be deleted
     * when this call returns.
     * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
     * the data for all the mipmap levels must be provided. In the mipmapped case all the
     * colortypes of the provided pixmaps must be the same. Additionally, all the miplevels
     * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). The
     * GrSurfaceOrigin controls whether the pixmap data is vertically flipped in the texture.
     * Note: the pixmaps' alphatypes and colorspaces are ignored.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(const SkPixmap srcData[],
                                          int numLevels,
                                          GrSurfaceOrigin,
                                          GrRenderable,
                                          GrProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);

    /**
     * Convenience version of createBackendTexture() that takes just a base level pixmap.
     */
    GrBackendTexture createBackendTexture(const SkPixmap& srcData,
                                          GrSurfaceOrigin textureOrigin,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(&srcData, 1, textureOrigin, renderable, isProtected,
                                          finishedProc, finishedContext);
    }

    // Deprecated versions that do not take origin and assume top-left.
    GrBackendTexture createBackendTexture(const SkPixmap srcData[],
                                          int numLevels,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(srcData,
                                          numLevels,
                                          kTopLeft_GrSurfaceOrigin,
                                          renderable,
                                          isProtected,
                                          finishedProc,
                                          finishedContext);
    }

    GrBackendTexture createBackendTexture(const SkPixmap& srcData,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(&srcData,
                                          1,
                                          renderable,
                                          isProtected,
                                          finishedProc,
                                          finishedContext);
    }

    /**
     * If possible, updates a backend texture to be filled to a particular color. The client
     * should check the return value to see if the update was successful. The client can pass in
     * a finishedProc to be notified when the data has been uploaded by the gpu and the texture
     * can be deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to update the GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              const SkColor4f& color,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext);
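
    // An illustrative update sketch (`tex` is a previously created, valid GrBackendTexture;
    // the color fill is an arbitrary example):
    //
    //     context->updateBackendTexture(tex, SkColors::kTransparent,
    //                                   /*finishedProc=*/nullptr, /*finishedContext=*/nullptr);
    //     context->submit();  // the upload is only sent to the GPU on submit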

    /**
     * If possible, updates a backend texture to be filled to a particular color. The data in
     * GrBackendTexture and passed in color is interpreted with respect to the passed in
     * SkColorType. The client should check the return value to see if the update was successful.
     * The client can pass in a finishedProc to be notified when the data has been uploaded by the
     * gpu and the texture can be deleted. The client is required to call `submit` to send
     * the upload work to the gpu. The finishedProc will always get called even if we failed to
     * update the GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              SkColorType skColorType,
                              const SkColor4f& color,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture filled with the provided pixmap data. The client
     * should check the return value to see if the update was successful. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can
     * be deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to update the GrBackendTexture.
     * The backend texture must be compatible with the provided pixmap(s). Compatible, in this
     * case, means that the backend format is compatible with the base pixmap's colortype. The src
     * data can be deleted when this call returns.
     * If the backend texture is mip mapped, the data for all the mipmap levels must be provided.
     * In the mipmapped case all the colortypes of the provided pixmaps must be the same.
     * Additionally, all the miplevels must be sized correctly (please see
     * SkMipmap::ComputeLevelSize and ComputeLevelCount). The GrSurfaceOrigin controls whether the
     * pixmap data is vertically flipped in the texture.
     * Note: the pixmaps' alphatypes and colorspaces are ignored.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              const SkPixmap srcData[],
                              int numLevels,
                              GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin,
                              GrGpuFinishedProc finishedProc = nullptr,
                              GrGpuFinishedContext finishedContext = nullptr);

    /**
     * Convenience version of updateBackendTexture that takes just a base level pixmap.
     */
    bool updateBackendTexture(const GrBackendTexture& texture,
                              const SkPixmap& srcData,
                              GrSurfaceOrigin textureOrigin = kTopLeft_GrSurfaceOrigin,
                              GrGpuFinishedProc finishedProc = nullptr,
                              GrGpuFinishedContext finishedContext = nullptr) {
        return this->updateBackendTexture(texture,
                                          &srcData,
                                          1,
                                          textureOrigin,
                                          finishedProc,
                                          finishedContext);
    }

    // Deprecated version that does not take origin and assumes top-left.
    bool updateBackendTexture(const GrBackendTexture& texture,
                              const SkPixmap srcData[],
                              int numLevels,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext) {
        return this->updateBackendTexture(texture,
                                          srcData,
                                          numLevels,
                                          kTopLeft_GrSurfaceOrigin,
                                          finishedProc,
                                          finishedContext);
    }

    /**
     * Retrieve the GrBackendFormat for a given SkImage::CompressionType. This is
     * guaranteed to match the backend format used by the following
     * createCompressedBackendTexture methods that take a CompressionType.
     *
     * The caller should check that the returned format is valid.
     */
    using GrRecordingContext::compressedBackendFormat;

    /**
     * If possible, create a compressed backend texture initialized to a particular color. The
     * client should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can
     * be deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    const GrBackendFormat&,
                                                    const SkColor4f& color,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    SkImage::CompressionType,
                                                    const SkColor4f& color,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a backend texture initialized with the provided raw data. The client
     * should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can
     * be deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
     * the data for all the mipmap levels must be provided. Additionally, all the miplevels
     * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount).
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    const GrBackendFormat&,
                                                    const void* data, size_t dataSize,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    SkImage::CompressionType,
                                                    const void* data, size_t dataSize,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);
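
    // An illustrative compressed-upload sketch (`payload`/`payloadSize` stand in for a
    // pre-encoded ETC2 image of the stated dimensions; all names here are hypothetical):
    //
    //     GrBackendTexture tex = context->createCompressedBackendTexture(
    //             128, 128, SkImage::CompressionType::kETC2_RGB8_UNORM,
    //             payload, payloadSize, GrMipmapped::kNo);
    //     context->submit();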

    /**
     * If possible, updates a backend texture filled with the provided color. If the texture is
     * mipmapped, all levels of the mip chain will be updated to have the supplied color. The
     * client should check the return value to see if the update was successful. The client can
     * pass in a finishedProc to be notified when the data has been uploaded by the gpu and the
     * texture can be deleted. The client is required to call `submit` to send the upload work to
     * the gpu. The finishedProc will always get called even if we failed to update the
     * GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        const SkColor4f& color,
                                        GrGpuFinishedProc finishedProc,
                                        GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture filled with the provided raw data. The client
     * should check the return value to see if the update was successful. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can
     * be deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to update the GrBackendTexture.
     * If a mipmapped texture is passed in, the data for all the mipmap levels must be provided.
     * Additionally, all the miplevels must be sized correctly (please see
     * SkMipmap::ComputeLevelSize and ComputeLevelCount).
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        const void* data,
                                        size_t dataSize,
                                        GrGpuFinishedProc finishedProc,
                                        GrGpuFinishedContext finishedContext);

    /**
     * Updates the state of the GrBackendTexture/RenderTarget to have the passed in
     * GrBackendSurfaceMutableState. All objects that wrap the backend surface (i.e. SkSurfaces
     * and SkImages) will also be aware of this state change. This call does not submit the state
     * change to the gpu, but requires the client to call `submit` to send it to the GPU. The work
     * for this call is ordered linearly with all other calls that require `submit` to be
     * called (e.g. updateBackendTexture and flush). If finishedProc is not null then it will be
     * called with finishedContext after the state transition is known to have occurred on the
     * GPU.
     *
     * See GrBackendSurfaceMutableState to see what state can be set via this call.
     *
     * If the backend API is Vulkan, the caller can set the GrBackendSurfaceMutableState's
     * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED
     * to tell Skia to not change those respective states.
     *
     * If previousState is not null and this returns true, then Skia will have filled in
     * previousState to have the values of the state before this call.
     */
    bool setBackendTextureState(const GrBackendTexture&,
                                const GrBackendSurfaceMutableState&,
                                GrBackendSurfaceMutableState* previousState = nullptr,
                                GrGpuFinishedProc finishedProc = nullptr,
                                GrGpuFinishedContext finishedContext = nullptr);
    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const GrBackendSurfaceMutableState&,
                                     GrBackendSurfaceMutableState* previousState = nullptr,
                                     GrGpuFinishedProc finishedProc = nullptr,
                                     GrGpuFinishedContext finishedContext = nullptr);

    void deleteBackendTexture(GrBackendTexture);
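
    // An illustrative Vulkan layout-transition sketch (assumes a Vulkan-backed context and a
    // valid `tex`; GrBackendSurfaceMutableState is constructed here from a layout and a queue
    // family index, per GrBackendSurfaceMutableState.h):
    //
    //     GrBackendSurfaceMutableState newState(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
    //                                           VK_QUEUE_FAMILY_IGNORED);
    //     context->setBackendTextureState(tex, newState);
    //     context->submit();  // the transition executes on the GPU at submission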

    // This interface allows clients to pre-compile shaders and populate the runtime program cache.
    // The key and data blobs should be the ones passed to the PersistentCache, in SkSL format.
    //
    // Steps to use this API:
    //
    // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to
    //    something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL.
    //    This will ensure that the blobs are SkSL, and are suitable for pre-compilation.
    //
    // 2) Run your application, and save all of the key/data pairs that are fed to the cache.
    //
    // 3) Switch over to shipping your application. Include the key/data pairs from above.
    //
    // 4) At startup (or any convenient time), call precompileShader for each key/data pair.
    //    This will compile the SkSL to create a GL program, and populate the runtime cache.
    //
    // This is only guaranteed to work if the context/device used in step #2 is created in the
    // same way as the one used in step #4, and the same GrContextOptions are specified.
    // Using cached shader blobs on a different device or driver is undefined.
    bool precompileShader(const SkData& key, const SkData& data);

#ifdef SK_ENABLE_DUMP_GPU
    /** Returns a string with detailed information about the context & GPU, in JSON format. */
    SkString dump() const;
#endif

    class DirectContextID {
    public:
        static GrDirectContext::DirectContextID Next();

        DirectContextID() : fID(SK_InvalidUniqueID) {}

        bool operator==(const DirectContextID& that) const { return fID == that.fID; }
        bool operator!=(const DirectContextID& that) const { return !(*this == that); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isValid() const { return fID != SK_InvalidUniqueID; }

    private:
        constexpr DirectContextID(uint32_t id) : fID(id) {}

        uint32_t fID;
    };

    DirectContextID directContextID() const { return fDirectContextID; }

    // Provides access to functions that aren't part of the public API.
    GrDirectContextPriv priv();
    const GrDirectContextPriv priv() const;  // NOLINT(readability-const-return-type)

    /**
     * Set the current resource tag for gpu cache recycling.
     */
    void setCurrentGrResourceTag(const GrGpuResourceTag& tag);

    /**
     * Pop the current resource tag.
     */
    void popGrResourceTag();

    /**
     * Get the current resource tag for gpu cache recycling.
     *
     * @return the current GrGpuResourceTag.
     */
    GrGpuResourceTag getCurrentGrResourceTag() const;

    /**
     * Releases GrGpuResource objects and removes them from the cache by tag.
     */
    void releaseByTag(const GrGpuResourceTag& tag);
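
    // An illustrative tag-scoped recycling sketch (illustrative only; the tag is whatever was
    // installed via setCurrentGrResourceTag above):
    //
    //     GrGpuResourceTag tag = context->getCurrentGrResourceTag();
    //     // ... render work whose cached resources carry `tag` ...
    //     context->releaseByTag(tag);  // frees every cached resource carrying this tag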

    /**
     * Get all GrGpuResource tags.
     *
     * @return all GrGpuResourceTags.
     */
    std::set<GrGpuResourceTag> getAllGrGpuResourceTags() const;

    class SK_API ResourceCollector {
    public:
        virtual void collectSurfaceProxy(sk_sp<GrSurfaceProxy>& surface) = 0;
    };

    void setResourceCollector(ResourceCollector* collector) {
        fResourceCollector = collector;
    }

    void collectResource(sk_sp<GrSurfaceProxy>& surface) {
        if (fResourceCollector != nullptr) {
            fResourceCollector->collectSurfaceProxy(surface);
        }
    }

protected:
    GrDirectContext(GrBackendApi backend, const GrContextOptions& options);

    bool init() override;

    GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); }
    skgpu::v1::SmallPathAtlasMgr* onGetSmallPathAtlasMgr();

    GrDirectContext* asDirectContext() override { return this; }

private:
    // This call will make sure our work on the GPU is finished and will execute any outstanding
    // asynchronous work (e.g. calling finished procs, freeing resources, etc.) related to the
    // outstanding work on the gpu. The main use currently for this function is when tearing down
    // or abandoning the context.
    //
    // When we finish up work on the GPU it could trigger callbacks to the client. In the case we
    // are abandoning the context we don't want the client to be able to use the GrDirectContext
    // to issue more commands during the callback. Thus before calling this function we set the
    // GrDirectContext's state to be abandoned. However, we need to be able to get past the
    // abandoned check in the call to know that it is safe to execute this. The
    // shouldExecuteWhileAbandoned bool is used for this signal.
    void syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned);

    const DirectContextID fDirectContextID;
    // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
    // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
    // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
    // invoked after objects they depend upon have already been destroyed.
    std::unique_ptr<SkTaskGroup> fTaskGroup;
    std::unique_ptr<GrStrikeCache> fStrikeCache;
    sk_sp<GrGpu> fGpu;
    std::unique_ptr<GrResourceCache> fResourceCache;
    std::unique_ptr<GrResourceProvider> fResourceProvider;

    bool fDidTestPMConversions;
    // true if the PM/UPM conversion succeeded; false otherwise
    bool fPMUPMConversionsRoundTrip;

    GrContextOptions::PersistentCache* fPersistentCache;

    std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager;
    std::unique_ptr<GrAtlasManager> fAtlasManager;

    std::unique_ptr<skgpu::v1::SmallPathAtlasMgr> fSmallPathAtlasMgr;

    ResourceCollector* fResourceCollector = nullptr;

    friend class GrDirectContextPriv;

    using INHERITED = GrRecordingContext;
};

#endif