1 /* 2 * Copyright 2020 Google Inc. 3 * 4 * Use of this source code is governed by a BSD-style license that can be 5 * found in the LICENSE file. 6 */ 7 8 #ifndef GrDirectContext_DEFINED 9 #define GrDirectContext_DEFINED 10 11 #include "include/gpu/GrRecordingContext.h" 12 13 #include "include/gpu/GrBackendSurface.h" 14 15 // We shouldn't need this but currently Android is relying on this being include transitively. 16 #include "include/core/SkUnPreMultiply.h" 17 18 class GrAtlasManager; 19 class GrBackendSemaphore; 20 class GrClientMappedBufferManager; 21 class GrDirectContextPriv; 22 class GrContextThreadSafeProxy; 23 struct GrD3DBackendContext; 24 class GrFragmentProcessor; 25 class GrGpu; 26 struct GrGLInterface; 27 struct GrMtlBackendContext; 28 struct GrMockOptions; 29 class GrPath; 30 class GrResourceCache; 31 class GrResourceProvider; 32 class GrStrikeCache; 33 class GrSurfaceProxy; 34 class GrTextureProxy; 35 struct GrVkBackendContext; 36 37 class SkImage; 38 class SkString; 39 class SkSurfaceCharacterization; 40 class SkSurfaceProps; 41 class SkTaskGroup; 42 class SkTraceMemoryDump; 43 44 namespace skgpu { 45 class Swizzle; 46 namespace v1 { class SmallPathAtlasMgr; } 47 } 48 49 class SK_API GrDirectContext : public GrRecordingContext { 50 public: 51 #ifdef SK_GL 52 /** 53 * Creates a GrDirectContext for a backend context. If no GrGLInterface is provided then the 54 * result of GrGLMakeNativeInterface() is used if it succeeds. 55 */ 56 static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&); 57 static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>); 58 static sk_sp<GrDirectContext> MakeGL(const GrContextOptions&); 59 static sk_sp<GrDirectContext> MakeGL(); 60 #endif 61 62 #ifdef SK_VULKAN 63 /** 64 * The Vulkan context (VkQueue, VkDevice, VkInstance) must be kept alive until the returned 65 * GrDirectContext is destroyed. This also means that any objects created with this 66 * GrDirectContext (e.g. 
SkSurfaces, SkImages, etc.) must also be released as they may hold 67 * refs on the GrDirectContext. Once all these objects and the GrDirectContext are released, 68 * then it is safe to delete the vulkan objects. 69 */ 70 static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&); 71 static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&); 72 #endif 73 74 #ifdef SK_METAL 75 /** 76 * Makes a GrDirectContext which uses Metal as the backend. The GrMtlBackendContext contains a 77 * MTLDevice and MTLCommandQueue which should be used by the backend. These objects must 78 * have their own ref which will be released when the GrMtlBackendContext is destroyed. 79 * Ganesh will take its own ref on the objects which will be released when the GrDirectContext 80 * is destroyed. 81 */ 82 static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&, const GrContextOptions&); 83 static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&); 84 /** 85 * Deprecated. 86 * 87 * Makes a GrDirectContext which uses Metal as the backend. The device parameter is an 88 * MTLDevice and queue is an MTLCommandQueue which should be used by the backend. These objects 89 * must have a ref on them that can be transferred to Ganesh, which will release the ref 90 * when the GrDirectContext is destroyed. 91 */ 92 static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue, const GrContextOptions&); 93 static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue); 94 #endif 95 96 #ifdef SK_DIRECT3D 97 /** 98 * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context 99 * must be kept alive until the returned GrDirectContext is first destroyed or abandoned. 
100 */ 101 static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&, const GrContextOptions&); 102 static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&); 103 #endif 104 105 #ifdef SK_DAWN 106 static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&, 107 const GrContextOptions&); 108 static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&); 109 #endif 110 111 static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&); 112 static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*); 113 114 ~GrDirectContext() override; 115 116 /** 117 * The context normally assumes that no outsider is setting state 118 * within the underlying 3D API's context/device/whatever. This call informs 119 * the context that the state was modified and it should resend. Shouldn't 120 * be called frequently for good performance. 121 * The flag bits, state, is dependent on which backend is used by the 122 * context, either GL or D3D (possible in future). 123 */ 124 void resetContext(uint32_t state = kAll_GrBackendState); 125 126 /** 127 * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which 128 * the context has modified the bound texture will have texture id 0 bound. This does not 129 * flush the context. Calling resetContext() does not change the set that will be bound 130 * to texture id 0 on the next call to resetGLTextureBindings(). After this is called 131 * all unit/target combinations are considered to have unmodified bindings until the context 132 * subsequently modifies them (meaning if this is called twice in a row with no intervening 133 * context usage then the second call is a no-op.) 134 */ 135 void resetGLTextureBindings(); 136 137 /** 138 * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer 139 * usable. Call this if you have lost the associated GPU context, and thus internal texture, 140 * buffer, etc. references/IDs are now invalid. 
Calling this ensures that the destructors of the 141 * context and any of its created resource objects will not make backend 3D API calls. Content 142 * rendered but not previously flushed may be lost. After this function is called all subsequent 143 * calls on the context will fail or be no-ops. 144 * 145 * The typical use case for this function is that the underlying 3D context was lost and further 146 * API calls may crash. 147 * 148 * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to 149 * create the context must be kept alive even after abandoning the context. Those objects must 150 * live for the lifetime of the context object itself. The reason for this is so that 151 * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be 152 * cleaned up even in a device lost state. 153 */ 154 void abandonContext() override; 155 156 /** 157 * Returns true if the context was abandoned or if the backend specific context has 158 * gotten into an unrecoverable, lost state (e.g. in Vulkan backend if we've gotten a 159 * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this 160 * context. 161 */ 162 bool abandoned() override; 163 164 // TODO: Remove this from public after migrating Chrome. 165 sk_sp<GrContextThreadSafeProxy> threadSafeProxy(); 166 167 /** 168 * Checks if the underlying 3D API reported an out-of-memory error. If this returns true it is 169 * reset and will return false until another out-of-memory error is reported by the 3D API. If 170 * the context is abandoned then this will report false. 171 * 172 * Currently this is implemented for: 173 * 174 * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors and 175 * therefore hide the error from Skia. Also, it is not advised to use this in combination with 176 * enabling GrContextOptions::fSkipGLErrorChecks.
That option may prevent the context from ever 177 * checking the GL context for OOM. 178 * 179 * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has 180 * occurred. 181 */ 182 bool oomed(); 183 184 /** 185 * This is similar to abandonContext() however the underlying 3D context is not yet lost and 186 * the context will cleanup all allocated resources before returning. After returning it will 187 * assume that the underlying context may no longer be valid. 188 * 189 * The typical use case for this function is that the client is going to destroy the 3D context 190 * but can't guarantee that context will be destroyed first (perhaps because it may be ref'ed 191 * elsewhere by either the client or Skia objects). 192 * 193 * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to 194 * create the context must be alive before calling releaseResourcesAndAbandonContext. 195 */ 196 void releaseResourcesAndAbandonContext(); 197 198 /////////////////////////////////////////////////////////////////////////// 199 // Resource Cache 200 201 /** DEPRECATED 202 * Return the current GPU resource cache limits. 203 * 204 * @param maxResources If non-null, will be set to -1. 205 * @param maxResourceBytes If non-null, returns maximum number of bytes of 206 * video memory that can be held in the cache. 207 */ 208 void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const; 209 210 /** 211 * Return the current GPU resource cache limit in bytes. 212 */ 213 size_t getResourceCacheLimit() const; 214 215 /** 216 * Gets the current GPU resource cache usage. 217 * 218 * @param resourceCount If non-null, returns the number of resources that are held in the 219 * cache. 220 * @param maxResourceBytes If non-null, returns the total number of bytes of video memory held 221 * in the cache. 
222 */ 223 void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const; 224 225 /** 226 * Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources. 227 */ 228 size_t getResourceCachePurgeableBytes() const; 229 230 /** DEPRECATED 231 * Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes 232 * limit, it will be purged (LRU) to keep the cache within the limit. 233 * 234 * @param maxResources Unused. 235 * @param maxResourceBytes The maximum number of bytes of video memory 236 * that can be held in the cache. 237 */ 238 void setResourceCacheLimits(int maxResources, size_t maxResourceBytes); 239 240 /** 241 * Specify the GPU resource cache limit. If the cache currently exceeds this limit, 242 * it will be purged (LRU) to keep the cache within the limit. 243 * 244 * @param maxResourceBytes The maximum number of bytes of video memory 245 * that can be held in the cache. 246 */ 247 void setResourceCacheLimit(size_t maxResourceBytes); 248 249 /** 250 * Frees GPU resources created by the context. Can be called to reduce GPU memory 251 * pressure. 252 */ 253 void freeGpuResources(); 254 255 /** 256 * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are 257 * otherwise marked for deletion, regardless of whether the context is under budget. 258 * 259 * If 'scratchResourcesOnly' is true all unlocked scratch resources older than 'msNotUsed' will 260 * be purged but the unlocked resources with persistent data will remain. If 261 * 'scratchResourcesOnly' is false then all unlocked resources older than 'msNotUsed' will be 262 * purged. 263 * 264 * @param msNotUsed Only unlocked resources not used in these last milliseconds 265 * will be cleaned up. 266 * @param scratchResourcesOnly If true only unlocked scratch resources will be purged.
267 */ 268 void performDeferredCleanup(std::chrono::milliseconds msNotUsed, 269 bool scratchResourcesOnly=false); 270 271 // Temporary compatibility API for Android. 272 void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) { 273 this->performDeferredCleanup(msNotUsed); 274 } 275 276 /** 277 * Purge unlocked resources from the cache until the provided byte count has been reached 278 * or we have purged all unlocked resources. The default policy is to purge in LRU order, but 279 * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other 280 * resource types. 281 * 282 * @param bytesToPurge the desired number of bytes to be purged. 283 * @param preferScratchResources If true scratch resources will be purged prior to other 284 * resource types. 285 */ 286 void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources); 287 288 /** 289 * This entry point is intended for instances where an app has been backgrounded or 290 * suspended. 291 * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the 292 * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false 293 * then all unlocked resources will be purged. 294 * In either case, after the unlocked resources are purged a separate pass will be made to 295 * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true 296 * some resources with persistent data may be purged to be under budget). 297 * 298 * @param scratchResourcesOnly If true only unlocked scratch resources will be purged prior to 299 * enforcing the budget requirements. 300 */ 301 void purgeUnlockedResources(bool scratchResourcesOnly); 302 303 /** 304 * Gets the maximum supported texture size. 305 */ 306 using GrRecordingContext::maxTextureSize; 307 308 /** 309 * Gets the maximum supported render target size.
310 */ 311 using GrRecordingContext::maxRenderTargetSize; 312 313 /** 314 * Can a SkImage be created with the given color type. 315 */ 316 using GrRecordingContext::colorTypeSupportedAsImage; 317 318 /** 319 * Can a SkSurface be created with the given color type. To check whether MSAA is supported 320 * use maxSurfaceSampleCountForColorType(). 321 */ 322 using GrRecordingContext::colorTypeSupportedAsSurface; 323 324 /** 325 * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA 326 * rendering is supported for the color type. 0 is returned if rendering to this color type 327 * is not supported at all. 328 */ 329 using GrRecordingContext::maxSurfaceSampleCountForColorType; 330 331 /////////////////////////////////////////////////////////////////////////// 332 // Misc. 333 334 /** 335 * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before 336 * executing any more commands on the GPU. If this call returns false, then the GPU back-end 337 * will not wait on any passed in semaphores, and the client will still own the semaphores, 338 * regardless of the value of deleteSemaphoresAfterWait. 339 * 340 * If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case 341 * it is the client's responsibility to not destroy or attempt to reuse the semaphores until it 342 * knows that Skia has finished waiting on them. This can be done by using finishedProcs on 343 * flush calls. 344 */ 345 bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores, 346 bool deleteSemaphoresAfterWait = true); 347 348 /** 349 * Call to ensure all drawing to the context has been flushed and submitted to the underlying 3D 350 * API. This is equivalent to calling GrContext::flush with a default GrFlushInfo followed by 351 * GrContext::submit(syncCpu). 
352 */ 353 void flushAndSubmit(bool syncCpu = false) { 354 this->flush(GrFlushInfo()); 355 this->submit(syncCpu); 356 } 357 358 /** 359 * Call to ensure all drawing to the context has been flushed to underlying 3D API specific 360 * objects. A call to `submit` is always required to ensure work is actually sent to 361 * the gpu. Some specific API details: 362 * GL: Commands are actually sent to the driver, but glFlush is never called. Thus some 363 * sync objects from the flush will not be valid until a submission occurs. 364 * 365 * Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command 366 * buffer or encoder objects. However, these objects are not sent to the gpu until a 367 * submission occurs. 368 * 369 * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be 370 * submitted to the gpu during the next submit call (it is possible Skia failed to create a 371 * subset of the semaphores). The client should not wait on these semaphores until after submit 372 * has been called, and must keep them alive until then. If this call returns 373 * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on 374 * the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in with 375 * the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the 376 * client is still responsible for deleting any initialized semaphores. 377 * Regardless of semaphore submission the context will still be flushed. It should be 378 * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not 379 * happen. It simply means there were no semaphores submitted to the GPU. A caller should only 380 * take this as a failure if they passed in semaphores to be submitted.
381 */ 382 GrSemaphoresSubmitted flush(const GrFlushInfo& info); 383 384 void flush() { this->flush({}); } 385 386 /** 387 * Submit outstanding work to the gpu from all previously un-submitted flushes. The return 388 * value of the submit will indicate whether or not the submission to the GPU was successful. 389 * 390 * If the call returns true, all previously passed in semaphores in flush calls will have been 391 * submitted to the GPU and they can safely be waited on. The caller should wait on those 392 * semaphores or perform some other global synchronization before deleting the semaphores. 393 * 394 * If it returns false, then those same semaphores will not have been submitted and we will not 395 * try to submit them again. The caller is free to delete the semaphores at any time. 396 * 397 * If the syncCpu flag is true this function will return once the gpu has finished with all 398 * submitted work. 399 */ 400 bool submit(bool syncCpu = false); 401 402 /** 403 * Checks whether any asynchronous work is complete and if so calls related callbacks. 404 */ 405 void checkAsyncWorkCompletion(); 406 407 /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */ 408 // Chrome is using this! 409 void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const; 410 411 bool supportsDistanceFieldText() const; 412 413 void storeVkPipelineCacheData(); 414 415 /** 416 * Retrieve the default GrBackendFormat for a given SkColorType and renderability. 417 * It is guaranteed that this backend format will be the one used by the following 418 * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods. 419 * 420 * The caller should check that the returned format is valid.
421 */ 422 using GrRecordingContext::defaultBackendFormat; 423 424 /** 425 * The explicitly allocated backend texture API allows clients to use Skia to create backend 426 * objects outside of Skia proper (i.e., Skia's caching system will not know about them.) 427 * 428 * It is the client's responsibility to delete all these objects (using deleteBackendTexture) 429 * before deleting the context used to create them. If the backend is Vulkan, the textures must 430 * be deleted before abandoning the context as well. Additionally, clients should only delete 431 * these objects on the thread for which that context is active. 432 * 433 * The client is responsible for ensuring synchronization between different uses 434 * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the 435 * surface, rewrapping it in an image and drawing the image will require explicit 436 * synchronization on the client's part). 437 */ 438 439 /** 440 * If possible, create an uninitialized backend texture. The client should ensure that the 441 * returned backend texture is valid. 442 * For the Vulkan backend the layout of the created VkImage will be: 443 * VK_IMAGE_LAYOUT_UNDEFINED. 444 */ 445 GrBackendTexture createBackendTexture(int width, int height, 446 const GrBackendFormat&, 447 GrMipmapped, 448 GrRenderable, 449 GrProtected = GrProtected::kNo); 450 451 /** 452 * If possible, create an uninitialized backend texture. The client should ensure that the 453 * returned backend texture is valid. 454 * If successful, the created backend texture will be compatible with the provided 455 * SkColorType. 456 * For the Vulkan backend the layout of the created VkImage will be: 457 * VK_IMAGE_LAYOUT_UNDEFINED. 458 */ 459 GrBackendTexture createBackendTexture(int width, int height, 460 SkColorType, 461 GrMipmapped, 462 GrRenderable, 463 GrProtected = GrProtected::kNo); 464 465 /** 466 * If possible, create a backend texture initialized to a particular color.
The client should 467 * ensure that the returned backend texture is valid. The client can pass in a finishedProc 468 * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The 469 * client is required to call `submit` to send the upload work to the gpu. The 470 * finishedProc will always get called even if we failed to create the GrBackendTexture. 471 * For the Vulkan backend the layout of the created VkImage will be: 472 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL 473 */ 474 GrBackendTexture createBackendTexture(int width, int height, 475 const GrBackendFormat&, 476 const SkColor4f& color, 477 GrMipmapped, 478 GrRenderable, 479 GrProtected = GrProtected::kNo, 480 GrGpuFinishedProc finishedProc = nullptr, 481 GrGpuFinishedContext finishedContext = nullptr); 482 483 /** 484 * If possible, create a backend texture initialized to a particular color. The client should 485 * ensure that the returned backend texture is valid. The client can pass in a finishedProc 486 * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The 487 * client is required to call `submit` to send the upload work to the gpu. The 488 * finishedProc will always get called even if we failed to create the GrBackendTexture. 489 * If successful, the created backend texture will be compatible with the provided 490 * SkColorType. 491 * For the Vulkan backend the layout of the created VkImage will be: 492 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL 493 */ 494 GrBackendTexture createBackendTexture(int width, int height, 495 SkColorType, 496 const SkColor4f& color, 497 GrMipmapped, 498 GrRenderable, 499 GrProtected = GrProtected::kNo, 500 GrGpuFinishedProc finishedProc = nullptr, 501 GrGpuFinishedContext finishedContext = nullptr); 502 503 /** 504 * If possible, create a backend texture initialized with the provided pixmap data. The client 505 * should ensure that the returned backend texture is valid. 
The client can pass in a 506 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be 507 * deleted. The client is required to call `submit` to send the upload work to the gpu. 508 * The finishedProc will always get called even if we failed to create the GrBackendTexture. 509 * If successful, the created backend texture will be compatible with the provided 510 * pixmap(s). Compatible, in this case, means that the backend format will be the result 511 * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be deleted 512 * when this call returns. 513 * If numLevels is 1 a non-mipMapped texture will result. If a mipMapped texture is desired 514 * the data for all the mipmap levels must be provided. In the mipmapped case all the 515 * colortypes of the provided pixmaps must be the same. Additionally, all the miplevels 516 * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). The 517 * GrSurfaceOrigin controls whether the pixmap data is vertically flipped in the texture. 518 * Note: the pixmap's alphatypes and colorspaces are ignored. 519 * For the Vulkan backend the layout of the created VkImage will be: 520 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL 521 */ 522 GrBackendTexture createBackendTexture(const SkPixmap srcData[], 523 int numLevels, 524 GrSurfaceOrigin, 525 GrRenderable, 526 GrProtected, 527 GrGpuFinishedProc finishedProc = nullptr, 528 GrGpuFinishedContext finishedContext = nullptr); 529 530 /** 531 * Convenience version of createBackendTexture() that takes just a base level pixmap.
532 */ 533 GrBackendTexture createBackendTexture(const SkPixmap& srcData, 534 GrSurfaceOrigin textureOrigin, 535 GrRenderable renderable, 536 GrProtected isProtected, 537 GrGpuFinishedProc finishedProc = nullptr, 538 GrGpuFinishedContext finishedContext = nullptr) { 539 return this->createBackendTexture(&srcData, 1, textureOrigin, renderable, isProtected, 540 finishedProc, finishedContext); 541 } 542 543 // Deprecated versions that do not take origin and assume top-left. 544 GrBackendTexture createBackendTexture(const SkPixmap srcData[], 545 int numLevels, 546 GrRenderable renderable, 547 GrProtected isProtected, 548 GrGpuFinishedProc finishedProc = nullptr, 549 GrGpuFinishedContext finishedContext = nullptr) { 550 return this->createBackendTexture(srcData, 551 numLevels, 552 kTopLeft_GrSurfaceOrigin, 553 renderable, 554 isProtected, 555 finishedProc, 556 finishedContext); 557 } 558 GrBackendTexture createBackendTexture(const SkPixmap& srcData, 559 GrRenderable renderable, 560 GrProtected isProtected, 561 GrGpuFinishedProc finishedProc = nullptr, 562 GrGpuFinishedContext finishedContext = nullptr) { 563 return this->createBackendTexture(&srcData, 564 1, 565 renderable, 566 isProtected, 567 finishedProc, 568 finishedContext); 569 } 570 571 /** 572 * If possible, updates a backend texture to be filled to a particular color. The client should 573 * check the return value to see if the update was successful. The client can pass in a 574 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be 575 * deleted. The client is required to call `submit` to send the upload work to the gpu. 576 * The finishedProc will always get called even if we failed to update the GrBackendTexture. 
577 * For the Vulkan backend after a successful update the layout of the created VkImage will be: 578 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL 579 */ 580 bool updateBackendTexture(const GrBackendTexture&, 581 const SkColor4f& color, 582 GrGpuFinishedProc finishedProc, 583 GrGpuFinishedContext finishedContext); 584 585 /** 586 * If possible, updates a backend texture to be filled to a particular color. The data in 587 * GrBackendTexture and passed in color is interpreted with respect to the passed in 588 * SkColorType. The client should check the return value to see if the update was successful. 589 * The client can pass in a finishedProc to be notified when the data has been uploaded by the 590 * gpu and the texture can be deleted. The client is required to call `submit` to send 591 * the upload work to the gpu. The finishedProc will always get called even if we failed to 592 * update the GrBackendTexture. 593 * For the Vulkan backend after a successful update the layout of the created VkImage will be: 594 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL 595 */ 596 bool updateBackendTexture(const GrBackendTexture&, 597 SkColorType skColorType, 598 const SkColor4f& color, 599 GrGpuFinishedProc finishedProc, 600 GrGpuFinishedContext finishedContext); 601 602 /** 603 * If possible, updates a backend texture filled with the provided pixmap data. The client 604 * should check the return value to see if the update was successful. The client can pass in a 605 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be 606 * deleted. The client is required to call `submit` to send the upload work to the gpu. 607 * The finishedProc will always get called even if we failed to create the GrBackendTexture. 608 * The backend texture must be compatible with the provided pixmap(s). Compatible, in this case, 609 * means that the backend format is compatible with the base pixmap's colortype. The src data 610 * can be deleted when this call returns. 
611 * If the backend texture is mip mapped, the data for all the mipmap levels must be provided. 612 * In the mipmapped case all the colortypes of the provided pixmaps must be the same. 613 * Additionally, all the miplevels must be sized correctly (please see 614 * SkMipmap::ComputeLevelSize and ComputeLevelCount). The GrSurfaceOrigin controls whether the 615 * pixmap data is vertically flipped in the texture. 616 * Note: the pixmap's alphatypes and colorspaces are ignored. 617 * For the Vulkan backend after a successful update the layout of the created VkImage will be: 618 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL 619 */ 620 bool updateBackendTexture(const GrBackendTexture&, 621 const SkPixmap srcData[], 622 int numLevels, 623 GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin, 624 GrGpuFinishedProc finishedProc = nullptr, 625 GrGpuFinishedContext finishedContext = nullptr); 626 627 /** 628 * Convenience version of updateBackendTexture that takes just a base level pixmap. 629 */ 630 bool updateBackendTexture(const GrBackendTexture& texture, 631 const SkPixmap& srcData, 632 GrSurfaceOrigin textureOrigin = kTopLeft_GrSurfaceOrigin, 633 GrGpuFinishedProc finishedProc = nullptr, 634 GrGpuFinishedContext finishedContext = nullptr) { 635 return this->updateBackendTexture(texture, 636 &srcData, 637 1, 638 textureOrigin, 639 finishedProc, 640 finishedContext); 641 } 642 643 // Deprecated version that does not take origin and assumes top-left. 
644 bool updateBackendTexture(const GrBackendTexture& texture, 645 const SkPixmap srcData[], 646 int numLevels, 647 GrGpuFinishedProc finishedProc, 648 GrGpuFinishedContext finishedContext) { 649 return this->updateBackendTexture(texture, 650 srcData, 651 numLevels, 652 kTopLeft_GrSurfaceOrigin, 653 finishedProc, 654 finishedContext); 655 } 656 657 /** 658 * Retrieve the GrBackendFormat for a given SkImage::CompressionType. This is 659 * guaranteed to match the backend format used by the following 660 * createCompressedBackendTexture methods that take a CompressionType. 661 * 662 * The caller should check that the returned format is valid. 663 */ 664 using GrRecordingContext::compressedBackendFormat; 665 666 /** 667 * If possible, create a compressed backend texture initialized to a particular color. The 668 * client should ensure that the returned backend texture is valid. The client can pass in a 669 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be 670 * deleted. The client is required to call `submit` to send the upload work to the gpu. 671 * The finishedProc will always get called even if we failed to create the GrBackendTexture.
672 * For the Vulkan backend the layout of the created VkImage will be: 673 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL 674 */ 675 GrBackendTexture createCompressedBackendTexture(int width, int height, 676 const GrBackendFormat&, 677 const SkColor4f& color, 678 GrMipmapped, 679 GrProtected = GrProtected::kNo, 680 GrGpuFinishedProc finishedProc = nullptr, 681 GrGpuFinishedContext finishedContext = nullptr); 682 683 GrBackendTexture createCompressedBackendTexture(int width, int height, 684 SkImage::CompressionType, 685 const SkColor4f& color, 686 GrMipmapped, 687 GrProtected = GrProtected::kNo, 688 GrGpuFinishedProc finishedProc = nullptr, 689 GrGpuFinishedContext finishedContext = nullptr); 690 691 /** 692 * If possible, create a backend texture initialized with the provided raw data. The client 693 * should ensure that the returned backend texture is valid. The client can pass in a 694 * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be 695 * deleted. The client is required to call `submit` to send the upload work to the gpu. 696 * The finishedProc will always get called even if we failed to create the GrBackendTexture 697 * If numLevels is 1 a non-mipMapped texture will result. If a mipMapped texture is desired 698 * the data for all the mipmap levels must be provided. Additionally, all the miplevels 699 * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). 
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    const GrBackendFormat&,
                                                    const void* data, size_t dataSize,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    SkImage::CompressionType,
                                                    const void* data, size_t dataSize,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, updates a backend texture filled with the provided color. If the texture is
     * mipmapped, all levels of the mip chain will be updated to have the supplied color. The client
     * should check the return value to see if the update was successful. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        const SkColor4f& color,
                                        GrGpuFinishedProc finishedProc,
                                        GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture filled with the provided raw data. The client
     * should check the return value to see if the update was successful. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
     * deleted.
     * The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * If a mipMapped texture is passed in, the data for all the mipmap levels must be provided.
     * Additionally, all the miplevels must be sized correctly (please see
     * SkMipmap::ComputeLevelSize and ComputeLevelCount).
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        const void* data,
                                        size_t dataSize,
                                        GrGpuFinishedProc finishedProc,
                                        GrGpuFinishedContext finishedContext);

    /**
     * Updates the state of the GrBackendTexture/RenderTarget to have the passed in
     * GrBackendSurfaceMutableState. All objects that wrap the backend surface (i.e. SkSurfaces and
     * SkImages) will also be aware of this state change. This call does not submit the state change
     * to the gpu, but requires the client to call `submit` to send it to the GPU. The work
     * for this call is ordered linearly with all other calls that require GrContext::submit to be
     * called (e.g. updateBackendTexture and flush). If finishedProc is not null then it will be
     * called with finishedContext after the state transition is known to have occurred on the GPU.
     *
     * See GrBackendSurfaceMutableState to see what state can be set via this call.
     *
     * If the backend API is Vulkan, the caller can set the GrBackendSurfaceMutableState's
     * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to
     * tell Skia to not change those respective states.
     *
     * If previousState is not null and this returns true, then Skia will have filled in
     * previousState to have the values of the state before this call.
     */
    bool setBackendTextureState(const GrBackendTexture&,
                                const GrBackendSurfaceMutableState&,
                                GrBackendSurfaceMutableState* previousState = nullptr,
                                GrGpuFinishedProc finishedProc = nullptr,
                                GrGpuFinishedContext finishedContext = nullptr);
    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const GrBackendSurfaceMutableState&,
                                     GrBackendSurfaceMutableState* previousState = nullptr,
                                     GrGpuFinishedProc finishedProc = nullptr,
                                     GrGpuFinishedContext finishedContext = nullptr);

    void deleteBackendTexture(GrBackendTexture);

    // This interface allows clients to pre-compile shaders and populate the runtime program cache.
    // The key and data blobs should be the ones passed to the PersistentCache, in SkSL format.
    //
    // Steps to use this API:
    //
    // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to
    //    something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL. This
    //    will ensure that the blobs are SkSL, and are suitable for pre-compilation.
    //
    // 2) Run your application, and save all of the key/data pairs that are fed to the cache.
    //
    // 3) Switch over to shipping your application. Include the key/data pairs from above.
    //
    // 4) At startup (or any convenient time), call precompileShader for each key/data pair.
    //    This will compile the SkSL to create a GL program, and populate the runtime cache.
    //
    // This is only guaranteed to work if the context/device used in step #2 are created in the
    // same way as the one used in step #4, and the same GrContextOptions are specified.
    // Using cached shader blobs on a different device or driver are undefined.
    bool precompileShader(const SkData& key, const SkData& data);

#ifdef SK_ENABLE_DUMP_GPU
    /** Returns a string with detailed information about the context & GPU, in JSON format.
     */
    SkString dump() const;
#endif

    /**
     * A unique, non-recycled identifier for a GrDirectContext. A default-constructed ID is
     * invalid; valid IDs are handed out by Next().
     */
    class DirectContextID {
    public:
        /** Returns the next unique DirectContextID. */
        static GrDirectContext::DirectContextID Next();

        // Default-constructed IDs are invalid (SK_InvalidUniqueID).
        DirectContextID() : fID(SK_InvalidUniqueID) {}

        bool operator==(const DirectContextID& that) const { return fID == that.fID; }
        bool operator!=(const DirectContextID& that) const { return !(*this == that); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isValid() const { return fID != SK_InvalidUniqueID; }

    private:
        constexpr DirectContextID(uint32_t id) : fID(id) {}
        uint32_t fID;
    };

    /** Returns this context's unique, immutable identifier. */
    DirectContextID directContextID() const { return fDirectContextID; }

    // Provides access to functions that aren't part of the public API.
    GrDirectContextPriv priv();
    const GrDirectContextPriv priv() const;  // NOLINT(readability-const-return-type)

protected:
    GrDirectContext(GrBackendApi backend, const GrContextOptions& options);

    bool init() override;

    GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); }
    skgpu::v1::SmallPathAtlasMgr* onGetSmallPathAtlasMgr();

    GrDirectContext* asDirectContext() override { return this; }

private:
    // This call will make sure our work on the GPU is finished and will execute any outstanding
    // asynchronous work (e.g. calling finished procs, freeing resources, etc.) related to the
    // outstanding work on the gpu. The main use currently for this function is when tearing down or
    // abandoning the context.
    //
    // When we finish up work on the GPU it could trigger callbacks to the client. In the case we
    // are abandoning the context we don't want the client to be able to use the GrDirectContext to
    // issue more commands during the callback. Thus before calling this function we set the
    // GrDirectContext's state to be abandoned.
    // However, we need to be able to get by the abandoned
    // check in the call to know that it is safe to execute this. The shouldExecuteWhileAbandoned
    // bool is used for this signal.
    void syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned);

    const DirectContextID fDirectContextID;
    // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
    // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
    // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
    // invoked after objects they depend upon have already been destroyed.
    std::unique_ptr<SkTaskGroup> fTaskGroup;
    std::unique_ptr<GrStrikeCache> fStrikeCache;
    sk_sp<GrGpu> fGpu;
    std::unique_ptr<GrResourceCache> fResourceCache;
    std::unique_ptr<GrResourceProvider> fResourceProvider;

    bool fDidTestPMConversions;
    // true if the PM/UPM conversion succeeded; false otherwise
    bool fPMUPMConversionsRoundTrip;

    GrContextOptions::PersistentCache* fPersistentCache;

    std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager;
    std::unique_ptr<GrAtlasManager> fAtlasManager;

    std::unique_ptr<skgpu::v1::SmallPathAtlasMgr> fSmallPathAtlasMgr;

    friend class GrDirectContextPriv;

    using INHERITED = GrRecordingContext;
};


#endif