/*
 * Copyright 2020 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDirectContext_DEFINED
#define GrDirectContext_DEFINED

#include "include/gpu/GrRecordingContext.h"

#include "include/gpu/GrBackendSurface.h"

// We shouldn't need this, but currently Android relies on it being included transitively.
#include "include/core/SkUnPreMultiply.h"

class GrAtlasManager;
class GrBackendSemaphore;
class GrClientMappedBufferManager;
class GrDirectContextPriv;
class GrContextThreadSafeProxy;
struct GrD3DBackendContext;
class GrFragmentProcessor;
class GrGpu;
struct GrGLInterface;
struct GrMtlBackendContext;
struct GrMockOptions;
class GrPath;
class GrResourceCache;
class GrSmallPathAtlasMgr;
class GrSurfaceDrawContext;
class GrResourceProvider;
class GrStrikeCache;
class GrSurfaceProxy;
class GrSwizzle;
class GrTextureProxy;
struct GrVkBackendContext;

class SkImage;
class SkString;
class SkSurfaceCharacterization;
class SkSurfaceProps;
class SkTaskGroup;
class SkTraceMemoryDump;

class SK_API GrDirectContext : public GrRecordingContext {
public:
#ifdef SK_GL
    /**
     * Creates a GrDirectContext for a backend context. If no GrGLInterface is provided, the
     * result of GrGLMakeNativeInterface() is used if it succeeds.
     */
    static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>);
    static sk_sp<GrDirectContext> MakeGL(const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeGL();
#endif
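    // Example (a minimal sketch): creating a GL-backed context, assuming SK_GL is
    // defined and a native GL context is current on the calling thread:
    //
    //     sk_sp<GrDirectContext> ctx = GrDirectContext::MakeGL();
    //     if (!ctx) {
    //         // Interface creation or context initialization failed.
    //     }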

#ifdef SK_VULKAN
    /**
     * The Vulkan context (VkQueue, VkDevice, VkInstance) must be kept alive until the returned
     * GrDirectContext is destroyed. This also means that any objects created with this
     * GrDirectContext (e.g. SkSurfaces, SkImages, etc.) must also be released, as they may hold
     * refs on the GrDirectContext. Once all these objects and the GrDirectContext are released,
     * it is safe to delete the Vulkan objects.
     */
    static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&);
#endif

#ifdef SK_METAL
    /**
     * Makes a GrDirectContext which uses Metal as the backend. The GrMtlBackendContext contains a
     * MTLDevice and MTLCommandQueue which should be used by the backend. These objects must
     * have their own ref which will be released when the GrMtlBackendContext is destroyed.
     * Ganesh will take its own ref on the objects which will be released when the GrDirectContext
     * is destroyed.
     */
    static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&);
    /**
     * Deprecated.
     *
     * Makes a GrDirectContext which uses Metal as the backend. The device parameter is an
     * MTLDevice and queue is an MTLCommandQueue which should be used by the backend. These
     * objects must have a ref on them that can be transferred to Ganesh, which will release
     * the ref when the GrDirectContext is destroyed.
     */
    static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue);
#endif

#ifdef SK_DIRECT3D
    /**
     * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context
     * must be kept alive until the returned GrDirectContext is first destroyed or abandoned.
     */
    static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&,
                                               const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&);
#endif

#ifdef SK_DAWN
    static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&,
                                           const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&);
#endif

    static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*);

    ~GrDirectContext() override;

    /**
     * The context normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the context that the state was modified and it should resend it. This
     * shouldn't be called frequently, for good performance.
     * The flag bits, state, are dependent on which backend is used by the
     * context, either GL or D3D (possible in the future).
     */
    void resetContext(uint32_t state = kAll_GrBackendState);

    /**
     * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which
     * the context has modified the bound texture will have texture id 0 bound. This does not
     * flush the context. Calling resetContext() does not change the set that will be bound
     * to texture id 0 on the next call to resetGLTextureBindings(). After this is called
     * all unit/target combinations are considered to have unmodified bindings until the context
     * subsequently modifies them (meaning if this is called twice in a row with no intervening
     * context usage then the second call is a no-op).
     */
    void resetGLTextureBindings();

    /**
     * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
     * usable. Call this if you have lost the associated GPU context, and thus internal texture,
     * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of
     * the context and any of its created resource objects will not make backend 3D API calls.
     * Content rendered but not previously flushed may be lost. After this function is called all
     * subsequent calls on the context will fail or be no-ops.
     *
     * The typical use case for this function is that the underlying 3D context was lost and
     * further API calls may crash.
     *
     * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
     * create the context must be kept alive even after abandoning the context. Those objects must
     * live for the lifetime of the context object itself. The reason for this is so that
     * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be
     * cleaned up even in a device-lost state.
     */
    void abandonContext() override;
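    // Example (a minimal sketch): responding to a lost context. `glContextWasLost`
    // is a hypothetical application-side signal:
    //
    //     if (glContextWasLost) {
    //         ctx->abandonContext();  // destructors will no longer touch the 3D API
    //         ctx.reset();
    //     }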

    /**
     * Returns true if the context was abandoned, or if the backend-specific context has
     * gotten into an unrecoverable, lost state (e.g. in the Vulkan backend, if we've gotten a
     * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this
     * context.
     */
    bool abandoned() override;

    // TODO: Remove this from public after migrating Chrome.
    sk_sp<GrContextThreadSafeProxy> threadSafeProxy();

    /**
     * Checks if the underlying 3D API reported an out-of-memory error. If this returns true it is
     * reset and will return false until another out-of-memory error is reported by the 3D API. If
     * the context is abandoned then this will report false.
     *
     * Currently this is implemented for:
     *
     * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors and
     * therefore hide the error from Skia. Also, it is not advised to use this in combination with
     * enabling GrContextOptions::fSkipGLErrorChecks. That option may prevent the context from ever
     * checking the GL context for OOM.
     *
     * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has
     * occurred.
     */
    bool oomed();

    /**
     * This is similar to abandonContext(), however the underlying 3D context is not yet lost and
     * the context will clean up all allocated resources before returning. After returning it will
     * assume that the underlying context may no longer be valid.
     *
     * The typical use case for this function is that the client is going to destroy the 3D
     * context but can't guarantee that this context will be destroyed first (perhaps because it
     * may be ref'ed elsewhere by either the client or Skia objects).
     *
     * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
     * create the context must be alive before calling releaseResourcesAndAbandonContext.
     */
    void releaseResourcesAndAbandonContext();

    ///////////////////////////////////////////////////////////////////////////
    // Resource Cache

    /** DEPRECATED
     * Return the current GPU resource cache limits.
     *
     * @param maxResources      If non-null, will be set to -1.
     * @param maxResourceBytes  If non-null, returns maximum number of bytes of
     *                          video memory that can be held in the cache.
     */
    void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;

    /**
     * Return the current GPU resource cache limit in bytes.
     */
    size_t getResourceCacheLimit() const;

    /**
     * Gets the current GPU resource cache usage.
     *
     * @param resourceCount  If non-null, returns the number of resources that are held in the
     *                       cache.
     * @param resourceBytes  If non-null, returns the total number of bytes of video memory held
     *                       in the cache.
     */
    void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    /**
     * Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
     */
    size_t getResourceCachePurgeableBytes() const;
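    // Example (a minimal sketch): logging current cache usage, assuming `ctx` is a
    // valid GrDirectContext:
    //
    //     int count;
    //     size_t bytes;
    //     ctx->getResourceCacheUsage(&count, &bytes);
    //     SkDebugf("cache: %d resources, %zu bytes (%zu purgeable)\n",
    //              count, bytes, ctx->getResourceCachePurgeableBytes());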

    /** DEPRECATED
     * Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes
     * limit, it will be purged (LRU) to keep the cache within the limit.
     *
     * @param maxResources      Unused.
     * @param maxResourceBytes  The maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);

    /**
     * Specify the GPU resource cache limit. If the cache currently exceeds this limit,
     * it will be purged (LRU) to keep the cache within the limit.
     *
     * @param maxResourceBytes  The maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void setResourceCacheLimit(size_t maxResourceBytes);

    /**
     * Frees GPU resources created by the context. Can be called to reduce GPU memory
     * pressure.
     */
    void freeGpuResources();

    /**
     * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
     * otherwise marked for deletion, regardless of whether the context is under budget.
     *
     * If 'scratchResourcesOnly' is true, all unlocked scratch resources older than 'msNotUsed'
     * will be purged but the unlocked resources with persistent data will remain. If
     * 'scratchResourcesOnly' is false then all unlocked resources older than 'msNotUsed' will be
     * purged.
     *
     * @param msNotUsed            Only unlocked resources not used in these last milliseconds
     *                             will be cleaned up.
     * @param scratchResourcesOnly If true only unlocked scratch resources will be purged.
     */
    void performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                bool scratchResourcesOnly=false);

    // Temporary compatibility API for Android.
    void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
        this->performDeferredCleanup(msNotUsed);
    }

    /**
     * Purge unlocked resources from the cache until the provided byte count has been reached
     * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
     * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
     * resource types.
     *
     * @param bytesToPurge           the desired number of bytes to be purged.
     * @param preferScratchResources If true scratch resources will be purged prior to other
     *                               resource types.
     */
    void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);

    /**
     * This entry point is intended for instances where an app has been backgrounded or
     * suspended.
     * If 'scratchResourcesOnly' is true, all unlocked scratch resources will be purged but the
     * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
     * then all unlocked resources will be purged.
     * In either case, after the unlocked resources are purged a separate pass will be made to
     * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
     * some resources with persistent data may be purged to be under budget).
     *
     * @param scratchResourcesOnly If true only unlocked scratch resources will be purged prior
     *                             to enforcing the budget requirements.
     */
    void purgeUnlockedResources(bool scratchResourcesOnly);
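    // Example (a minimal sketch): a periodic cleanup pass that purges resources left
    // untouched for the last five seconds, and a backgrounding pass that drops
    // scratch resources while keeping persistent data:
    //
    //     ctx->performDeferredCleanup(std::chrono::milliseconds(5000));
    //     ...
    //     ctx->purgeUnlockedResources(/*scratchResourcesOnly=*/true);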

    /**
     * Gets the maximum supported texture size.
     */
    using GrRecordingContext::maxTextureSize;

    /**
     * Gets the maximum supported render target size.
     */
    using GrRecordingContext::maxRenderTargetSize;

    /**
     * Can a SkImage be created with the given color type.
     */
    using GrRecordingContext::colorTypeSupportedAsImage;

    /**
     * Can a SkSurface be created with the given color type. To check whether MSAA is supported
     * use maxSurfaceSampleCountForColorType().
     */
    using GrRecordingContext::colorTypeSupportedAsSurface;

    /**
     * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
     * rendering is supported for the color type. 0 is returned if rendering to this color type
     * is not supported at all.
     */
    using GrRecordingContext::maxSurfaceSampleCountForColorType;

    ///////////////////////////////////////////////////////////////////////////
    // Misc.

    /**
     * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
     * executing any more commands on the GPU. If this call returns false, then the GPU back-end
     * will not wait on any passed in semaphores, and the client will still own the semaphores,
     * regardless of the value of deleteSemaphoresAfterWait.
     *
     * If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this
     * case it is the client's responsibility to not destroy or attempt to reuse the semaphores
     * until it knows that Skia has finished waiting on them. This can be done by using
     * finishedProcs on flush calls.
     */
    bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
              bool deleteSemaphoresAfterWait = true);

    /**
     * Call to ensure all drawing to the context has been flushed and submitted to the underlying
     * 3D API. This is equivalent to calling GrContext::flush with a default GrFlushInfo followed
     * by GrContext::submit(syncCpu).
     */
    void flushAndSubmit(bool syncCpu = false) {
        this->flush(GrFlushInfo());
        this->submit(syncCpu);
    }

    /**
     * Call to ensure all drawing to the context has been flushed to underlying 3D API specific
     * objects. A call to `submit` is always required to ensure work is actually sent to
     * the gpu. Some specific API details:
     *     GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
     *         sync objects from the flush will not be valid until a submission occurs.
     *
     *     Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
     *         buffer or encoder objects. However, these objects are not sent to the gpu until a
     *         submission occurs.
     *
     * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
     * submitted to the gpu during the next submit call (it is possible Skia failed to create a
     * subset of the semaphores). The client should not wait on these semaphores until after
     * submit has been called, and must keep them alive until then. If this call returns
     * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled
     * on the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in
     * with the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not,
     * the client is still responsible for deleting any initialized semaphores.
     * Regardless of semaphore submission the context will still be flushed. It should be
     * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did
     * not happen. It simply means there were no semaphores submitted to the GPU. A caller should
     * only take this as a failure if they passed in semaphores to be submitted.
     */
    GrSemaphoresSubmitted flush(const GrFlushInfo& info);

    void flush() { this->flush({}); }
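    // Example (a minimal sketch): flushing recorded work and submitting it, blocking
    // the CPU until the GPU has finished:
    //
    //     ctx->flushAndSubmit(/*syncCpu=*/true);
    //
    // Or, equivalently, the explicit two-step form:
    //
    //     ctx->flush(GrFlushInfo());
    //     ctx->submit(/*syncCpu=*/true);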

    /**
     * Submit outstanding work to the gpu from all previously un-submitted flushes. The return
     * value of the submit will indicate whether or not the submission to the GPU was successful.
     *
     * If the call returns true, all previously passed in semaphores in flush calls will have been
     * submitted to the GPU and they can safely be waited on. The caller should wait on those
     * semaphores or perform some other global synchronization before deleting the semaphores.
     *
     * If it returns false, then those same semaphores will not have been submitted and we will
     * not try to submit them again. The caller is free to delete the semaphores at any time.
     *
     * If the syncCpu flag is true this function will return once the gpu has finished with all
     * submitted work.
     */
    bool submit(bool syncCpu = false);

    /**
     * Checks whether any asynchronous work is complete and if so calls related callbacks.
     */
    void checkAsyncWorkCompletion();

    /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
    // Chrome is using this!
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    bool supportsDistanceFieldText() const;

    void storeVkPipelineCacheData();

    /**
     * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
     * It is guaranteed that this backend format will be the one used by the following
     * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
     *
     * The caller should check that the returned format is valid.
     */
    using GrRecordingContext::defaultBackendFormat;

    /**
     * The explicitly allocated backend texture API allows clients to use Skia to create backend
     * objects outside of Skia proper (i.e., Skia's caching system will not know about them.)
     *
     * It is the client's responsibility to delete all these objects (using deleteBackendTexture)
     * before deleting the context used to create them. If the backend is Vulkan, the textures
     * must be deleted before abandoning the context as well. Additionally, clients should only
     * delete these objects on the thread for which that context is active.
     *
     * The client is responsible for ensuring synchronization between different uses
     * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the
     * surface, rewrapping it in an image and drawing the image will require explicit
     * synchronization on the client's part).
     */

    /**
     * If possible, create an uninitialized backend texture. The client should ensure that the
     * returned backend texture is valid.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_UNDEFINED.
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          const GrBackendFormat&,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo);

    /**
     * If possible, create an uninitialized backend texture. The client should ensure that the
     * returned backend texture is valid.
     * If successful, the created backend texture will be compatible with the provided
     * SkColorType.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_UNDEFINED.
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          SkColorType,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo);
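    // Example (a minimal sketch): creating an uninitialized, renderable texture and
    // deleting it once the client is done with it:
    //
    //     GrBackendTexture tex = ctx->createBackendTexture(
    //             256, 256, kRGBA_8888_SkColorType,
    //             GrMipmapped::kNo, GrRenderable::kYes);
    //     if (tex.isValid()) {
    //         // ... wrap it in an SkSurface or SkImage, draw, flush, submit ...
    //         ctx->deleteBackendTexture(tex);
    //     }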

    /**
     * If possible, create a backend texture initialized to a particular color. The client should
     * ensure that the returned backend texture is valid. The client can pass in a finishedProc
     * to be notified when the data has been uploaded by the gpu and the texture can be deleted.
     * The client is required to call `submit` to send the upload work to the gpu. The
     * finishedProc will always get called even if we failed to create the GrBackendTexture.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          const GrBackendFormat&,
                                          const SkColor4f& color,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a backend texture initialized to a particular color. The client should
     * ensure that the returned backend texture is valid. The client can pass in a finishedProc
     * to be notified when the data has been uploaded by the gpu and the texture can be deleted.
     * The client is required to call `submit` to send the upload work to the gpu. The
     * finishedProc will always get called even if we failed to create the GrBackendTexture.
     * If successful, the created backend texture will be compatible with the provided
     * SkColorType.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          SkColorType,
                                          const SkColor4f& color,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a backend texture initialized with the provided pixmap data. The client
     * should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can
     * be deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * If successful, the created backend texture will be compatible with the provided
     * pixmap(s). Compatible, in this case, means that the backend format will be the result
     * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be deleted
     * when this call returns.
     * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
     * the data for all the mipmap levels must be provided. In the mipmapped case all the
     * colortypes of the provided pixmaps must be the same. Additionally, all the miplevels
     * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). The
     * GrSurfaceOrigin controls whether the pixmap data is vertically flipped in the texture.
     * Note: the pixmap's alphatypes and colorspaces are ignored.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(const SkPixmap srcData[],
                                          int numLevels,
                                          GrSurfaceOrigin,
                                          GrRenderable,
                                          GrProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);
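    // Example (a minimal sketch): creating a texture from a single base-level pixmap
    // (`pixmap` is assumed to hold valid pixel data); the upload is not sent to the
    // GPU until submit() is called:
    //
    //     GrBackendTexture tex = ctx->createBackendTexture(
    //             &pixmap, 1, kTopLeft_GrSurfaceOrigin,
    //             GrRenderable::kNo, GrProtected::kNo);
    //     ctx->submit();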

    /**
     * Convenience version of createBackendTexture() that takes just a base level pixmap.
     */
    GrBackendTexture createBackendTexture(const SkPixmap& srcData,
                                          GrSurfaceOrigin textureOrigin,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(&srcData, 1, textureOrigin, renderable, isProtected,
                                          finishedProc, finishedContext);
    }

    // Deprecated versions that do not take origin and assume top-left.
    GrBackendTexture createBackendTexture(const SkPixmap srcData[],
                                          int numLevels,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(srcData,
                                          numLevels,
                                          kTopLeft_GrSurfaceOrigin,
                                          renderable,
                                          isProtected,
                                          finishedProc,
                                          finishedContext);
    }
    GrBackendTexture createBackendTexture(const SkPixmap& srcData,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(&srcData,
                                          1,
                                          renderable,
                                          isProtected,
                                          finishedProc,
                                          finishedContext);
    }

    /**
     * If possible, updates a backend texture to be filled to a particular color. The client
     * should check the return value to see if the update was successful. The client can pass in
     * a finishedProc to be notified when the data has been uploaded by the gpu and the texture
     * can be deleted. The client is required to call `submit` to send the upload work to the
     * gpu. The finishedProc will always get called even if we failed to update the
     * GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              const SkColor4f& color,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture to be filled to a particular color. The data in
     * GrBackendTexture and passed in color is interpreted with respect to the passed in
     * SkColorType. The client should check the return value to see if the update was successful.
     * The client can pass in a finishedProc to be notified when the data has been uploaded by the
     * gpu and the texture can be deleted. The client is required to call `submit` to send
     * the upload work to the gpu. The finishedProc will always get called even if we failed to
     * update the GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              SkColorType skColorType,
                              const SkColor4f& color,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext);
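    // Example (a minimal sketch): clearing an existing backend texture to opaque red
    // and submitting the upload:
    //
    //     ctx->updateBackendTexture(tex, SkColors::kRed,
    //                               /*finishedProc=*/nullptr,
    //                               /*finishedContext=*/nullptr);
    //     ctx->submit();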

    /**
     * If possible, updates a backend texture filled with the provided pixmap data. The client
     * should check the return value to see if the update was successful. The client can pass in
     * a finishedProc to be notified when the data has been uploaded by the gpu and the texture
     * can be deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to update the GrBackendTexture.
     * The backend texture must be compatible with the provided pixmap(s). Compatible, in this
     * case, means that the backend format is compatible with the base pixmap's colortype. The
     * src data can be deleted when this call returns.
     * If the backend texture is mipmapped, the data for all the mipmap levels must be provided.
     * In the mipmapped case all the colortypes of the provided pixmaps must be the same.
     * Additionally, all the miplevels must be sized correctly (please see
     * SkMipmap::ComputeLevelSize and ComputeLevelCount). The GrSurfaceOrigin controls whether the
     * pixmap data is vertically flipped in the texture.
     * Note: the pixmap's alphatypes and colorspaces are ignored.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              const SkPixmap srcData[],
                              int numLevels,
                              GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin,
                              GrGpuFinishedProc finishedProc = nullptr,
                              GrGpuFinishedContext finishedContext = nullptr);

    /**
     * Convenience version of updateBackendTexture that takes just a base level pixmap.
     */
    bool updateBackendTexture(const GrBackendTexture& texture,
                              const SkPixmap& srcData,
                              GrSurfaceOrigin textureOrigin = kTopLeft_GrSurfaceOrigin,
                              GrGpuFinishedProc finishedProc = nullptr,
                              GrGpuFinishedContext finishedContext = nullptr) {
        return this->updateBackendTexture(texture,
                                          &srcData,
                                          1,
                                          textureOrigin,
                                          finishedProc,
                                          finishedContext);
    }

    // Deprecated version that does not take origin and assumes top-left.
    bool updateBackendTexture(const GrBackendTexture& texture,
                              const SkPixmap srcData[],
                              int numLevels,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext) {
        return this->updateBackendTexture(texture,
                                          srcData,
                                          numLevels,
                                          kTopLeft_GrSurfaceOrigin,
                                          finishedProc,
                                          finishedContext);
    }

    /**
     * Retrieve the GrBackendFormat for a given SkImage::CompressionType. This is
     * guaranteed to match the backend format used by the following
     * createCompressedBackendTexture methods that take a CompressionType.
     * The caller should check that the returned format is valid.
     */
    using GrRecordingContext::compressedBackendFormat;
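    // Example (a minimal sketch): checking that a compression type is supported by
    // this backend before creating a compressed texture:
    //
    //     GrBackendFormat fmt = ctx->compressedBackendFormat(
    //             SkImage::CompressionType::kETC2_RGB8_UNORM);
    //     if (!fmt.isValid()) {
    //         // Compression type not supported; fall back to uncompressed.
    //     }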

    /**
     * If possible, create a compressed backend texture initialized to a particular color. The
     * client should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can
     * be deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    const GrBackendFormat&,
                                                    const SkColor4f& color,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    SkImage::CompressionType,
                                                    const SkColor4f& color,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a backend texture initialized with the provided raw data. The client
     * should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture can
     * be deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
     * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
     * the data for all the mipmap levels must be provided. Additionally, all the miplevels
     * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount).
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    const GrBackendFormat&,
                                                    const void* data, size_t dataSize,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    SkImage::CompressionType,
                                                    const void* data, size_t dataSize,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);
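    // Example (a minimal sketch): wrapping already-compressed data (`data`/`dataSize`
    // are assumed to hold correctly sized BC1 blocks) in a backend texture:
    //
    //     GrBackendTexture tex = ctx->createCompressedBackendTexture(
    //             width, height, SkImage::CompressionType::kBC1_RGB8_UNORM,
    //             data, dataSize, GrMipmapped::kNo);
    //     ctx->submit();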

    /**
     * If possible, updates a backend texture filled with the provided color. If the texture is
     * mipmapped, all levels of the mip chain will be updated to have the supplied color. The
     * client should check the return value to see if the update was successful. The client can
     * pass in a finishedProc to be notified when the data has been uploaded by the gpu and the
     * texture can be deleted. The client is required to call `submit` to send the upload work to
     * the gpu. The finishedProc will always get called even if we failed to update the
     * GrBackendTexture.
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        const SkColor4f& color,
                                        GrGpuFinishedProc finishedProc,
                                        GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture filled with the provided raw data. The client
     * should check the return value to see if the update was successful. The client can pass in
     * a finishedProc to be notified when the data has been uploaded by the gpu and the texture
     * can be deleted. The client is required to call `submit` to send the upload work to the gpu.
     * The finishedProc will always get called even if we failed to update the GrBackendTexture.
     * If a mipmapped texture is passed in, the data for all the mipmap levels must be provided.
     * Additionally, all the miplevels must be sized correctly (please see
     * SkMipmap::ComputeLevelSize and ComputeLevelCount).
     * For the Vulkan backend after a successful update the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        const void* data,
                                        size_t dataSize,
                                        GrGpuFinishedProc finishedProc,
                                        GrGpuFinishedContext finishedContext);

    /**
     * Updates the state of the GrBackendTexture/RenderTarget to have the passed in
     * GrBackendSurfaceMutableState. All objects that wrap the backend surface (i.e. SkSurfaces
     * and SkImages) will also be aware of this state change. This call does not submit the state
     * change to the gpu, but requires the client to call `submit` to send it to the GPU. The work
     * for this call is ordered linearly with all other calls that require GrContext::submit to be
     * called (e.g. updateBackendTexture and flush). If finishedProc is not null then it will be
     * called with finishedContext after the state transition is known to have occurred on the
     * GPU.
     *
     * See GrBackendSurfaceMutableState to see what state can be set via this call.
     *
     * If the backend API is Vulkan, the caller can set the GrBackendSurfaceMutableState's
     * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED
     * to tell Skia to not change those respective states.
     *
     * If previousState is not null and this returns true, then Skia will have filled in
     * previousState to have the values of the state before this call.
     */
    bool setBackendTextureState(const GrBackendTexture&,
                                const GrBackendSurfaceMutableState&,
                                GrBackendSurfaceMutableState* previousState = nullptr,
                                GrGpuFinishedProc finishedProc = nullptr,
                                GrGpuFinishedContext finishedContext = nullptr);
    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const GrBackendSurfaceMutableState&,
                                     GrBackendSurfaceMutableState* previousState = nullptr,
                                     GrGpuFinishedProc finishedProc = nullptr,
                                     GrGpuFinishedContext finishedContext = nullptr);
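    // Example (a minimal sketch, Vulkan): transitioning a wrapped texture to
    // VK_IMAGE_LAYOUT_PRESENT_SRC_KHR before returning it to a swapchain:
    //
    //     GrBackendSurfaceMutableState newState(VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
    //                                           VK_QUEUE_FAMILY_IGNORED);
    //     ctx->setBackendTextureState(tex, newState);
    //     ctx->submit();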

    void deleteBackendTexture(GrBackendTexture);

    // This interface allows clients to pre-compile shaders and populate the runtime program
    // cache. The key and data blobs should be the ones passed to the PersistentCache, in SkSL
    // format.
    //
    // Steps to use this API:
    //
    // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to
    //    something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL.
    //    This will ensure that the blobs are SkSL, and are suitable for pre-compilation.
    //
    // 2) Run your application, and save all of the key/data pairs that are fed to the cache.
    //
    // 3) Switch over to shipping your application. Include the key/data pairs from above.
    //
    // 4) At startup (or any convenient time), call precompileShader for each key/data pair.
    //    This will compile the SkSL to create a GL program, and populate the runtime cache.
    //
    // This is only guaranteed to work if the context/device used in step #2 is created in the
    // same way as the one used in step #4, and the same GrContextOptions are specified.
    // Using cached shader blobs on a different device or driver is undefined.
    bool precompileShader(const SkData& key, const SkData& data);
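    // Example (a minimal sketch): warming the program cache at startup from
    // previously captured key/data pairs (`savedShaders`, a hypothetical
    // std::vector<std::pair<sk_sp<SkData>, sk_sp<SkData>>>):
    //
    //     for (const auto& [key, data] : savedShaders) {
    //         ctx->precompileShader(*key, *data);
    //     }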

#ifdef SK_ENABLE_DUMP_GPU
    /** Returns a string with detailed information about the context & GPU, in JSON format. */
    SkString dump() const;
#endif

    class DirectContextID {
    public:
        static GrDirectContext::DirectContextID Next();

        DirectContextID() : fID(SK_InvalidUniqueID) {}

        bool operator==(const DirectContextID& that) const { return fID == that.fID; }
        bool operator!=(const DirectContextID& that) const { return !(*this == that); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isValid() const { return fID != SK_InvalidUniqueID; }

    private:
        constexpr DirectContextID(uint32_t id) : fID(id) {}

        uint32_t fID;
    };

    DirectContextID directContextID() const { return fDirectContextID; }

    // Provides access to functions that aren't part of the public API.
    GrDirectContextPriv priv();
    const GrDirectContextPriv priv() const;  // NOLINT(readability-const-return-type)

protected:
    GrDirectContext(GrBackendApi backend, const GrContextOptions& options);

    bool init() override;

    GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); }
    GrSmallPathAtlasMgr* onGetSmallPathAtlasMgr();

    GrDirectContext* asDirectContext() override { return this; }

private:
    // This call will make sure our work on the GPU is finished and will execute any outstanding
    // asynchronous work (e.g. calling finished procs, freeing resources, etc.) related to the
    // outstanding work on the gpu. Currently the main use for this function is when tearing down
    // or abandoning the context.
    //
    // When we finish up work on the GPU it could trigger callbacks to the client. In the case we
    // are abandoning the context we don't want the client to be able to use the GrDirectContext
    // to issue more commands during the callback. Thus before calling this function we set the
    // GrDirectContext's state to be abandoned. However, we need to be able to get past the
    // abandoned check in the call to know that it is safe to execute this. The
    // shouldExecuteWhileAbandoned bool is used for this signal.
    void syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned);

    const DirectContextID fDirectContextID;

    // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
    // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
    // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
    // invoked after objects they depend upon have already been destroyed.
    std::unique_ptr<SkTaskGroup> fTaskGroup;
    std::unique_ptr<GrStrikeCache> fStrikeCache;
    sk_sp<GrGpu> fGpu;
    std::unique_ptr<GrResourceCache> fResourceCache;
    std::unique_ptr<GrResourceProvider> fResourceProvider;

    bool fDidTestPMConversions;
    // True if the PM/UPM conversion succeeded; false otherwise.
    bool fPMUPMConversionsRoundTrip;

    GrContextOptions::PersistentCache* fPersistentCache;
    GrContextOptions::ShaderErrorHandler* fShaderErrorHandler;

    std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager;
    std::unique_ptr<GrAtlasManager> fAtlasManager;

    std::unique_ptr<GrSmallPathAtlasMgr> fSmallPathAtlasMgr;

    friend class GrDirectContextPriv;

    using INHERITED = GrRecordingContext;
};

#endif