/*
 * Copyright 2020 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDirectContext_DEFINED
#define GrDirectContext_DEFINED

#include <array>
#include <chrono>
#include <functional>
#include <set>
#include <sstream>
#include <unordered_map>

#include "include/gpu/GrRecordingContext.h"
#include "include/gpu/GrBackendSurface.h"
#include "src/gpu/GrGpuResource.h"

// We shouldn't need this, but currently Android relies on it being included transitively.
#include "include/core/SkUnPreMultiply.h"

#include "include/core/SkBlurTypes.h"

class GrAtlasManager;
class GrBackendSemaphore;
class GrClientMappedBufferManager;
class GrDirectContextPriv;
class GrContextThreadSafeProxy;
struct GrD3DBackendContext;
class GrFragmentProcessor;
class GrGpu;
struct GrGLInterface;
struct GrMtlBackendContext;
struct GrMockOptions;
class GrPath;
class GrResourceCache;
class GrResourceProvider;
class GrStrikeCache;
class GrSurfaceProxy;
class GrSwizzle;
class GrTextureProxy;
struct GrVkBackendContext;

class SkImage;
class SkString;
class SkSurfaceCharacterization;
class SkSurfaceProps;
class SkTaskGroup;
class SkTraceMemoryDump;

// OH ISSUE: callback for memory protection.
using MemoryOverflowCalllback = std::function<void(int32_t, size_t, bool)>;

namespace skgpu { namespace v1 { class SmallPathAtlasMgr; }}

class SK_API GrDirectContext : public GrRecordingContext {
public:
#ifdef SK_GL
    /**
     * Creates a GrDirectContext for a backend context. If no GrGLInterface is provided, the
     * result of GrGLMakeNativeInterface() is used if it succeeds.
     */
    static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>);
    static sk_sp<GrDirectContext> MakeGL(const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeGL();
#endif

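    // Usage sketch (illustrative): creating a GL-backed context with the
    // platform's native interface. Assumes a GL context is already current on
    // this thread.
    //
    //     sk_sp<GrDirectContext> context = GrDirectContext::MakeGL();
    //     if (!context) {
    //         // No usable GL interface; fall back to CPU rendering.
    //     }
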
#ifdef SK_VULKAN
    /**
     * The Vulkan context (VkQueue, VkDevice, VkInstance) must be kept alive until the returned
     * GrDirectContext is destroyed. This also means that any objects created with this
     * GrDirectContext (e.g. SkSurfaces, SkImages, etc.) must also be released, as they may hold
     * refs on the GrDirectContext. Once all of these objects and the GrDirectContext are
     * released, it is safe to delete the Vulkan objects.
     */
    static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&);
#endif

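    // Usage sketch (illustrative; 'instance', 'physDev', 'device', 'queue',
    // 'queueFamilyIndex', and 'getProc' are placeholders for client-created
    // Vulkan objects): the backend objects must outlive the context and
    // everything created from it.
    //
    //     GrVkBackendContext vkContext;
    //     vkContext.fInstance = instance;
    //     vkContext.fPhysicalDevice = physDev;
    //     vkContext.fDevice = device;
    //     vkContext.fQueue = queue;
    //     vkContext.fGraphicsQueueIndex = queueFamilyIndex;
    //     vkContext.fGetProc = getProc;  // resolves Vulkan entry points
    //     sk_sp<GrDirectContext> context = GrDirectContext::MakeVulkan(vkContext);
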
#ifdef SK_METAL
    /**
     * Makes a GrDirectContext which uses Metal as the backend. The GrMtlBackendContext contains a
     * MTLDevice and MTLCommandQueue which should be used by the backend. These objects must
     * have their own ref which will be released when the GrMtlBackendContext is destroyed.
     * Ganesh will take its own ref on the objects, which will be released when the
     * GrDirectContext is destroyed.
     */
    static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&);
    /**
     * Deprecated.
     *
     * Makes a GrDirectContext which uses Metal as the backend. The device parameter is an
     * MTLDevice and queue is an MTLCommandQueue which should be used by the backend. These
     * objects must have a ref on them that can be transferred to Ganesh, which will release
     * the ref when the GrDirectContext is destroyed.
     */
    static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue);
#endif

#ifdef SK_DIRECT3D
    /**
     * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context
     * must be kept alive until the returned GrDirectContext is first destroyed or abandoned.
     */
    static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&);
#endif

#ifdef SK_DAWN
    static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&,
                                           const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&);
#endif

    static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
    static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*);

    ~GrDirectContext() override;

    /**
     * The context normally assumes that no outsider is setting state within the underlying 3D
     * API's context/device/whatever. This call informs the context that the state was modified
     * and it should resend it. This shouldn't be called frequently, for good performance.
     * The flag bits, state, are dependent on which backend is used by the context, either GL or
     * D3D (possible in future).
     */
    void resetContext(uint32_t state = kAll_GrBackendState);

    /**
     * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for
     * which the context has modified the bound texture will have texture id 0 bound. This does
     * not flush the context. Calling resetContext() does not change the set that will be bound
     * to texture id 0 on the next call to resetGLTextureBindings(). After this is called, all
     * unit/target combinations are considered to have unmodified bindings until the context
     * subsequently modifies them (meaning that if this is called twice in a row with no
     * intervening context usage, the second call is a no-op).
     */
    void resetGLTextureBindings();

    /**
     * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
     * usable. Call this if you have lost the associated GPU context, and thus internal texture,
     * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of
     * the context and any of its created resource objects will not make backend 3D API calls.
     * Content rendered but not previously flushed may be lost. After this function is called,
     * all subsequent calls on the context will fail or be no-ops.
     *
     * The typical use case for this function is that the underlying 3D context was lost and
     * further API calls may crash.
     *
     * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
     * create the context must be kept alive even after abandoning the context. Those objects
     * must live for the lifetime of the context object itself. The reason for this is so that
     * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be
     * cleaned up even in a device-lost state.
     */
    void abandonContext() override;

    /**
     * Returns true if the context was abandoned or if the backend-specific context has gotten
     * into an unrecoverable, lost state (e.g. in the Vulkan backend if we've gotten a
     * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this
     * context.
     */
    bool abandoned() override;

    // TODO: Remove this from public after migrating Chrome.
    sk_sp<GrContextThreadSafeProxy> threadSafeProxy();

    /**
     * Checks if the underlying 3D API reported an out-of-memory error. If this returns true, it
     * is reset and will return false until another out-of-memory error is reported by the 3D
     * API. If the context is abandoned then this will report false.
     *
     * Currently this is implemented for:
     *
     * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors
     * and therefore hide the error from Skia. Also, it is not advised to use this in combination
     * with enabling GrContextOptions::fSkipGLErrorChecks. That option may prevent the context
     * from ever checking the GL context for OOM.
     *
     * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has
     * occurred.
     */
    bool oomed();

    /**
     * This is similar to abandonContext(), however the underlying 3D context is not yet lost
     * and the context will clean up all allocated resources before returning. After returning
     * it will assume that the underlying context may no longer be valid.
     *
     * The typical use case for this function is that the client is going to destroy the 3D
     * context but can't guarantee that this context will be destroyed first (perhaps because it
     * may be ref'ed elsewhere by either the client or Skia objects).
     *
     * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
     * create the context must be alive before calling releaseResourcesAndAbandonContext.
     */
    void releaseResourcesAndAbandonContext();

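    // Handling sketch (illustrative): reacting to device loss and backend OOM
    // reports; 'recreateContext' is a placeholder for client logic.
    //
    //     if (context->abandoned()) {
    //         recreateContext();            // device lost; rebuild everything
    //     } else if (context->oomed()) {
    //         context->freeGpuResources();  // shed cached GPU memory
    //     }
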
    ///////////////////////////////////////////////////////////////////////////
    // Resource Cache

    /** DEPRECATED
     * Return the current GPU resource cache limits.
     *
     * @param maxResources     If non-null, will be set to -1.
     * @param maxResourceBytes If non-null, returns the maximum number of bytes of video memory
     *                         that can be held in the cache.
     */
    void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;

    /**
     * Return the current GPU resource cache limit in bytes.
     */
    size_t getResourceCacheLimit() const;

    /**
     * Gets the current GPU resource cache usage.
     *
     * @param resourceCount If non-null, returns the number of resources that are held in the
     *                      cache.
     * @param resourceBytes If non-null, returns the total number of bytes of video memory held
     *                      in the cache.
     */
    void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    /**
     * Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
     */
    size_t getResourceCachePurgeableBytes() const;

    /** DEPRECATED
     * Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes
     * limit, it will be purged (LRU) to keep the cache within the limit.
     *
     * @param maxResources     Unused.
     * @param maxResourceBytes The maximum number of bytes of video memory that can be held in
     *                         the cache.
     */
    void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);

    /**
     * Specify the GPU resource cache limit. If the cache currently exceeds this limit, it will
     * be purged (LRU) to keep the cache within the limit.
     *
     * @param maxResourceBytes The maximum number of bytes of video memory that can be held in
     *                         the cache.
     */
    void setResourceCacheLimit(size_t maxResourceBytes);

    /**
     * Frees GPU resources created by the context. Can be called to reduce GPU memory pressure.
     */
    void freeGpuResources();

    /**
     * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
     * otherwise marked for deletion, regardless of whether the context is under budget.
     *
     * If 'scratchResourcesOnly' is true, all unlocked scratch resources older than 'msNotUsed'
     * will be purged but the unlocked resources with persistent data will remain. If
     * 'scratchResourcesOnly' is false, then all unlocked resources older than 'msNotUsed' will
     * be purged.
     *
     * @param msNotUsed            Only unlocked resources not used in these last milliseconds
     *                             will be cleaned up.
     * @param scratchResourcesOnly If true, only unlocked scratch resources will be purged.
     */
    void performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                bool scratchResourcesOnly=false);

    // Temporary compatibility API for Android.
    void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
        this->performDeferredCleanup(msNotUsed);
    }

    /**
     * Purge unlocked resources from the cache until the provided byte count has been reached,
     * or we have purged all unlocked resources. The default policy is to purge in LRU order,
     * but can be overridden to prefer purging scratch resources (in LRU order) prior to purging
     * other resource types.
     *
     * @param bytesToPurge           the desired number of bytes to be purged.
     * @param preferScratchResources If true, scratch resources will be purged prior to other
     *                               resource types.
     */
    void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);
    void purgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GrGpuResourceTag& tag);
    void purgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<int>& exitedPidSet);
    void purgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<int>& exitedPidSet,
                                 const std::set<int>& protectedPidSet);
    void purgeUnlockAndSafeCacheGpuResources();
    void registerVulkanErrorCallback(const std::function<void()>& vulkanErrorCallback);

    std::array<int, 2> CalcHpsBluredImageDimension(const SkBlurArg& blurArg);

    /**
     * This entry point is intended for instances where an app has been backgrounded or
     * suspended.
     * If 'scratchResourcesOnly' is true, all unlocked scratch resources will be purged but the
     * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false,
     * then all unlocked resources will be purged.
     * In either case, after the unlocked resources are purged a separate pass will be made to
     * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true,
     * some resources with persistent data may be purged to be under budget).
     *
     * @param scratchResourcesOnly If true, only unlocked scratch resources will be purged prior
     *                             to enforcing the budget requirements.
     */
    void purgeUnlockedResources(bool scratchResourcesOnly);

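    // Usage sketch (illustrative): a typical cache-budget policy; the numbers
    // are placeholders, not recommendations.
    //
    //     context->setResourceCacheLimit(64 * 1024 * 1024);  // 64 MB budget
    //
    //     // Periodically: drop resources that have sat idle for 5+ seconds.
    //     context->performDeferredCleanup(std::chrono::seconds(5));
    //
    //     // When backgrounded: purge scratch resources, keep persistent data,
    //     // then re-enforce the budget.
    //     context->purgeUnlockedResources(true);
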
    /**
     * Gets the maximum supported texture size.
     */
    using GrRecordingContext::maxTextureSize;

    /**
     * Gets the maximum supported render target size.
     */
    using GrRecordingContext::maxRenderTargetSize;

    /**
     * Can a SkImage be created with the given color type?
     */
    using GrRecordingContext::colorTypeSupportedAsImage;

    /**
     * Can a SkSurface be created with the given color type? To check whether MSAA is supported,
     * use maxSurfaceSampleCountForColorType().
     */
    using GrRecordingContext::colorTypeSupportedAsSurface;

    /**
     * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
     * rendering is supported for the color type. 0 is returned if rendering to this color type
     * is not supported at all.
     */
    using GrRecordingContext::maxSurfaceSampleCountForColorType;

    ///////////////////////////////////////////////////////////////////////////
    // Misc.

    /**
     * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
     * executing any more commands on the GPU. If this call returns false, then the GPU back-end
     * will not wait on any passed in semaphores, and the client will still own the semaphores,
     * regardless of the value of deleteSemaphoresAfterWait.
     *
     * If deleteSemaphoresAfterWait is false, then Skia will not delete the semaphores. In this
     * case it is the client's responsibility to not destroy or attempt to reuse the semaphores
     * until it knows that Skia has finished waiting on them. This can be done by using
     * finishedProcs on flush calls.
     */
    bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
              bool deleteSemaphoresAfterWait = true);

    /**
     * Call to ensure all drawing to the context has been flushed and submitted to the underlying
     * 3D API. This is equivalent to calling GrContext::flush with a default GrFlushInfo followed
     * by GrContext::submit(syncCpu).
     */
    void flushAndSubmit(bool syncCpu = false) {
        this->flush(GrFlushInfo());
        this->submit(syncCpu);
    }

    /**
     * Call to ensure all drawing to the context has been flushed to underlying 3D API specific
     * objects. A call to `submit` is always required to ensure work is actually sent to
     * the gpu. Some specific API details:
     *     GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
     *         sync objects from the flush will not be valid until a submission occurs.
     *
     *     Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
     *         buffer or encoder objects. However, these objects are not sent to the gpu until a
     *         submission occurs.
     *
     * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
     * submitted to the gpu during the next submit call (it is possible Skia failed to create a
     * subset of the semaphores). The client should not wait on these semaphores until after
     * submit has been called, and must keep them alive until then. If this call returns
     * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled
     * on the GPU. Thus the client should not have the GPU wait on any of the semaphores passed
     * in with the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or
     * not, the client is still responsible for deleting any initialized semaphores.
     * Regardless of semaphore submission, the context will still be flushed. It should be
     * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did
     * not happen. It simply means there were no semaphores submitted to the GPU. A caller should
     * only take this as a failure if they passed in semaphores to be submitted.
     */
    GrSemaphoresSubmitted flush(const GrFlushInfo& info);

    void flush() { this->flush({}); }

    /**
     * Submit outstanding work to the gpu from all previously un-submitted flushes. The return
     * value of the submit will indicate whether or not the submission to the GPU was successful.
     *
     * If the call returns true, all previously passed in semaphores in flush calls will have
     * been submitted to the GPU and they can safely be waited on. The caller should wait on
     * those semaphores or perform some other global synchronization before deleting the
     * semaphores.
     *
     * If it returns false, then those same semaphores will not have been submitted and we will
     * not try to submit them again. The caller is free to delete the semaphores at any time.
     *
     * If the syncCpu flag is true, this function will return once the gpu has finished with all
     * submitted work.
     */
    bool submit(bool syncCpu = false);

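    // Usage sketch (illustrative): flushing with a signal semaphore and a
    // finished callback. 'semaphore', 'onGpuFinished', and 'myState' are
    // placeholders for client-provided objects.
    //
    //     GrFlushInfo info;
    //     info.fNumSemaphores = 1;
    //     info.fSignalSemaphores = &semaphore;
    //     info.fFinishedProc = onGpuFinished;  // GrGpuFinishedProc
    //     info.fFinishedContext = myState;     // GrGpuFinishedContext
    //     GrSemaphoresSubmitted submitted = context->flush(info);
    //     if (context->submit() && submitted == GrSemaphoresSubmitted::kYes) {
    //         // 'semaphore' was submitted; it is now safe to wait on it.
    //     }
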
    /**
     * Checks whether any asynchronous work is complete and if so calls related callbacks.
     */
    void checkAsyncWorkCompletion();

    /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
    // Chrome is using this!
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
    void dumpMemoryStatisticsByTag(SkTraceMemoryDump* traceMemoryDump,
                                   const GrGpuResourceTag& tag) const;

    bool supportsDistanceFieldText() const;

    void storeVkPipelineCacheData();

    /**
     * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
     * It is guaranteed that this backend format will be the one used by the following
     * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
     *
     * The caller should check that the returned format is valid.
     */
    using GrRecordingContext::defaultBackendFormat;

    /**
     * The explicitly allocated backend texture API allows clients to use Skia to create backend
     * objects outside of Skia proper (i.e., Skia's caching system will not know about them.)
     *
     * It is the client's responsibility to delete all these objects (using deleteBackendTexture)
     * before deleting the context used to create them. If the backend is Vulkan, the textures
     * must be deleted before abandoning the context as well. Additionally, clients should only
     * delete these objects on the thread for which that context is active.
     *
     * The client is responsible for ensuring synchronization between different uses of the
     * backend object (i.e., wrapping it in a surface, rendering to it, deleting the surface,
     * rewrapping it in an image, and drawing the image will require explicit synchronization on
     * the client's part).
     */

    /**
     * If possible, create an uninitialized backend texture. The client should ensure that the
     * returned backend texture is valid.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_UNDEFINED.
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          const GrBackendFormat&,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo);

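    // Usage sketch (illustrative): create, use, and delete an uninitialized
    // backend texture.
    //
    //     GrBackendFormat format = context->defaultBackendFormat(
    //             kRGBA_8888_SkColorType, GrRenderable::kYes);
    //     GrBackendTexture tex = context->createBackendTexture(
    //             256, 256, format, GrMipmapped::kNo, GrRenderable::kYes);
    //     if (tex.isValid()) {
    //         // ... wrap in an SkSurface or SkImage and draw ...
    //         context->deleteBackendTexture(tex);
    //     }
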
    /**
     * If possible, create an uninitialized backend texture. The client should ensure that the
     * returned backend texture is valid.
     * If successful, the created backend texture will be compatible with the provided
     * SkColorType.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_UNDEFINED.
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          SkColorType,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo);

    /**
     * If possible, create a backend texture initialized to a particular color. The client
     * should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture
     * can be deleted. The client is required to call `submit` to send the upload work to the
     * gpu. The finishedProc will always get called even if we failed to create the
     * GrBackendTexture.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          const GrBackendFormat&,
                                          const SkColor4f& color,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a backend texture initialized to a particular color. The client
     * should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture
     * can be deleted. The client is required to call `submit` to send the upload work to the
     * gpu. The finishedProc will always get called even if we failed to create the
     * GrBackendTexture.
     * If successful, the created backend texture will be compatible with the provided
     * SkColorType.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(int width, int height,
                                          SkColorType,
                                          const SkColor4f& color,
                                          GrMipmapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a backend texture initialized with the provided pixmap data. The
     * client should ensure that the returned backend texture is valid. The client can pass in
     * a finishedProc to be notified when the data has been uploaded by the gpu and the texture
     * can be deleted. The client is required to call `submit` to send the upload work to the
     * gpu. The finishedProc will always get called even if we failed to create the
     * GrBackendTexture.
     * If successful, the created backend texture will be compatible with the provided
     * pixmap(s). Compatible, in this case, means that the backend format will be the result
     * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be
     * deleted when this call returns.
     * If numLevels is 1, a non-mipmapped texture will result. If a mipmapped texture is
     * desired, the data for all the mipmap levels must be provided. In the mipmapped case, all
     * the colortypes of the provided pixmaps must be the same. Additionally, all the miplevels
     * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount).
     * The GrSurfaceOrigin controls whether the pixmap data is vertically flipped in the
     * texture.
     * Note: the pixmaps' alphatypes and colorspaces are ignored.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createBackendTexture(const SkPixmap srcData[],
                                          int numLevels,
                                          GrSurfaceOrigin,
                                          GrRenderable,
                                          GrProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr);

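    // Usage sketch (illustrative): upload a single-level pixmap; 'pixmap' is a
    // placeholder for client-provided pixel data.
    //
    //     GrBackendTexture tex = context->createBackendTexture(
    //             &pixmap, 1, kTopLeft_GrSurfaceOrigin,
    //             GrRenderable::kNo, GrProtected::kNo);
    //     context->submit();  // the upload is not sent to the GPU until submit
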
    /**
     * Convenience version of createBackendTexture() that takes just a base level pixmap.
     */
    GrBackendTexture createBackendTexture(const SkPixmap& srcData,
                                          GrSurfaceOrigin textureOrigin,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(&srcData, 1, textureOrigin, renderable, isProtected,
                                          finishedProc, finishedContext);
    }

    // Deprecated versions that do not take origin and assume top-left.
    GrBackendTexture createBackendTexture(const SkPixmap srcData[],
                                          int numLevels,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(srcData,
                                          numLevels,
                                          kTopLeft_GrSurfaceOrigin,
                                          renderable,
                                          isProtected,
                                          finishedProc,
                                          finishedContext);
    }
    GrBackendTexture createBackendTexture(const SkPixmap& srcData,
                                          GrRenderable renderable,
                                          GrProtected isProtected,
                                          GrGpuFinishedProc finishedProc = nullptr,
                                          GrGpuFinishedContext finishedContext = nullptr) {
        return this->createBackendTexture(&srcData,
                                          1,
                                          renderable,
                                          isProtected,
                                          finishedProc,
                                          finishedContext);
    }

    /**
     * If possible, updates a backend texture to be filled to a particular color. The client
     * should check the return value to see if the update was successful. The client can pass
     * in a finishedProc to be notified when the data has been uploaded by the gpu and the
     * texture can be deleted. The client is required to call `submit` to send the upload work
     * to the gpu. The finishedProc will always get called even if we failed to update the
     * GrBackendTexture.
     * For the Vulkan backend, after a successful update the layout of the created VkImage will
     * be: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              const SkColor4f& color,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture to be filled to a particular color. The data in
     * GrBackendTexture and passed in color is interpreted with respect to the passed in
     * SkColorType. The client should check the return value to see if the update was
     * successful. The client can pass in a finishedProc to be notified when the data has been
     * uploaded by the gpu and the texture can be deleted. The client is required to call
     * `submit` to send the upload work to the gpu. The finishedProc will always get called
     * even if we failed to update the GrBackendTexture.
     * For the Vulkan backend, after a successful update the layout of the created VkImage will
     * be: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              SkColorType skColorType,
                              const SkColor4f& color,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext);

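    // Usage sketch (illustrative): clear an existing backend texture to a
    // solid color; 'tex' is a placeholder for a valid GrBackendTexture.
    //
    //     if (context->updateBackendTexture(tex, SkColors::kRed,
    //                                       nullptr, nullptr)) {
    //         context->submit();  // send the upload to the GPU
    //     }
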
    /**
     * If possible, updates a backend texture filled with the provided pixmap data. The client
     * should check the return value to see if the update was successful. The client can pass
     * in a finishedProc to be notified when the data has been uploaded by the gpu and the
     * texture can be deleted. The client is required to call `submit` to send the upload work
     * to the gpu. The finishedProc will always get called even if we failed to update the
     * GrBackendTexture.
     * The backend texture must be compatible with the provided pixmap(s). Compatible, in this
     * case, means that the backend format is compatible with the base pixmap's colortype. The
     * src data can be deleted when this call returns.
     * If the backend texture is mipmapped, the data for all the mipmap levels must be provided.
     * In the mipmapped case, all the colortypes of the provided pixmaps must be the same.
     * Additionally, all the miplevels must be sized correctly (please see
     * SkMipmap::ComputeLevelSize and ComputeLevelCount). The GrSurfaceOrigin controls whether
     * the pixmap data is vertically flipped in the texture.
     * Note: the pixmaps' alphatypes and colorspaces are ignored.
     * For the Vulkan backend, after a successful update the layout of the created VkImage will
     * be: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateBackendTexture(const GrBackendTexture&,
                              const SkPixmap srcData[],
                              int numLevels,
                              GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin,
                              GrGpuFinishedProc finishedProc = nullptr,
                              GrGpuFinishedContext finishedContext = nullptr);

    /**
     * Convenience version of updateBackendTexture that takes just a base level pixmap.
     */
    bool updateBackendTexture(const GrBackendTexture& texture,
                              const SkPixmap& srcData,
                              GrSurfaceOrigin textureOrigin = kTopLeft_GrSurfaceOrigin,
                              GrGpuFinishedProc finishedProc = nullptr,
                              GrGpuFinishedContext finishedContext = nullptr) {
        return this->updateBackendTexture(texture,
                                          &srcData,
                                          1,
                                          textureOrigin,
                                          finishedProc,
                                          finishedContext);
    }

    // Deprecated version that does not take origin and assumes top-left.
    bool updateBackendTexture(const GrBackendTexture& texture,
                              const SkPixmap srcData[],
                              int numLevels,
                              GrGpuFinishedProc finishedProc,
                              GrGpuFinishedContext finishedContext) {
        return this->updateBackendTexture(texture,
                                          srcData,
                                          numLevels,
                                          kTopLeft_GrSurfaceOrigin,
                                          finishedProc,
                                          finishedContext);
    }

    /**
     * Retrieve the GrBackendFormat for a given SkImage::CompressionType. This is
     * guaranteed to match the backend format used by the following
     * createCompressedBackendTexture methods that take a CompressionType.
     *
     * The caller should check that the returned format is valid.
     */
    using GrRecordingContext::compressedBackendFormat;

    /**
     * If possible, create a compressed backend texture initialized to a particular color. The
     * client should ensure that the returned backend texture is valid. The client can pass in
     * a finishedProc to be notified when the data has been uploaded by the gpu and the texture
     * can be deleted. The client is required to call `submit` to send the upload work to the
     * gpu. The finishedProc will always get called even if we failed to create the
     * GrBackendTexture.
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    const GrBackendFormat&,
                                                    const SkColor4f& color,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    SkImage::CompressionType,
                                                    const SkColor4f& color,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, create a backend texture initialized with the provided raw data. The client
     * should ensure that the returned backend texture is valid. The client can pass in a
     * finishedProc to be notified when the data has been uploaded by the gpu and the texture
     * can be deleted. The client is required to call `submit` to send the upload work to the
     * gpu. The finishedProc will always get called even if we failed to create the
     * GrBackendTexture.
     * If numLevels is 1, a non-mipmapped texture will result. If a mipmapped texture is
     * desired, the data for all the mipmap levels must be provided. Additionally, all the
     * miplevels must be sized correctly (please see SkMipmap::ComputeLevelSize and
     * ComputeLevelCount).
     * For the Vulkan backend the layout of the created VkImage will be:
     * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    const GrBackendFormat&,
                                                    const void* data, size_t dataSize,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    GrBackendTexture createCompressedBackendTexture(int width, int height,
                                                    SkImage::CompressionType,
                                                    const void* data, size_t dataSize,
                                                    GrMipmapped,
                                                    GrProtected = GrProtected::kNo,
                                                    GrGpuFinishedProc finishedProc = nullptr,
                                                    GrGpuFinishedContext finishedContext = nullptr);

    /**
     * If possible, updates a backend texture filled with the provided color. If the texture is
     * mipmapped, all levels of the mip chain will be updated to have the supplied color. The
     * client should check the return value to see if the update was successful. The client can
     * pass in a finishedProc to be notified when the data has been uploaded by the gpu and the
     * texture can be deleted. The client is required to call `submit` to send the upload work
     * to the gpu. The finishedProc will always get called even if we failed to update the
     * GrBackendTexture.
     * For the Vulkan backend, after a successful update the layout of the created VkImage will
     * be: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        const SkColor4f& color,
                                        GrGpuFinishedProc finishedProc,
                                        GrGpuFinishedContext finishedContext);

    /**
     * If possible, updates a backend texture filled with the provided raw data. The client
     * should check the return value to see if the update was successful. The client can pass
     * in a finishedProc to be notified when the data has been uploaded by the gpu and the
     * texture can be deleted. The client is required to call `submit` to send the upload work
     * to the gpu. The finishedProc will always get called even if we failed to update the
     * GrBackendTexture.
     * If a mipmapped texture is passed in, the data for all the mipmap levels must be provided.
     * Additionally, all the miplevels must be sized correctly (please see
     * SkMipmap::ComputeLevelSize and ComputeLevelCount).
     * For the Vulkan backend, after a successful update the layout of the created VkImage will
     * be: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
     */
    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        const void* data,
                                        size_t dataSize,
                                        GrGpuFinishedProc finishedProc,
                                        GrGpuFinishedContext finishedContext);

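    // Usage sketch (illustrative): upload pre-encoded ETC2 data. 'etcData' and
    // 'etcSize' are placeholders for a client-provided compressed payload whose
    // size matches the stated dimensions.
    //
    //     GrBackendTexture tex = context->createCompressedBackendTexture(
    //             256, 256, SkImage::CompressionType::kETC2_RGB8_UNORM,
    //             etcData, etcSize, GrMipmapped::kNo);
    //     context->submit();
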
    /**
     * Updates the state of the GrBackendTexture/RenderTarget to have the passed in
     * GrBackendSurfaceMutableState. All objects that wrap the backend surface (i.e. SkSurfaces
     * and SkImages) will also be aware of this state change. This call does not submit the
     * state change to the gpu, but requires the client to call `submit` to send it to the GPU.
     * The work for this call is ordered linearly with all other calls that require
     * GrContext::submit to be called (e.g. updateBackendTexture and flush). If finishedProc is
     * not null, then it will be called with finishedContext after the state transition is known
     * to have occurred on the GPU.
     *
     * See GrBackendSurfaceMutableState to see what state can be set via this call.
     *
     * If the backend API is Vulkan, the caller can set the GrBackendSurfaceMutableState's
     * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED
     * to tell Skia to not change those respective states.
     *
     * If previousState is not null and this returns true, then Skia will have filled in
     * previousState to have the values of the state before this call.
     */
    bool setBackendTextureState(const GrBackendTexture&,
                                const GrBackendSurfaceMutableState&,
                                GrBackendSurfaceMutableState* previousState = nullptr,
                                GrGpuFinishedProc finishedProc = nullptr,
                                GrGpuFinishedContext finishedContext = nullptr);
    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const GrBackendSurfaceMutableState&,
                                     GrBackendSurfaceMutableState* previousState = nullptr,
                                     GrGpuFinishedProc finishedProc = nullptr,
                                     GrGpuFinishedContext finishedContext = nullptr);

    void deleteBackendTexture(GrBackendTexture);

    // This interface allows clients to pre-compile shaders and populate the runtime program
    // cache. The key and data blobs should be the ones passed to the PersistentCache, in SkSL
    // format.
    //
    // Steps to use this API:
    //
    // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to
    //    something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL.
    //    This will ensure that the blobs are SkSL, and are suitable for pre-compilation.
    // 2) Run your application, and save all of the key/data pairs that are fed to the cache.
    // 3) Switch over to shipping your application. Include the key/data pairs from above.
    // 4) At startup (or any convenient time), call precompileShader for each key/data pair.
    //    This will compile the SkSL to create a GL program, and populate the runtime cache.
    //
    // This is only guaranteed to work if the context/device used in step #2 is created in the
    // same way as the one used in step #4, and the same GrContextOptions are specified.
    // Using cached shader blobs on a different device or driver is undefined.
    bool precompileShader(const SkData& key, const SkData& data);

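    // Usage sketch (illustrative): warm the program cache at startup (step 4
    // above). 'savedShaders' is a placeholder for key/data pairs (sk_sp<SkData>)
    // captured from GrContextOptions::fPersistentCache during a profiling run.
    //
    //     for (const auto& [key, data] : savedShaders) {
    //         context->precompileShader(*key, *data);
    //     }
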
#ifdef SK_ENABLE_DUMP_GPU
    /** Returns a string with detailed information about the context & GPU, in JSON format. */
    SkString dump() const;
#endif

    class DirectContextID {
    public:
        static GrDirectContext::DirectContextID Next();

        DirectContextID() : fID(SK_InvalidUniqueID) {}

        bool operator==(const DirectContextID& that) const { return fID == that.fID; }
        bool operator!=(const DirectContextID& that) const { return !(*this == that); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isValid() const { return fID != SK_InvalidUniqueID; }

    private:
        constexpr DirectContextID(uint32_t id) : fID(id) {}

        uint32_t fID;
    };

    DirectContextID directContextID() const { return fDirectContextID; }

    // Provides access to functions that aren't part of the public API.
    GrDirectContextPriv priv();
    const GrDirectContextPriv priv() const;  // NOLINT(readability-const-return-type)

    /**
     * Set the current resource tag for gpu cache recycling.
     */
    void setCurrentGrResourceTag(const GrGpuResourceTag& tag);

    /**
     * Pop the current resource tag.
     */
    void popGrResourceTag();

    /**
     * Get the current resource tag for gpu cache recycling.
     *
     * @return the current GrGpuResourceTag.
     */
    GrGpuResourceTag getCurrentGrResourceTag() const;

    /**
     * Releases GrGpuResource objects and removes them from the cache by tag.
     */
    void releaseByTag(const GrGpuResourceTag& tag);

    /**
     * Get all GrGpuResource tags.
     *
     * @return all GrGpuResourceTags.
     */
    std::set<GrGpuResourceTag> getAllGrGpuResourceTags() const;

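    // Usage sketch (illustrative): attributing cached resources to a tag so
    // they can be released as a group. GrGpuResourceTag construction is
    // OH-specific; 'tag' is a placeholder for a client-built tag.
    //
    //     context->setCurrentGrResourceTag(tag);
    //     // ... create surfaces/images; their resources carry 'tag' ...
    //     context->popGrResourceTag();
    //     // Later, e.g. when the owning window exits:
    //     context->releaseByTag(tag);
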
    void vmaDefragment();
    void dumpVmaStats(SkString* out);
    void dumpAllResource(std::stringstream& dump) const;

#ifdef SKIA_OHOS
    // OH ISSUE: set the purgeable resource max count limit.
    void setPurgeableResourceLimit(int purgeableMaxCount);
#endif
    // OH ISSUE: get the memory information of the updated pids.
    void getUpdatedMemoryMap(std::unordered_map<int32_t, size_t>& out);
    // OH ISSUE: init the gpu memory limit.
    void initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size);
    // OH ISSUE: check whether the PID is abnormal.
    bool isPidAbnormal() const override;

    // OH ISSUE: intra-frame and inter-frame identification.
    void beginFrame();
    void endFrame();

    // OH ISSUE: async memory reclaimer.
    void setGpuMemoryAsyncReclaimerSwitch(bool enabled, const std::function<void()>& setThreadPriority);
    void flushGpuMemoryInWaitQueue();

    // OH ISSUE: suppress the release window.
    void setGpuCacheSuppressWindowSwitch(bool enabled);
    void suppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived);
    void processVulkanError();

protected:
    GrDirectContext(GrBackendApi backend, const GrContextOptions& options);

    bool init() override;

    GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); }
    skgpu::v1::SmallPathAtlasMgr* onGetSmallPathAtlasMgr();

    GrDirectContext* asDirectContext() override { return this; }

private:
    // This call will make sure our work on the GPU is finished and will execute any outstanding
    // asynchronous work (e.g. calling finished procs, freeing resources, etc.) related to the
    // outstanding work on the gpu. The main use currently for this function is when tearing
    // down or abandoning the context.
    //
    // When we finish up work on the GPU it could trigger callbacks to the client. In the case
    // where we are abandoning the context, we don't want the client to be able to use the
    // GrDirectContext to issue more commands during the callback. Thus, before calling this
    // function, we set the GrDirectContext's state to be abandoned. However, we need to be able
    // to get past the abandoned check in the call to know that it is safe to execute this. The
    // shouldExecuteWhileAbandoned bool is used for this signal.
    void syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned);

    const DirectContextID fDirectContextID;
    // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
    // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
    // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
    // invoked after objects they depend upon have already been destroyed.
    std::unique_ptr<SkTaskGroup> fTaskGroup;
    std::unique_ptr<GrStrikeCache> fStrikeCache;
    sk_sp<GrGpu> fGpu;
    std::unique_ptr<GrResourceCache> fResourceCache;
    std::unique_ptr<GrResourceProvider> fResourceProvider;

    bool fDidTestPMConversions;
    // True if the PM/UPM conversion succeeded; false otherwise.
    bool fPMUPMConversionsRoundTrip;

    GrContextOptions::PersistentCache* fPersistentCache;

    std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager;
    std::unique_ptr<GrAtlasManager> fAtlasManager;

    std::unique_ptr<skgpu::v1::SmallPathAtlasMgr> fSmallPathAtlasMgr;
    std::function<void()> vulkanErrorCallback_;

    friend class GrDirectContextPriv;

    using INHERITED = GrRecordingContext;
};

#endif