1 /*
2  * Copyright 2020 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef GrDirectContext_DEFINED
9 #define GrDirectContext_DEFINED
10 
11 #include <set>
12 
13 #include <array>
14 
15 #include "include/gpu/GrRecordingContext.h"
16 
17 #include "include/gpu/GrBackendSurface.h"
18 
19 #include "src/gpu/GrGpuResource.h"
20 
21 // We shouldn't need this, but currently Android is relying on this being included transitively.
22 #include "include/core/SkUnPreMultiply.h"
23 
24 #include "include/core/SkBlurTypes.h"
25 
26 class GrAtlasManager;
27 class GrBackendSemaphore;
28 class GrClientMappedBufferManager;
29 class GrDirectContextPriv;
30 class GrContextThreadSafeProxy;
31 struct GrD3DBackendContext;
32 class GrFragmentProcessor;
33 class GrGpu;
34 struct GrGLInterface;
35 struct GrMtlBackendContext;
36 struct GrMockOptions;
37 class GrPath;
38 class GrResourceCache;
39 class GrResourceProvider;
40 class GrStrikeCache;
41 class GrSurfaceProxy;
42 class GrSwizzle;
43 class GrTextureProxy;
44 struct GrVkBackendContext;
45 
46 class SkImage;
47 class SkString;
48 class SkSurfaceCharacterization;
49 class SkSurfaceProps;
50 class SkTaskGroup;
51 class SkTraceMemoryDump;
52 
53 // OH ISSUE: callback for memory protection.
54 using MemoryOverflowCalllback = std::function<void(int32_t, size_t, bool)>;
55 
56 namespace skgpu { namespace v1 { class SmallPathAtlasMgr; }}
57 
58 class SK_API GrDirectContext : public GrRecordingContext {
59 public:
60 #ifdef SK_GL
61     /**
62      * Creates a GrDirectContext for a backend context. If no GrGLInterface is provided then the
63      * result of GrGLMakeNativeInterface() is used if it succeeds.
64      */
65     static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&);
66     static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>);
67     static sk_sp<GrDirectContext> MakeGL(const GrContextOptions&);
68     static sk_sp<GrDirectContext> MakeGL();
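    // A minimal usage sketch (illustrative, not part of the API): assumes a native GL
    // context is already current on the calling thread and that `options` is a
    // GrContextOptions the caller has configured.
    //
    //     GrContextOptions options;
    //     sk_sp<GrDirectContext> ctx = GrDirectContext::MakeGL(nullptr, options);
    //     if (!ctx) {
    //         // Creation failed (e.g. no usable GL interface); fall back to raster rendering.
    //     }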
69 #endif
70 
71 #ifdef SK_VULKAN
72     /**
73      * The Vulkan context (VkQueue, VkDevice, VkInstance) must be kept alive until the returned
74      * GrDirectContext is destroyed. This also means that any objects created with this
75      * GrDirectContext (e.g. SkSurfaces, SkImages, etc.) must also be released as they may hold
76      * refs on the GrDirectContext. Once all these objects and the GrDirectContext are released,
77      * then it is safe to delete the vulkan objects.
78      */
79     static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&);
80     static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&);
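    // Usage sketch for the Vulkan path (names are illustrative): the client owns the
    // VkInstance/VkDevice/VkQueue referenced by `vkBackendContext` and must keep them
    // alive until the returned context, and everything created from it, is released.
    //
    //     GrVkBackendContext vkBackendContext = makeVkBackendContext();  // hypothetical client helper
    //     sk_sp<GrDirectContext> ctx = GrDirectContext::MakeVulkan(vkBackendContext);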
81 #endif
82 
83 #ifdef SK_METAL
84     /**
85      * Makes a GrDirectContext which uses Metal as the backend. The GrMtlBackendContext contains a
86      * MTLDevice and MTLCommandQueue which should be used by the backend. These objects must
87      * have their own ref which will be released when the GrMtlBackendContext is destroyed.
88      * Ganesh will take its own ref on the objects which will be released when the GrDirectContext
89      * is destroyed.
90      */
91     static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&, const GrContextOptions&);
92     static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&);
93     /**
94      * Deprecated.
95      *
96      * Makes a GrDirectContext which uses Metal as the backend. The device parameter is an
97      * MTLDevice and queue is an MTLCommandQueue which should be used by the backend. These objects
98      * must have a ref on them that can be transferred to Ganesh, which will release the ref
99      * when the GrDirectContext is destroyed.
100      */
101     static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue, const GrContextOptions&);
102     static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue);
103 #endif
104 
105 #ifdef SK_DIRECT3D
106     /**
107      * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context
108      * must be kept alive until the returned GrDirectContext is first destroyed or abandoned.
109      */
110     static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&, const GrContextOptions&);
111     static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&);
112 #endif
113 
114 #ifdef SK_DAWN
115     static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&,
116                                            const GrContextOptions&);
117     static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&);
118 #endif
119 
120     static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
121     static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*);
122 
123     ~GrDirectContext() override;
124 
125     /**
126      * The context normally assumes that no outsider is setting state
127      * within the underlying 3D API's context/device/whatever. This call informs
128      * the context that the state was modified and it should resend. Shouldn't
129      * be called frequently for good performance.
130      * The flag bits of 'state' depend on which backend is used by the
131      * context, either GL or D3D (the latter possible in the future).
132      */
133     void resetContext(uint32_t state = kAll_GrBackendState);
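    // Sketch: after issuing raw 3D API calls outside of Skia, tell the context that its
    // cached backend state is stale (`issueRawGLCalls` and `ctx` are illustrative):
    //
    //     issueRawGLCalls();
    //     ctx->resetContext(kAll_GrBackendState);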
134 
135     /**
136      * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which
137      * the context has modified the bound texture will have texture id 0 bound. This does not
138      * flush the context. Calling resetContext() does not change the set that will be bound
139      * to texture id 0 on the next call to resetGLTextureBindings(). After this is called
140      * all unit/target combinations are considered to have unmodified bindings until the context
141      * subsequently modifies them (meaning if this is called twice in a row with no intervening
142      * context usage then the second call is a no-op.)
143      */
144     void resetGLTextureBindings();
145 
146     /**
147      * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
148      * usable. Call this if you have lost the associated GPU context, and thus internal texture,
149      * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
150      * context and any of its created resource objects will not make backend 3D API calls. Content
151      * rendered but not previously flushed may be lost. After this function is called all subsequent
152      * calls on the context will fail or be no-ops.
153      *
154      * The typical use case for this function is that the underlying 3D context was lost and further
155      * API calls may crash.
156      *
157      * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
158      * create the context must be kept alive even after abandoning the context. Those objects must
159      * live for the lifetime of the context object itself. The reason for this is so that
160      * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be
161      * cleaned up even in a device lost state.
162      */
163     void abandonContext() override;
164 
165     /**
166      * Returns true if the context was abandoned or if the backend-specific context has
167      * gotten into an unrecoverable, lost state (e.g. in the Vulkan backend if we've gotten a
168      * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this
169      * context.
170      */
171     bool abandoned() override;
172 
173     // TODO: Remove this from public after migrating Chrome.
174     sk_sp<GrContextThreadSafeProxy> threadSafeProxy();
175 
176     /**
177      * Checks if the underlying 3D API reported an out-of-memory error. If this returns true it is
178      * reset and will return false until another out-of-memory error is reported by the 3D API. If
179      * the context is abandoned then this will report false.
180      *
181      * Currently this is implemented for:
182      *
183      * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors and
184      * therefore hide the error from Skia. Also, it is not advised to use this in combination with
185      * enabling GrContextOptions::fSkipGLErrorChecks. That option may prevent the context from ever
186      * checking the GL context for OOM.
187      *
188      * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has
189      * occurred.
190      */
191     bool oomed();
192 
193     /**
194      * This is similar to abandonContext() however the underlying 3D context is not yet lost and
195      * the context will cleanup all allocated resources before returning. After returning it will
196      * assume that the underlying context may no longer be valid.
197      *
198      * The typical use case for this function is that the client is going to destroy the 3D context
199      * but can't guarantee that context will be destroyed first (perhaps because it may be ref'ed
200      * elsewhere by either the client or Skia objects).
201      *
202      * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
203      * create the context must be alive before calling releaseResourcesAndAbandonContext.
204      */
205     void releaseResourcesAndAbandonContext();
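    // Teardown sketch for when the 3D context is still alive but about to go away
    // (`ctx` is an sk_sp<GrDirectContext> held by the client; names are illustrative):
    //
    //     ctx->releaseResourcesAndAbandonContext();  // frees GPU resources while the 3D API is still usable
    //     ctx.reset();                               // drop our ref; remaining refs won't touch the 3D API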
206 
207     ///////////////////////////////////////////////////////////////////////////
208     // Resource Cache
209 
210     /** DEPRECATED
211      *  Return the current GPU resource cache limits.
212      *
213      *  @param maxResources If non-null, will be set to -1.
214      *  @param maxResourceBytes If non-null, returns maximum number of bytes of
215      *                          video memory that can be held in the cache.
216      */
217     void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;
218 
219     /**
220      *  Return the current GPU resource cache limit in bytes.
221      */
222     size_t getResourceCacheLimit() const;
223 
224     /**
225      *  Gets the current GPU resource cache usage.
226      *
227      *  @param resourceCount If non-null, returns the number of resources that are held in the
228      *                       cache.
229      *  @param resourceBytes If non-null, returns the total number of bytes of video memory held
230      *                          in the cache.
231      */
232     void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;
233 
234     /**
235      *  Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
236      */
237     size_t getResourceCachePurgeableBytes() const;
238 
239     /** DEPRECATED
240      *  Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes
241      *  limit, it will be purged (LRU) to keep the cache within the limit.
242      *
243      *  @param maxResources Unused.
244      *  @param maxResourceBytes The maximum number of bytes of video memory
245      *                          that can be held in the cache.
246      */
247     void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
248 
249     /**
250      *  Specify the GPU resource cache limit. If the cache currently exceeds this limit,
251      *  it will be purged (LRU) to keep the cache within the limit.
252      *
253      *  @param maxResourceBytes The maximum number of bytes of video memory
254      *                          that can be held in the cache.
255      */
256     void setResourceCacheLimit(size_t maxResourceBytes);
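    // Cache-budget sketch: read the current limit and cap it. The 96 MiB figure is
    // illustrative only, not a recommendation.
    //
    //     size_t limit = ctx->getResourceCacheLimit();
    //     ctx->setResourceCacheLimit(std::min(limit, size_t{96 * 1024 * 1024}));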
257 
258     /**
259      * Frees GPU resources created by the context. Can be called to reduce GPU memory
260      * pressure.
261      */
262     void freeGpuResources();
263 
264     /**
265      * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
266      * otherwise marked for deletion, regardless of whether the context is under budget.
267      *
268      * If 'scratchResourcesOnly' is true all unlocked scratch resources older than 'msNotUsed' will
269      * be purged but the unlocked resources with persistent data will remain. If
270      * 'scratchResourcesOnly' is false then all unlocked resources older than 'msNotUsed' will be
271      * purged.
272      *
273      * @param msNotUsed              Only unlocked resources not used in these last milliseconds
274      *                               will be cleaned up.
275      * @param scratchResourcesOnly   If true only unlocked scratch resources will be purged.
276      */
277     void performDeferredCleanup(std::chrono::milliseconds msNotUsed,
278                                 bool scratchResourcesOnly=false);
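    // Sketch: once per frame (or on a timer), drop resources that have sat unused for a
    // while without disturbing anything in recent use (the 5 second window is illustrative):
    //
    //     ctx->performDeferredCleanup(std::chrono::seconds(5));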
279 
280     // Temporary compatibility API for Android.
281     void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
282         this->performDeferredCleanup(msNotUsed);
283     }
284 
285     /**
286      * Purge unlocked resources from the cache until the provided byte count has been reached
287      * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
288      * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
289      * resource types.
290      *
291      * @param bytesToPurge the desired number of bytes to be purged.
292      * @param preferScratchResources If true scratch resources will be purged prior to other
293      *                               resource types.
294      */
295     void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);
296     void purgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GrGpuResourceTag& tag);
297     void purgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<int>& exitedPidSet);
298     void purgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<int>& exitedPidSet,
299         const std::set<int>& protectedPidSet);
300     void purgeUnlockAndSafeCacheGpuResources();
301 
302     std::array<int, 2> CalcHpsBluredImageDimension(const SkBlurArg& blurArg);
303     /**
304      * This entry point is intended for instances where an app has been backgrounded or
305      * suspended.
306      * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the
307      * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
308      * then all unlocked resources will be purged.
309      * In either case, after the unlocked resources are purged a separate pass will be made to
310      * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
311      * some resources with persistent data may be purged to be under budget).
312      *
313      * @param scratchResourcesOnly   If true only unlocked scratch resources will be purged prior
314      *                               to enforcing the budget requirements.
315      */
316     void purgeUnlockedResources(bool scratchResourcesOnly);
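    // Sketch for an app moving to the background (pointer name is illustrative):
    //
    //     ctx->flushAndSubmit();
    //     ctx->purgeUnlockedResources(/*scratchResourcesOnly=*/false);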
317 
318     /**
319      * Gets the maximum supported texture size.
320      */
321     using GrRecordingContext::maxTextureSize;
322 
323     /**
324      * Gets the maximum supported render target size.
325      */
326     using GrRecordingContext::maxRenderTargetSize;
327 
328     /**
329      * Can a SkImage be created with the given color type.
330      */
331     using GrRecordingContext::colorTypeSupportedAsImage;
332 
333     /**
334      * Can a SkSurface be created with the given color type. To check whether MSAA is supported
335      * use maxSurfaceSampleCountForColorType().
336      */
337     using GrRecordingContext::colorTypeSupportedAsSurface;
338 
339     /**
340      * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
341      * rendering is supported for the color type. 0 is returned if rendering to this color type
342      * is not supported at all.
343      */
344     using GrRecordingContext::maxSurfaceSampleCountForColorType;
345 
346     ///////////////////////////////////////////////////////////////////////////
347     // Misc.
348 
349     /**
350      * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
351      * executing any more commands on the GPU. If this call returns false, then the GPU back-end
352      * will not wait on any passed in semaphores, and the client will still own the semaphores,
353      * regardless of the value of deleteSemaphoresAfterWait.
354      *
355      * If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case
356      * it is the client's responsibility to not destroy or attempt to reuse the semaphores until it
357      * knows that Skia has finished waiting on them. This can be done by using finishedProcs on
358      * flush calls.
359      */
360     bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
361               bool deleteSemaphoresAfterWait = true);
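    // Sketch (Vulkan): make Skia wait on a semaphore the client will signal later.
    // `vkSemaphore` is assumed to be an externally created VkSemaphore owned by the client.
    //
    //     GrBackendSemaphore sem;
    //     sem.initVulkan(vkSemaphore);
    //     ctx->wait(1, &sem, /*deleteSemaphoresAfterWait=*/false);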
362 
363     /**
364      * Call to ensure all drawing to the context has been flushed and submitted to the underlying 3D
365      * API. This is equivalent to calling GrContext::flush with a default GrFlushInfo followed by
366      * GrContext::submit(syncCpu).
367      */
368     void flushAndSubmit(bool syncCpu = false) {
369         this->flush(GrFlushInfo());
370         this->submit(syncCpu);
371     }
372 
373     /**
374      * Call to ensure all drawing to the context has been flushed to underlying 3D API specific
375      * objects. A call to `submit` is always required to ensure work is actually sent to
376      * the gpu. Some specific API details:
377      *     GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
378      *         sync objects from the flush will not be valid until a submission occurs.
379      *
380      *     Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
381      *         buffer or encoder objects. However, these objects are not sent to the gpu until a
382      *         submission occurs.
383      *
384      * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
385      * submitted to the gpu during the next submit call (it is possible Skia failed to create a
386      * subset of the semaphores). The client should not wait on these semaphores until after submit
387      * has been called, and must keep them alive until then. If this call returns
388      * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
389      * the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in with
390      * the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
391      * client is still responsible for deleting any initialized semaphores.
392      * Regardless of semaphore submission the context will still be flushed. It should be
393      * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
394      * happen. It simply means there were no semaphores submitted to the GPU. A caller should only
395      * take this as a failure if they passed in semaphores to be submitted.
396      */
397     GrSemaphoresSubmitted flush(const GrFlushInfo& info);
398 
399     void flush() { this->flush({}); }
400 
401     /**
402      * Submit outstanding work to the gpu from all previously un-submitted flushes. The return
403      * value of the submit will indicate whether or not the submission to the GPU was successful.
404      *
405      * If the call returns true, all previously passed in semaphores in flush calls will have been
406      * submitted to the GPU and they can safely be waited on. The caller should wait on those
407      * semaphores or perform some other global synchronization before deleting the semaphores.
408      *
409      * If it returns false, then those same semaphores will not have been submitted and we will not
410      * try to submit them again. The caller is free to delete the semaphores at any time.
411      *
412      * If the syncCpu flag is true this function will return once the gpu has finished with all
413      * submitted work.
414      */
415     bool submit(bool syncCpu = false);
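    // Flush/submit sketch with a completion callback; `onGpuDone` and `myCtx` are
    // illustrative client code, not part of this API:
    //
    //     GrFlushInfo info;
    //     info.fFinishedProc = onGpuDone;      // void (*)(GrGpuFinishedContext)
    //     info.fFinishedContext = myCtx;
    //     ctx->flush(info);
    //     ctx->submit(/*syncCpu=*/false);      // nothing reaches the GPU until this call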
416 
417     /**
418      * Checks whether any asynchronous work is complete and if so calls related callbacks.
419      */
420     void checkAsyncWorkCompletion();
421 
422     /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
423     // Chrome is using this!
424     void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
425     void dumpMemoryStatisticsByTag(SkTraceMemoryDump* traceMemoryDump, const GrGpuResourceTag& tag) const;
426 
427     bool supportsDistanceFieldText() const;
428 
429     void storeVkPipelineCacheData();
430 
431     /**
432      * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
433      * It is guaranteed that this backend format will be the one used by the following
434      * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
435      *
436      * The caller should check that the returned format is valid.
437      */
438     using GrRecordingContext::defaultBackendFormat;
439 
440     /**
441      * The explicitly allocated backend texture API allows clients to use Skia to create backend
442      * objects outside of Skia proper (i.e., Skia's caching system will not know about them.)
443      *
444      * It is the client's responsibility to delete all these objects (using deleteBackendTexture)
445      * before deleting the context used to create them. If the backend is Vulkan, the textures must
446      * be deleted before abandoning the context as well. Additionally, clients should only delete
447      * these objects on the thread for which that context is active.
448      *
449      * The client is responsible for ensuring synchronization between different uses
450      * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the
451      * surface, rewrapping it in an image and drawing the image will require explicit
452      * synchronization on the client's part).
453      */
454 
455      /**
456       * If possible, create an uninitialized backend texture. The client should ensure that the
457       * returned backend texture is valid.
458       * For the Vulkan backend the layout of the created VkImage will be:
459       *      VK_IMAGE_LAYOUT_UNDEFINED.
460       */
461      GrBackendTexture createBackendTexture(int width, int height,
462                                            const GrBackendFormat&,
463                                            GrMipmapped,
464                                            GrRenderable,
465                                            GrProtected = GrProtected::kNo);
466 
467      /**
468       * If possible, create an uninitialized backend texture. The client should ensure that the
469       * returned backend texture is valid.
470       * If successful, the created backend texture will be compatible with the provided
471       * SkColorType.
472       * For the Vulkan backend the layout of the created VkImage will be:
473       *      VK_IMAGE_LAYOUT_UNDEFINED.
474       */
475      GrBackendTexture createBackendTexture(int width, int height,
476                                            SkColorType,
477                                            GrMipmapped,
478                                            GrRenderable,
479                                            GrProtected = GrProtected::kNo);
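     // Sketch: create an uninitialized, renderable backend texture and hand it back when
     // done (dimensions and color type are illustrative; error handling is elided):
     //
     //     GrBackendTexture tex = ctx->createBackendTexture(
     //             256, 256, kRGBA_8888_SkColorType,
     //             GrMipmapped::kNo, GrRenderable::kYes);
     //     if (tex.isValid()) {
     //         // ... wrap it in an SkSurface/SkImage, draw, flush and submit ...
     //         ctx->deleteBackendTexture(tex);
     //     }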
480 
481      /**
482       * If possible, create a backend texture initialized to a particular color. The client should
483       * ensure that the returned backend texture is valid. The client can pass in a finishedProc
484       * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
485       * client is required to call `submit` to send the upload work to the gpu. The
486       * finishedProc will always get called even if we failed to create the GrBackendTexture.
487       * For the Vulkan backend the layout of the created VkImage will be:
488       *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
489       */
490      GrBackendTexture createBackendTexture(int width, int height,
491                                            const GrBackendFormat&,
492                                            const SkColor4f& color,
493                                            GrMipmapped,
494                                            GrRenderable,
495                                            GrProtected = GrProtected::kNo,
496                                            GrGpuFinishedProc finishedProc = nullptr,
497                                            GrGpuFinishedContext finishedContext = nullptr);
498 
499      /**
500       * If possible, create a backend texture initialized to a particular color. The client should
501       * ensure that the returned backend texture is valid. The client can pass in a finishedProc
502       * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
503       * client is required to call `submit` to send the upload work to the gpu. The
504       * finishedProc will always get called even if we failed to create the GrBackendTexture.
505       * If successful, the created backend texture will be compatible with the provided
506       * SkColorType.
507       * For the Vulkan backend the layout of the created VkImage will be:
508       *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
509       */
510      GrBackendTexture createBackendTexture(int width, int height,
511                                            SkColorType,
512                                            const SkColor4f& color,
513                                            GrMipmapped,
514                                            GrRenderable,
515                                            GrProtected = GrProtected::kNo,
516                                            GrGpuFinishedProc finishedProc = nullptr,
517                                            GrGpuFinishedContext finishedContext = nullptr);
518 
519      /**
520       * If possible, create a backend texture initialized with the provided pixmap data. The client
521       * should ensure that the returned backend texture is valid. The client can pass in a
522       * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
523       * deleted. The client is required to call `submit` to send the upload work to the gpu.
524       * The finishedProc will always get called even if we failed to create the GrBackendTexture.
525       * If successful, the created backend texture will be compatible with the provided
526       * pixmap(s). Compatible, in this case, means that the backend format will be the result
527       * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be deleted
528       * when this call returns.
529       * If numLevels is 1 a non-mipMapped texture will result. If a mipMapped texture is desired
530       * the data for all the mipmap levels must be provided. In the mipmapped case all the
531       * colortypes of the provided pixmaps must be the same. Additionally, all the miplevels
532       * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). The
533       * GrSurfaceOrigin controls whether the pixmap data is vertically flipped in the texture.
534       * Note: the pixmap's alphatypes and colorspaces are ignored.
535       * For the Vulkan backend the layout of the created VkImage will be:
536       *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
537       */
538      GrBackendTexture createBackendTexture(const SkPixmap srcData[],
539                                            int numLevels,
540                                            GrSurfaceOrigin,
541                                            GrRenderable,
542                                            GrProtected,
543                                            GrGpuFinishedProc finishedProc = nullptr,
544                                            GrGpuFinishedContext finishedContext = nullptr);
545 
546     /**
547      * Convenience version createBackendTexture() that takes just a base level pixmap.
548      */
549      GrBackendTexture createBackendTexture(const SkPixmap& srcData,
550                                            GrSurfaceOrigin textureOrigin,
551                                            GrRenderable renderable,
552                                            GrProtected isProtected,
553                                            GrGpuFinishedProc finishedProc = nullptr,
554                                            GrGpuFinishedContext finishedContext = nullptr) {
555          return this->createBackendTexture(&srcData, 1, textureOrigin, renderable, isProtected,
556                                            finishedProc, finishedContext);
557      }
558 
559     // Deprecated versions that do not take origin and assume top-left.
560     GrBackendTexture createBackendTexture(const SkPixmap srcData[],
561                                           int numLevels,
562                                           GrRenderable renderable,
563                                           GrProtected isProtected,
564                                           GrGpuFinishedProc finishedProc = nullptr,
565                                           GrGpuFinishedContext finishedContext = nullptr) {
566         return this->createBackendTexture(srcData,
567                                           numLevels,
568                                           kTopLeft_GrSurfaceOrigin,
569                                           renderable,
570                                           isProtected,
571                                           finishedProc,
572                                           finishedContext);
573     }
574     GrBackendTexture createBackendTexture(const SkPixmap& srcData,
575                                           GrRenderable renderable,
576                                           GrProtected isProtected,
577                                           GrGpuFinishedProc finishedProc = nullptr,
578                                           GrGpuFinishedContext finishedContext = nullptr) {
579         return this->createBackendTexture(&srcData,
580                                           1,
581                                           renderable,
582                                           isProtected,
583                                           finishedProc,
584                                           finishedContext);
585     }
586 
587     /**
588      * If possible, updates a backend texture to be filled to a particular color. The client should
589      * check the return value to see if the update was successful. The client can pass in a
590      * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
591      * deleted. The client is required to call `submit` to send the upload work to the gpu.
592      * The finishedProc will always get called even if we failed to update the GrBackendTexture.
593      * For the Vulkan backend after a successful update the layout of the created VkImage will be:
594      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
595      */
596     bool updateBackendTexture(const GrBackendTexture&,
597                               const SkColor4f& color,
598                               GrGpuFinishedProc finishedProc,
599                               GrGpuFinishedContext finishedContext);
600 
601     /**
602      * If possible, updates a backend texture to be filled to a particular color. The data in
603      * GrBackendTexture and passed in color is interpreted with respect to the passed in
604      * SkColorType. The client should check the return value to see if the update was successful.
605      * The client can pass in a finishedProc to be notified when the data has been uploaded by the
606      * gpu and the texture can be deleted. The client is required to call `submit` to send
607      * the upload work to the gpu. The finishedProc will always get called even if we failed to
608      * update the GrBackendTexture.
609      * For the Vulkan backend after a successful update the layout of the created VkImage will be:
610      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
611      */
612     bool updateBackendTexture(const GrBackendTexture&,
613                               SkColorType skColorType,
614                               const SkColor4f& color,
615                               GrGpuFinishedProc finishedProc,
616                               GrGpuFinishedContext finishedContext);
617 
618     /**
619      * If possible, updates a backend texture filled with the provided pixmap data. The client
620      * should check the return value to see if the update was successful. The client can pass in a
621      * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
622      * deleted. The client is required to call `submit` to send the upload work to the gpu.
623      * The finishedProc will always get called even if we failed to create the GrBackendTexture.
624      * The backend texture must be compatible with the provided pixmap(s). Compatible, in this case,
625      * means that the backend format is compatible with the base pixmap's colortype. The src data
626      * can be deleted when this call returns.
627      * If the backend texture is mip mapped, the data for all the mipmap levels must be provided.
628      * In the mipmapped case all the colortypes of the provided pixmaps must be the same.
629      * Additionally, all the miplevels must be sized correctly (please see
630      * SkMipmap::ComputeLevelSize and ComputeLevelCount). The GrSurfaceOrigin controls whether the
631      * pixmap data is vertically flipped in the texture.
632      * Note: the pixmap's alphatypes and colorspaces are ignored.
633      * For the Vulkan backend after a successful update the layout of the created VkImage will be:
634      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
635      */
636     bool updateBackendTexture(const GrBackendTexture&,
637                               const SkPixmap srcData[],
638                               int numLevels,
639                               GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin,
640                               GrGpuFinishedProc finishedProc = nullptr,
641                               GrGpuFinishedContext finishedContext = nullptr);
642 
643     /**
644      * Convenience version of updateBackendTexture that takes just a base level pixmap.
645      */
646     bool updateBackendTexture(const GrBackendTexture& texture,
647                               const SkPixmap& srcData,
648                               GrSurfaceOrigin textureOrigin = kTopLeft_GrSurfaceOrigin,
649                               GrGpuFinishedProc finishedProc = nullptr,
650                               GrGpuFinishedContext finishedContext = nullptr) {
651         return this->updateBackendTexture(texture,
652                                           &srcData,
653                                           1,
654                                           textureOrigin,
655                                           finishedProc,
656                                           finishedContext);
657     }
658 
659     // Deprecated version that does not take origin and assumes top-left.
660     bool updateBackendTexture(const GrBackendTexture& texture,
661                              const SkPixmap srcData[],
662                              int numLevels,
663                              GrGpuFinishedProc finishedProc,
664                              GrGpuFinishedContext finishedContext) {
665         return this->updateBackendTexture(texture,
666                                           srcData,
667                                           numLevels,
668                                           kTopLeft_GrSurfaceOrigin,
669                                           finishedProc,
670                                           finishedContext);
671     }
672 
673     /**
674      * Retrieve the GrBackendFormat for a given SkImage::CompressionType. This is
675      * guaranteed to match the backend format used by the following
676      * createCompressedBackendTexture methods that take a CompressionType.
677      *
678      * The caller should check that the returned format is valid.
679      */
680     using GrRecordingContext::compressedBackendFormat;
681 
682     /**
683      * If possible, create a compressed backend texture initialized to a particular color. The
684      * client should ensure that the returned backend texture is valid. The client can pass in a
685      * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
686      * deleted. The client is required to call `submit` to send the upload work to the gpu.
687      * The finishedProc will always get called even if we failed to create the GrBackendTexture.
688      * For the Vulkan backend the layout of the created VkImage will be:
689      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
690      */
691     GrBackendTexture createCompressedBackendTexture(int width, int height,
692                                                     const GrBackendFormat&,
693                                                     const SkColor4f& color,
694                                                     GrMipmapped,
695                                                     GrProtected = GrProtected::kNo,
696                                                     GrGpuFinishedProc finishedProc = nullptr,
697                                                     GrGpuFinishedContext finishedContext = nullptr);
698 
699     GrBackendTexture createCompressedBackendTexture(int width, int height,
700                                                     SkImage::CompressionType,
701                                                     const SkColor4f& color,
702                                                     GrMipmapped,
703                                                     GrProtected = GrProtected::kNo,
704                                                     GrGpuFinishedProc finishedProc = nullptr,
705                                                     GrGpuFinishedContext finishedContext = nullptr);
706 
707     /**
708      * If possible, create a backend texture initialized with the provided raw data. The client
709      * should ensure that the returned backend texture is valid. The client can pass in a
710      * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
711      * deleted. The client is required to call `submit` to send the upload work to the gpu.
712      * The finishedProc will always get called even if we failed to create the GrBackendTexture
713      * If numLevels is 1 a non-mipMapped texture will result. If a mipMapped texture is desired
714      * the data for all the mipmap levels must be provided. Additionally, all the miplevels
715      * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount).
716      * For the Vulkan backend the layout of the created VkImage will be:
717      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
718      */
719     GrBackendTexture createCompressedBackendTexture(int width, int height,
720                                                     const GrBackendFormat&,
721                                                     const void* data, size_t dataSize,
722                                                     GrMipmapped,
723                                                     GrProtected = GrProtected::kNo,
724                                                     GrGpuFinishedProc finishedProc = nullptr,
725                                                     GrGpuFinishedContext finishedContext = nullptr);
726 
727     GrBackendTexture createCompressedBackendTexture(int width, int height,
728                                                     SkImage::CompressionType,
729                                                     const void* data, size_t dataSize,
730                                                     GrMipmapped,
731                                                     GrProtected = GrProtected::kNo,
732                                                     GrGpuFinishedProc finishedProc = nullptr,
733                                                     GrGpuFinishedContext finishedContext = nullptr);
734 
735     /**
736      * If possible, updates a backend texture filled with the provided color. If the texture is
737      * mipmapped, all levels of the mip chain will be updated to have the supplied color. The client
738      * should check the return value to see if the update was successful. The client can pass in a
739      * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
740      * deleted. The client is required to call `submit` to send the upload work to the gpu.
741      * The finishedProc will always get called even if we failed to create the GrBackendTexture.
742      * For the Vulkan backend after a successful update the layout of the created VkImage will be:
743      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
744      */
745     bool updateCompressedBackendTexture(const GrBackendTexture&,
746                                         const SkColor4f& color,
747                                         GrGpuFinishedProc finishedProc,
748                                         GrGpuFinishedContext finishedContext);
749 
750     /**
751      * If possible, updates a backend texture filled with the provided raw data. The client
752      * should check the return value to see if the update was successful. The client can pass in a
753      * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
754      * deleted. The client is required to call `submit` to send the upload work to the gpu.
755      * The finishedProc will always get called even if we failed to create the GrBackendTexture.
756      * If a mipMapped texture is passed in, the data for all the mipmap levels must be provided.
757      * Additionally, all the miplevels must be sized correctly (please see
758      * SkMipmap::ComputeLevelSize and ComputeLevelCount).
759      * For the Vulkan backend after a successful update the layout of the created VkImage will be:
760      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
761      */
762     bool updateCompressedBackendTexture(const GrBackendTexture&,
763                                         const void* data,
764                                         size_t dataSize,
765                                         GrGpuFinishedProc finishedProc,
766                                         GrGpuFinishedContext finishedContext);
767 
768     /**
769      * Updates the state of the GrBackendTexture/RenderTarget to have the passed in
770      * GrBackendSurfaceMutableState. All objects that wrap the backend surface (i.e. SkSurfaces and
771      * SkImages) will also be aware of this state change. This call does not submit the state change
772      * to the gpu, but requires the client to call `submit` to send it to the GPU. The work
773      * for this call is ordered linearly with all other calls that require GrContext::submit to be
774      * called (e.g updateBackendTexture and flush). If finishedProc is not null then it will be
775      * called with finishedContext after the state transition is known to have occurred on the GPU.
776      *
777      * See GrBackendSurfaceMutableState to see what state can be set via this call.
778      *
779      * If the backend API is Vulkan, the caller can set the GrBackendSurfaceMutableState's
780      * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to
781      * tell Skia to not change those respective states.
782      *
783      * If previousState is not null and this returns true, then Skia will have filled in
784      * previousState to have the values of the state before this call.
785      */
786     bool setBackendTextureState(const GrBackendTexture&,
787                                 const GrBackendSurfaceMutableState&,
788                                 GrBackendSurfaceMutableState* previousState = nullptr,
789                                 GrGpuFinishedProc finishedProc = nullptr,
790                                 GrGpuFinishedContext finishedContext = nullptr);
791     bool setBackendRenderTargetState(const GrBackendRenderTarget&,
792                                      const GrBackendSurfaceMutableState&,
793                                      GrBackendSurfaceMutableState* previousState = nullptr,
794                                      GrGpuFinishedProc finishedProc = nullptr,
795                                      GrGpuFinishedContext finishedContext = nullptr);
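    // Sketch (Vulkan only): transition a wrapped texture's layout/queue so an external
    // consumer can use it; `backendTex` is an illustrative GrBackendTexture held by the client.
    //
    //     GrBackendSurfaceMutableState newState(VK_IMAGE_LAYOUT_GENERAL,
    //                                           VK_QUEUE_FAMILY_IGNORED);
    //     ctx->setBackendTextureState(backendTex, newState);
    //     ctx->submit();   // the transition is not sent to the GPU until submit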
796 
797     void deleteBackendTexture(GrBackendTexture);
798 
799     // This interface allows clients to pre-compile shaders and populate the runtime program cache.
800     // The key and data blobs should be the ones passed to the PersistentCache, in SkSL format.
801     //
802     // Steps to use this API:
803     //
804     // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to
805     //    something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL. This
806     //    will ensure that the blobs are SkSL, and are suitable for pre-compilation.
807     // 2) Run your application, and save all of the key/data pairs that are fed to the cache.
808     //
809     // 3) Switch over to shipping your application. Include the key/data pairs from above.
810     // 4) At startup (or any convenient time), call precompileShader for each key/data pair.
811     //    This will compile the SkSL to create a GL program, and populate the runtime cache.
812     //
813     // This is only guaranteed to work if the context/device used in step #2 are created in the
814     // same way as the one used in step #4, and the same GrContextOptions are specified.
815     // Using cached shader blobs on a different device or driver is undefined.
816     bool precompileShader(const SkData& key, const SkData& data);
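    // Warm-up sketch for step 4 above: `savedBlobs` is a hypothetical container holding the
    // key/data pairs (as sk_sp<SkData>) captured by the persistent cache in step 2.
    //
    //     for (const auto& [key, data] : savedBlobs) {
    //         ctx->precompileShader(*key, *data);
    //     }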
817 
818 #ifdef SK_ENABLE_DUMP_GPU
819     /** Returns a string with detailed information about the context & GPU, in JSON format. */
820     SkString dump() const;
821 #endif
822 
823     class DirectContextID {
824     public:
825         static GrDirectContext::DirectContextID Next();
826 
827         DirectContextID() : fID(SK_InvalidUniqueID) {}
828 
829         bool operator==(const DirectContextID& that) const { return fID == that.fID; }
830         bool operator!=(const DirectContextID& that) const { return !(*this == that); }
831 
832         void makeInvalid() { fID = SK_InvalidUniqueID; }
833         bool isValid() const { return fID != SK_InvalidUniqueID; }
834 
835     private:
836         constexpr DirectContextID(uint32_t id) : fID(id) {}
837         uint32_t fID;
838     };
839 
840     DirectContextID directContextID() const { return fDirectContextID; }
841 
842     // Provides access to functions that aren't part of the public API.
843     GrDirectContextPriv priv();
844     const GrDirectContextPriv priv() const;  // NOLINT(readability-const-return-type)
845 
846     /**
847      * Set current resource tag for gpu cache recycle.
848      */
849     void setCurrentGrResourceTag(const GrGpuResourceTag& tag);
850 
851     /**
852      * Pop resource tag.
853      */
854     void popGrResourceTag();
855 
856 
857     /**
858      * Get current resource tag for gpu cache recycle.
859      *
860      * @return all GrGpuResourceTags.
861      */
862     GrGpuResourceTag getCurrentGrResourceTag() const;
863 
864     /**
865      * Releases GrGpuResource objects and removes them from the cache by tag.
866      */
867     void releaseByTag(const GrGpuResourceTag& tag);
868 
869     /**
870      * Get all GrGpuResource tag.
871      *
872      * @return all GrGpuResourceTags.
873      */
874     std::set<GrGpuResourceTag> getAllGrGpuResourceTags() const;
875 
876     void vmaDefragment();
877     void dumpVmaStats(SkString *out);
878 
879     // OH ISSUE: get the memory information of the updated pid.
880     void getUpdatedMemoryMap(std::unordered_map<int32_t, size_t> &out);
881     // OH ISSUE: init gpu memory limit.
882     void initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size);
883     // OH ISSUE: check whether the PID is abnormal.
884     bool isPidAbnormal() const override;
885 
886     // OH ISSUE: intra frame and inter frame identification
887     void beginFrame();
888     void endFrame();
889 
890     // OH ISSUE: async memory reclaimer
891     void setGpuMemoryAsyncReclaimerSwitch(bool enabled);
892     void flushGpuMemoryInWaitQueue();
893 
894     // OH ISSUE: suppress release window
895     void setGpuCacheSuppressWindowSwitch(bool enabled);
896     void suppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived);
897 
898 protected:
899     GrDirectContext(GrBackendApi backend, const GrContextOptions& options);
900 
901     bool init() override;
902 
903     GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); }
904     skgpu::v1::SmallPathAtlasMgr* onGetSmallPathAtlasMgr();
905 
906     GrDirectContext* asDirectContext() override { return this; }
907 
908 private:
909     // This call will make sure our work on the GPU is finished and will execute any outstanding
910     // asynchronous work (e.g. calling finished procs, freeing resources, etc.) related to the
911     // outstanding work on the gpu. The main use currently for this function is when tearing down or
912     // abandoning the context.
913     //
914     // When we finish up work on the GPU it could trigger callbacks to the client. In the case we
915     // are abandoning the context we don't want the client to be able to use the GrDirectContext to
916     // issue more commands during the callback. Thus before calling this function we set the
917     // GrDirectContext's state to be abandoned. However, we need to be able to get by the abandoned
918     // check in the call to know that it is safe to execute this. The shouldExecuteWhileAbandoned
919     // bool is used for this signal.
920     void syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned);
921 
922     const DirectContextID                   fDirectContextID;
923     // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
924     // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
925     // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
926     // invoked after objects they depend upon have already been destroyed.
927     std::unique_ptr<SkTaskGroup>            fTaskGroup;
928     std::unique_ptr<GrStrikeCache>          fStrikeCache;
929     sk_sp<GrGpu>                            fGpu;
930     std::unique_ptr<GrResourceCache>        fResourceCache;
931     std::unique_ptr<GrResourceProvider>     fResourceProvider;
932 
933     bool                                    fDidTestPMConversions;
934     // true if the PM/UPM conversion succeeded; false otherwise
935     bool                                    fPMUPMConversionsRoundTrip;
936 
937     GrContextOptions::PersistentCache*      fPersistentCache;
938 
939     std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager;
940     std::unique_ptr<GrAtlasManager> fAtlasManager;
941 
942     std::unique_ptr<skgpu::v1::SmallPathAtlasMgr> fSmallPathAtlasMgr;
943 
944     friend class GrDirectContextPriv;
945 
946     using INHERITED = GrRecordingContext;
947 };
948 
949 
950 #endif
951