• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2020 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef GrDirectContext_DEFINED
9 #define GrDirectContext_DEFINED
10 
11 #include <set>
12 #include <unordered_map>
13 
14 #include "src/gpu/ganesh/GrGpuResource.h"
15 
16 #include "include/core/SkColor.h"
17 #include "include/core/SkRefCnt.h"
18 #include "include/core/SkTypes.h"
19 #include "include/gpu/GpuTypes.h"
20 #include "include/gpu/ganesh/GrContextOptions.h"
21 #include "include/gpu/ganesh/GrRecordingContext.h"
22 #include "include/gpu/ganesh/GrTypes.h"
23 
24 #include <chrono>
25 #include <cstddef>
26 #include <cstdint>
27 #include <memory>
28 #include <string_view>
29 
30 class GrAtlasManager;
31 class GrBackendSemaphore;
32 class GrBackendFormat;
33 class GrBackendTexture;
34 class GrBackendRenderTarget;
35 class GrClientMappedBufferManager;
36 class GrContextThreadSafeProxy;
37 class GrDirectContextPriv;
38 class GrGpu;
39 class GrResourceCache;
40 class GrResourceProvider;
41 class SkData;
42 class SkImage;
43 class SkPixmap;
44 class SkSurface;
45 class SkTaskGroup;
46 class SkTraceMemoryDump;
47 enum SkColorType : int;
48 enum class SkTextureCompressionType;
49 struct GrMockOptions;
50 struct GrD3DBackendContext; // IWYU pragma: keep
51 
52 // OH ISSUE: callback for memory protect.
53 using MemoryOverflowCalllback = std::function<void(int32_t, size_t, bool)>;
54 
55 namespace skgpu {
56     class MutableTextureState;
57 #if !defined(SK_ENABLE_OPTIMIZE_SIZE)
58     namespace ganesh { class SmallPathAtlasMgr; }
59 #endif
60 }
61 namespace sktext { namespace gpu { class StrikeCache; } }
62 namespace wgpu { class Device; } // IWYU pragma: keep
63 
64 namespace SkSurfaces {
65 enum class BackendSurfaceAccess;
66 }
67 
68 class SK_API GrDirectContext : public GrRecordingContext {
69 public:
70 #ifdef SK_DIRECT3D
71     /**
72      * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context
73      * must be kept alive until the returned GrDirectContext is first destroyed or abandoned.
74      */
75     static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&, const GrContextOptions&);
76     static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&);
77 #endif
78 
79     static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
80     static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*);
81 
82     ~GrDirectContext() override;
83 
84     /**
85      * The context normally assumes that no outsider is setting state
86      * within the underlying 3D API's context/device/whatever. This call informs
87      * the context that the state was modified and it should resend. Shouldn't
88      * be called frequently for good performance.
89      * The flag bits, state, is dependent on which backend is used by the
90      * context, either GL or D3D (possible in future).
91      */
92     void resetContext(uint32_t state = kAll_GrBackendState);
93 
94     /**
95      * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which
96      * the context has modified the bound texture will have texture id 0 bound. This does not
97      * flush the context. Calling resetContext() does not change the set that will be bound
98      * to texture id 0 on the next call to resetGLTextureBindings(). After this is called
99      * all unit/target combinations are considered to have unmodified bindings until the context
100      * subsequently modifies them (meaning if this is called twice in a row with no intervening
101      * context usage then the second call is a no-op.)
102      */
103     void resetGLTextureBindings();
104 
105     /**
106      * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
107      * usable. Call this if you have lost the associated GPU context, and thus internal texture,
108      * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
109      * context and any of its created resource objects will not make backend 3D API calls. Content
110      * rendered but not previously flushed may be lost. After this function is called all subsequent
111      * calls on the context will fail or be no-ops.
112      *
113      * The typical use case for this function is that the underlying 3D context was lost and further
114      * API calls may crash.
115      *
116      * This call is not valid to be made inside ReleaseProcs passed into SkSurface or SkImages. The
117      * call will simply fail (and assert in debug) if it is called while inside a ReleaseProc.
118      *
119      * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
120      * create the context must be kept alive even after abandoning the context. Those objects must
121      * live for the lifetime of the context object itself. The reason for this is so that
122      * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be
123      * cleaned up even in a device lost state.
124      */
125     void abandonContext() override;
126 
127     /**
128      * Returns true if the context was abandoned or if the backend specific context has gotten into
129      * an unrecoverable, lost state (e.g. in Vulkan backend if we've gotten a
130      * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this
131      * context.
132      */
133     bool abandoned() override;
134 
135     /**
136      * Returns true if the backend specific context has gotten into an unrecoverable, lost state
137      * (e.g. in Vulkan backend if we've gotten a VK_ERROR_DEVICE_LOST). If the backend context is
138      * lost, this call will also abandon this context.
139      */
140     bool isDeviceLost();
141 
142     // TODO: Remove this from public after migrating Chrome.
143     sk_sp<GrContextThreadSafeProxy> threadSafeProxy();
144 
145     /**
146      * Checks if the underlying 3D API reported an out-of-memory error. If this returns true it is
147      * reset and will return false until another out-of-memory error is reported by the 3D API. If
148      * the context is abandoned then this will report false.
149      *
150      * Currently this is implemented for:
151      *
152      * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors and
153      * therefore hide the error from Skia. Also, it is not advised to use this in combination with
154      * enabling GrContextOptions::fSkipGLErrorChecks. That option may prevent the context from ever
155      * checking the GL context for OOM.
156      *
157      * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has
158      * occurred.
159      */
160     bool oomed();
161 
162     /**
163      * This is similar to abandonContext() however the underlying 3D context is not yet lost and
164      * the context will cleanup all allocated resources before returning. After returning it will
165      * assume that the underlying context may no longer be valid.
166      *
167      * The typical use case for this function is that the client is going to destroy the 3D context
168      * but can't guarantee that context will be destroyed first (perhaps because it may be ref'ed
169      * elsewhere by either the client or Skia objects).
170      *
171      * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to
172      * create the context must be alive before calling releaseResourcesAndAbandonContext.
173      */
174     void releaseResourcesAndAbandonContext();
175 
176     ///////////////////////////////////////////////////////////////////////////
177     // Resource Cache
178 
179     /** DEPRECATED
180      *  Return the current GPU resource cache limits.
181      *
182      *  @param maxResources If non-null, will be set to -1.
183      *  @param maxResourceBytes If non-null, returns maximum number of bytes of
184      *                          video memory that can be held in the cache.
185      */
186     void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;
187 
188     /**
189      *  Return the current GPU resource cache limit in bytes.
190      */
191     size_t getResourceCacheLimit() const;
192 
193     /**
194      *  Gets the current GPU resource cache usage.
195      *
196      *  @param resourceCount If non-null, returns the number of resources that are held in the
197      *                       cache.
198      *  @param maxResourceBytes If non-null, returns the total number of bytes of video memory held
199      *                          in the cache.
200      */
201     void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;
202 
203     /**
204      *  Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
205      */
206     size_t getResourceCachePurgeableBytes() const;
207 
208     /** DEPRECATED
209      *  Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes
210      *  limit, it will be purged (LRU) to keep the cache within the limit.
211      *
212      *  @param maxResources Unused.
213      *  @param maxResourceBytes The maximum number of bytes of video memory
214      *                          that can be held in the cache.
215      */
216     void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
217 
218     /**
219      *  Specify the GPU resource cache limit. If the cache currently exceeds this limit,
220      *  it will be purged (LRU) to keep the cache within the limit.
221      *
222      *  @param maxResourceBytes The maximum number of bytes of video memory
223      *                          that can be held in the cache.
224      */
225     void setResourceCacheLimit(size_t maxResourceBytes);
226 
227     /**
228      * Frees GPU resources created by the context. Can be called to reduce GPU memory
229      * pressure.
230      */
231     void freeGpuResources();
232 
233     /**
234      * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
235      * otherwise marked for deletion, regardless of whether the context is under budget.
236 
237      *
238      * @param msNotUsed   Only unlocked resources not used in these last milliseconds will be
239      *                    cleaned up.
240      * @param opts        Specify which resources should be cleaned up. If kScratchResourcesOnly
241      *                    then, all unlocked scratch resources older than 'msNotUsed' will be purged
242      *                    but the unlocked resources with persistent data will remain. If
243      *                    kAllResources then all unlocked resources will be purged.
244      */
245 
246     void performDeferredCleanup(
247             std::chrono::milliseconds msNotUsed,
248             GrPurgeResourceOptions opts = GrPurgeResourceOptions::kAllResources);
249 
250     // Temporary compatibility API for Android.
purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed)251     void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
252         this->performDeferredCleanup(msNotUsed);
253     }
254 
255     /**
256      * Purge unlocked resources from the cache until the provided byte count has been reached
257      * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
258      * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
259      * resource types.
260      *
261      * @param maxBytesToPurge the desired number of bytes to be purged.
262      * @param preferScratchResources If true scratch resources will be purged prior to other
263      *                               resource types.
264      */
265     void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);
266     void purgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GrGpuResourceTag& tag);
267     void purgeUnlockedResourcesByPid(bool scratchResourcesOnly, const std::set<int>& exitedPidSet);
268 
269     /**
270      * This entry point is intended for instances where an app has been backgrounded or
271      * suspended.
272      * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the
273      * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
274      * then all unlocked resources will be purged.
275      * In either case, after the unlocked resources are purged a separate pass will be made to
276      * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
277      * some resources with persistent data may be purged to be under budget).
278      *
279      * @param opts If kScratchResourcesOnly only unlocked scratch resources will be purged prior
280      *             enforcing the budget requirements.
281      */
282     void purgeUnlockedResources(GrPurgeResourceOptions opts);
283 
284     /*
285      * Gets the types of GPU stats supported by this Context.
286      */
287     skgpu::GpuStatsFlags supportedGpuStats() const;
288 
289     /**
290      * Gets the maximum supported texture size.
291      */
292     using GrRecordingContext::maxTextureSize;
293 
294     /**
295      * Gets the maximum supported render target size.
296      */
297     using GrRecordingContext::maxRenderTargetSize;
298 
299     /**
300      * Can a SkImage be created with the given color type.
301      */
302     using GrRecordingContext::colorTypeSupportedAsImage;
303 
304     /**
305      * Does this context support protected content?
306      */
307     using GrRecordingContext::supportsProtectedContent;
308 
309     /**
310      * Can a SkSurface be created with the given color type. To check whether MSAA is supported
311      * use maxSurfaceSampleCountForColorType().
312      */
313     using GrRecordingContext::colorTypeSupportedAsSurface;
314 
315     /**
316      * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
317      * rendering is supported for the color type. 0 is returned if rendering to this color type
318      * is not supported at all.
319      */
320     using GrRecordingContext::maxSurfaceSampleCountForColorType;
321 
322     ///////////////////////////////////////////////////////////////////////////
323     // Misc.
324 
325     /**
326      * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
327      * executing any more commands on the GPU. We only guarantee blocking transfer and fragment
328      * shader work, but may block earlier stages as well depending on the backend. If this call
329      * returns false, then the GPU back-end will not wait on any passed in semaphores, and the
330      * client will still own the semaphores, regardless of the value of deleteSemaphoresAfterWait.
331      *
332      * If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case
333      * it is the client's responsibility to not destroy or attempt to reuse the semaphores until it
334      * knows that Skia has finished waiting on them. This can be done by using finishedProcs on
335      * flush calls.
336      *
337      * This is not supported on the GL backend.
338      */
339     bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores,
340               bool deleteSemaphoresAfterWait = true);
341 
342     /**
343      * Call to ensure all drawing to the context has been flushed and submitted to the underlying 3D
344      * API. This is equivalent to calling GrContext::flush with a default GrFlushInfo followed by
345      * GrContext::submit(sync).
346      */
347     void flushAndSubmit(GrSyncCpu sync = GrSyncCpu::kNo) {
348         this->flush(GrFlushInfo());
349         this->submit(sync);
350     }
351 
352     /**
353      * Call to ensure all drawing to the context has been flushed to underlying 3D API specific
354      * objects. A call to `submit` is always required to ensure work is actually sent to
355      * the gpu. Some specific API details:
356      *     GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
357      *         sync objects from the flush will not be valid until a submission occurs.
358      *
359      *     Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
360      *         buffer or encoder objects. However, these objects are not sent to the gpu until a
361      *         submission occurs.
362      *
363      * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
364      * submitted to the gpu during the next submit call (it is possible Skia failed to create a
365      * subset of the semaphores). The client should not wait on these semaphores until after submit
366      * has been called, and must keep them alive until then. If this call returns
367      * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
368      * the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in with
369      * the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
370      * client is still responsible for deleting any initialized semaphores.
371      * Regardless of semaphore submission the context will still be flushed. It should be
372      * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
373      * happen. It simply means there were no semaphores submitted to the GPU. A caller should only
374      * take this as a failure if they passed in semaphores to be submitted.
375      */
376     GrSemaphoresSubmitted flush(const GrFlushInfo& info);
377 
flush()378     void flush() { this->flush(GrFlushInfo()); }
379 
380     /** Flushes any pending uses of texture-backed images in the GPU backend. If the image is not
381      *  texture-backed (including promise texture images) or if the GrDirectContext does not
382      *  have the same context ID as the context backing the image then this is a no-op.
383      *  If the image was not used in any non-culled draws in the current queue of work for the
384      *  passed GrDirectContext then this is a no-op unless the GrFlushInfo contains semaphores or
385      *  a finish proc. Those are respected even when the image has not been used.
386      *  @param image    the non-null image to flush.
387      *  @param info     flush options
388      */
389     GrSemaphoresSubmitted flush(const sk_sp<const SkImage>& image, const GrFlushInfo& info);
390     void flush(const sk_sp<const SkImage>& image);
391 
392     /** Version of flush() that uses a default GrFlushInfo. Also submits the flushed work to the
393      *   GPU.
394      */
395     void flushAndSubmit(const sk_sp<const SkImage>& image);
396 
397     /** Issues pending SkSurface commands to the GPU-backed API objects and resolves any SkSurface
398      *  MSAA. A call to GrDirectContext::submit is always required to ensure work is actually sent
399      *  to the gpu. Some specific API details:
400      *      GL: Commands are actually sent to the driver, but glFlush is never called. Thus some
401      *          sync objects from the flush will not be valid until a submission occurs.
402      *
403      *      Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command
404      *          buffer or encoder objects. However, these objects are not sent to the gpu until a
405      *          submission occurs.
406      *
407      *  The work that is submitted to the GPU will be dependent on the BackendSurfaceAccess that is
408      *  passed in.
409      *
410      *  If BackendSurfaceAccess::kNoAccess is passed in all commands will be issued to the GPU.
411      *
412      *  If BackendSurfaceAccess::kPresent is passed in and the backend API is not Vulkan, it is
413      *  treated the same as kNoAccess. If the backend API is Vulkan, the VkImage that backs the
414      *  SkSurface will be transferred back to its original queue. If the SkSurface was created by
415      *  wrapping a VkImage, the queue will be set to the queue which was originally passed in on
416      *  the GrVkImageInfo. Additionally, if the original queue was not external or foreign the
417      *  layout of the VkImage will be set to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.
418      *
419      *  The GrFlushInfo describes additional options to flush. Please see documentation at
420      *  GrFlushInfo for more info.
421      *
422      *  If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be
423      *  submitted to the gpu during the next submit call (it is possible Skia failed to create a
424      *  subset of the semaphores). The client should not wait on these semaphores until after submit
425      *  has been called, but must keep them alive until then. If a submit flag was passed in with
426      *  the flush these valid semaphores can we waited on immediately. If this call returns
427      *  GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on
428      *  the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in
429      *  with the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the
430      *  client is still responsible for deleting any initialized semaphores.
431      *  Regardless of semaphore submission the context will still be flushed. It should be
432      *  emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not
433      *  happen. It simply means there were no semaphores submitted to the GPU. A caller should only
434      *  take this as a failure if they passed in semaphores to be submitted.
435      *
436      *  Pending surface commands are flushed regardless of the return result.
437      *
438      *  @param surface  The GPU backed surface to be flushed. Has no effect on a CPU-backed surface.
439      *  @param access  type of access the call will do on the backend object after flush
440      *  @param info    flush options
441      */
442     GrSemaphoresSubmitted flush(SkSurface* surface,
443                                 SkSurfaces::BackendSurfaceAccess access,
444                                 const GrFlushInfo& info);
445 
446     /**
447      *  Same as above except:
448      *
449      *  If a skgpu::MutableTextureState is passed in, at the end of the flush we will transition
450      *  the surface to be in the state requested by the skgpu::MutableTextureState. If the surface
451      *  (or SkImage or GrBackendSurface wrapping the same backend object) is used again after this
452      *  flush the state may be changed and no longer match what is requested here. This is often
453      *  used if the surface will be used for presenting or external use and the client wants backend
454      *  object to be prepped for that use. A finishedProc or semaphore on the GrFlushInfo will also
455      *  include the work for any requested state change.
456      *
457      *  If the backend API is Vulkan, the caller can set the skgpu::MutableTextureState's
458      *  VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to
459      *  tell Skia to not change those respective states.
460      *
461      *  @param surface  The GPU backed surface to be flushed. Has no effect on a CPU-backed surface.
462      *  @param info     flush options
463      *  @param newState optional state change request after flush
464      */
465     GrSemaphoresSubmitted flush(SkSurface* surface,
466                                 const GrFlushInfo& info,
467                                 const skgpu::MutableTextureState* newState = nullptr);
468 
469     /** Call to ensure all reads/writes of the surface have been issued to the underlying 3D API.
470      *  Skia will correctly order its own draws and pixel operations. This must to be used to ensure
471      *  correct ordering when the surface backing store is accessed outside Skia (e.g. direct use of
472      *  the 3D API or a windowing system). This is equivalent to
473      *  calling ::flush with a default GrFlushInfo followed by ::submit(syncCpu).
474      *
475      *  Has no effect on a CPU-backed surface.
476      */
477     void flushAndSubmit(SkSurface* surface, GrSyncCpu sync = GrSyncCpu::kNo);
478 
479     /**
480      * Flushes the given surface with the default GrFlushInfo.
481      *
482      *  Has no effect on a CPU-backed surface.
483      */
484     void flush(SkSurface* surface);
485 
486     /**
487      * Submit outstanding work to the gpu from all previously un-submitted flushes. The return
488      * value of the submit will indicate whether or not the submission to the GPU was successful.
489      *
490      * If the call returns true, all previously passed in semaphores in flush calls will have been
491      * submitted to the GPU and they can safely be waited on. The caller should wait on those
492      * semaphores or perform some other global synchronization before deleting the semaphores.
493      *
494      * If it returns false, then those same semaphores will not have been submitted and we will not
495      * try to submit them again. The caller is free to delete the semaphores at any time.
496      *
497      * If GrSubmitInfo::fSync flag is GrSyncCpu::kYes, this function will return once the gpu has
498      * finished with all submitted work.
499      *
500      * If GrSubmitInfo::fMarkBoundary flag is GrMarkFrameBoundary::kYes and the GPU supports a way
501      * to be notified about frame boundaries, then we will notify the GPU during/after the
502      * submission of work to the GPU. GrSubmitInfo::fFrameID is a frame ID that is passed to the
503      * GPU when marking a boundary. Ideally this value should be unique for each frame. Currently
504      * marking frame boundaries is only supported with the Vulkan backend and only if the
505      * VK_EXT_frame_boundary extension is available.
506      */
507     bool submit(const GrSubmitInfo&);
508 
509     bool submit(GrSyncCpu sync = GrSyncCpu::kNo) {
510         GrSubmitInfo info;
511         info.fSync = sync;
512 
513         return this->submit(info);
514     }
515 
516 
517     /**
518      * Checks whether any asynchronous work is complete and if so calls related callbacks.
519      */
520     void checkAsyncWorkCompletion();
521 
522     /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
523     // Chrome is using this!
524     void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;
525     void dumpMemoryStatisticsByTag(SkTraceMemoryDump* traceMemoryDump, GrGpuResourceTag& tag) const;
526 
527     bool supportsDistanceFieldText() const;
528 
529     void storeVkPipelineCacheData();
530 
531     /**
532      * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
533      * It is guaranteed that this backend format will be the one used by the following
534      * SkColorType and GrSurfaceCharacterization-based createBackendTexture methods.
535      *
536      * The caller should check that the returned format is valid.
537      */
538     using GrRecordingContext::defaultBackendFormat;
539 
540     /**
541      * The explicitly allocated backend texture API allows clients to use Skia to create backend
542      * objects outside of Skia proper (i.e., Skia's caching system will not know about them.)
543      *
544      * It is the client's responsibility to delete all these objects (using deleteBackendTexture)
545      * before deleting the context used to create them. If the backend is Vulkan, the textures must
546      * be deleted before abandoning the context as well. Additionally, clients should only delete
547      * these objects on the thread for which that context is active.
548      *
549      * The client is responsible for ensuring synchronization between different uses
550      * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the
551      * surface, rewrapping it in a image and drawing the image will require explicit
552      * synchronization on the client's part).
553      */
554 
555      /**
556       * If possible, create an uninitialized backend texture. The client should ensure that the
557       * returned backend texture is valid.
558       * For the Vulkan backend the layout of the created VkImage will be:
559       *      VK_IMAGE_LAYOUT_UNDEFINED.
560       */
561     GrBackendTexture createBackendTexture(int width,
562                                           int height,
563                                           const GrBackendFormat&,
564                                           skgpu::Mipmapped,
565                                           GrRenderable,
566                                           GrProtected = GrProtected::kNo,
567                                           std::string_view label = {});
568 
569     /**
570      * If possible, create an uninitialized backend texture. The client should ensure that the
571      * returned backend texture is valid.
572      * If successful, the created backend texture will be compatible with the provided
573      * SkColorType.
574      * For the Vulkan backend the layout of the created VkImage will be:
575      *      VK_IMAGE_LAYOUT_UNDEFINED.
576      */
577     GrBackendTexture createBackendTexture(int width,
578                                           int height,
579                                           SkColorType,
580                                           skgpu::Mipmapped,
581                                           GrRenderable,
582                                           GrProtected = GrProtected::kNo,
583                                           std::string_view label = {});
584 
585     /**
586      * If possible, create a backend texture initialized to a particular color. The client should
587      * ensure that the returned backend texture is valid. The client can pass in a finishedProc
588      * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
589      * client is required to call `submit` to send the upload work to the gpu. The
590      * finishedProc will always get called even if we failed to create the GrBackendTexture.
591      * For the Vulkan backend the layout of the created VkImage will be:
592      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
593      */
594     GrBackendTexture createBackendTexture(int width,
595                                           int height,
596                                           const GrBackendFormat&,
597                                           const SkColor4f& color,
598                                           skgpu::Mipmapped,
599                                           GrRenderable,
600                                           GrProtected = GrProtected::kNo,
601                                           GrGpuFinishedProc finishedProc = nullptr,
602                                           GrGpuFinishedContext finishedContext = nullptr,
603                                           std::string_view label = {});
604 
605     /**
606      * If possible, create a backend texture initialized to a particular color. The client should
607      * ensure that the returned backend texture is valid. The client can pass in a finishedProc
608      * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The
609      * client is required to call `submit` to send the upload work to the gpu. The
610      * finishedProc will always get called even if we failed to create the GrBackendTexture.
611      * If successful, the created backend texture will be compatible with the provided
612      * SkColorType.
613      * For the Vulkan backend the layout of the created VkImage will be:
614      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
615      */
616     GrBackendTexture createBackendTexture(int width,
617                                           int height,
618                                           SkColorType,
619                                           const SkColor4f& color,
620                                           skgpu::Mipmapped,
621                                           GrRenderable,
622                                           GrProtected = GrProtected::kNo,
623                                           GrGpuFinishedProc finishedProc = nullptr,
624                                           GrGpuFinishedContext finishedContext = nullptr,
625                                           std::string_view label = {});
626 
627     /**
628      * If possible, create a backend texture initialized with the provided pixmap data. The client
629      * should ensure that the returned backend texture is valid. The client can pass in a
630      * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
631      * deleted. The client is required to call `submit` to send the upload work to the gpu.
632      * The finishedProc will always get called even if we failed to create the GrBackendTexture.
633      * If successful, the created backend texture will be compatible with the provided
634      * pixmap(s). Compatible, in this case, means that the backend format will be the result
635      * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be deleted
636      * when this call returns.
637      * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
638      * the data for all the mipmap levels must be provided. In the mipmapped case all the
639      * colortypes of the provided pixmaps must be the same. Additionally, all the miplevels
640      * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). The
641      * GrSurfaceOrigin controls whether the pixmap data is vertically flipped in the texture.
642      * Note: the pixmap's alphatypes and colorspaces are ignored.
643      * For the Vulkan backend the layout of the created VkImage will be:
644      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
645      */
646     GrBackendTexture createBackendTexture(const SkPixmap srcData[],
647                                           int numLevels,
648                                           GrSurfaceOrigin,
649                                           GrRenderable,
650                                           GrProtected,
651                                           GrGpuFinishedProc finishedProc = nullptr,
652                                           GrGpuFinishedContext finishedContext = nullptr,
653                                           std::string_view label = {});
654 
655     /**
656      * Convenience version createBackendTexture() that takes just a base level pixmap.
657      */
658      GrBackendTexture createBackendTexture(const SkPixmap& srcData,
659                                            GrSurfaceOrigin textureOrigin,
660                                            GrRenderable renderable,
661                                            GrProtected isProtected,
662                                            GrGpuFinishedProc finishedProc = nullptr,
663                                            GrGpuFinishedContext finishedContext = nullptr,
664                                            std::string_view label = {});
665 
666     // Deprecated versions that do not take origin and assume top-left.
667     GrBackendTexture createBackendTexture(const SkPixmap srcData[],
668                                           int numLevels,
669                                           GrRenderable renderable,
670                                           GrProtected isProtected,
671                                           GrGpuFinishedProc finishedProc = nullptr,
672                                           GrGpuFinishedContext finishedContext = nullptr,
673                                           std::string_view label = {});
674 
675     GrBackendTexture createBackendTexture(const SkPixmap& srcData,
676                                           GrRenderable renderable,
677                                           GrProtected isProtected,
678                                           GrGpuFinishedProc finishedProc = nullptr,
679                                           GrGpuFinishedContext finishedContext = nullptr,
680                                           std::string_view label = {});
681 
682     /**
683      * If possible, updates a backend texture to be filled to a particular color. The client should
684      * check the return value to see if the update was successful. The client can pass in a
685      * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
686      * deleted. The client is required to call `submit` to send the upload work to the gpu.
687      * The finishedProc will always get called even if we failed to update the GrBackendTexture.
688      * For the Vulkan backend after a successful update the layout of the created VkImage will be:
689      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
690      */
691     bool updateBackendTexture(const GrBackendTexture&,
692                               const SkColor4f& color,
693                               GrGpuFinishedProc finishedProc,
694                               GrGpuFinishedContext finishedContext);
695 
696     /**
697      * If possible, updates a backend texture to be filled to a particular color. The data in
698      * GrBackendTexture and passed in color is interpreted with respect to the passed in
699      * SkColorType. The client should check the return value to see if the update was successful.
700      * The client can pass in a finishedProc to be notified when the data has been uploaded by the
701      * gpu and the texture can be deleted. The client is required to call `submit` to send
702      * the upload work to the gpu. The finishedProc will always get called even if we failed to
703      * update the GrBackendTexture.
704      * For the Vulkan backend after a successful update the layout of the created VkImage will be:
705      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
706      */
707     bool updateBackendTexture(const GrBackendTexture&,
708                               SkColorType skColorType,
709                               const SkColor4f& color,
710                               GrGpuFinishedProc finishedProc,
711                               GrGpuFinishedContext finishedContext);
712 
713     /**
714      * If possible, updates a backend texture filled with the provided pixmap data. The client
715      * should check the return value to see if the update was successful. The client can pass in a
716      * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
717      * deleted. The client is required to call `submit` to send the upload work to the gpu.
718      * The finishedProc will always get called even if we failed to create the GrBackendTexture.
719      * The backend texture must be compatible with the provided pixmap(s). Compatible, in this case,
720      * means that the backend format is compatible with the base pixmap's colortype. The src data
721      * can be deleted when this call returns.
722      * If the backend texture is mip mapped, the data for all the mipmap levels must be provided.
723      * In the mipmapped case all the colortypes of the provided pixmaps must be the same.
724      * Additionally, all the miplevels must be sized correctly (please see
725      * SkMipmap::ComputeLevelSize and ComputeLevelCount). The GrSurfaceOrigin controls whether the
726      * pixmap data is vertically flipped in the texture.
727      * Note: the pixmap's alphatypes and colorspaces are ignored.
728      * For the Vulkan backend after a successful update the layout of the created VkImage will be:
729      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
730      */
731     bool updateBackendTexture(const GrBackendTexture&,
732                               const SkPixmap srcData[],
733                               int numLevels,
734                               GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin,
735                               GrGpuFinishedProc finishedProc = nullptr,
736                               GrGpuFinishedContext finishedContext = nullptr);
737 
738     /**
739      * Convenience version of updateBackendTexture that takes just a base level pixmap.
740      */
741     bool updateBackendTexture(const GrBackendTexture& texture,
742                               const SkPixmap& srcData,
743                               GrSurfaceOrigin textureOrigin = kTopLeft_GrSurfaceOrigin,
744                               GrGpuFinishedProc finishedProc = nullptr,
745                               GrGpuFinishedContext finishedContext = nullptr) {
746         return this->updateBackendTexture(texture,
747                                           &srcData,
748                                           1,
749                                           textureOrigin,
750                                           finishedProc,
751                                           finishedContext);
752     }
753 
754     // Deprecated version that does not take origin and assumes top-left.
755     bool updateBackendTexture(const GrBackendTexture& texture,
756                              const SkPixmap srcData[],
757                              int numLevels,
758                              GrGpuFinishedProc finishedProc,
759                              GrGpuFinishedContext finishedContext);
760 
761     /**
762      * Retrieve the GrBackendFormat for a given SkTextureCompressionType. This is
763      * guaranteed to match the backend format used by the following
764      * createCompressedBackendTexture methods that take a CompressionType.
765      *
766      * The caller should check that the returned format is valid.
767      */
768     using GrRecordingContext::compressedBackendFormat;
769 
770     /**
771      *If possible, create a compressed backend texture initialized to a particular color. The
772      * client should ensure that the returned backend texture is valid. The client can pass in a
773      * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
774      * deleted. The client is required to call `submit` to send the upload work to the gpu.
775      * The finishedProc will always get called even if we failed to create the GrBackendTexture.
776      * For the Vulkan backend the layout of the created VkImage will be:
777      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
778      */
779     GrBackendTexture createCompressedBackendTexture(int width,
780                                                     int height,
781                                                     const GrBackendFormat&,
782                                                     const SkColor4f& color,
783                                                     skgpu::Mipmapped,
784                                                     GrProtected = GrProtected::kNo,
785                                                     GrGpuFinishedProc finishedProc = nullptr,
786                                                     GrGpuFinishedContext finishedContext = nullptr);
787 
788     GrBackendTexture createCompressedBackendTexture(int width,
789                                                     int height,
790                                                     SkTextureCompressionType,
791                                                     const SkColor4f& color,
792                                                     skgpu::Mipmapped,
793                                                     GrProtected = GrProtected::kNo,
794                                                     GrGpuFinishedProc finishedProc = nullptr,
795                                                     GrGpuFinishedContext finishedContext = nullptr);
796 
797     /**
798      * If possible, create a backend texture initialized with the provided raw data. The client
799      * should ensure that the returned backend texture is valid. The client can pass in a
800      * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
801      * deleted. The client is required to call `submit` to send the upload work to the gpu.
802      * The finishedProc will always get called even if we failed to create the GrBackendTexture
803      * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired
804      * the data for all the mipmap levels must be provided. Additionally, all the miplevels
805      * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount).
806      * For the Vulkan backend the layout of the created VkImage will be:
807      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
808      */
809     GrBackendTexture createCompressedBackendTexture(int width,
810                                                     int height,
811                                                     const GrBackendFormat&,
812                                                     const void* data,
813                                                     size_t dataSize,
814                                                     skgpu::Mipmapped,
815                                                     GrProtected = GrProtected::kNo,
816                                                     GrGpuFinishedProc finishedProc = nullptr,
817                                                     GrGpuFinishedContext finishedContext = nullptr);
818 
819     GrBackendTexture createCompressedBackendTexture(int width,
820                                                     int height,
821                                                     SkTextureCompressionType,
822                                                     const void* data,
823                                                     size_t dataSize,
824                                                     skgpu::Mipmapped,
825                                                     GrProtected = GrProtected::kNo,
826                                                     GrGpuFinishedProc finishedProc = nullptr,
827                                                     GrGpuFinishedContext finishedContext = nullptr);
828 
829     /**
830      * If possible, updates a backend texture filled with the provided color. If the texture is
831      * mipmapped, all levels of the mip chain will be updated to have the supplied color. The client
832      * should check the return value to see if the update was successful. The client can pass in a
833      * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
834      * deleted. The client is required to call `submit` to send the upload work to the gpu.
835      * The finishedProc will always get called even if we failed to create the GrBackendTexture.
836      * For the Vulkan backend after a successful update the layout of the created VkImage will be:
837      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
838      */
839     bool updateCompressedBackendTexture(const GrBackendTexture&,
840                                         const SkColor4f& color,
841                                         GrGpuFinishedProc finishedProc,
842                                         GrGpuFinishedContext finishedContext);
843 
844     /**
845      * If possible, updates a backend texture filled with the provided raw data. The client
846      * should check the return value to see if the update was successful. The client can pass in a
847      * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be
848      * deleted. The client is required to call `submit` to send the upload work to the gpu.
849      * The finishedProc will always get called even if we failed to create the GrBackendTexture.
850      * If a mipmapped texture is passed in, the data for all the mipmap levels must be provided.
851      * Additionally, all the miplevels must be sized correctly (please see
852      * SkMipMap::ComputeLevelSize and ComputeLevelCount).
853      * For the Vulkan backend after a successful update the layout of the created VkImage will be:
854      *      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
855      */
856     bool updateCompressedBackendTexture(const GrBackendTexture&,
857                                         const void* data,
858                                         size_t dataSize,
859                                         GrGpuFinishedProc finishedProc,
860                                         GrGpuFinishedContext finishedContext);
861 
862     /**
863      * Updates the state of the GrBackendTexture/RenderTarget to have the passed in
864      * skgpu::MutableTextureState. All objects that wrap the backend surface (i.e. SkSurfaces and
865      * SkImages) will also be aware of this state change. This call does not submit the state change
866      * to the gpu, but requires the client to call `submit` to send it to the GPU. The work
867      * for this call is ordered linearly with all other calls that require GrContext::submit to be
868      * called (e.g updateBackendTexture and flush). If finishedProc is not null then it will be
869      * called with finishedContext after the state transition is known to have occurred on the GPU.
870      *
871      * See skgpu::MutableTextureState to see what state can be set via this call.
872      *
873      * If the backend API is Vulkan, the caller can set the skgpu::MutableTextureState's
874      * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to
875      * tell Skia to not change those respective states.
876      *
877      * If previousState is not null and this returns true, then Skia will have filled in
878      * previousState to have the values of the state before this call.
879      */
880     bool setBackendTextureState(const GrBackendTexture&,
881                                 const skgpu::MutableTextureState&,
882                                 skgpu::MutableTextureState* previousState = nullptr,
883                                 GrGpuFinishedProc finishedProc = nullptr,
884                                 GrGpuFinishedContext finishedContext = nullptr);
885     bool setBackendRenderTargetState(const GrBackendRenderTarget&,
886                                      const skgpu::MutableTextureState&,
887                                      skgpu::MutableTextureState* previousState = nullptr,
888                                      GrGpuFinishedProc finishedProc = nullptr,
889                                      GrGpuFinishedContext finishedContext = nullptr);
890 
891     void deleteBackendTexture(const GrBackendTexture&);
892 
893     // This interface allows clients to pre-compile shaders and populate the runtime program cache.
894     // The key and data blobs should be the ones passed to the PersistentCache, in SkSL format.
895     //
896     // Steps to use this API:
897     //
898     // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to
899     //    something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL. This
900     //    will ensure that the blobs are SkSL, and are suitable for pre-compilation.
901     // 2) Run your application, and save all of the key/data pairs that are fed to the cache.
902     //
903     // 3) Switch over to shipping your application. Include the key/data pairs from above.
904     // 4) At startup (or any convenient time), call precompileShader for each key/data pair.
905     //    This will compile the SkSL to create a GL program, and populate the runtime cache.
906     //
907     // This is only guaranteed to work if the context/device used in step #2 are created in the
908     // same way as the one used in step #4, and the same GrContextOptions are specified.
909     // Using cached shader blobs on a different device or driver are undefined.
910     bool precompileShader(const SkData& key, const SkData& data);
911     void registerVulkanErrorCallback(const std::function<void()>& vulkanErrorCallback);
912     void processVulkanError();
913 
#ifdef SK_ENABLE_DUMP_GPU
    /** Returns a string with detailed information about the context & GPU, in JSON format. */
    SkString dump() const;
#endif
918 
919     class DirectContextID {
920     public:
921         static GrDirectContext::DirectContextID Next();
922 
DirectContextID()923         DirectContextID() : fID(SK_InvalidUniqueID) {}
924 
925         bool operator==(const DirectContextID& that) const { return fID == that.fID; }
926         bool operator!=(const DirectContextID& that) const { return !(*this == that); }
927 
makeInvalid()928         void makeInvalid() { fID = SK_InvalidUniqueID; }
isValid()929         bool isValid() const { return fID != SK_InvalidUniqueID; }
930 
931     private:
DirectContextID(uint32_t id)932         constexpr DirectContextID(uint32_t id) : fID(id) {}
933         uint32_t fID;
934     };
935 
directContextID()936     DirectContextID directContextID() const { return fDirectContextID; }
937 
938     // Provides access to functions that aren't part of the public API.
939     GrDirectContextPriv priv();
940     const GrDirectContextPriv priv() const;  // NOLINT(readability-const-return-type)
941 
942     /**
943      * Set current resource tag for gpu cache recycle.
944      */
945     void setCurrentGrResourceTag(const GrGpuResourceTag& tag);
946 
947     /**
948      * Pop resource tag.
949      */
950     void popGrResourceTag();
951 
952 
953     /**
954      * Get current resource tag for gpu cache recycle.
955      *
956      * @return all GrGpuResourceTags.
957      */
958     GrGpuResourceTag getCurrentGrResourceTag() const;
959 
960     /**
961      * Releases GrGpuResource objects and removes them from the cache by tag.
962      */
963     void releaseByTag(const GrGpuResourceTag& tag);
964 
965     /**
966      * Get all GrGpuResource tag.
967      *
968      * @return all GrGpuResourceTags.
969      */
970     std::set<GrGpuResourceTag> getAllGrGpuResourceTags() const;
971 
972     // OH ISSUE: get the memory information of the updated pid.
973     void getUpdatedMemoryMap(std::unordered_map<int32_t, size_t> &out);
974 
975     // OH ISSUE: init gpu memory limit.
976     void initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size);
977 
978     // OH ISSUE: check whether the PID is abnormal.
979     bool isPidAbnormal() const override;
980 
981     void vmaDefragment();
982     void dumpVmaStats(SkString *out);
983     void dumpAllResource(std::stringstream& dump) const;
984 
985 protected:
986     GrDirectContext(GrBackendApi backend,
987                     const GrContextOptions& options,
988                     sk_sp<GrContextThreadSafeProxy> proxy);
989 
990     bool init() override;
991 
onGetAtlasManager()992     GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); }
993 #if !defined(SK_ENABLE_OPTIMIZE_SIZE)
994     skgpu::ganesh::SmallPathAtlasMgr* onGetSmallPathAtlasMgr();
995 #endif
996 
asDirectContext()997     GrDirectContext* asDirectContext() override { return this; }
998 
999 private:
1000     // This call will make sure out work on the GPU is finished and will execute any outstanding
1001     // asynchronous work (e.g. calling finished procs, freeing resources, etc.) related to the
1002     // outstanding work on the gpu. The main use currently for this function is when tearing down or
1003     // abandoning the context.
1004     //
1005     // When we finish up work on the GPU it could trigger callbacks to the client. In the case we
1006     // are abandoning the context we don't want the client to be able to use the GrDirectContext to
1007     // issue more commands during the callback. Thus before calling this function we set the
1008     // GrDirectContext's state to be abandoned. However, we need to be able to get by the abaonded
1009     // check in the call to know that it is safe to execute this. The shouldExecuteWhileAbandoned
1010     // bool is used for this signal.
1011     void syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned);
1012 
1013     // This delete callback needs to be the first thing on the GrDirectContext so that it is the
1014     // last thing destroyed. The callback may signal the client to clean up things that may need
1015     // to survive the lifetime of some of the other objects on the GrDirectCotnext. So make sure
1016     // we don't call it until all else has been destroyed.
1017     class DeleteCallbackHelper {
1018     public:
DeleteCallbackHelper(GrDirectContextDestroyedContext context,GrDirectContextDestroyedProc proc)1019         DeleteCallbackHelper(GrDirectContextDestroyedContext context,
1020                              GrDirectContextDestroyedProc proc)
1021                 : fContext(context), fProc(proc) {}
1022 
~DeleteCallbackHelper()1023         ~DeleteCallbackHelper() {
1024             if (fProc) {
1025                 fProc(fContext);
1026             }
1027         }
1028 
1029     private:
1030         GrDirectContextDestroyedContext fContext;
1031         GrDirectContextDestroyedProc fProc;
1032     };
1033     std::unique_ptr<DeleteCallbackHelper> fDeleteCallbackHelper;
1034 
1035     const DirectContextID                   fDirectContextID;
1036     // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
1037     // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
1038     // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
1039     // invoked after objects they depend upon have already been destroyed.
1040     std::unique_ptr<SkTaskGroup>              fTaskGroup;
1041     std::unique_ptr<sktext::gpu::StrikeCache> fStrikeCache;
1042     std::unique_ptr<GrGpu>                    fGpu;
1043     std::unique_ptr<GrResourceCache>          fResourceCache;
1044     std::unique_ptr<GrResourceProvider>       fResourceProvider;
1045 
1046     // This is incremented before we start calling ReleaseProcs from GrSurfaces and decremented
1047     // after. A ReleaseProc may trigger code causing another resource to get freed so we to track
1048     // the count to know if we in a ReleaseProc at any level. When this is set to a value greated
1049     // than zero we will not allow abandonContext calls to be made on the context.
1050     int                                     fInsideReleaseProcCnt = 0;
1051 
1052     bool                                    fDidTestPMConversions;
1053     // true if the PM/UPM conversion succeeded; false otherwise
1054     bool                                    fPMUPMConversionsRoundTrip;
1055 
1056     GrContextOptions::PersistentCache*      fPersistentCache;
1057 
1058     std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager;
1059     std::unique_ptr<GrAtlasManager> fAtlasManager;
1060     std::function<void()> vulkanErrorCallback_;
1061 
1062 #if !defined(SK_ENABLE_OPTIMIZE_SIZE)
1063     std::unique_ptr<skgpu::ganesh::SmallPathAtlasMgr> fSmallPathAtlasMgr;
1064 #endif
1065 
1066     friend class GrDirectContextPriv;
1067 };
1068 
1069 
1070 #endif
1071