/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrContext_DEFINED
#define GrContext_DEFINED

#include "SkMatrix.h"
#include "SkPathEffect.h"
#include "SkTypes.h"
#include "../private/GrRecordingContext.h"
#include "GrContextOptions.h"

// We shouldn't need this, but currently Android relies on it being included transitively.
#include "SkUnPreMultiply.h"

class GrAtlasManager;
class GrBackendFormat;
class GrBackendSemaphore;
class GrCaps;
class GrContextPriv;
class GrContextThreadSafeProxy;
class GrFragmentProcessor;
struct GrGLInterface;
class GrGpu;
struct GrMockOptions;
class GrPath;
class GrRenderTargetContext;
class GrResourceCache;
class GrResourceProvider;
class GrSamplerState;
class GrSkSLFPFactoryCache;
class GrSurfaceProxy;
class GrSwizzle;
class GrTextContext;
class GrTextureProxy;
struct GrVkBackendContext;

class SkImage;
class SkSurfaceProps;
class SkTaskGroup;
class SkTraceMemoryDump;

class SK_API GrContext : public GrRecordingContext {
public:
    /**
     * Creates a GrContext for a backend context. If no GrGLInterface is provided then the result of
     * GrGLMakeNativeInterface() is used if it succeeds.
     */
    static sk_sp<GrContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&);
    static sk_sp<GrContext> MakeGL(sk_sp<const GrGLInterface>);
    static sk_sp<GrContext> MakeGL(const GrContextOptions&);
    static sk_sp<GrContext> MakeGL();
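
    // Usage sketch (illustrative, not part of this header). Assumes a native
    // GL context is already current on this thread; the zero-argument factory
    // then builds on GrGLMakeNativeInterface() and returns nullptr on failure.
    //
    //   sk_sp<GrContext> context = GrContext::MakeGL();
    //   if (!context) {
    //       // GL backend unavailable; fall back to CPU rendering.
    //   }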

    static sk_sp<GrContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&);
    static sk_sp<GrContext> MakeVulkan(const GrVkBackendContext&);

#ifdef SK_METAL
    /**
     * Makes a GrContext which uses Metal as the backend. The device parameter is an MTLDevice
     * and queue is an MTLCommandQueue which should be used by the backend. These objects must
     * have a ref on them that can be transferred to Ganesh, which will release the ref when the
     * GrContext is destroyed.
     */
    static sk_sp<GrContext> MakeMetal(void* device, void* queue, const GrContextOptions& options);
    static sk_sp<GrContext> MakeMetal(void* device, void* queue);
#endif

    static sk_sp<GrContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
    static sk_sp<GrContext> MakeMock(const GrMockOptions*);
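
    // Usage sketch: a mock context exercises Ganesh bookkeeping without a real
    // GPU, which can be handy in unit tests. Passing nullptr is assumed here to
    // select default mock options.
    //
    //   sk_sp<GrContext> mockContext = GrContext::MakeMock(nullptr);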

    ~GrContext() override;

    sk_sp<GrContextThreadSafeProxy> threadSafeProxy();

    /**
     * The GrContext normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the context that the state was modified and it should resend. Shouldn't
     * be called frequently for good performance.
     * The flag bits, state, are dependent on which backend is used by the
     * context, either GL or D3D (possible in future).
     */
    void resetContext(uint32_t state = kAll_GrBackendState);
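
    // Usage sketch: after raw backend calls change state behind Skia's back
    // (e.g. binding textures directly in GL), mark the cached state dirty
    // before drawing with Skia again. doMyRawGLRendering() is a hypothetical
    // client function.
    //
    //   doMyRawGLRendering();
    //   context->resetContext(kAll_GrBackendState);  // or a narrower flag set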

    /**
     * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which
     * the GrContext has modified the bound texture will have texture id 0 bound. This does not
     * flush the GrContext. Calling resetContext() does not change the set that will be bound
     * to texture id 0 on the next call to resetGLTextureBindings(). After this is called
     * all unit/target combinations are considered to have unmodified bindings until the GrContext
     * subsequently modifies them (meaning if this is called twice in a row with no intervening
     * GrContext usage then the second call is a no-op).
     */
    void resetGLTextureBindings();

    /**
     * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
     * usable. Call this if you have lost the associated GPU context, and thus internal texture,
     * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
     * GrContext and any of its created resource objects will not make backend 3D API calls. Content
     * rendered but not previously flushed may be lost. After this function is called all subsequent
     * calls on the GrContext will fail or be no-ops.
     *
     * The typical use case for this function is that the underlying 3D context was lost and further
     * API calls may crash.
     */
    void abandonContext() override;

    /**
     * Returns true if the context was abandoned.
     */
    using GrImageContext::abandoned;

    /**
     * This is similar to abandonContext(); however, the underlying 3D context is not yet lost and
     * the GrContext will clean up all allocated resources before returning. After returning it will
     * assume that the underlying context may no longer be valid.
     *
     * The typical use case for this function is that the client is going to destroy the 3D context
     * but can't guarantee that GrContext will be destroyed first (perhaps because it may be ref'ed
     * elsewhere by either the client or Skia objects).
     */
    virtual void releaseResourcesAndAbandonContext();
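
    // Teardown sketch: with the 3D context still valid but about to be
    // destroyed, release GPU resources first so that any later GrContext
    // destructor makes no backend calls. destroyMyNativeContext() is a
    // hypothetical client function.
    //
    //   context->releaseResourcesAndAbandonContext();
    //   destroyMyNativeContext();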

    ///////////////////////////////////////////////////////////////////////////
    // Resource Cache

    /**
     *  Return the current GPU resource cache limits.
     *
     *  @param maxResources If non-null, returns maximum number of resources that
     *                      can be held in the cache.
     *  @param maxResourceBytes If non-null, returns maximum number of bytes of
     *                          video memory that can be held in the cache.
     */
    void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;

    /**
     *  Gets the current GPU resource cache usage.
     *
     *  @param resourceCount If non-null, returns the number of resources that are held in the
     *                       cache.
     *  @param resourceBytes If non-null, returns the total number of bytes of video memory held
     *                       in the cache.
     */
    void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    /**
     *  Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
     */
    size_t getResourceCachePurgeableBytes() const;

    /**
     *  Specify the GPU resource cache limits. If the current cache exceeds either
     *  of these, it will be purged (LRU) to keep the cache within these limits.
     *
     *  @param maxResources The maximum number of resources that can be held in
     *                      the cache.
     *  @param maxResourceBytes The maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
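
    // Usage sketch: read the current budget, then raise only the byte limit.
    // The 96 MB figure is an arbitrary example, not a recommendation.
    //
    //   int maxResources;
    //   size_t maxResourceBytes;
    //   context->getResourceCacheLimits(&maxResources, &maxResourceBytes);
    //   context->setResourceCacheLimits(maxResources, 96 * 1024 * 1024);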

    /**
     * Frees GPU resources created by the context. Can be called to reduce GPU memory
     * pressure.
     */
    virtual void freeGpuResources();

    /**
     * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
     * otherwise marked for deletion, regardless of whether the context is under budget.
     */
    void performDeferredCleanup(std::chrono::milliseconds msNotUsed);

    // Temporary compatibility API for Android.
    void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
        this->performDeferredCleanup(msNotUsed);
    }
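
    // Usage sketch: from a periodic idle task, drop resources untouched for
    // the last five seconds (an arbitrary threshold); std::chrono::seconds
    // converts implicitly to milliseconds.
    //
    //   context->performDeferredCleanup(std::chrono::seconds(5));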

    /**
     * Purge unlocked resources from the cache until the provided byte count has been reached
     * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
     * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
     * resource types.
     *
     * @param bytesToPurge the desired number of bytes to be purged.
     * @param preferScratchResources If true scratch resources will be purged prior to other
     *                               resource types.
     */
    void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);

    /**
     * This entry point is intended for instances where an app has been backgrounded or
     * suspended.
     * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the
     * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
     * then all unlocked resources will be purged.
     * In either case, after the unlocked resources are purged a separate pass will be made to
     * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
     * some resources with persistent data may be purged to be under budget).
     *
     * @param scratchResourcesOnly   If true only unlocked scratch resources will be purged prior
     *                               to enforcing the budget requirements.
     */
    void purgeUnlockedResources(bool scratchResourcesOnly);
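
    // Usage sketch: on an app-backgrounded notification, drop scratch
    // resources but keep unlocked resources holding persistent data; the
    // follow-up pass still enforces the budget.
    //
    //   context->purgeUnlockedResources(/*scratchResourcesOnly=*/true);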

    /**
     * Gets the maximum supported texture size.
     */
    int maxTextureSize() const;

    /**
     * Gets the maximum supported render target size.
     */
    int maxRenderTargetSize() const;

    /**
     * Can a SkImage be created with the given color type.
     */
    bool colorTypeSupportedAsImage(SkColorType) const;

    /**
     * Can a SkSurface be created with the given color type. To check whether MSAA is supported
     * use maxSurfaceSampleCountForColorType().
     */
    bool colorTypeSupportedAsSurface(SkColorType colorType) const {
        return this->maxSurfaceSampleCountForColorType(colorType) > 0;
    }

    /**
     * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
     * rendering is supported for the color type. 0 is returned if rendering to this color type
     * is not supported at all.
     */
    int maxSurfaceSampleCountForColorType(SkColorType) const;
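
    // Usage sketch: choose a sample count for an RGBA8 surface, clamping the
    // desired count (4 here, arbitrarily) to what the backend supports; zero
    // means the color type cannot be rendered to at all.
    //
    //   int maxSamples = context->maxSurfaceSampleCountForColorType(kRGBA_8888_SkColorType);
    //   int samples = std::min(4, maxSamples);  // 0 if unsupported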

    ///////////////////////////////////////////////////////////////////////////
    // Misc.

    /**
     * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
     * executing any more commands on the GPU. Skia will take ownership of the underlying semaphores
     * and delete them once they have been signaled and waited on. If this call returns false, then
     * the GPU back-end will not wait on any passed in semaphores, and the client will still own the
     * semaphores.
     */
    bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores);

    /**
     * Call to ensure all drawing to the context has been issued to the underlying 3D API.
     */
    void flush();

    /**
     * Call to ensure all drawing to the context has been issued to the underlying 3D API. After
     * issuing all commands, numSemaphores semaphores will be signaled by the GPU. The client passes
     * in an array of numSemaphores GrBackendSemaphores. In general these GrBackendSemaphores can
     * be either initialized or not. If they are initialized, the backend uses the passed in
     * semaphore. If it is not initialized, a new semaphore is created and the GrBackendSemaphore
     * object is initialized with that semaphore.
     *
     * The client will own and be responsible for deleting the underlying semaphores that are stored
     * and returned in initialized GrBackendSemaphore objects. The GrBackendSemaphore objects
     * themselves can be deleted as soon as this function returns.
     *
     * If the backend API is OpenGL, only uninitialized GrBackendSemaphores are supported.
     * If the backend API is Vulkan, either initialized or uninitialized semaphores are supported.
     * If uninitialized, the semaphores which are created will be valid for use only with the
     * VkDevice with which they were created.
     *
     * If this call returns GrSemaphoresSubmitted::kNo, the GPU backend will not have created or
     * added any semaphores to signal on the GPU. Thus the client should not have the GPU wait on
     * any of the semaphores. However, any pending commands to the context will still be flushed.
     *
     * If a finishedProc is provided, the finishedProc will be called when all work submitted to the
     * GPU from this flush call and all previous flush calls has finished on the GPU. If the flush
     * call fails due to an error and nothing ends up getting sent to the GPU, the finished proc is
     * called immediately.
     */
    GrSemaphoresSubmitted flush(GrFlushFlags flags, int numSemaphores,
                                GrBackendSemaphore signalSemaphores[],
                                GrGpuFinishedProc finishedProc = nullptr,
                                GrGpuFinishedContext finishedContext = nullptr);
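
    // Usage sketch (Vulkan-style): flush and have Skia create and signal one
    // semaphore that other GPU work can wait on. With a default-constructed
    // (uninitialized) GrBackendSemaphore, Skia creates the semaphore and
    // fills in the object; the client then owns the underlying semaphore.
    //
    //   GrBackendSemaphore semaphore;
    //   GrSemaphoresSubmitted submitted =
    //           context->flush(kNone_GrFlushFlags, 1, &semaphore);
    //   if (submitted == GrSemaphoresSubmitted::kYes) {
    //       // Hand the semaphore to the waiting queue; delete it once
    //       // signaled and waited on.
    //   }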

    /**
     * Deprecated.
     */
    GrSemaphoresSubmitted flushAndSignalSemaphores(int numSemaphores,
                                                   GrBackendSemaphore signalSemaphores[]) {
        return this->flush(kNone_GrFlushFlags, numSemaphores, signalSemaphores);
    }

    // Provides access to functions that aren't part of the public API.
    GrContextPriv priv();
    const GrContextPriv priv() const;

    /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
    // Chrome is using this!
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    bool supportsDistanceFieldText() const;

    void storeVkPipelineCacheData();

protected:
    GrContext(GrBackendApi, const GrContextOptions&, int32_t contextID = SK_InvalidGenID);

    bool init(sk_sp<const GrCaps>, sk_sp<GrSkSLFPFactoryCache>) override;

    GrContext* asDirectContext() override { return this; }

    virtual GrAtlasManager* onGetAtlasManager() = 0;

    sk_sp<GrContextThreadSafeProxy>         fThreadSafeProxy;

private:
    // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
    // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
    // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
    // invoked after objects they depend upon have already been destroyed.
    std::unique_ptr<SkTaskGroup>            fTaskGroup;
    sk_sp<GrGpu>                            fGpu;
    GrResourceCache*                        fResourceCache;
    GrResourceProvider*                     fResourceProvider;

    bool                                    fDidTestPMConversions;
    // true if the PM/UPM conversion succeeded; false otherwise
    bool                                    fPMUPMConversionsRoundTrip;

    GrContextOptions::PersistentCache*      fPersistentCache;

    // TODO: have the GrClipStackClip use renderTargetContexts and rm this friending
    friend class GrContextPriv;

    /**
     * These functions create premul <-> unpremul effects, using the specialized round-trip effects
     * from GrConfigConversionEffect.
     */
    std::unique_ptr<GrFragmentProcessor> createPMToUPMEffect(std::unique_ptr<GrFragmentProcessor>);
    std::unique_ptr<GrFragmentProcessor> createUPMToPMEffect(std::unique_ptr<GrFragmentProcessor>);

    /**
     * Returns true if createPMToUPMEffect and createUPMToPMEffect will succeed. In other words,
     * did we find a pair of round-trip preserving conversion effects?
     */
    bool validPMUPMConversionExists();

    typedef GrRecordingContext INHERITED;
};

#endif