/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrContext_DEFINED
#define GrContext_DEFINED

#include "include/core/SkMatrix.h"
#include "include/core/SkPathEffect.h"
#include "include/core/SkTypes.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrContextOptions.h"
#include "include/private/GrRecordingContext.h"

// We shouldn't need this, but currently Android relies on it being included transitively.
#include "include/core/SkUnPreMultiply.h"

class GrAtlasManager;
class GrBackendSemaphore;
class GrCaps;
class GrContextPriv;
class GrContextThreadSafeProxy;
class GrFragmentProcessor;
struct GrGLInterface;
class GrGpu;
struct GrMockOptions;
class GrPath;
class GrRenderTargetContext;
class GrResourceCache;
class GrResourceProvider;
class GrSamplerState;
class GrSkSLFPFactoryCache;
class GrSurfaceProxy;
class GrSwizzle;
class GrTextContext;
class GrTextureProxy;
struct GrVkBackendContext;

class SkImage;
class SkSurfaceCharacterization;
class SkSurfaceProps;
class SkTaskGroup;
class SkTraceMemoryDump;

class SK_API GrContext : public GrRecordingContext {
public:
    /**
     * Creates a GrContext for a backend context. If no GrGLInterface is provided then the result of
     * GrGLMakeNativeInterface() is used if it succeeds.
     */
    static sk_sp<GrContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&);
    static sk_sp<GrContext> MakeGL(sk_sp<const GrGLInterface>);
    static sk_sp<GrContext> MakeGL(const GrContextOptions&);
    static sk_sp<GrContext> MakeGL();
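    // Usage sketch (illustrative, not part of the API): create a GL-backed GrContext on a
    // thread that already has a current native GL context. 'ctx' is a hypothetical local name.
    //
    //     GrContextOptions options;
    //     sk_sp<GrContext> ctx = GrContext::MakeGL(nullptr, options);  // nullptr interface falls
    //                                                                  // back to GrGLMakeNativeInterface()
    //     if (!ctx) {
    //         // No usable GL interface could be created.
    //     }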

    static sk_sp<GrContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&);
    static sk_sp<GrContext> MakeVulkan(const GrVkBackendContext&);

#ifdef SK_METAL
    /**
     * Makes a GrContext which uses Metal as the backend. The device parameter is an MTLDevice
     * and queue is an MTLCommandQueue which should be used by the backend. These objects must
     * have a ref on them that can be transferred to Ganesh, which will release the ref when the
     * GrContext is destroyed.
     */
    static sk_sp<GrContext> MakeMetal(void* device, void* queue, const GrContextOptions& options);
    static sk_sp<GrContext> MakeMetal(void* device, void* queue);
#endif

#ifdef SK_DAWN
    static sk_sp<GrContext> MakeDawn(const dawn::Device& device, const GrContextOptions& options);
    static sk_sp<GrContext> MakeDawn(const dawn::Device& device);
#endif

    static sk_sp<GrContext> MakeMock(const GrMockOptions*, const GrContextOptions&);
    static sk_sp<GrContext> MakeMock(const GrMockOptions*);

    ~GrContext() override;

    sk_sp<GrContextThreadSafeProxy> threadSafeProxy();

    /**
     * The GrContext normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the context that the state was modified and it should resend. Shouldn't
     * be called frequently for good performance.
     * The flag bits, state, are dependent on which backend is used by the
     * context, either GL or D3D (possible in future).
     */
    void resetContext(uint32_t state = kAll_GrBackendState);

    /**
     * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which
     * the GrContext has modified the bound texture will have texture id 0 bound. This does not
     * flush the GrContext. Calling resetContext() does not change the set that will be bound
     * to texture id 0 on the next call to resetGLTextureBindings(). After this is called
     * all unit/target combinations are considered to have unmodified bindings until the GrContext
     * subsequently modifies them (meaning if this is called twice in a row with no intervening
     * GrContext usage then the second call is a no-op).
     */
    void resetGLTextureBindings();

    /**
     * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
     * usable. Call this if you have lost the associated GPU context, and thus internal texture,
     * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
     * GrContext and any of its created resource objects will not make backend 3D API calls. Content
     * rendered but not previously flushed may be lost. After this function is called all subsequent
     * calls on the GrContext will fail or be no-ops.
     *
     * The typical use case for this function is that the underlying 3D context was lost and further
     * API calls may crash.
     */
    void abandonContext() override;

    /**
     * Returns true if the context was abandoned.
     */
    using GrImageContext::abandoned;

    /**
     * This is similar to abandonContext(), however the underlying 3D context is not yet lost and
     * the GrContext will clean up all allocated resources before returning. After returning it will
     * assume that the underlying context may no longer be valid.
     *
     * The typical use case for this function is that the client is going to destroy the 3D context
     * but can't guarantee that the GrContext will be destroyed first (perhaps because it may be
     * ref'ed elsewhere by either the client or Skia objects).
     */
    virtual void releaseResourcesAndAbandonContext();
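    // Usage sketch (illustrative, assuming the client controls the lifetime of the native 3D
    // context): release Skia's GPU objects before tearing down a still-valid 3D context.
    //
    //     ctx->flush();
    //     ctx->releaseResourcesAndAbandonContext();
    //     // It is now safe to destroy the native GL/Vulkan/Metal context.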

    ///////////////////////////////////////////////////////////////////////////
    // Resource Cache

    /**
     *  Return the current GPU resource cache limits.
     *
     *  @param maxResources If non-null, returns maximum number of resources that
     *                      can be held in the cache.
     *  @param maxResourceBytes If non-null, returns maximum number of bytes of
     *                          video memory that can be held in the cache.
     */
    void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;

    /**
     *  Gets the current GPU resource cache usage.
     *
     *  @param resourceCount If non-null, returns the number of resources that are held in the
     *                       cache.
     *  @param resourceBytes If non-null, returns the total number of bytes of video memory held
     *                       in the cache.
     */
    void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    /**
     *  Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources.
     */
    size_t getResourceCachePurgeableBytes() const;

    /**
     *  Specify the GPU resource cache limits. If the current cache exceeds either
     *  of these, it will be purged (LRU) to keep the cache within these limits.
     *
     *  @param maxResources The maximum number of resources that can be held in
     *                      the cache.
     *  @param maxResourceBytes The maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
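    // Usage sketch (illustrative; the 64 MB cap is an arbitrary example value): query the
    // current limits and lower only the byte budget.
    //
    //     int maxResources;
    //     size_t maxResourceBytes;
    //     ctx->getResourceCacheLimits(&maxResources, &maxResourceBytes);
    //     ctx->setResourceCacheLimits(maxResources, 64 * (1 << 20));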

    /**
     * Frees GPU resources created by the context. Can be called to reduce GPU memory
     * pressure.
     */
    virtual void freeGpuResources();

    /**
     * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are
     * otherwise marked for deletion, regardless of whether the context is under budget.
     */
    void performDeferredCleanup(std::chrono::milliseconds msNotUsed);

    // Temporary compatibility API for Android.
    void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) {
        this->performDeferredCleanup(msNotUsed);
    }

    /**
     * Purge unlocked resources from the cache until the provided byte count has been reached,
     * or we have purged all unlocked resources. The default policy is to purge in LRU order, but
     * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other
     * resource types.
     *
     * @param bytesToPurge the desired number of bytes to be purged.
     * @param preferScratchResources If true scratch resources will be purged prior to other
     *                               resource types.
     */
    void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources);

    /**
     * This entry point is intended for instances where an app has been backgrounded or
     * suspended.
     * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the
     * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false
     * then all unlocked resources will be purged.
     * In either case, after the unlocked resources are purged a separate pass will be made to
     * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true
     * some resources with persistent data may be purged to be under budget).
     *
     * @param scratchResourcesOnly   If true only unlocked scratch resources will be purged prior
     *                               to enforcing the budget requirements.
     */
    void purgeUnlockedResources(bool scratchResourcesOnly);
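    // Usage sketch (illustrative; the 5-second threshold is an arbitrary example value):
    // typical cleanup when an app is backgrounded.
    //
    //     ctx->purgeUnlockedResources(/*scratchResourcesOnly=*/true);
    //     ctx->performDeferredCleanup(std::chrono::milliseconds(5000));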

    /**
     * Gets the maximum supported texture size.
     */
    int maxTextureSize() const;

    /**
     * Gets the maximum supported render target size.
     */
    int maxRenderTargetSize() const;

    /**
     * Can a SkImage be created with the given color type?
     */
    bool colorTypeSupportedAsImage(SkColorType) const;

    /**
     * Can a SkSurface be created with the given color type? To check whether MSAA is supported
     * use maxSurfaceSampleCountForColorType().
     */
    bool colorTypeSupportedAsSurface(SkColorType colorType) const {
        return this->maxSurfaceSampleCountForColorType(colorType) > 0;
    }

    /**
     * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA
     * rendering is supported for the color type. 0 is returned if rendering to this color type
     * is not supported at all.
     */
    int maxSurfaceSampleCountForColorType(SkColorType) const;
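    // Usage sketch (illustrative): decide whether and at what sample count an RGBA8888
    // surface can be created.
    //
    //     if (ctx->colorTypeSupportedAsSurface(kRGBA_8888_SkColorType)) {
    //         int maxSamples = ctx->maxSurfaceSampleCountForColorType(kRGBA_8888_SkColorType);
    //         // Request at most 'maxSamples' when creating the SkSurface.
    //     }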

    ///////////////////////////////////////////////////////////////////////////
    // Misc.


    /**
     * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before
     * executing any more commands on the GPU. Skia will take ownership of the underlying semaphores
     * and delete them once they have been signaled and waited on. If this call returns false, then
     * the GPU back-end will not wait on any passed in semaphores, and the client will still own the
     * semaphores.
     */
    bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores);

    /**
     * Call to ensure all drawing to the context has been issued to the underlying 3D API.
     */
    void flush() {
        this->flush(GrFlushInfo(), GrPrepareForExternalIORequests());
    }

    /**
     * Call to ensure all drawing to the context has been issued to the underlying 3D API.
     *
     * If this call returns GrSemaphoresSubmitted::kNo, the GPU backend will not have created or
     * added any semaphores to signal on the GPU. Thus the client should not have the GPU wait on
     * any of the semaphores passed in with the GrFlushInfo. However, any pending commands to the
     * context will still be flushed. It should be emphasized that a return value of
     * GrSemaphoresSubmitted::kNo does not mean the flush did not happen. It simply means there were
     * no semaphores submitted to the GPU. A caller should only take this as a failure if they
     * passed in semaphores to be submitted.
     */
    GrSemaphoresSubmitted flush(const GrFlushInfo& info) {
        return this->flush(info, GrPrepareForExternalIORequests());
    }

    /**
     * Call to ensure all drawing to the context has been issued to the underlying 3D API.
     *
     * If this call returns GrSemaphoresSubmitted::kNo, the GPU backend will not have created or
     * added any semaphores to signal on the GPU. Thus the client should not have the GPU wait on
     * any of the semaphores passed in with the GrFlushInfo. However, any pending commands to the
     * context will still be flushed. It should be emphasized that a return value of
     * GrSemaphoresSubmitted::kNo does not mean the flush did not happen. It simply means there were
     * no semaphores submitted to the GPU. A caller should only take this as a failure if they
     * passed in semaphores to be submitted.
     *
     * If the GrPrepareForExternalIORequests contains valid gpu-backed SkSurfaces or SkImages, Skia
     * will put the underlying backend objects into a state that is ready for external uses. See the
     * declaration of GrPrepareForExternalIORequests for more details.
     */
    GrSemaphoresSubmitted flush(const GrFlushInfo&, const GrPrepareForExternalIORequests&);
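    // Usage sketch (illustrative; 'onGpuWorkDone' and 'myState' are hypothetical client
    // callback/context values): flush with a finished-proc so the client learns when the GPU
    // work completes.
    //
    //     GrFlushInfo info;
    //     info.fFinishedProc = onGpuWorkDone;
    //     info.fFinishedContext = myState;
    //     GrSemaphoresSubmitted submitted = ctx->flush(info);
    //     // submitted == GrSemaphoresSubmitted::kNo is only a failure if semaphores were passed in.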

    /**
     * Deprecated.
     */
    GrSemaphoresSubmitted flush(GrFlushFlags flags, int numSemaphores,
                                GrBackendSemaphore signalSemaphores[],
                                GrGpuFinishedProc finishedProc = nullptr,
                                GrGpuFinishedContext finishedContext = nullptr) {
        GrFlushInfo info;
        info.fFlags = flags;
        info.fNumSemaphores = numSemaphores;
        info.fSignalSemaphores = signalSemaphores;
        info.fFinishedProc = finishedProc;
        info.fFinishedContext = finishedContext;
        return this->flush(info);
    }

    /**
     * Deprecated.
     */
    GrSemaphoresSubmitted flushAndSignalSemaphores(int numSemaphores,
                                                   GrBackendSemaphore signalSemaphores[]) {
        GrFlushInfo info;
        info.fNumSemaphores = numSemaphores;
        info.fSignalSemaphores = signalSemaphores;
        return this->flush(info);
    }

    /**
     * Checks whether any asynchronous work is complete and if so calls related callbacks.
     */
    void checkAsyncWorkCompletion();

    // Provides access to functions that aren't part of the public API.
    GrContextPriv priv();
    const GrContextPriv priv() const;

    /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
    // Chrome is using this!
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    bool supportsDistanceFieldText() const;

    void storeVkPipelineCacheData();

    static size_t ComputeTextureSize(SkColorType type, int width, int height, GrMipMapped,
                                     bool useNextPow2 = false);

    /*
     * Retrieve the default GrBackendFormat for a given SkColorType and renderability.
     * It is guaranteed that this backend format will be the one used by the following
     * SkColorType and SkSurfaceCharacterization-based createBackendTexture methods.
     *
     * The caller should check that the returned format is valid.
     */
    GrBackendFormat defaultBackendFormat(SkColorType ct, GrRenderable renderable) const {
        return INHERITED::defaultBackendFormat(ct, renderable);
    }

   /*
    * The explicitly allocated backend texture API allows clients to use Skia to create backend
    * objects outside of Skia proper (i.e., Skia's caching system will not know about them).
    *
    * It is the client's responsibility to delete all these objects (using deleteBackendTexture)
    * before deleting the GrContext used to create them. Additionally, clients should only
    * delete these objects on the thread for which that GrContext is active.
    *
    * The client is responsible for ensuring synchronization between different uses
    * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the
    * surface, rewrapping it in an image and drawing the image will require explicit
    * synchronization on the client's part).
    */

    // If possible, create an uninitialized backend texture. The client should ensure that the
    // returned backend texture is valid.
    // For the Vulkan backend the layout of the created VkImage will be:
    //      VK_IMAGE_LAYOUT_UNDEFINED.
    GrBackendTexture createBackendTexture(int width, int height,
                                          const GrBackendFormat&,
                                          GrMipMapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo);

    // If possible, create an uninitialized backend texture. The client should ensure that the
    // returned backend texture is valid.
    // If successful, the created backend texture will be compatible with the provided
    // SkColorType.
    // For the Vulkan backend the layout of the created VkImage will be:
    //      VK_IMAGE_LAYOUT_UNDEFINED.
    GrBackendTexture createBackendTexture(int width, int height,
                                          SkColorType,
                                          GrMipMapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo);


    // If possible, create an uninitialized backend texture that is compatible with the
    // provided characterization. The client should ensure that the returned backend texture
    // is valid.
    // For the Vulkan backend the layout of the created VkImage will be:
    //      VK_IMAGE_LAYOUT_UNDEFINED.
    GrBackendTexture createBackendTexture(const SkSurfaceCharacterization& characterization);

    // If possible, create a backend texture initialized to a particular color. The client should
    // ensure that the returned backend texture is valid.
    // For the Vulkan backend the layout of the created VkImage will be:
    //      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL if renderable is kNo
    //  and VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL if renderable is kYes
    GrBackendTexture createBackendTexture(int width, int height,
                                          const GrBackendFormat&,
                                          const SkColor4f& color,
                                          GrMipMapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo);

    // If possible, create a backend texture initialized to a particular color. The client should
    // ensure that the returned backend texture is valid.
    // If successful, the created backend texture will be compatible with the provided
    // SkColorType.
    // For the Vulkan backend the layout of the created VkImage will be:
    //      VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL if renderable is kNo
    //  and VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL if renderable is kYes
    GrBackendTexture createBackendTexture(int width, int height,
                                          SkColorType,
                                          const SkColor4f& color,
                                          GrMipMapped,
                                          GrRenderable,
                                          GrProtected = GrProtected::kNo);

    // If possible, create a backend texture initialized to a particular color that is
    // compatible with the provided characterization. The client should ensure that the
    // returned backend texture is valid.
    // For the Vulkan backend the layout of the created VkImage will be:
    //      VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
    GrBackendTexture createBackendTexture(const SkSurfaceCharacterization& characterization,
                                          const SkColor4f& color);

    void deleteBackendTexture(GrBackendTexture);
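    // Usage sketch (illustrative; the 256x256 RGBA8888 settings are arbitrary example values):
    // create, use, and explicitly delete a backend texture on the context's thread.
    //
    //     GrBackendTexture tex = ctx->createBackendTexture(256, 256,
    //                                                      kRGBA_8888_SkColorType,
    //                                                      GrMipMapped::kNo,
    //                                                      GrRenderable::kYes);
    //     if (tex.isValid()) {
    //         // ... wrap 'tex' in an SkSurface or SkImage and use it ...
    //         ctx->deleteBackendTexture(tex);
    //     }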

#ifdef SK_ENABLE_DUMP_GPU
    /** Returns a string with detailed information about the context & GPU, in JSON format. */
    SkString dump() const;
#endif

protected:
    GrContext(GrBackendApi, const GrContextOptions&, int32_t contextID = SK_InvalidGenID);

    bool init(sk_sp<const GrCaps>, sk_sp<GrSkSLFPFactoryCache>) override;

    GrContext* asDirectContext() override { return this; }

    virtual GrAtlasManager* onGetAtlasManager() = 0;

    sk_sp<GrContextThreadSafeProxy>         fThreadSafeProxy;

private:
    // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed
    // after all of its users. Clients of fTaskGroup will generally want to ensure that they call
    // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being
    // invoked after objects they depend upon have already been destroyed.
    std::unique_ptr<SkTaskGroup>            fTaskGroup;
    sk_sp<GrGpu>                            fGpu;
    GrResourceCache*                        fResourceCache;
    GrResourceProvider*                     fResourceProvider;

    bool                                    fDidTestPMConversions;
    // true if the PM/UPM conversion succeeded; false otherwise
    bool                                    fPMUPMConversionsRoundTrip;

    GrContextOptions::PersistentCache*      fPersistentCache;
    GrContextOptions::ShaderErrorHandler*   fShaderErrorHandler;

    // TODO: have the GrClipStackClip use renderTargetContexts and rm this friending
    friend class GrContextPriv;

    /**
     * These functions create premul <-> unpremul effects, using the specialized round-trip effects
     * from GrConfigConversionEffect.
     */
    std::unique_ptr<GrFragmentProcessor> createPMToUPMEffect(std::unique_ptr<GrFragmentProcessor>);
    std::unique_ptr<GrFragmentProcessor> createUPMToPMEffect(std::unique_ptr<GrFragmentProcessor>);

    typedef GrRecordingContext INHERITED;
};

#endif