• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDirectContextPriv_DEFINED
#define GrDirectContextPriv_DEFINED

#include "include/core/SkSpan.h"
#include "include/core/SkSurface.h"
#include "include/gpu/GrDirectContext.h"
#include "src/gpu/AtlasTypes.h"
#include "src/gpu/ganesh/Device_v1.h"
#include "src/gpu/ganesh/GrRecordingContextPriv.h"

class GrAtlasManager;
class GrBackendFormat;
class GrBackendRenderTarget;
class GrImageInfo;
class GrMemoryPool;
class GrOnFlushCallbackObject;
class GrRenderTargetProxy;
class GrSemaphore;
class GrSurfaceProxy;

class SkDeferredDisplayList;
class SkTaskGroup;

/** Class that adds methods to GrDirectContext that are only intended for use internal to Skia.
    This class is purely a privileged window into GrDirectContext. It should never have additional
    data members or virtual methods. */
class GrDirectContextPriv : public GrRecordingContextPriv {
public:
    // The downcast is safe: this privileged view is only ever constructed by
    // GrDirectContext::priv() (see friend declaration below), so fContext is
    // always a GrDirectContext.
    GrDirectContext* context() { return static_cast<GrDirectContext*>(fContext); }
    const GrDirectContext* context() const { return static_cast<const GrDirectContext*>(fContext); }

    // GPU text strike cache owned by the direct context.
    sktext::gpu::StrikeCache* getStrikeCache() { return this->context()->fStrikeCache.get(); }

    /**
     * Finalizes all pending reads and writes to the surfaces and also performs an MSAA resolve
     * if necessary. The GrSurfaceProxy array is treated as a hint. If it is supplied the context
     * will guarantee that the draws required for those proxies are flushed but it could do more.
     * If no array is provided then all current work will be flushed.
     *
     * It is not necessary to call this before reading the render target via Skia/GrContext.
     * GrContext will detect when it must perform a resolve before reading pixels back from the
     * surface or using it as a texture.
     */
    GrSemaphoresSubmitted flushSurfaces(
                SkSpan<GrSurfaceProxy*>,
                SkSurface::BackendSurfaceAccess = SkSurface::BackendSurfaceAccess::kNoAccess,
                const GrFlushInfo& = {},
                const skgpu::MutableTextureState* newState = nullptr);

    /** Version of above that flushes for a single proxy. Null is allowed. */
    GrSemaphoresSubmitted flushSurface(
                GrSurfaceProxy* proxy,
                SkSurface::BackendSurfaceAccess access = SkSurface::BackendSurfaceAccess::kNoAccess,
                const GrFlushInfo& info = {},
                const skgpu::MutableTextureState* newState = nullptr) {
        // A null proxy produces a zero-length span, which flushSurfaces treats as
        // "flush all current work" per its contract above.
        size_t size = proxy ? 1 : 0;
        return this->flushSurfaces({&proxy, size}, access, info, newState);
    }

    /**
     * Returns true if createPMToUPMEffect and createUPMToPMEffect will succeed. In other words,
     * did we find a pair of round-trip preserving conversion effects?
     */
    bool validPMUPMConversionExists();

    /**
     * These functions create premul <-> unpremul effects, using specialized round-trip effects.
     */
    std::unique_ptr<GrFragmentProcessor> createPMToUPMEffect(std::unique_ptr<GrFragmentProcessor>);
    std::unique_ptr<GrFragmentProcessor> createUPMToPMEffect(std::unique_ptr<GrFragmentProcessor>);

    // Task group owned by the direct context; .get() may be null — confirm lifetime
    // and nullability against GrDirectContext's options handling.
    SkTaskGroup* getTaskGroup() { return this->context()->fTaskGroup.get(); }

    GrResourceProvider* resourceProvider() { return this->context()->fResourceProvider.get(); }
    const GrResourceProvider* resourceProvider() const {
        return this->context()->fResourceProvider.get();
    }

    GrResourceCache* getResourceCache() { return this->context()->fResourceCache.get(); }

    GrGpu* getGpu() { return this->context()->fGpu.get(); }
    const GrGpu* getGpu() const { return this->context()->fGpu.get(); }

    // This accessor should only ever be called by the GrOpFlushState.
    GrAtlasManager* getAtlasManager() {
        return this->context()->onGetAtlasManager();
    }

    // This accessor should only ever be called by the GrOpFlushState.
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    skgpu::v1::SmallPathAtlasMgr* getSmallPathAtlasMgr() {
        return this->context()->onGetSmallPathAtlasMgr();
    }
#endif

    // Creates a task that replays the deferred display list into newDest at the
    // given offset. Semantics live in the .cpp.
    void createDDLTask(sk_sp<const SkDeferredDisplayList>,
                       sk_sp<GrRenderTargetProxy> newDest,
                       SkIPoint offset);

    // Pre-compiles the program described by the desc/info pair; implementation is
    // in the .cpp — NOTE(review): presumably returns whether compilation succeeded.
    bool compile(const GrProgramDesc&, const GrProgramInfo&);

    // Raw (non-owning) pointer to the client-supplied persistent shader cache.
    GrContextOptions::PersistentCache* getPersistentCache() {
        return this->context()->fPersistentCache;
    }

    GrClientMappedBufferManager* clientMappedBufferManager() {
        return this->context()->fMappedBufferManager.get();
    }

    // Tracks nesting of client release-proc callbacks with a counter on the context;
    // callers bracket each callback with setInsideReleaseProc(true/false).
    void setInsideReleaseProc(bool inside) {
        if (inside) {
            this->context()->fInsideReleaseProcCnt++;
        } else {
            SkASSERT(this->context()->fInsideReleaseProcCnt > 0);
            this->context()->fInsideReleaseProcCnt--;
        }
    }

#if GR_TEST_UTILS
    /** Reset GPU stats */
    void resetGpuStats() const;

    /** Prints cache stats to the string if GR_CACHE_STATS == 1. */
    void dumpCacheStats(SkString*) const;
    void dumpCacheStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
    void printCacheStats() const;

    /** Prints GPU stats to the string if GR_GPU_STATS == 1. */
    void dumpGpuStats(SkString*) const;
    void dumpGpuStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
    void printGpuStats() const;

    /** These are only active if GR_GPU_STATS == 1. */
    void resetContextStats();
    void dumpContextStats(SkString*) const;
    void dumpContextStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
    void printContextStats() const;

    /** Get pointer to atlas texture for given mask format. Note that this wraps an
        actively mutating texture in an SkImage. This could yield unexpected results
        if it gets cached or used more generally. */
    sk_sp<SkImage> testingOnly_getFontAtlasImage(skgpu::MaskFormat format, unsigned int index = 0);

    void testingOnly_flushAndRemoveOnFlushCallbackObject(GrOnFlushCallbackObject*);
#endif

private:
    explicit GrDirectContextPriv(GrDirectContext* dContext) : GrRecordingContextPriv(dContext) {}
    // Copy-assignment is deleted, but the implicit copy constructor remains so that
    // GrDirectContext::priv() can return this view by value.
    GrDirectContextPriv& operator=(const GrDirectContextPriv&) = delete;

    // No taking addresses of this type.
    const GrDirectContextPriv* operator&() const;
    GrDirectContextPriv* operator&();

    friend class GrDirectContext; // to construct/copy this type.

    using INHERITED = GrRecordingContextPriv;
};

// Returns the privileged view by value; it is a thin wrapper around 'this' and
// carries no state of its own (address-of is disabled on GrDirectContextPriv).
inline GrDirectContextPriv GrDirectContext::priv() { return GrDirectContextPriv(this); }

// Const overload. The const_cast is confined here; callers receive a const
// GrDirectContextPriv, whose const methods only expose const access.
// NOLINTNEXTLINE(readability-const-return-type)
inline const GrDirectContextPriv GrDirectContext::priv() const {
    return GrDirectContextPriv(const_cast<GrDirectContext*>(this));
}

#endif // GrDirectContextPriv_DEFINED