/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDirectContextPriv_DEFINED
#define GrDirectContextPriv_DEFINED

#include "include/core/SkSpan.h"
#include "include/core/SkSurface.h"
#include "include/gpu/GrDirectContext.h"
#include "src/gpu/AtlasTypes.h"
#include "src/gpu/ganesh/Device.h"
#include "src/gpu/ganesh/GrGpu.h"
#include "src/gpu/ganesh/GrRecordingContextPriv.h"

class GrAtlasManager;
class GrBackendFormat;
class GrBackendRenderTarget;
class GrImageInfo;
class GrMemoryPool;
class GrOnFlushCallbackObject;
class GrRenderTargetProxy;
class GrSemaphore;
class GrSurfaceProxy;

class GrDeferredDisplayList;
class SkTaskGroup;

/** Class that adds methods to GrDirectContext that are only intended for use internal to Skia.
    This class is purely a privileged window into GrDirectContext. It should never have
    additional data members or virtual methods. */
class GrDirectContextPriv : public GrRecordingContextPriv {
public:
    static sk_sp<GrDirectContext> Make(GrBackendApi backend,
                                       const GrContextOptions& options,
                                       sk_sp<GrContextThreadSafeProxy> proxy) {
        return sk_sp<GrDirectContext>(new GrDirectContext(backend, options, std::move(proxy)));
    }

    static bool Init(const sk_sp<GrDirectContext>& ctx) {
        SkASSERT(ctx);
        return ctx->init();
    }

    static void SetGpu(const sk_sp<GrDirectContext>& ctx, std::unique_ptr<GrGpu> gpu) {
        SkASSERT(ctx);
        ctx->fGpu = std::move(gpu);
    }
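
    // Illustrative sketch (assumed call site, not part of this header): a backend-specific
    // factory is expected to chain the three helpers above, e.g.
    //
    //   sk_sp<GrDirectContext> ctx = GrDirectContextPriv::Make(backend, options, proxy);
    //   GrDirectContextPriv::SetGpu(ctx, std::move(gpu));
    //   if (!GrDirectContextPriv::Init(ctx)) { ctx = nullptr; }
    //
    // 'backend', 'options', 'proxy', and 'gpu' are assumed to be supplied by that factory.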

    GrDirectContext* context() { return static_cast<GrDirectContext*>(fContext); }
    const GrDirectContext* context() const {
        return static_cast<const GrDirectContext*>(fContext);
    }

    sktext::gpu::StrikeCache* getStrikeCache() { return this->context()->fStrikeCache.get(); }

    /**
     * Finalizes all pending reads and writes to the surfaces and also performs an MSAA resolve
     * if necessary. The GrSurfaceProxy array is treated as a hint. If it is supplied, the
     * context will guarantee that the draws required for those proxies are flushed, but it
     * could do more. If no array is provided then all current work will be flushed.
     *
     * It is not necessary to call this before reading the render target via Skia/GrContext.
     * GrContext will detect when it must perform a resolve before reading pixels back from the
     * surface or using it as a texture.
     */
    GrSemaphoresSubmitted flushSurfaces(
            SkSpan<GrSurfaceProxy*>,
            SkSurfaces::BackendSurfaceAccess = SkSurfaces::BackendSurfaceAccess::kNoAccess,
            const GrFlushInfo& = {},
            const skgpu::MutableTextureState* newState = nullptr);

    /** Version of above that flushes for a single proxy. Null is allowed. */
    GrSemaphoresSubmitted flushSurface(
            GrSurfaceProxy* proxy,
            SkSurfaces::BackendSurfaceAccess access = SkSurfaces::BackendSurfaceAccess::kNoAccess,
            const GrFlushInfo& info = {},
            const skgpu::MutableTextureState* newState = nullptr) {
        size_t size = proxy ? 1 : 0;
        return this->flushSurfaces({&proxy, size}, access, info, newState);
    }
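
    // Illustrative use (assumed call site): flush a single proxy before handing its backend
    // surface to an external client, e.g.
    //
    //   GrFlushInfo info;
    //   dContext->priv().flushSurface(proxy,
    //                                 SkSurfaces::BackendSurfaceAccess::kPresent,
    //                                 info);
    //
    // 'dContext' and 'proxy' are assumed to exist at the call site.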

    /**
     * Returns true if createPMToUPMEffect and createUPMToPMEffect will succeed. In other words,
     * did we find a pair of round-trip preserving conversion effects?
     */
    bool validPMUPMConversionExists();

    /**
     * These functions create premul <-> unpremul effects, using specialized round-trip effects.
     */
    std::unique_ptr<GrFragmentProcessor> createPMToUPMEffect(std::unique_ptr<GrFragmentProcessor>);
    std::unique_ptr<GrFragmentProcessor> createUPMToPMEffect(std::unique_ptr<GrFragmentProcessor>);
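
    // Illustrative use (assumed call site): guard effect creation on the round-trip
    // check above, e.g.
    //
    //   if (priv.validPMUPMConversionExists()) {
    //       auto upmFP = priv.createPMToUPMEffect(std::move(childFP));
    //       // ... run in unpremul space, then wrap with createUPMToPMEffect() ...
    //   }
    //
    // 'priv' and 'childFP' are assumed to exist at the call site.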

    SkTaskGroup* getTaskGroup() { return this->context()->fTaskGroup.get(); }

    GrResourceProvider* resourceProvider() { return this->context()->fResourceProvider.get(); }
    const GrResourceProvider* resourceProvider() const {
        return this->context()->fResourceProvider.get();
    }

    GrResourceCache* getResourceCache() { return this->context()->fResourceCache.get(); }

    GrGpu* getGpu() { return this->context()->fGpu.get(); }
    const GrGpu* getGpu() const { return this->context()->fGpu.get(); }

    // This accessor should only ever be called by the GrOpFlushState.
    GrAtlasManager* getAtlasManager() {
        return this->context()->onGetAtlasManager();
    }

    // This accessor should only ever be called by the GrOpFlushState.
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    skgpu::ganesh::SmallPathAtlasMgr* getSmallPathAtlasMgr() {
        return this->context()->onGetSmallPathAtlasMgr();
    }
#endif

    void createDDLTask(sk_sp<const GrDeferredDisplayList>,
                       sk_sp<GrRenderTargetProxy> newDest);

    bool compile(const GrProgramDesc&, const GrProgramInfo&);

    GrContextOptions::PersistentCache* getPersistentCache() {
        return this->context()->fPersistentCache;
    }

    GrClientMappedBufferManager* clientMappedBufferManager() {
        return this->context()->fMappedBufferManager.get();
    }

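    // Tracks whether the context is currently executing a client release proc. A counter
    // rather than a bool so that nested release procs are handled correctly.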
    void setInsideReleaseProc(bool inside) {
        if (inside) {
            this->context()->fInsideReleaseProcCnt++;
        } else {
            SkASSERT(this->context()->fInsideReleaseProcCnt > 0);
            this->context()->fInsideReleaseProcCnt--;
        }
    }

#if defined(GR_TEST_UTILS)
    /** Reset GPU stats */
    void resetGpuStats() const;

    /** Prints cache stats to the string if GR_CACHE_STATS == 1. */
    void dumpCacheStats(SkString*) const;
    void dumpCacheStatsKeyValuePairs(
            skia_private::TArray<SkString>* keys, skia_private::TArray<double>* values) const;
    void printCacheStats() const;

    /** Prints GPU stats to the string if GR_GPU_STATS == 1. */
    void dumpGpuStats(SkString*) const;
    void dumpGpuStatsKeyValuePairs(
            skia_private::TArray<SkString>* keys, skia_private::TArray<double>* values) const;
    void printGpuStats() const;

    /** These are only active if GR_GPU_STATS == 1. */
    void resetContextStats();
    void dumpContextStats(SkString*) const;
    void dumpContextStatsKeyValuePairs(
            skia_private::TArray<SkString>* keys, skia_private::TArray<double>* values) const;
    void printContextStats() const;

    /** Get pointer to atlas texture for given mask format. Note that this wraps an
        actively mutating texture in an SkImage. This could yield unexpected results
        if it gets cached or used more generally. */
    sk_sp<SkImage> testingOnly_getFontAtlasImage(skgpu::MaskFormat format, unsigned int index = 0);

    void testingOnly_flushAndRemoveOnFlushCallbackObject(GrOnFlushCallbackObject*);
#endif

private:
    explicit GrDirectContextPriv(GrDirectContext* dContext) : GrRecordingContextPriv(dContext) {}
    GrDirectContextPriv& operator=(const GrDirectContextPriv&) = delete;

    // No taking addresses of this type.
    const GrDirectContextPriv* operator&() const;
    GrDirectContextPriv* operator&();

    friend class GrDirectContext; // to construct/copy this type.

    using INHERITED = GrRecordingContextPriv;
};

inline GrDirectContextPriv GrDirectContext::priv() { return GrDirectContextPriv(this); }

// NOLINTNEXTLINE(readability-const-return-type)
inline const GrDirectContextPriv GrDirectContext::priv() const {
    return GrDirectContextPriv(const_cast<GrDirectContext*>(this));
}
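
// Illustrative use (assumed call site): the Priv object is a transient view into the
// context, so it is typically chained rather than stored, e.g.
//
//   GrGpu* gpu = dContext->priv().getGpu();
//
// 'dContext' is assumed to be a valid GrDirectContext*.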

#endif