/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDrawingManager_DEFINED
#define GrDrawingManager_DEFINED

#include "include/core/SkSpan.h"
#include "include/core/SkSurface.h"
#include "include/private/SkTArray.h"
#include "include/private/SkTHash.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrHashMapWithCache.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrSurfaceProxy.h"

#if SK_GPU_V1
#include "src/gpu/v1/PathRenderer.h"
#include "src/gpu/v1/PathRendererChain.h"
#endif

// Enabling this will print out which path renderers are being chosen
#define GR_PATH_RENDERER_SPEW 0

class GrArenas;
class GrGpuBuffer;
class GrOnFlushCallbackObject;
class GrOpFlushState;
class GrRecordingContext;
class GrRenderTargetProxy;
class GrRenderTask;
class GrResourceAllocator;
class GrSemaphore;
class GrSurfaceProxyView;
class GrTextureResolveRenderTask;
class SkDeferredDisplayList;
namespace skgpu { namespace v1 {
    class OpsTask;
    class SoftwarePathRenderer;
}}

class GrDrawingManager {
public:
    ~GrDrawingManager();

    void freeGpuResources();

#if SK_GPU_V1
    // OpsTasks created at flush time are stored and handled differently from the others.
    sk_sp<skgpu::v1::OpsTask> newOpsTask(GrSurfaceProxyView,
                                         sk_sp<GrArenas> arenas,
                                         bool flushTimeOpsTask);
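    // A hypothetical usage sketch (the caller-side names here are illustrative, not part
    // of this API):
    //
    //   sk_sp<skgpu::v1::OpsTask> opsTask =
    //           drawingMgr->newOpsTask(std::move(writeView),     // GrSurfaceProxyView
    //                                  std::move(arenas),        // sk_sp<GrArenas>
    //                                  /*flushTimeOpsTask=*/false);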

    // Adds 'atlasTask' to the DAG and leaves it open.
    //
    // If 'previousAtlasTask' is provided, closes it and configures dependencies to guarantee
    // previousAtlasTask and all its users are completely out of service before atlasTask executes.
    void addAtlasTask(sk_sp<GrRenderTask> atlasTask, GrRenderTask* previousAtlasTask);
#endif

    // Creates a render task that can resolve MSAA and/or regenerate mipmap levels on proxies.
    // This method only adds the new render task to the list, inserting it immediately before
    // the last task in the list. It is up to the caller to call addProxy() on the returned
    // object.
    GrTextureResolveRenderTask* newTextureResolveRenderTaskBefore(const GrCaps&);

    // Creates a render task that can resolve MSAA and/or regenerate mipmap levels on the
    // passed-in proxy. The task is appended to the end of the current list of tasks.
    void newTextureResolveRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                     GrSurfaceProxy::ResolveFlags,
                                     const GrCaps&);
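
    // A hypothetical usage sketch requesting an MSAA resolve of 'proxy' ('drawingMgr' and
    // 'caps' are illustrative caller-side names):
    //
    //   drawingMgr->newTextureResolveRenderTask(std::move(proxy),
    //                                           GrSurfaceProxy::ResolveFlags::kMSAA,
    //                                           caps);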

    // Creates a new render task that will cause the GPU to wait on semaphores before executing
    // any more RenderTasks that target the proxy. It is possible for this wait to also block
    // additional work (even to other proxies) that has already been recorded or will be recorded
    // later. The only guarantee is that future work to the passed-in proxy will wait on the
    // semaphores to be signaled.
    void newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                           std::unique_ptr<std::unique_ptr<GrSemaphore>[]>,
                           int numSemaphores);
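
    // A hypothetical usage sketch; 'makeBackendSemaphore()' stands in for whatever
    // backend-specific code produces the GrSemaphores:
    //
    //   int n = 2;
    //   auto semaphores = std::make_unique<std::unique_ptr<GrSemaphore>[]>(n);
    //   semaphores[0] = makeBackendSemaphore();
    //   semaphores[1] = makeBackendSemaphore();
    //   drawingMgr->newWaitRenderTask(proxy, std::move(semaphores), n);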

    // Creates a new render task which copies the pixels from the srcProxy into the dstBuffer.
    // This is used to support the asynchronous readback API. The srcRect is the region of the
    // srcProxy to be copied. The surfaceColorType says how we should interpret the data when
    // reading back from the source. dstColorType describes how the data should be stored in the
    // dstBuffer. dstOffset is the offset into the dstBuffer where we will start writing data.
    void newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy, const SkIRect& srcRect,
                                   GrColorType surfaceColorType, GrColorType dstColorType,
                                   sk_sp<GrGpuBuffer> dstBuffer, size_t dstOffset);
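
    // A hypothetical usage sketch for an asynchronous readback of a 16x16 region (buffer
    // creation is elided; 'drawingMgr' and 'srcProxy' are illustrative caller-side names):
    //
    //   sk_sp<GrGpuBuffer> dst = /* transfer-dst buffer sized for the readback */;
    //   drawingMgr->newTransferFromRenderTask(srcProxy, SkIRect::MakeWH(16, 16),
    //                                         GrColorType::kRGBA_8888,  // source interpretation
    //                                         GrColorType::kRGBA_8888,  // layout in dst buffer
    //                                         std::move(dst), /*dstOffset=*/0);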

    // Creates a new render task which copies a pixel rectangle from src into dst. The src pixels
    // copied are specified by srcRect. They are copied to a rect of the same size in dst with its
    // top left at dstPoint. If the src rect is clipped by the src bounds then pixel values in the
    // dst rect corresponding to the clipped-away area are not overwritten. This method is not
    // guaranteed to succeed depending on the type of surface, formats, etc., and the
    // backend-specific limitations. On success the task is returned so that the caller may mark
    // it skippable if the copy is later deemed unnecessary.
    sk_sp<GrRenderTask> newCopyRenderTask(sk_sp<GrSurfaceProxy> src,
                                          SkIRect srcRect,
                                          sk_sp<GrSurfaceProxy> dst,
                                          SkIPoint dstPoint,
                                          GrSurfaceOrigin);
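
    // A hypothetical usage sketch; 'makeSkippable()' reflects the "mark it skippable" note
    // above, assuming GrRenderTask exposes such a hook:
    //
    //   sk_sp<GrRenderTask> copyTask =
    //           drawingMgr->newCopyRenderTask(srcProxy, SkIRect::MakeWH(32, 32),
    //                                         dstProxy, SkIPoint{0, 0},
    //                                         kTopLeft_GrSurfaceOrigin);
    //   if (copyTask && copyNoLongerNeeded) {
    //       copyTask->makeSkippable();
    //   }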

    // Adds a task that writes the data from the passed GrMipLevels to dst. The lifetime of the
    // pixel data in the levels should be tied to the passed SkData or the caller must flush the
    // context before the data may become invalid. srcColorType is the color type of the
    // GrMipLevels. dstColorType is the color type being used with dst and must be compatible with
    // dst's format according to GrCaps::areColorTypeAndFormatCompatible().
    bool newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
                            SkIRect rect,
                            GrColorType srcColorType,
                            GrColorType dstColorType,
                            const GrMipLevel[],
                            int levelCount);
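
    // A hypothetical usage sketch for a single-level write; per the note above, 'pixels'
    // must stay valid until the context is flushed (all caller-side names are illustrative):
    //
    //   GrMipLevel level{pixels, rowBytes, /*fOptionalStorage=*/nullptr};
    //   bool ok = drawingMgr->newWritePixelsTask(dstProxy, SkIRect::MakeWH(w, h),
    //                                            GrColorType::kRGBA_8888,
    //                                            GrColorType::kRGBA_8888,
    //                                            &level, /*levelCount=*/1);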

    GrRecordingContext* getContext() { return fContext; }

#if SK_GPU_V1
    using PathRenderer = skgpu::v1::PathRenderer;
    using PathRendererChain = skgpu::v1::PathRendererChain;

    PathRenderer* getPathRenderer(const PathRenderer::CanDrawPathArgs&,
                                  bool allowSW,
                                  PathRendererChain::DrawType,
                                  PathRenderer::StencilSupport* = nullptr);
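
    // A hypothetical usage sketch; 'args' is a filled-in CanDrawPathArgs and the DrawType
    // value is illustrative:
    //
    //   PathRenderer::StencilSupport support;
    //   PathRenderer* pr = drawingMgr->getPathRenderer(
    //           args, /*allowSW=*/true, PathRendererChain::DrawType::kColor, &support);
    //   if (!pr) { /* no renderer can draw this path; fall back or drop the draw */ }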

    PathRenderer* getSoftwarePathRenderer();

    // Returns a direct pointer to the atlas path renderer, or null if it is not supported or
    // not enabled.
    skgpu::v1::AtlasPathRenderer* getAtlasPathRenderer();

    // Returns a direct pointer to the tessellation path renderer, or null if it is not
    // supported or not enabled.
    PathRenderer* getTessellationPathRenderer();
#endif

    void flushIfNecessary();

    static bool ProgramUnitTest(GrDirectContext*, int maxStages, int maxLevels);

    GrSemaphoresSubmitted flushSurfaces(SkSpan<GrSurfaceProxy*>,
                                        SkSurface::BackendSurfaceAccess,
                                        const GrFlushInfo&,
                                        const GrBackendSurfaceMutableState* newState);
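
    // A hypothetical usage sketch flushing one surface for presentation (caller-side names
    // are illustrative):
    //
    //   GrSurfaceProxy* proxy = targetProxy.get();
    //   GrSemaphoresSubmitted submitted =
    //           drawingMgr->flushSurfaces({&proxy, 1},
    //                                     SkSurface::BackendSurfaceAccess::kPresent,
    //                                     GrFlushInfo(), /*newState=*/nullptr);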

    void addOnFlushCallbackObject(GrOnFlushCallbackObject*);

#if GR_TEST_UTILS
    void testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject*);
#if SK_GPU_V1
    PathRendererChain::Options testingOnly_getOptionsForPathRendererChain() {
        return fOptionsForPathRendererChain;
    }
#endif
#endif

    GrRenderTask* getLastRenderTask(const GrSurfaceProxy*) const;
    skgpu::v1::OpsTask* getLastOpsTask(const GrSurfaceProxy*) const;
    void setLastRenderTask(const GrSurfaceProxy*, GrRenderTask*);

    void moveRenderTasksToDDL(SkDeferredDisplayList* ddl);
    void createDDLTask(sk_sp<const SkDeferredDisplayList>,
                       sk_sp<GrRenderTargetProxy> newDest,
                       SkIPoint offset);
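
    // A hypothetical sketch of the deferred-display-list flow these two entry points serve
    // (recording and replay normally happen on different contexts):
    //
    //   // Recording side: hand the recorded tasks off to the DDL.
    //   recordingDrawingMgr->moveRenderTasksToDDL(ddl.get());
    //
    //   // Replay side: wrap the DDL in a task that draws into 'newDest' at 'offset'.
    //   replayDrawingMgr->createDDLTask(std::move(ddl), std::move(newDest), SkIPoint{0, 0});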

private:
#if SK_GPU_V1
    GrDrawingManager(GrRecordingContext*,
                     const PathRendererChain::Options&,
                     bool reduceOpsTaskSplitting);
#else
    GrDrawingManager(GrRecordingContext*, bool reduceOpsTaskSplitting);
#endif

    bool wasAbandoned() const;

    void closeActiveOpsTask();

    // Returns true if any GrRenderTasks were actually executed; false otherwise.
    bool executeRenderTasks(GrOpFlushState*);

    void removeRenderTasks();

    void sortTasks();

    // Attempts to reorder tasks to reduce render passes, and checks the memory budget of the
    // resulting intervals. Returns whether the reordering was successful and the memory budget
    // is acceptable. If it returns true, fDAG has been updated to reflect the reordered tasks.
    bool reorderTasks(GrResourceAllocator*);

    void closeAllTasks();

    GrRenderTask* appendTask(sk_sp<GrRenderTask>);
    GrRenderTask* insertTaskBeforeLast(sk_sp<GrRenderTask>);

    bool flush(SkSpan<GrSurfaceProxy*> proxies,
               SkSurface::BackendSurfaceAccess access,
               const GrFlushInfo&,
               const GrBackendSurfaceMutableState* newState);

    bool submitToGpu(bool syncToCpu);

    SkDEBUGCODE(void validate() const);

    friend class GrDirectContext; // access to: flush & cleanup
    friend class GrDirectContextPriv; // access to: flush
    friend class GrOnFlushResourceProvider; // this is just a shallow wrapper around this class
    friend class GrRecordingContext;  // access to: ctor
    friend class SkImage; // for access to: flush

    static const int kNumPixelGeometries = 5; // The different pixel geometries
    static const int kNumDFTOptions = 2;      // DFT or no DFT

    GrRecordingContext*                      fContext;

    // This cache is used by both the vertex and index pools. It reuses memory across multiple
    // flushes.
    sk_sp<GrBufferAllocPool::CpuBufferCache> fCpuBufferCache;

    SkTArray<sk_sp<GrRenderTask>>            fDAG;
    skgpu::v1::OpsTask*                      fActiveOpsTask = nullptr;
    // These are the IDs of the opsTasks currently being flushed (in internalFlush). They are
    // only stored here to prevent memory thrashing.
    SkSTArray<8, uint32_t, true>             fFlushingRenderTaskIDs;
    // These are the new renderTasks generated by the onFlush CBs
    SkSTArray<4, sk_sp<GrRenderTask>>        fOnFlushRenderTasks;

#if SK_GPU_V1
    PathRendererChain::Options               fOptionsForPathRendererChain;
    std::unique_ptr<PathRendererChain>       fPathRendererChain;
    sk_sp<skgpu::v1::SoftwarePathRenderer>   fSoftwarePathRenderer;
#endif

    GrTokenTracker                           fTokenTracker;
    bool                                     fFlushing = false;
    const bool                               fReduceOpsTaskSplitting;

    SkTArray<GrOnFlushCallbackObject*>       fOnFlushCBObjects;

    struct SurfaceIDKeyTraits {
        static uint32_t GetInvalidKey() {
            return GrSurfaceProxy::UniqueID::InvalidID().asUInt();
        }
    };

    GrHashMapWithCache<uint32_t, GrRenderTask*, SurfaceIDKeyTraits, GrCheapHash> fLastRenderTasks;
};

#endif