/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDrawingManager_DEFINED
#define GrDrawingManager_DEFINED

#include "include/core/SkSpan.h"
#include "include/core/SkSurface.h"
#include "include/private/SkTArray.h"
#include "include/private/SkTHash.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrHashMapWithCache.h"
#include "src/gpu/GrPathRenderer.h"
#include "src/gpu/GrPathRendererChain.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrSurfaceProxy.h"

// Enabling this will print out which path renderers are being chosen
#define GR_PATH_RENDERER_SPEW 0

class GrArenas;
class GrCoverageCountingPathRenderer;
class GrGpuBuffer;
class GrOnFlushCallbackObject;
class GrOpFlushState;
class GrOpsTask;
class GrRecordingContext;
class GrRenderTargetProxy;
class GrRenderTask;
class GrResourceAllocator;
class GrSemaphore;
class GrSoftwarePathRenderer;
class GrSurfaceContext;
class GrSurfaceDrawContext;
class GrSurfaceProxyView;
class GrTextureResolveRenderTask;
class SkDeferredDisplayList;

class GrDrawingManager {
public:
    ~GrDrawingManager();

    void freeGpuResources();

    // OpsTasks created at flush time are stored and handled differently from the others.
    sk_sp<GrOpsTask> newOpsTask(GrSurfaceProxyView,
                                sk_sp<GrArenas> arenas,
                                bool flushTimeOpsTask);

    // Create a render task that can resolve MSAA and/or regenerate mipmap levels on proxies. This
    // method will only add the new render task to the list. It is up to the caller to call
    // addProxy() on the returned object.
    GrTextureResolveRenderTask* newTextureResolveRenderTask(const GrCaps&);
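
    // Illustrative sketch of the expected calling pattern (drawingMgr, proxy, resolveFlags, and
    // caps are placeholder names, and the addProxy() arguments shown are an assumption; see
    // GrTextureResolveRenderTask for the actual signature):
    //
    //   GrTextureResolveRenderTask* resolveTask = drawingMgr->newTextureResolveRenderTask(caps);
    //   resolveTask->addProxy(drawingMgr, std::move(proxy), resolveFlags, caps);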

    // Create a new render task that will cause the gpu to wait on semaphores before executing any
    // more RenderTasks that target proxy. It is possible for this wait to also block additional
    // work (even to other proxies) that has already been recorded or will be recorded later. The
    // only guarantee is that future work to the passed in proxy will wait on the semaphores to be
    // signaled.
    void newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                           std::unique_ptr<std::unique_ptr<GrSemaphore>[]>,
                           int numSemaphores);
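
    // Minimal sketch of how a caller might package semaphores for this call (drawingMgr and proxy
    // are placeholders; wrapping backend semaphores into GrSemaphore objects is assumed to happen
    // elsewhere):
    //
    //   int numSemaphores = 2;
    //   std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores(
    //           new std::unique_ptr<GrSemaphore>[numSemaphores]);
    //   semaphores[0] = /* wrapped backend semaphore */;
    //   semaphores[1] = /* wrapped backend semaphore */;
    //   drawingMgr->newWaitRenderTask(proxy, std::move(semaphores), numSemaphores);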

    // Create a new render task which copies the pixels from the srcProxy into the dstBuffer. This
    // is used to support the asynchronous readback API. The srcRect is the region of the srcProxy
    // to be copied. surfaceColorType says how we should interpret the data when reading back from
    // the source. dstColorType describes how the data should be stored in the dstBuffer. dstOffset
    // is the offset into the dstBuffer where we will start writing data.
    void newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy, const SkIRect& srcRect,
                                   GrColorType surfaceColorType, GrColorType dstColorType,
                                   sk_sp<GrGpuBuffer> dstBuffer, size_t dstOffset);
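
    // Illustrative sketch of an RGBA_8888 readback of a 32x32 region into the start of a buffer
    // (drawingMgr, srcProxy, and dstBuffer are placeholders):
    //
    //   drawingMgr->newTransferFromRenderTask(srcProxy, SkIRect::MakeWH(32, 32),
    //                                         GrColorType::kRGBA_8888, GrColorType::kRGBA_8888,
    //                                         dstBuffer, /*dstOffset=*/0);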

    // Creates a new render task which copies a pixel rectangle from src into dst. The src pixels
    // copied are specified by srcRect. They are copied to a rect of the same size in dst with top
    // left at dstPoint. If the src rect is clipped by the src bounds then pixel values in the dst
    // rect corresponding to the area clipped by the src rect are not overwritten. This method is
    // not guaranteed to succeed depending on the type of surface, formats, etc., and the
    // backend-specific limitations. On success the task is returned so that the caller may mark it
    // skippable if the copy is later deemed unnecessary.
    sk_sp<GrRenderTask> newCopyRenderTask(sk_sp<GrSurfaceProxy> src,
                                          SkIRect srcRect,
                                          sk_sp<GrSurfaceProxy> dst,
                                          SkIPoint dstPoint,
                                          GrSurfaceOrigin);
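
    // Illustrative sketch (drawingMgr, src, and dst are placeholders): copy a 64x64 region from
    // the origin of src to (16, 16) in dst, falling back if the copy could not be recorded.
    //
    //   sk_sp<GrRenderTask> copyTask = drawingMgr->newCopyRenderTask(src, SkIRect::MakeWH(64, 64),
    //                                                                dst, SkIPoint::Make(16, 16),
    //                                                                kTopLeft_GrSurfaceOrigin);
    //   if (!copyTask) {
    //       // The copy could not be performed; fall back to a draw-based copy.
    //   }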

    // Adds a task that writes the data from the passed GrMipLevels to dst. The lifetime of the
    // pixel data in the levels should be tied to the passed SkData or the caller must flush the
    // context before the data may become invalid. srcColorType is the color type of the
    // GrMipLevels. dstColorType is the color type being used with dst and must be compatible with
    // dst's format according to GrCaps::areColorTypeAndFormatCompatible().
    bool newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
                            SkIRect rect,
                            GrColorType srcColorType,
                            GrColorType dstColorType,
                            const GrMipLevel[],
                            int levelCount);
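
    // Minimal sketch of a single-level upload, assuming GrMipLevel's fPixels/fRowBytes fields and
    // placeholder names (drawingMgr, dst, pixels, rowBytes, w, h). Per the comment above, 'pixels'
    // must remain valid until the context is flushed:
    //
    //   GrMipLevel level = { pixels, rowBytes };
    //   bool recorded = drawingMgr->newWritePixelsTask(dst, SkIRect::MakeWH(w, h),
    //                                                  GrColorType::kRGBA_8888,
    //                                                  GrColorType::kRGBA_8888,
    //                                                  &level, /*levelCount=*/1);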

    GrRecordingContext* getContext() { return fContext; }

    GrPathRenderer* getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                    bool allowSW,
                                    GrPathRendererChain::DrawType drawType,
                                    GrPathRenderer::StencilSupport* stencilSupport = nullptr);
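
    // Illustrative sketch, assuming 'args' is a fully populated CanDrawPathArgs for the draw and
    // that a color (non-stencil) draw type is wanted:
    //
    //   GrPathRenderer::StencilSupport stencilSupport;
    //   GrPathRenderer* pr = drawingMgr->getPathRenderer(args, /*allowSW=*/true,
    //                                                    GrPathRendererChain::DrawType::kColor,
    //                                                    &stencilSupport);
    //   if (!pr) {
    //       // No path renderer can handle this path with these constraints.
    //   }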

    GrPathRenderer* getSoftwarePathRenderer();

    // Returns a direct pointer to the coverage counting path renderer, or null if it is not
    // supported or not turned on.
    GrCoverageCountingPathRenderer* getCoverageCountingPathRenderer();

    // Returns a direct pointer to the tessellation path renderer, or null if it is not supported
    // or not turned on.
    GrPathRenderer* getTessellationPathRenderer();

    void flushIfNecessary();

    static bool ProgramUnitTest(GrDirectContext*, int maxStages, int maxLevels);

    GrSemaphoresSubmitted flushSurfaces(SkSpan<GrSurfaceProxy*>,
                                        SkSurface::BackendSurfaceAccess,
                                        const GrFlushInfo&,
                                        const GrBackendSurfaceMutableState* newState);
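
    // Minimal sketch of flushing a single proxy with default flush info and no backend state
    // change (drawingMgr and proxy are placeholders):
    //
    //   GrSurfaceProxy* proxies[] = { proxy.get() };
    //   GrSemaphoresSubmitted submitted = drawingMgr->flushSurfaces(
    //           SkSpan<GrSurfaceProxy*>(proxies, 1),
    //           SkSurface::BackendSurfaceAccess::kNoAccess,
    //           GrFlushInfo(),
    //           /*newState=*/nullptr);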

    void addOnFlushCallbackObject(GrOnFlushCallbackObject*);

#if GR_TEST_UTILS
    void testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject*);
    GrPathRendererChain::Options testingOnly_getOptionsForPathRendererChain() {
        return fOptionsForPathRendererChain;
    }
#endif

    GrRenderTask* getLastRenderTask(const GrSurfaceProxy*) const;
    GrOpsTask* getLastOpsTask(const GrSurfaceProxy*) const;
    void setLastRenderTask(const GrSurfaceProxy*, GrRenderTask*);

    void moveRenderTasksToDDL(SkDeferredDisplayList* ddl);
    void createDDLTask(sk_sp<const SkDeferredDisplayList>,
                       sk_sp<GrRenderTargetProxy> newDest,
                       SkIPoint offset);

private:
    GrDrawingManager(GrRecordingContext*,
                     const GrPathRendererChain::Options&,
                     bool reduceOpsTaskSplitting);

    bool wasAbandoned() const;

    void closeActiveOpsTask();

    // return true if any GrRenderTasks were actually executed; false otherwise
    bool executeRenderTasks(GrOpFlushState*);

    void removeRenderTasks();

    void sortTasks();

    // Attempt to reorder tasks to reduce render passes, and check the memory budget of the
    // resulting intervals. Returns whether the reordering was successful & the memory budget
    // acceptable. If it returns true, fDAG has been updated to reflect the reordered tasks.
    bool reorderTasks(GrResourceAllocator*);

    void closeAllTasks();

    GrRenderTask* appendTask(sk_sp<GrRenderTask>);
    GrRenderTask* insertTaskBeforeLast(sk_sp<GrRenderTask>);

    bool flush(SkSpan<GrSurfaceProxy*> proxies,
               SkSurface::BackendSurfaceAccess access,
               const GrFlushInfo&,
               const GrBackendSurfaceMutableState* newState);

    bool submitToGpu(bool syncToCpu);

    SkDEBUGCODE(void validate() const);

    friend class GrDirectContext; // access to: flush & cleanup
    friend class GrDirectContextPriv; // access to: flush
    friend class GrOnFlushResourceProvider; // this is just a shallow wrapper around this class
    friend class GrRecordingContext;  // access to: ctor
    friend class SkImage; // for access to: flush

    static const int kNumPixelGeometries = 5; // The different pixel geometries
    static const int kNumDFTOptions = 2;      // DFT or no DFT

    GrRecordingContext*               fContext;
    GrPathRendererChain::Options      fOptionsForPathRendererChain;

    // This cache is used by both the vertex and index pools. It reuses memory across multiple
    // flushes.
    sk_sp<GrBufferAllocPool::CpuBufferCache> fCpuBufferCache;

    SkTArray<sk_sp<GrRenderTask>>     fDAG;
    GrOpsTask*                        fActiveOpsTask = nullptr;
    // These are the IDs of the opsTasks currently being flushed (in internalFlush). They are
    // only stored here to prevent memory thrashing.
    SkSTArray<8, uint32_t, true>      fFlushingRenderTaskIDs;
    // These are the new renderTasks generated by the onFlush CBs
    SkSTArray<4, sk_sp<GrRenderTask>> fOnFlushRenderTasks;

    std::unique_ptr<GrPathRendererChain> fPathRendererChain;
    sk_sp<GrSoftwarePathRenderer>     fSoftwarePathRenderer;

    GrTokenTracker                    fTokenTracker;
    bool                              fFlushing;
    const bool                        fReduceOpsTaskSplitting;

    SkTArray<GrOnFlushCallbackObject*> fOnFlushCBObjects;

    struct SurfaceIDKeyTraits {
        static uint32_t GetInvalidKey() {
            return GrSurfaceProxy::UniqueID::InvalidID().asUInt();
        }
    };

    GrHashMapWithCache<uint32_t, GrRenderTask*, SurfaceIDKeyTraits, GrCheapHash> fLastRenderTasks;
};

#endif