// (code-viewer navigation chrome removed; not part of the original source)
1 /*
2  * Copyright 2015 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef GrDrawingManager_DEFINED
9 #define GrDrawingManager_DEFINED
10 
11 #include "include/core/SkSpan.h"
12 #include "include/core/SkSurface.h"
13 #include "include/private/base/SkTArray.h"
14 #include "src/core/SkTHash.h"
15 #include "src/gpu/ganesh/GrBufferAllocPool.h"
16 #include "src/gpu/ganesh/GrDeferredUpload.h"
17 #include "src/gpu/ganesh/GrHashMapWithCache.h"
18 #include "src/gpu/ganesh/GrResourceCache.h"
19 #include "src/gpu/ganesh/GrSamplerState.h"
20 #include "src/gpu/ganesh/GrSurfaceProxy.h"
21 #include "src/gpu/ganesh/PathRenderer.h"
22 #include "src/gpu/ganesh/PathRendererChain.h"
23 
24 // Enabling this will print out which path renderers are being chosen
25 #define GR_PATH_RENDERER_SPEW 0
26 
27 class GrArenas;
28 class GrDeferredDisplayList;
29 class GrGpuBuffer;
30 class GrOnFlushCallbackObject;
31 class GrOpFlushState;
32 class GrRecordingContext;
33 class GrRenderTargetProxy;
34 class GrRenderTask;
35 class GrResourceAllocator;
36 class GrSemaphore;
37 class GrSurfaceProxyView;
38 class GrTextureResolveRenderTask;
39 namespace skgpu {
40 namespace ganesh {
41 class OpsTask;
42 class SoftwarePathRenderer;
43 }  // namespace ganesh
44 }  // namespace skgpu
45 
/**
 * GrDrawingManager records and owns the DAG of GrRenderTasks (fDAG) created on
 * behalf of a GrRecordingContext. It provides the factory methods for the
 * specialized render-task types (ops tasks, copies, transfers, buffer updates,
 * semaphore waits, MSAA/mipmap resolves), and drives sorting, reordering,
 * closing, execution, and flushing of those tasks. It also owns the
 * PathRendererChain used to select a PathRenderer for a given draw.
 */
class GrDrawingManager {
public:
    ~GrDrawingManager();

    void freeGpuResources();

    // OpsTasks created at flush time are stored and handled differently from the others.
    sk_sp<skgpu::ganesh::OpsTask> newOpsTask(GrSurfaceProxyView, sk_sp<GrArenas> arenas);

    // Adds 'atlasTask' to the DAG and leaves it open.
    //
    // If 'previousAtlasTask' is provided, closes it and configures dependencies to guarantee
    // previousAtlasTask and all its users are completely out of service before atlasTask executes.
    void addAtlasTask(sk_sp<GrRenderTask> atlasTask, GrRenderTask* previousAtlasTask);

    // Create a render task that can resolve MSAA and/or regenerate mipmap levels on proxies. This
    // method will only add the new render task to the list. However, it adds the task before the
    // last task in the list. It is up to the caller to call addProxy() on the returned object.
    GrTextureResolveRenderTask* newTextureResolveRenderTaskBefore(const GrCaps&);

    // Creates a render task that can resolve MSAA and/or regenerate mipmap levels on the passed in
    // proxy. The task is appended to the end of the current list of tasks.
    void newTextureResolveRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                     GrSurfaceProxy::ResolveFlags,
                                     const GrCaps&);

    // Create a new render task that will cause the gpu to wait on semaphores before executing any
    // more RenderTasks that target proxy. It is possible for this wait to also block additional
    // work (even to other proxies) that has already been recorded or will be recorded later. The
    // only guarantee is that future work to the passed in proxy will wait on the semaphores to be
    // signaled.
    void newWaitRenderTask(const sk_sp<GrSurfaceProxy>& proxy,
                           std::unique_ptr<std::unique_ptr<GrSemaphore>[]>,
                           int numSemaphores);

    // Create a new render task which copies the pixels from the srcProxy into the dstBuffer. This
    // is used to support the asynchronous readback API. The srcRect is the region of the srcProxy
    // to be copied. The surfaceColorType says how we should interpret the data when reading back
    // from the source. DstColorType describes how the data should be stored in the dstBuffer.
    // DstOffset is the offset into the dstBuffer where we will start writing data.
    void newTransferFromRenderTask(const sk_sp<GrSurfaceProxy>& srcProxy, const SkIRect& srcRect,
                                   GrColorType surfaceColorType, GrColorType dstColorType,
                                   sk_sp<GrGpuBuffer> dstBuffer, size_t dstOffset);

    // Creates a new render task which copies a pixel rectangle from srcView into dstView. The src
    // pixels copied are specified by srcRect. They are copied to the dstRect in dstProxy. Some
    // backends and formats may require dstRect to have the same size as srcRect. Regardless,
    // srcRect must be contained by src's dimensions and dstRect must be contained by dst's
    // dimensions. Any clipping, aspect-ratio adjustment, etc. must be handled prior to this call.
    //
    // This method is not guaranteed to succeed depending on the type of surface, formats, etc, and
    // the backend-specific limitations. On success the task is returned so that the caller may mark
    // it skippable if the copy is later deemed unnecessary.
    sk_sp<GrRenderTask> newCopyRenderTask(sk_sp<GrSurfaceProxy> dst,
                                          SkIRect dstRect,
                                          const sk_sp<GrSurfaceProxy>& src,
                                          SkIRect srcRect,
                                          GrSamplerState::Filter filter,
                                          GrSurfaceOrigin);

    // Adds a render task that copies the range [srcOffset, srcOffset + size] from src to
    // [dstOffset, dstOffset + size] in dst. The src buffer must have type kXferCpuToGpu and the
    // dst must NOT have type kXferCpuToGpu. Neither buffer may be mapped when this executes.
    // Because this is used to insert transfers to vertex/index buffers between draws and we don't
    // track dependencies with buffers, this task is a hard boundary for task reordering.
    void newBufferTransferTask(sk_sp<GrGpuBuffer> src,
                               size_t srcOffset,
                               sk_sp<GrGpuBuffer> dst,
                               size_t dstOffset,
                               size_t size);

    // Adds a render task that copies the src SkData to [dstOffset, dstOffset + src->size()] in dst.
    // The dst must not have type kXferCpuToGpu and must not be mapped. Because this is used to
    // insert updates to vertex/index buffers between draws and we don't track dependencies with
    // buffers, this task is a hard boundary for task reordering.
    void newBufferUpdateTask(sk_sp<SkData> src, sk_sp<GrGpuBuffer> dst, size_t dstOffset);

    // Adds a task that writes the data from the passed GrMipLevels to dst. The lifetime of the
    // pixel data in the levels should be tied to the passed SkData or the caller must flush the
    // context before the data may become invalid. srcColorType is the color type of the
    // GrMipLevels. dstColorType is the color type being used with dst and must be compatible with
    // dst's format according to GrCaps::areColorTypeAndFormatCompatible().
    bool newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
                            SkIRect rect,
                            GrColorType srcColorType,
                            GrColorType dstColorType,
                            const GrMipLevel[],
                            int levelCount);

    // The recording context this drawing manager was created for (non-owning).
    GrRecordingContext* getContext() { return fContext; }

    using PathRenderer = skgpu::ganesh::PathRenderer;
    using PathRendererChain = skgpu::ganesh::PathRendererChain;

    PathRenderer* getPathRenderer(const PathRenderer::CanDrawPathArgs&,
                                  bool allowSW,
                                  PathRendererChain::DrawType,
                                  PathRenderer::StencilSupport* = nullptr);

    PathRenderer* getSoftwarePathRenderer();

    // Returns a direct pointer to the atlas path renderer, or null if it is not supported or not
    // turned on.
    skgpu::ganesh::AtlasPathRenderer* getAtlasPathRenderer();

    // Returns a direct pointer to the tessellation path renderer, or null if it is not supported
    // or not turned on.
    PathRenderer* getTessellationPathRenderer();

    void flushIfNecessary();

    static bool ProgramUnitTest(GrDirectContext*, int maxStages, int maxLevels);

    GrSemaphoresSubmitted flushSurfaces(SkSpan<GrSurfaceProxy*>,
                                        SkSurfaces::BackendSurfaceAccess,
                                        const GrFlushInfo&,
                                        const skgpu::MutableTextureState* newState);

    void addOnFlushCallbackObject(GrOnFlushCallbackObject*);

#if defined(GR_TEST_UTILS)
    void testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject*);
    PathRendererChain::Options testingOnly_getOptionsForPathRendererChain() {
        return fOptionsForPathRendererChain;
    }
#endif

    // Query/record the most recent render task that targeted a given proxy
    // (backed by fLastRenderTasks, keyed on the proxy's unique ID).
    GrRenderTask* getLastRenderTask(const GrSurfaceProxy*) const;
    skgpu::ganesh::OpsTask* getLastOpsTask(const GrSurfaceProxy*) const;
    void setLastRenderTask(const GrSurfaceProxy*, GrRenderTask*);

    void moveRenderTasksToDDL(GrDeferredDisplayList* ddl);
    void createDDLTask(sk_sp<const GrDeferredDisplayList>,
                       sk_sp<GrRenderTargetProxy> newDest);

    // This is public so it can be called by an SkImage factory (in SkImages namespace).
    // It is not meant to be directly called in other situations.
    bool flush(SkSpan<GrSurfaceProxy*> proxies,
               SkSurfaces::BackendSurfaceAccess access,
               const GrFlushInfo&,
               const skgpu::MutableTextureState* newState);

private:
    GrDrawingManager(GrRecordingContext*,
                     const PathRendererChain::Options&,
                     bool reduceOpsTaskSplitting);

    bool wasAbandoned() const;

    void closeActiveOpsTask();

    // return true if any GrRenderTasks were actually executed; false otherwise
    bool executeRenderTasks(GrOpFlushState*);

    void removeRenderTasks();

    void sortTasks();

    // Attempt to reorder tasks to reduce render passes, and check the memory budget of the
    // resulting intervals. Returns whether the reordering was successful & the memory budget
    // acceptable. If it returns true, fDAG has been updated to reflect the reordered tasks.
    bool reorderTasks(GrResourceAllocator*);

    void closeAllTasks();

    GrRenderTask* appendTask(sk_sp<GrRenderTask>);
    GrRenderTask* insertTaskBeforeLast(sk_sp<GrRenderTask>);

    bool submitToGpu(GrSyncCpu sync);

    SkDEBUGCODE(void validate() const;)

    friend class GrDirectContext; // access to: flush & cleanup
    friend class GrOnFlushResourceProvider; // this is just a shallow wrapper around this class
    friend class GrRecordingContext;  // access to: ctor

    static const int kNumPixelGeometries = 5; // The different pixel geometries
    static const int kNumDFTOptions = 2;      // DFT or no DFT

    // Context this manager records for (non-owning back pointer).
    GrRecordingContext*                        fContext;

    // This cache is used by both the vertex and index pools. It reuses memory across multiple
    // flushes.
    sk_sp<GrBufferAllocPool::CpuBufferCache>   fCpuBufferCache;

    // The ordered collection of render tasks recorded so far.
    skia_private::TArray<sk_sp<GrRenderTask>>  fDAG;
    // Indices into fDAG of tasks that form hard boundaries for task reordering
    // (e.g. the buffer transfer/update tasks created above).
    std::vector<int>                           fReorderBlockerTaskIndices;
    // The currently-open OpsTask, if any; closed by closeActiveOpsTask().
    skgpu::ganesh::OpsTask*                    fActiveOpsTask = nullptr;

    PathRendererChain::Options                 fOptionsForPathRendererChain;
    // Lazily-instantiated chain of path renderers (see getPathRenderer()).
    std::unique_ptr<PathRendererChain>         fPathRendererChain;
    sk_sp<skgpu::ganesh::SoftwarePathRenderer> fSoftwarePathRenderer;

    skgpu::TokenTracker                        fTokenTracker;
    // Re-entrancy guard for flush().
    bool                                       fFlushing = false;
    const bool                                 fReduceOpsTaskSplitting;

    skia_private::TArray<GrOnFlushCallbackObject*> fOnFlushCBObjects;

    // Key traits for fLastRenderTasks: keys are GrSurfaceProxy unique IDs.
    struct SurfaceIDKeyTraits {
        static uint32_t GetInvalidKey() {
            return GrSurfaceProxy::UniqueID::InvalidID().asUInt();
        }
    };

    // Maps a GrSurfaceProxy's unique ID to the last render task recorded against that proxy.
    GrHashMapWithCache<uint32_t, GrRenderTask*, SurfaceIDKeyTraits, GrCheapHash> fLastRenderTasks;
};
253 
254 #endif
255