/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDrawingManager_DEFINED
#define GrDrawingManager_DEFINED

#include "include/core/SkSpan.h"
#include "include/core/SkSurface.h"
#include "include/private/SkTArray.h"
#include "include/private/SkTHash.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrHashMapWithCache.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrSurfaceProxy.h"

#if SK_GPU_V1
#include "src/gpu/v1/PathRenderer.h"
#include "src/gpu/v1/PathRendererChain.h"
#endif

// Enabling this will print out which path renderers are being chosen
#define GR_PATH_RENDERER_SPEW 0

class GrArenas;
class GrGpuBuffer;
class GrOnFlushCallbackObject;
class GrOpFlushState;
class GrRecordingContext;
class GrRenderTargetProxy;
class GrRenderTask;
class GrResourceAllocator;
class GrSemaphore;
class GrSurfaceProxyView;
class GrTextureResolveRenderTask;
class SkDeferredDisplayList;
namespace skgpu { namespace v1 {
class OpsTask;
class SoftwarePathRenderer;
}}  // namespace skgpu::v1

class GrDrawingManager {
public:
    ~GrDrawingManager();

    void freeGpuResources();

#if SK_GPU_V1
    // OpsTasks created at flush time are stored and handled differently from the others.
    sk_sp<skgpu::v1::OpsTask> newOpsTask(GrSurfaceProxyView,
                                         sk_sp<GrArenas> arenas,
                                         bool flushTimeOpsTask);

    // Adds 'atlasTask' to the DAG and leaves it open.
    //
    // If 'previousAtlasTask' is provided, closes it and configures dependencies to guarantee
    // previousAtlasTask and all its users are completely out of service before atlasTask
    // executes.
    void addAtlasTask(sk_sp<GrRenderTask> atlasTask, GrRenderTask* previousAtlasTask);
#endif

    // Creates a render task that can resolve MSAA and/or regenerate mipmap levels on proxies.
    // This method only adds the new render task to the list; it is up to the caller to call
    // addProxy() on the returned object.
    GrTextureResolveRenderTask* newTextureResolveRenderTask(const GrCaps&);

    // Creates a new render task that will cause the GPU to wait on semaphores before executing
    // any more RenderTasks that target the proxy. It is possible for this wait to also block
    // additional work (even to other proxies) that has already been recorded or will be recorded
    // later. The only guarantee is that future work to the passed-in proxy will wait on the
    // semaphores to be signaled.
    void newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                           std::unique_ptr<std::unique_ptr<GrSemaphore>[]>,
                           int numSemaphores);

    // Creates a new render task which copies the pixels from srcProxy into dstBuffer. This is
    // used to support the asynchronous readback API. srcRect is the region of srcProxy to be
    // copied. surfaceColorType says how the data should be interpreted when reading back from
    // the source. dstColorType describes how the data should be stored in dstBuffer. dstOffset
    // is the offset into dstBuffer at which writing begins.
    void newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy, const SkIRect& srcRect,
                                   GrColorType surfaceColorType, GrColorType dstColorType,
                                   sk_sp<GrGpuBuffer> dstBuffer, size_t dstOffset);
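
    // Illustrative only, not part of the original header: a minimal caller-side sketch of the
    // asynchronous readback path above. The drawing manager 'dm', the source proxy 'src', and
    // the transfer buffer 'buffer' are assumed to have been created elsewhere.
    //
    //   SkIRect readRect = SkIRect::MakeWH(32, 32);
    //   dm->newTransferFromRenderTask(src, readRect,
    //                                 GrColorType::kRGBA_8888,   // how to read the source
    //                                 GrColorType::kRGBA_8888,   // how to pack dstBuffer
    //                                 std::move(buffer), /*dstOffset=*/0);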

    // Creates a new render task which copies a pixel rectangle from src into dst. The src pixels
    // copied are specified by srcRect. They are copied to a rect of the same size in dst with its
    // top left at dstPoint. If the src rect is clipped by the src bounds, then pixel values in the
    // dst rect corresponding to the clipped area are not overwritten. This method may fail,
    // depending on the types of the surfaces, their formats, and backend-specific limitations. On
    // success the task is returned so that the caller may mark it skippable if the copy is later
    // deemed unnecessary.
    sk_sp<GrRenderTask> newCopyRenderTask(sk_sp<GrSurfaceProxy> src,
                                          SkIRect srcRect,
                                          sk_sp<GrSurfaceProxy> dst,
                                          SkIPoint dstPoint,
                                          GrSurfaceOrigin);

    // Adds a task that writes the data from the passed GrMipLevels to dst. The lifetime of the
    // pixel data in the levels should be tied to the passed SkData, or the caller must flush the
    // context before the data may become invalid. srcColorType is the color type of the
    // GrMipLevels. dstColorType is the color type being used with dst and must be compatible with
    // dst's format according to GrCaps::areColorTypeAndFormatCompatible().
    bool newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
                            SkIRect rect,
                            GrColorType srcColorType,
                            GrColorType dstColorType,
                            const GrMipLevel[],
                            int levelCount);

    GrRecordingContext* getContext() { return fContext; }

#if SK_GPU_V1
    using PathRenderer = skgpu::v1::PathRenderer;
    using PathRendererChain = skgpu::v1::PathRendererChain;

    PathRenderer* getPathRenderer(const PathRenderer::CanDrawPathArgs&,
                                  bool allowSW,
                                  PathRendererChain::DrawType,
                                  PathRenderer::StencilSupport* = nullptr);

    PathRenderer* getSoftwarePathRenderer();
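
    // Illustrative only, not part of the original header: a hypothetical caller selecting a path
    // renderer from the chain, assuming a populated CanDrawPathArgs named 'args'.
    //
    //   auto* pr = dm->getPathRenderer(args, /*allowSW=*/true,
    //                                  PathRendererChain::DrawType::kColor);
    //   if (!pr) {
    //       // Nothing, including the software fallback, can draw this path.
    //   }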

    // Returns a direct pointer to the atlas path renderer, or null if it is not supported or not
    // enabled.
    skgpu::v1::AtlasPathRenderer* getAtlasPathRenderer();

    // Returns a direct pointer to the tessellation path renderer, or null if it is not supported
    // or not enabled.
    PathRenderer* getTessellationPathRenderer();
#endif

    void flushIfNecessary();

    static bool ProgramUnitTest(GrDirectContext*, int maxStages, int maxLevels);

    GrSemaphoresSubmitted flushSurfaces(SkSpan<GrSurfaceProxy*>,
                                        SkSurface::BackendSurfaceAccess,
                                        const GrFlushInfo&,
                                        const GrBackendSurfaceMutableState* newState);

    void addOnFlushCallbackObject(GrOnFlushCallbackObject*);

#if GR_TEST_UTILS
    void testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject*);
#if SK_GPU_V1
    PathRendererChain::Options testingOnly_getOptionsForPathRendererChain() {
        return fOptionsForPathRendererChain;
    }
#endif
#endif

    GrRenderTask* getLastRenderTask(const GrSurfaceProxy*) const;
    skgpu::v1::OpsTask* getLastOpsTask(const GrSurfaceProxy*) const;
    void setLastRenderTask(const GrSurfaceProxy*, GrRenderTask*);

    void moveRenderTasksToDDL(SkDeferredDisplayList* ddl);
    void createDDLTask(sk_sp<const SkDeferredDisplayList>,
                       sk_sp<GrRenderTargetProxy> newDest,
                       SkIPoint offset);

private:
#if SK_GPU_V1
    GrDrawingManager(GrRecordingContext*,
                     const PathRendererChain::Options&,
                     bool reduceOpsTaskSplitting);
#else
    GrDrawingManager(GrRecordingContext*, bool reduceOpsTaskSplitting);
#endif

    bool wasAbandoned() const;

    void closeActiveOpsTask();

    // Returns true if any GrRenderTasks were actually executed; false otherwise.
    bool executeRenderTasks(GrOpFlushState*);

    void removeRenderTasks();

    void sortTasks();

    // Attempt to reorder tasks to reduce render passes, and check the memory budget of the
    // resulting intervals. Returns whether the reordering was successful and the memory budget is
    // acceptable. If it returns true, fDAG has been updated to reflect the reordered tasks.
    bool reorderTasks(GrResourceAllocator*);

    void closeAllTasks();

    GrRenderTask* appendTask(sk_sp<GrRenderTask>);
    GrRenderTask* insertTaskBeforeLast(sk_sp<GrRenderTask>);

    bool flush(SkSpan<GrSurfaceProxy*> proxies,
               SkSurface::BackendSurfaceAccess access,
               const GrFlushInfo&,
               const GrBackendSurfaceMutableState* newState);

    bool submitToGpu(bool syncToCpu);

    SkDEBUGCODE(void validate() const);

    friend class GrDirectContext;            // access to: flush & cleanup
    friend class GrDirectContextPriv;        // access to: flush
    friend class GrOnFlushResourceProvider;  // this is just a shallow wrapper around this class
    friend class GrRecordingContext;         // access to: ctor
    friend class SkImage;                    // for access to: flush

    static const int kNumPixelGeometries = 5;  // The different pixel geometries
    static const int kNumDFTOptions = 2;       // DFT or no DFT

    GrRecordingContext* fContext;

    // This cache is used by both the vertex and index pools. It reuses memory across multiple
    // flushes.
    sk_sp<GrBufferAllocPool::CpuBufferCache> fCpuBufferCache;
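
    // fDAG is the flat list of render tasks that make up the task DAG; sortTasks() and
    // reorderTasks() above may rearrange it. fActiveOpsTask, when non-null, is the ops task that
    // is still open for recording and is closed via closeActiveOpsTask().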
    SkTArray<sk_sp<GrRenderTask>> fDAG;
    skgpu::v1::OpsTask* fActiveOpsTask = nullptr;
    // These are the IDs of the opsTasks currently being flushed (in internalFlush). They are
    // only stored here to prevent memory thrashing.
    SkSTArray<8, uint32_t, true> fFlushingRenderTaskIDs;
    // These are the new renderTasks generated by the onFlush CBs.
    SkSTArray<4, sk_sp<GrRenderTask>> fOnFlushRenderTasks;

#if SK_GPU_V1
    PathRendererChain::Options fOptionsForPathRendererChain;
    std::unique_ptr<PathRendererChain> fPathRendererChain;
    sk_sp<skgpu::v1::SoftwarePathRenderer> fSoftwarePathRenderer;
#endif

    GrTokenTracker fTokenTracker;
    bool fFlushing = false;
    const bool fReduceOpsTaskSplitting;

    SkTArray<GrOnFlushCallbackObject*> fOnFlushCBObjects;

    struct SurfaceIDKeyTraits {
        static uint32_t GetInvalidKey() {
            return GrSurfaceProxy::UniqueID::InvalidID().asUInt();
        }
    };

    GrHashMapWithCache<uint32_t, GrRenderTask*, SurfaceIDKeyTraits, GrCheapHash> fLastRenderTasks;
};

#endif