/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDrawingManager_DEFINED
#define GrDrawingManager_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/core/SkSpan.h"
#include "include/private/base/SkDebug.h"
#include "include/private/base/SkTArray.h"
#include "src/gpu/AtlasTypes.h"
#include "src/gpu/ganesh/GrBufferAllocPool.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrHashMapWithCache.h"
#include "src/gpu/ganesh/GrSamplerState.h"
#include "src/gpu/ganesh/GrSurfaceProxy.h"
#include "src/gpu/ganesh/PathRenderer.h"
#include "src/gpu/ganesh/PathRendererChain.h"

#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

// Enabling this will print out which path renderers are being chosen.
#define GR_PATH_RENDERER_SPEW 0

class GrArenas;
class GrDeferredDisplayList;
class GrDirectContext;
class GrGpuBuffer;
class GrOnFlushCallbackObject;
class GrOpFlushState;
class GrRecordingContext;
class GrRenderTargetProxy;
class GrRenderTask;
class GrResourceAllocator;
class GrSemaphore;
class GrSurfaceProxyView;
class GrTextureResolveRenderTask;
class SkData;
enum GrSurfaceOrigin : int;
enum class GrColorType;
enum class GrSemaphoresSubmitted : bool;
struct GrFlushInfo;
struct GrMipLevel;
struct SkIRect;

namespace SkSurfaces {
enum class BackendSurfaceAccess;
}
namespace skgpu {
class MutableTextureState;
namespace ganesh {
class AtlasPathRenderer;
class OpsTask;
class SoftwarePathRenderer;
} // namespace ganesh
} // namespace skgpu

class GrDrawingManager {
public:
    ~GrDrawingManager();

    void freeGpuResources();

    // OpsTasks created at flush time are stored and handled differently from the others.
    sk_sp<skgpu::ganesh::OpsTask> newOpsTask(GrSurfaceProxyView, sk_sp<GrArenas> arenas);

    // Adds 'atlasTask' to the DAG and leaves it open.
    //
    // If 'previousAtlasTask' is provided, closes it and configures dependencies to guarantee
    // previousAtlasTask and all its users are completely out of service before atlasTask executes.
    void addAtlasTask(sk_sp<GrRenderTask> atlasTask, GrRenderTask* previousAtlasTask);

    // Create a render task that can resolve MSAA and/or regenerate mipmap levels on proxies. This
    // method only adds the new render task to the list, inserting it before the last task in the
    // list. It is up to the caller to call addProxy() on the returned object.
    GrTextureResolveRenderTask* newTextureResolveRenderTaskBefore(const GrCaps&);

    // Creates a render task that can resolve MSAA and/or regenerate mipmap levels on the passed
    // in proxy. The task is appended to the end of the current list of tasks.
    void newTextureResolveRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                     GrSurfaceProxy::ResolveFlags,
                                     const GrCaps&);
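
    // For orientation, a minimal usage sketch of the resolve-task API above. This is
    // hypothetical caller code, not part of this header's contract; `drawingMgr`, `proxy`,
    // and `caps` are assumed to be in scope:
    //
    //     // Append a task that regenerates the proxy's mip levels.
    //     drawingMgr->newTextureResolveRenderTask(std::move(proxy),
    //                                             GrSurfaceProxy::ResolveFlags::kMipMaps,
    //                                             caps);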

    // Create a new render task that will cause the gpu to wait on semaphores before executing any
    // more RenderTasks that target proxy. It is possible for this wait to also block additional
    // work (even to other proxies) that has already been recorded or will be recorded later. The
    // only guarantee is that future work to the passed in proxy will wait on the semaphores to be
    // signaled.
    void newWaitRenderTask(const sk_sp<GrSurfaceProxy>& proxy,
                           std::unique_ptr<std::unique_ptr<GrSemaphore>[]>,
                           int numSemaphores);

    // Create a new render task which copies the pixels from the srcProxy into the dstBuffer. This
    // is used to support the asynchronous readback API. The srcRect is the region of the srcProxy
    // to be copied. The surfaceColorType says how we should interpret the data when reading back
    // from the source. DstColorType describes how the data should be stored in the dstBuffer.
    // DstOffset is the offset into the dstBuffer where we will start writing data.
    void newTransferFromRenderTask(const sk_sp<GrSurfaceProxy>& srcProxy, const SkIRect& srcRect,
                                   GrColorType surfaceColorType, GrColorType dstColorType,
                                   sk_sp<GrGpuBuffer> dstBuffer, size_t dstOffset);

    // Creates a new render task which copies a pixel rectangle from src into dst. The src pixels
    // copied are specified by srcRect. They are copied to the dstRect in dst. Some backends and
    // formats may require dstRect to have the same size as srcRect. Regardless, srcRect must be
    // contained by src's dimensions and dstRect must be contained by dst's dimensions. Any
    // clipping, aspect-ratio adjustment, etc. must be handled prior to this call.
    //
    // This method is not guaranteed to succeed, depending on the type of surface, the formats,
    // and backend-specific limitations. On success the task is returned so that the caller may
    // mark it skippable if the copy is later deemed unnecessary.
    sk_sp<GrRenderTask> newCopyRenderTask(sk_sp<GrSurfaceProxy> dst,
                                          SkIRect dstRect,
                                          const sk_sp<GrSurfaceProxy>& src,
                                          SkIRect srcRect,
                                          GrSamplerState::Filter filter,
                                          GrSurfaceOrigin);

    // Adds a render task that copies the range [srcOffset, srcOffset + size] from src to
    // [dstOffset, dstOffset + size] in dst. The src buffer must have type kXferCpuToGpu and the
    // dst must NOT have type kXferCpuToGpu. Neither buffer may be mapped when this executes.
    // Because this is used to insert transfers to vertex/index buffers between draws and we don't
    // track dependencies with buffers, this task is a hard boundary for task reordering.
    void newBufferTransferTask(sk_sp<GrGpuBuffer> src,
                               size_t srcOffset,
                               sk_sp<GrGpuBuffer> dst,
                               size_t dstOffset,
                               size_t size);

    // Adds a render task that copies the src SkData to [dstOffset, dstOffset + src->size()] in
    // dst. The dst must not have type kXferCpuToGpu and must not be mapped. Because this is used
    // to insert updates to vertex/index buffers between draws and we don't track dependencies
    // with buffers, this task is a hard boundary for task reordering.
    void newBufferUpdateTask(sk_sp<SkData> src, sk_sp<GrGpuBuffer> dst, size_t dstOffset);
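
    // A minimal usage sketch of the two buffer tasks above. This is hypothetical caller code;
    // `drawingMgr`, `staging`, `vertexBuffer`, and `cpuData` are assumed to be in scope:
    //
    //     // Copy from a kXferCpuToGpu staging buffer into the vertex buffer...
    //     drawingMgr->newBufferTransferTask(staging, /*srcOffset=*/0,
    //                                       vertexBuffer, /*dstOffset=*/0,
    //                                       /*size=*/staging->size());
    //     // ...or write CPU-side bytes directly.
    //     drawingMgr->newBufferUpdateTask(std::move(cpuData), vertexBuffer, /*dstOffset=*/0);
    //
    // Either call acts as a hard reordering boundary, as noted above, since buffer dependencies
    // are not tracked.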

    // Adds a task that writes the data from the passed GrMipLevels to dst. The lifetime of the
    // pixel data in the levels should be tied to the passed SkData, or the caller must flush the
    // context before the data may become invalid. srcColorType is the color type of the
    // GrMipLevels. dstColorType is the color type being used with dst and must be compatible with
    // dst's format according to GrCaps::areColorTypeAndFormatCompatible().
    bool newWritePixelsTask(sk_sp<GrSurfaceProxy> dst,
                            SkIRect rect,
                            GrColorType srcColorType,
                            GrColorType dstColorType,
                            const GrMipLevel[],
                            int levelCount);

    GrRecordingContext* getContext() { return fContext; }

    using PathRenderer = skgpu::ganesh::PathRenderer;
    using PathRendererChain = skgpu::ganesh::PathRendererChain;

    PathRenderer* getPathRenderer(const PathRenderer::CanDrawPathArgs&,
                                  bool allowSW,
                                  PathRendererChain::DrawType,
                                  PathRenderer::StencilSupport* = nullptr);

    PathRenderer* getSoftwarePathRenderer();

    // Returns a direct pointer to the atlas path renderer, or null if it is not supported or not
    // turned on.
    skgpu::ganesh::AtlasPathRenderer* getAtlasPathRenderer();

    // Returns a direct pointer to the tessellation path renderer, or null if it is not supported
    // or not turned on.
    PathRenderer* getTessellationPathRenderer();

    static bool ProgramUnitTest(GrDirectContext*, int maxStages, int maxLevels);

    GrSemaphoresSubmitted flushSurfaces(SkSpan<GrSurfaceProxy*>,
                                        SkSurfaces::BackendSurfaceAccess,
                                        const GrFlushInfo&,
                                        const skgpu::MutableTextureState* newState);

    void addOnFlushCallbackObject(GrOnFlushCallbackObject*);

#if defined(GPU_TEST_UTILS)
    void testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject*);
    PathRendererChain::Options testingOnly_getOptionsForPathRendererChain() {
        return fOptionsForPathRendererChain;
    }
#endif

    GrRenderTask* getLastRenderTask(const GrSurfaceProxy*) const;
    skgpu::ganesh::OpsTask* getLastOpsTask(const GrSurfaceProxy*) const;
    void setLastRenderTask(const GrSurfaceProxy*, GrRenderTask*);

    void moveRenderTasksToDDL(GrDeferredDisplayList* ddl);
    void createDDLTask(sk_sp<const GrDeferredDisplayList>,
                       sk_sp<GrRenderTargetProxy> newDest);

    // This is public so it can be called by an SkImage factory (in the SkImages namespace).
    // It is not meant to be called directly in other situations.
    bool flush(SkSpan<GrSurfaceProxy*> proxies,
               SkSurfaces::BackendSurfaceAccess access,
               const GrFlushInfo&,
               const skgpu::MutableTextureState* newState);

private:
    GrDrawingManager(GrRecordingContext*,
                     const PathRendererChain::Options&,
                     bool reduceOpsTaskSplitting);

    bool wasAbandoned() const;

    void closeActiveOpsTask();

    // Returns true if any GrRenderTasks were actually executed; false otherwise.
    bool executeRenderTasks(GrOpFlushState*);

    void removeRenderTasks();

    void sortTasks();
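
    // For orientation, a rough sketch of the order in which the private helpers above and below
    // participate in a flush. This is an assumption inferred from the declarations in this
    // header, not a transcription of GrDrawingManager.cpp:
    //
    //     closeAllTasks();                   // seal any open tasks, including fActiveOpsTask
    //     sortTasks();                       // topologically order fDAG
    //     reorderTasks(&resourceAllocator);  // optionally reorder to reduce render passes
    //     executeRenderTasks(&flushState);   // record the GPU work
    //     removeRenderTasks();               // drop the executed tasks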

    // Attempt to reorder tasks to reduce render passes, and check the memory budget of the
    // resulting intervals. Returns whether the reordering was successful & the memory budget
    // acceptable. If it returns true, fDAG has been updated to reflect the reordered tasks.
    bool reorderTasks(GrResourceAllocator*);

    void closeAllTasks();

    GrRenderTask* appendTask(sk_sp<GrRenderTask>);
    GrRenderTask* insertTaskBeforeLast(sk_sp<GrRenderTask>);

    bool submitToGpu();

    SkDEBUGCODE(void validate() const;)

    friend class GrDirectContext;           // access to: flush & cleanup
    friend class GrOnFlushResourceProvider; // this is just a shallow wrapper around this class
    friend class GrRecordingContext;        // access to: ctor

    static const int kNumPixelGeometries = 5; // The different pixel geometries
    static const int kNumDFTOptions = 2;      // DFT or no DFT

    GrRecordingContext* fContext;

    // This cache is used by both the vertex and index pools. It reuses memory across multiple
    // flushes.
    sk_sp<GrBufferAllocPool::CpuBufferCache> fCpuBufferCache;

    skia_private::TArray<sk_sp<GrRenderTask>> fDAG;
    std::vector<int> fReorderBlockerTaskIndices;
    skgpu::ganesh::OpsTask* fActiveOpsTask = nullptr;

    PathRendererChain::Options fOptionsForPathRendererChain;
    std::unique_ptr<PathRendererChain> fPathRendererChain;
    sk_sp<skgpu::ganesh::SoftwarePathRenderer> fSoftwarePathRenderer;

    skgpu::TokenTracker fTokenTracker;
    bool fFlushing = false;
    const bool fReduceOpsTaskSplitting;

    skia_private::TArray<GrOnFlushCallbackObject*> fOnFlushCBObjects;

    struct SurfaceIDKeyTraits {
        static uint32_t GetInvalidKey() {
            return GrSurfaceProxy::UniqueID::InvalidID().asUInt();
        }
    };

    GrHashMapWithCache<uint32_t, GrRenderTask*, SurfaceIDKeyTraits, GrCheapHash> fLastRenderTasks;
};

#endif