/*
 * Copyright 2019 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrRenderTask_DEFINED
#define GrRenderTask_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/private/base/SkTArray.h"
#include "src/base/SkTInternalLList.h"
#include "src/gpu/ganesh/GrSurfaceProxyView.h"
#include "src/gpu/ganesh/GrTextureProxy.h"
#include "src/gpu/ganesh/GrTextureResolveManager.h"

class GrMockRenderTask;
class GrOpFlushState;
class GrResourceAllocator;
class GrTextureResolveRenderTask;
namespace skgpu {
namespace ganesh {
class OpsTask;
}
}  // namespace skgpu

// This class abstracts a task that targets a single GrSurfaceProxy, participates in the
// GrDrawingManager's DAG, and implements the onExecute method to modify its target proxy's
// contents. (e.g., an opsTask that executes a command buffer, a task to regenerate mipmaps, etc.)
class GrRenderTask : public SkRefCnt {
public:
    GrRenderTask();
    SkDEBUGCODE(~GrRenderTask() override;)

    void makeClosed(GrRecordingContext*);

    void prePrepare(GrRecordingContext* context) { this->onPrePrepare(context); }

    // These two methods are only invoked at flush time
    void prepare(GrOpFlushState* flushState);
    bool execute(GrOpFlushState* flushState) { return this->onExecute(flushState); }
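    // Illustrative sketch only (not part of this header): from this class's point of view,
    // GrDrawingManager drives a task roughly as follows. It is closed once recording against it
    // finishes, then prepared and executed at flush time:
    //
    //   task->makeClosed(rContext);   // accepts no further dependencies; onMakeClosed() runs
    //   ...
    //   task->prepare(flushState);    // pre-execution work
    //   task->execute(flushState);    // calls onExecute() to modify the target proxy's contents
    //   task->endFlush(drawingMgr);   // only for tasks that survive the flush
    //
    // The 'rContext', 'flushState', and 'drawingMgr' objects, and the exact sequencing, belong to
    // GrDrawingManager and the flush machinery rather than to this class.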
    virtual bool requiresExplicitCleanup() const { return false; }

    // Called when this class will survive a flush and needs to truncate its ops and start over.
    // TODO: ultimately it should be invalid for an op list to survive a flush.
    // https://bugs.chromium.org/p/skia/issues/detail?id=7111
    virtual void endFlush(GrDrawingManager*) {}

    // This method "disowns" all the GrSurfaceProxies this RenderTask modifies. In
    // practice this just means telling the drawingManager to forget the relevant
    // mappings from surface proxy to last modifying rendertask.
    virtual void disown(GrDrawingManager*);

    bool isClosed() const { return this->isSetFlag(kClosed_Flag); }

    /**
     * Make this task skippable. At this point this must be used purely as an optimization, since
     * not all tasks will actually skip their work. It would be better if we could detect tasks
     * that can be skipped automatically. We'd need to support minimal flushes (i.e., only flush
     * that which is required for SkSurfaces/SkImages) and the ability to detect "orphaned tasks"
     * and clean them out of the DAG so they don't accumulate indefinitely. Finally, we'd probably
     * have to track whether a proxy's backing store was imported or ever exported to the client
     * in case the client is doing direct reads outside of Skia and thus may require tasks
     * targeting the proxy to execute even if our DAG contains no reads.
     */
    void makeSkippable();

    bool isSkippable() const { return this->isSetFlag(kSkippable_Flag); }

    /** If true no other task should be reordered relative to this task. */
    bool blocksReordering() const { return this->isSetFlag(kBlocksReordering_Flag); }

    /*
     * Notify this GrRenderTask that it relies on the contents of 'dependedOn'.
     */
    void addDependency(GrDrawingManager*,
                       GrSurfaceProxy* dependedOn,
                       skgpu::Mipmapped,
                       GrTextureResolveManager,
                       const GrCaps& caps);

    /*
     * Notify this GrRenderTask that it relies on the contents of all GrRenderTasks which otherTask
     * depends on.
     */
    void addDependenciesFromOtherTask(GrRenderTask* otherTask);

    SkSpan<GrRenderTask*> dependencies() { return SkSpan(fDependencies); }
    SkSpan<GrRenderTask*> dependents() { return SkSpan(fDependents); }

    void replaceDependency(const GrRenderTask* toReplace, GrRenderTask* replaceWith);
    void replaceDependent(const GrRenderTask* toReplace, GrRenderTask* replaceWith);

    /*
     * Does this renderTask depend on 'dependedOn'?
     */
    bool dependsOn(const GrRenderTask* dependedOn) const;

    uint32_t uniqueID() const { return fUniqueID; }
    int numTargets() const { return fTargets.size(); }
    GrSurfaceProxy* target(int i) const { return fTargets[i].get(); }

    /*
     * Safely cast this GrRenderTask to an OpsTask (if possible).
     */
    virtual skgpu::ganesh::OpsTask* asOpsTask() { return nullptr; }

#if defined(GR_TEST_UTILS)
    /*
     * Dump out the GrRenderTask dependency DAG.
     */
    virtual void dump(const SkString& label,
                      SkString indent,
                      bool printDependencies,
                      bool close) const;
    virtual const char* name() const = 0;
#endif

#ifdef SK_DEBUG
    virtual int numClips() const { return 0; }

    virtual void visitProxies_debugOnly(const GrVisitProxyFunc&) const = 0;

    void visitTargetAndSrcProxies_debugOnly(const GrVisitProxyFunc& func) const {
        this->visitProxies_debugOnly(func);
        for (const sk_sp<GrSurfaceProxy>& target : fTargets) {
            func(target.get(), skgpu::Mipmapped::kNo);
        }
    }
#endif

    bool isUsed(GrSurfaceProxy* proxy) const {
        for (const sk_sp<GrSurfaceProxy>& target : fTargets) {
            if (target.get() == proxy) {
                return true;
            }
        }

        return this->onIsUsed(proxy);
    }

    // Feed proxy usage intervals to the GrResourceAllocator class.
    virtual void gatherProxyIntervals(GrResourceAllocator*) const = 0;

    // In addition to just the GrSurface being allocated, has the stencil buffer been allocated (if
    // it is required)?
    bool isInstantiated() const;

    // Used by GrRenderTaskCluster.
    SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrRenderTask);

#if defined(GR_TEST_UTILS)
    const GrTextureResolveRenderTask* resolveTask() const { return fTextureResolveTask; }
#endif

protected:
    SkDEBUGCODE(bool deferredProxiesAreInstantiated() const;)

    // Add a target surface proxy to the list of targets for this task.
    // This also informs the drawing manager to update the lastRenderTask association.
    void addTarget(GrDrawingManager*, sk_sp<GrSurfaceProxy>);

    // Helper that adds the proxy owned by a view.
    void addTarget(GrDrawingManager* dm, const GrSurfaceProxyView& view) {
        this->addTarget(dm, view.refProxy());
    }

    enum class ExpectedOutcome : bool {
        kTargetUnchanged,
        kTargetDirty,
    };

    // Performs any work to finalize this renderTask prior to execution. If returning
    // ExpectedOutcome::kTargetDirty, the caller is also responsible for filling out the area it
    // will modify in targetUpdateBounds.
    //
    // targetUpdateBounds must not extend beyond the proxy bounds.
    virtual ExpectedOutcome onMakeClosed(GrRecordingContext*, SkIRect* targetUpdateBounds) = 0;
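    // Illustrative sketch only (a hypothetical override, not part of Skia): a subclass that knows
    // it will touch its entire first target could report that as
    //
    //   ExpectedOutcome onMakeClosed(GrRecordingContext*, SkIRect* targetUpdateBounds) override {
    //       *targetUpdateBounds = SkIRect::MakeSize(this->target(0)->dimensions());
    //       return ExpectedOutcome::kTargetDirty;
    //   }
    //
    // A real implementation would typically compute a tighter bound from the work it recorded.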
    skia_private::STArray<1, sk_sp<GrSurfaceProxy>> fTargets;

    // List of texture proxies whose contents are being prepared on a worker thread
    // TODO: this list exists so we can fire off the proper upload when a renderTask begins
    // executing. Can this be replaced?
    skia_private::TArray<GrTextureProxy*, true> fDeferredProxies;

    enum Flags {
        kClosed_Flag           = 0x01,  //!< This task can't accept any more dependencies.
        kDisowned_Flag         = 0x02,  //!< This task is disowned by its GrDrawingManager.
        kSkippable_Flag        = 0x04,  //!< This task is skippable.
        kAtlas_Flag            = 0x08,  //!< This task is an atlas task.
        kBlocksReordering_Flag = 0x10,  //!< No task can be reordered with respect to this task.

        kWasOutput_Flag        = 0x20,  //!< Flag for topological sorting
        kTempMark_Flag         = 0x40,  //!< Flag for topological sorting
    };

    void setFlag(uint32_t flag) {
        fFlags |= flag;
    }

    void resetFlag(uint32_t flag) {
        fFlags &= ~flag;
    }

    bool isSetFlag(uint32_t flag) const {
        return SkToBool(fFlags & flag);
    }

    void setIndex(uint32_t index) {
        SkASSERT(!this->isSetFlag(kWasOutput_Flag));
        SkASSERT(index < (1 << 25));
        fFlags |= index << 7;
    }

    uint32_t getIndex() const {
        SkASSERT(this->isSetFlag(kWasOutput_Flag));
        return fFlags >> 7;
    }
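    // Note on the fFlags encoding above: the seven flag bits occupy bits 0-6, and once
    // kWasOutput_Flag is set during topological sorting the task's sort index is packed into
    // bits 7-31. Hence the 'index < (1 << 25)' assert: 25 index bits shifted left by 7 still fit
    // in the 32-bit field. For example, setIndex(3) ORs in 3 << 7 == 0x180, and getIndex()
    // recovers 3 via fFlags >> 7, which also discards the flag bits.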
private:
    // for TopoSortTraits, fTextureResolveTask, addDependency
    friend class GrDrawingManager;
    friend class GrMockRenderTask;

    // Derived classes can override to indicate usage of proxies _other than target proxies_.
    // GrRenderTask itself will handle checking the target proxies.
    virtual bool onIsUsed(GrSurfaceProxy*) const = 0;

    void addDependency(GrRenderTask* dependedOn);
    void addDependent(GrRenderTask* dependent);
    SkDEBUGCODE(bool isDependent(const GrRenderTask* dependent) const;)
    SkDEBUGCODE(void validate() const;)

    static uint32_t CreateUniqueID();

    struct TopoSortTraits {
        static uint32_t GetIndex(GrRenderTask* renderTask) {
            return renderTask->getIndex();
        }
        static void Output(GrRenderTask* renderTask, uint32_t index) {
            renderTask->setIndex(index);
            renderTask->setFlag(kWasOutput_Flag);
        }
        static bool WasOutput(const GrRenderTask* renderTask) {
            return renderTask->isSetFlag(kWasOutput_Flag);
        }
        static void SetTempMark(GrRenderTask* renderTask) {
            renderTask->setFlag(kTempMark_Flag);
        }
        static void ResetTempMark(GrRenderTask* renderTask) {
            renderTask->resetFlag(kTempMark_Flag);
        }
        static bool IsTempMarked(const GrRenderTask* renderTask) {
            return renderTask->isSetFlag(kTempMark_Flag);
        }
        static int NumDependencies(const GrRenderTask* renderTask) {
            return renderTask->fDependencies.size();
        }
        static GrRenderTask* Dependency(GrRenderTask* renderTask, int index) {
            return renderTask->fDependencies[index];
        }
    };

    virtual void onMakeSkippable() {}
    virtual void onPrePrepare(GrRecordingContext*) {}  // Only OpsTask currently overrides this
    virtual void onPrepare(GrOpFlushState*) {}         // OpsTask and GrDDLTask override this
    virtual bool onExecute(GrOpFlushState* flushState) = 0;

    const uint32_t fUniqueID;
    uint32_t fFlags;

    // 'this' GrRenderTask relies on the output of the GrRenderTasks in 'fDependencies'
    skia_private::STArray<1, GrRenderTask*, true> fDependencies;
    // 'this' GrRenderTask's output is relied on by the GrRenderTasks in 'fDependents'
    skia_private::STArray<1, GrRenderTask*, true> fDependents;

    // For performance reasons, we should perform texture resolves back-to-back as much as
    // possible (http://skbug.com/9406). To accomplish this, we make and reuse one single resolve
    // task for each render task, then add it as a dependency during makeClosed().
    GrTextureResolveRenderTask* fTextureResolveTask = nullptr;

    SkDEBUGCODE(GrDrawingManager* fDrawingMgr = nullptr;)
};

#endif