• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrRenderTargetOpList_DEFINED
#define GrRenderTargetOpList_DEFINED

#include "GrAppliedClip.h"
#include "GrOpList.h"
#include "GrPathRendering.h"
#include "GrPrimitiveProcessor.h"
#include "ops/GrOp.h"
#include "SkArenaAlloc.h"
#include "SkClipStack.h"
#include "SkMatrix.h"
#include "SkStringUtils.h"
#include "SkStrokeRec.h"
#include "SkTArray.h"
#include "SkTLazy.h"
#include "SkTypes.h"

class GrAuditTrail;
class GrClearOp;
class GrCaps;
class GrRenderTargetProxy;

namespace gr_instanced {
    class InstancedRendering;
}

34 class GrRenderTargetOpList final : public GrOpList {
35 private:
36     using DstProxy = GrXferProcessor::DstProxy;
37 
38 public:
39     GrRenderTargetOpList(GrRenderTargetProxy*, GrGpu*, GrAuditTrail*);
40 
41     ~GrRenderTargetOpList() override;
42 
makeClosed(const GrCaps & caps)43     void makeClosed(const GrCaps& caps) override {
44         if (this->isClosed()) {
45             return;
46         }
47 
48         fLastFullClearOp = nullptr;
49         this->forwardCombine(caps);
50 
51         INHERITED::makeClosed(caps);
52     }
53 
isEmpty()54     bool isEmpty() const { return fRecordedOps.empty(); }
55 
56     /**
57      * Empties the draw buffer of any queued up draws.
58      */
59     void reset() override;
60 
61     void abandonGpuResources() override;
62     void freeGpuResources() override;
63 
64     /**
65      * Together these two functions flush all queued up draws to GrCommandBuffer. The return value
66      * of executeOps() indicates whether any commands were actually issued to the GPU.
67      */
68     void prepareOps(GrOpFlushState* flushState) override;
69     bool executeOps(GrOpFlushState* flushState) override;
70 
addOp(std::unique_ptr<GrOp> op,const GrCaps & caps)71     uint32_t addOp(std::unique_ptr<GrOp> op, const GrCaps& caps) {
72         this->recordOp(std::move(op), caps, nullptr, nullptr);
73         return this->uniqueID();
74     }
addOp(std::unique_ptr<GrOp> op,const GrCaps & caps,GrAppliedClip && clip,const DstProxy & dstProxy)75     uint32_t addOp(std::unique_ptr<GrOp> op, const GrCaps& caps,
76                    GrAppliedClip&& clip, const DstProxy& dstProxy) {
77         this->recordOp(std::move(op), caps, clip.doesClip() ? &clip : nullptr, &dstProxy);
78         return this->uniqueID();
79     }
80 
81     /** Clears the entire render target */
82     void fullClear(const GrCaps& caps, GrColor color);
83 
84     /**
85      * Copies a pixel rectangle from one surface to another. This call may finalize
86      * reserved vertex/index data (as though a draw call was made). The src pixels
87      * copied are specified by srcRect. They are copied to a rect of the same
88      * size in dst with top left at dstPoint. If the src rect is clipped by the
89      * src bounds then  pixel values in the dst rect corresponding to area clipped
90      * by the src rect are not overwritten. This method is not guaranteed to succeed
91      * depending on the type of surface, configs, etc, and the backend-specific
92      * limitations.
93      */
94     bool copySurface(const GrCaps& caps,
95                      GrSurfaceProxy* dst,
96                      GrSurfaceProxy* src,
97                      const SkIRect& srcRect,
98                      const SkIPoint& dstPoint) override;
99 
instancedRendering()100     gr_instanced::InstancedRendering* instancedRendering() const {
101         SkASSERT(fInstancedRendering);
102         return fInstancedRendering.get();
103     }
104 
asRenderTargetOpList()105     GrRenderTargetOpList* asRenderTargetOpList() override { return this; }
106 
107     SkDEBUGCODE(void dump() const override;)
108 
109     SkDEBUGCODE(int numOps() const override { return fRecordedOps.count(); })
110     SkDEBUGCODE(int numClips() const override { return fNumClips; })
111 
112 private:
113     friend class GrRenderTargetContextPriv; // for stencil clip state. TODO: this is invasive
114 
115     struct RecordedOp {
RecordedOpRecordedOp116         RecordedOp(std::unique_ptr<GrOp> op,
117                    const GrAppliedClip* appliedClip,
118                    const DstProxy* dstProxy)
119                 : fOp(std::move(op))
120                 , fAppliedClip(appliedClip) {
121             if (dstProxy) {
122                 fDstProxy = *dstProxy;
123             }
124         }
125         std::unique_ptr<GrOp> fOp;
126         DstProxy              fDstProxy;
127         const GrAppliedClip*  fAppliedClip;
128     };
129 
130     // If the input op is combined with an earlier op, this returns the combined op. Otherwise, it
131     // returns the input op.
132     GrOp* recordOp(std::unique_ptr<GrOp>, const GrCaps& caps,
133                    GrAppliedClip* = nullptr, const DstProxy* = nullptr);
134 
135     void forwardCombine(const GrCaps&);
136 
137     // If this returns true then b has been merged into a's op.
138     bool combineIfPossible(const RecordedOp& a, GrOp* b, const GrAppliedClip* bClip,
139                            const DstProxy* bDstTexture, const GrCaps&);
140 
141     GrClearOp*                     fLastFullClearOp = nullptr;
142 
143     std::unique_ptr<gr_instanced::InstancedRendering> fInstancedRendering;
144 
145     uint32_t                       fLastClipStackGenID;
146     SkIRect                        fLastDevClipBounds;
147 
148     // For ops/opList we have mean: 5 stdDev: 28
149     SkSTArray<5, RecordedOp, true> fRecordedOps;
150 
151     // MDB TODO: 4096 for the first allocation of the clip space will be huge overkill.
152     // Gather statistics to determine the correct size.
153     SkArenaAlloc                   fClipAllocator{4096};
154     SkDEBUGCODE(int                fNumClips;)
155 
156     typedef GrOpList INHERITED;
157 };

#endif