/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrOpFlushState.h"

#include "GrContextPriv.h"
#include "GrDrawOpAtlas.h"
#include "GrGpu.h"
#include "GrResourceProvider.h"
#include "GrTexture.h"

//////////////////////////////////////////////////////////////////////////////

GrOpFlushState::GrOpFlushState(GrGpu* gpu,
                               GrResourceProvider* resourceProvider,
                               GrTokenTracker* tokenTracker)
        : fVertexPool(gpu)
        , fIndexPool(gpu)
        , fGpu(gpu)
        , fResourceProvider(resourceProvider)
        , fTokenTracker(tokenTracker) {
}

const GrCaps& GrOpFlushState::caps() const {
    return *fGpu->caps();
}

GrGpuRTCommandBuffer* GrOpFlushState::rtCommandBuffer() {
    return fCommandBuffer->asRTCommandBuffer();
}

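// Replays the deferred draws recorded for the op with 'opID', interleaving any inline uploads
// that were scheduled to occur before each draw's token.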
void GrOpFlushState::executeDrawsAndUploadsForMeshDrawOp(uint32_t opID, const SkRect& opBounds) {
    SkASSERT(this->rtCommandBuffer());
    while (fCurrDraw != fDraws.end() && fCurrDraw->fOpID == opID) {
        GrDeferredUploadToken drawToken = fTokenTracker->nextTokenToFlush();
        while (fCurrUpload != fInlineUploads.end() &&
               fCurrUpload->fUploadBeforeToken == drawToken) {
            this->rtCommandBuffer()->inlineUpload(this, fCurrUpload->fUpload);
            ++fCurrUpload;
        }
        SkASSERT(fCurrDraw->fPipeline->proxy() == this->drawOpArgs().fProxy);
        this->rtCommandBuffer()->draw(*fCurrDraw->fPipeline, *fCurrDraw->fGeometryProcessor,
                                      fMeshes.begin() + fCurrMesh, nullptr, fCurrDraw->fMeshCnt,
                                      opBounds);
        fCurrMesh += fCurrDraw->fMeshCnt;
        fTokenTracker->flushToken();
        ++fCurrDraw;
    }
}

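// Unmaps the vertex/index pools, performs all ASAP uploads, and rewinds the draw and inline
// upload iterators so execution starts at the beginning of the recorded lists.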
void GrOpFlushState::preExecuteDraws() {
    fVertexPool.unmap();
    fIndexPool.unmap();
    for (auto& upload : fASAPUploads) {
        this->doUpload(upload);
    }
    // Setup execution iterators.
    fCurrDraw = fDraws.begin();
    fCurrUpload = fInlineUploads.begin();
    fCurrMesh = 0;
}

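// Discards all per-flush state (buffer pools, arena-backed upload/draw lists, and meshes) once
// every recorded draw and inline upload has been executed.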
void GrOpFlushState::reset() {
    SkASSERT(fCurrDraw == fDraws.end());
    SkASSERT(fCurrUpload == fInlineUploads.end());
    fVertexPool.reset();
    fIndexPool.reset();
    fArena.reset();
    fASAPUploads.reset();
    fInlineUploads.reset();
    fDraws.reset();
    fMeshes.reset();
    fCurrMesh = 0;
    fBaseDrawToken = GrDeferredUploadToken::AlreadyFlushedToken();
}

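// Performs a deferred texture upload. Pixels are written directly to the destination surface
// when the GPU reports that no draw is needed; otherwise they are staged in an approximate-fit
// temporary texture and copied into place.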
void GrOpFlushState::doUpload(GrDeferredTextureUploadFn& upload) {
    GrDeferredTextureUploadWritePixelsFn wp = [this](GrTextureProxy* dstProxy, int left, int top,
                                                     int width, int height,
                                                     GrColorType srcColorType, const void* buffer,
                                                     size_t rowBytes) {
        // We don't allow srgb conversions via op flush state uploads.
        static constexpr auto kSRGBConversion = GrSRGBConversion::kNone;
        GrSurface* dstSurface = dstProxy->priv().peekSurface();
        GrGpu::DrawPreference drawPreference = GrGpu::kNoDraw_DrawPreference;
        GrGpu::WritePixelTempDrawInfo tempInfo;
        if (!fGpu->getWritePixelsInfo(dstSurface, dstProxy->origin(), width, height, srcColorType,
                                      kSRGBConversion, &drawPreference, &tempInfo)) {
            return false;
        }
        if (GrGpu::kNoDraw_DrawPreference == drawPreference) {
            return this->fGpu->writePixels(dstSurface, dstProxy->origin(), left, top, width, height,
                                           srcColorType, buffer, rowBytes);
        }
        // TODO: Shouldn't we be bailing here if a draw is really required instead of a copy?
        // e.g. if (tempInfo.fSwizzle != "RGBA") fail.
        GrSurfaceDesc desc;
        desc.fOrigin = dstProxy->origin();
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = dstProxy->config();
        sk_sp<GrTexture> temp(this->fResourceProvider->createApproxTexture(
                desc, GrResourceProvider::kNoPendingIO_Flag));
        if (!temp) {
            return false;
        }
        if (!fGpu->writePixels(temp.get(), dstProxy->origin(), 0, 0, width, height,
                               tempInfo.fWriteColorType, buffer, rowBytes)) {
            return false;
        }
        return fGpu->copySurface(dstSurface, dstProxy->origin(), temp.get(), dstProxy->origin(),
                                 SkIRect::MakeWH(width, height), {left, top});
    };
    upload(wp);
}

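// Defers an upload that must execute just before the draw issued with the next draw token;
// returns the token the upload precedes.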
GrDeferredUploadToken GrOpFlushState::addInlineUpload(GrDeferredTextureUploadFn&& upload) {
    return fInlineUploads.append(&fArena, std::move(upload), fTokenTracker->nextDrawToken())
            .fUploadBeforeToken;
}

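// Queues an upload that runs in preExecuteDraws(), before any of the deferred draws execute.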
GrDeferredUploadToken GrOpFlushState::addASAPUpload(GrDeferredTextureUploadFn&& upload) {
    fASAPUploads.append(&fArena, std::move(upload));
    return fTokenTracker->nextTokenToFlush();
}

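// Records a deferred draw. When the mesh is compatible with the previously recorded draw (same
// geometry processor and pipeline, with no intervening inline upload), it is folded into that
// draw rather than starting a new one.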
void GrOpFlushState::draw(const GrGeometryProcessor* gp, const GrPipeline* pipeline,
                          const GrMesh& mesh) {
    SkASSERT(fOpArgs);
    SkASSERT(fOpArgs->fOp);
    fMeshes.push_back(mesh);
    bool firstDraw = fDraws.begin() == fDraws.end();
    if (!firstDraw) {
        Draw& lastDraw = *fDraws.begin();
        // If the last draw shares a geometry processor and pipeline and there are no intervening
        // uploads, add this mesh to it.
        if (lastDraw.fGeometryProcessor == gp && lastDraw.fPipeline == pipeline) {
            if (fInlineUploads.begin() == fInlineUploads.end() ||
                fInlineUploads.tail()->fUploadBeforeToken != fTokenTracker->nextDrawToken()) {
                ++lastDraw.fMeshCnt;
                return;
            }
        }
    }
    auto& draw = fDraws.append(&fArena);
    GrDeferredUploadToken token = fTokenTracker->issueDrawToken();

    draw.fGeometryProcessor.reset(gp);
    draw.fPipeline = pipeline;
    draw.fMeshCnt = 1;
    draw.fOpID = fOpArgs->fOp->uniqueID();
    if (firstDraw) {
        fBaseDrawToken = token;
    }
}

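// Vertex and index space for deferred draws is suballocated from the flush state's buffer pools.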
void* GrOpFlushState::makeVertexSpace(size_t vertexSize, int vertexCount, const GrBuffer** buffer,
                                      int* startVertex) {
    return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
}

uint16_t* GrOpFlushState::makeIndexSpace(int indexCount, const GrBuffer** buffer, int* startIndex) {
    return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
}

void* GrOpFlushState::makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount,
                                             int fallbackVertexCount, const GrBuffer** buffer,
                                             int* startVertex, int* actualVertexCount) {
    return fVertexPool.makeSpaceAtLeast(vertexSize, minVertexCount, fallbackVertexCount, buffer,
                                        startVertex, actualVertexCount);
}

uint16_t* GrOpFlushState::makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                                const GrBuffer** buffer, int* startIndex,
                                                int* actualIndexCount) {
    return reinterpret_cast<uint16_t*>(fIndexPool.makeSpaceAtLeast(
            minIndexCount, fallbackIndexCount, buffer, startIndex, actualIndexCount));
}

void GrOpFlushState::putBackIndices(int indexCount) {
    fIndexPool.putBack(indexCount * sizeof(uint16_t));
}

void GrOpFlushState::putBackVertices(int vertices, size_t vertexStride) {
    fVertexPool.putBack(vertices * vertexStride);
}

GrAppliedClip GrOpFlushState::detachAppliedClip() {
    return fOpArgs->fAppliedClip ? std::move(*fOpArgs->fAppliedClip) : GrAppliedClip();
}

GrGlyphCache* GrOpFlushState::glyphCache() const {
    return fGpu->getContext()->contextPriv().getGlyphCache();
}

GrAtlasManager* GrOpFlushState::fullAtlasManager() const {
    return fGpu->getContext()->contextPriv().getFullAtlasManager();
}