/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/DrawContext.h"

#include "include/core/SkColorSpace.h"
#include "include/core/SkPixmap.h"
#include "src/core/SkColorData.h"

#include "include/gpu/graphite/Context.h"
#include "include/gpu/graphite/Recorder.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/SkBackingFit.h"
#include "src/gpu/graphite/AtlasProvider.h"
#include "src/gpu/graphite/Buffer.h"
#include "src/gpu/graphite/Caps.h"
#include "src/gpu/graphite/CommandBuffer.h"
#include "src/gpu/graphite/ComputePathAtlas.h"
#include "src/gpu/graphite/ContextPriv.h"
#include "src/gpu/graphite/DrawList.h"
#include "src/gpu/graphite/DrawPass.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/RasterPathAtlas.h"
#include "src/gpu/graphite/RecorderPriv.h"
#include "src/gpu/graphite/RenderPassDesc.h"
#include "src/gpu/graphite/ResourceTypes.h"
#include "src/gpu/graphite/SharedContext.h"
#include "src/gpu/graphite/TextureProxy.h"
#include "src/gpu/graphite/TextureProxyView.h"
#include "src/gpu/graphite/compute/DispatchGroup.h"
#include "src/gpu/graphite/geom/BoundsManager.h"
#include "src/gpu/graphite/geom/Geometry.h"
#include "src/gpu/graphite/task/ComputeTask.h"
#include "src/gpu/graphite/task/CopyTask.h"
#include "src/gpu/graphite/task/DrawTask.h"
#include "src/gpu/graphite/task/RenderPassTask.h"
#include "src/gpu/graphite/task/UploadTask.h"
#include "src/gpu/graphite/text/TextAtlasManager.h"

namespace skgpu::graphite {

namespace {

// Discarding content on floating-point textures can leave NaNs as the prior color for a pixel,
// in which case hardware blending (when enabled) will fail even if the src/dst coefficients
// and coverage would produce the unmodified src value.
bool discard_op_should_use_clear(SkColorType ct) {
    switch (ct) {
        case kRGBA_F16Norm_SkColorType:
        case kRGBA_F16_SkColorType:
        case kRGBA_F32_SkColorType:
        case kA16_float_SkColorType:
        case kR16G16_float_SkColorType:
            return true;
        default:
            return false;
    }
}

}  // anonymous namespace

sk_sp<DrawContext> DrawContext::Make(const Caps* caps,
                                     sk_sp<TextureProxy> target,
                                     SkISize deviceSize,
                                     const SkColorInfo& colorInfo,
                                     const SkSurfaceProps& props) {
    if (!target) {
        return nullptr;
    }
    // We don't render to unknown or unpremul alphatypes
    if (colorInfo.alphaType() == kUnknown_SkAlphaType ||
        colorInfo.alphaType() == kUnpremul_SkAlphaType) {
        return nullptr;
    }
    if (!caps->isRenderable(target->textureInfo())) {
        return nullptr;
    }

    // Accept an approximate-fit texture, but make sure it's at least as large as the device's
    // logical size.
    // TODO: validate that the color type and alpha type are compatible with the target's info
    SkASSERT(target->isFullyLazy() || (target->dimensions().width() >= deviceSize.width() &&
                                       target->dimensions().height() >= deviceSize.height()));
    SkImageInfo imageInfo = SkImageInfo::Make(deviceSize, colorInfo);
    return sk_sp<DrawContext>(new DrawContext(caps, std::move(target), imageInfo, props));
}

DrawContext::DrawContext(const Caps* caps,
                         sk_sp<TextureProxy> target,
                         const SkImageInfo& ii,
                         const SkSurfaceProps& props)
        : fTarget(std::move(target))
        , fImageInfo(ii)
        , fSurfaceProps(props)
        , fDstReadStrategy(caps->getDstReadStrategy(fTarget->textureInfo()))
        , fCurrentDrawTask(sk_make_sp<DrawTask>(fTarget))
        , fPendingDraws(std::make_unique<DrawList>())
        , fPendingUploads(std::make_unique<UploadList>()) {
    if (!caps->isTexturable(fTarget->textureInfo())) {
        fReadView = {};  // Presumably this DrawContext is rendering into a swap chain
    } else {
        Swizzle swizzle = caps->getReadSwizzle(ii.colorType(), fTarget->textureInfo());
        fReadView = {fTarget, swizzle};
    }
    // TBD - Will probably want DrawLists (and their internal commands) to come from an arena
    // that the DC manages.
}

DrawContext::~DrawContext() = default;

void DrawContext::clear(const SkColor4f& clearColor) {
    this->discard();
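    // With all pending work discarded, the clear can be deferred to the next render pass's
    // load op instead of being recorded as a draw.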

    fPendingLoadOp = LoadOp::kClear;
    SkPMColor4f pmColor = clearColor.premul();
    fPendingClearColor = pmColor.array();
}

void DrawContext::discard() {
    // Non-loading operations on a fully lazy target can corrupt data beyond the DrawContext's
    // region, so they should be avoided.
    SkASSERT(!fTarget->isFullyLazy());

    // A fullscreen clear or discard will overwrite anything that came before, so clear the
    // DrawList.
    // NOTE: Eventually the current DrawTask should be reset, once there are no longer implicit
    // dependencies on atlas tasks between DrawContexts. When that's resolved, the only tasks in
    // the current DrawTask are those that directly impact the target, which becomes irrelevant
    // with the clear op overwriting it. For now, preserve the previous tasks that might include
    // atlas uploads that are not explicitly shared between DrawContexts.
    if (fPendingDraws->renderStepCount() > 0) {
        fPendingDraws = std::make_unique<DrawList>();
    }
    if (fComputePathAtlas) {
        fComputePathAtlas->reset();
    }

    if (discard_op_should_use_clear(fImageInfo.colorType())) {
        // In theory the clear color shouldn't matter since a discardable state should be fully
        // overwritten by later draws, but if a previous call to clear() had injected bad data,
        // the discard should not inherit it.
        fPendingClearColor = {0.f, 0.f, 0.f, 0.f};
        fPendingLoadOp = LoadOp::kClear;
    } else {
        fPendingLoadOp = LoadOp::kDiscard;
    }
}

void DrawContext::recordDraw(const Renderer* renderer,
                             const Transform& localToDevice,
                             const Geometry& geometry,
                             const Clip& clip,
                             DrawOrder ordering,
                             const PaintParams* paint,
                             const StrokeStyle* stroke) {
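    // The caller is responsible for having already intersected the clip's scissor with this
    // context's device bounds.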
    SkASSERT(SkIRect::MakeSize(this->imageInfo().dimensions()).contains(clip.scissor()));
    fPendingDraws->recordDraw(renderer, localToDevice, geometry, clip, ordering, paint, stroke);
}

bool DrawContext::recordUpload(Recorder* recorder,
                               sk_sp<TextureProxy> targetProxy,
                               const SkColorInfo& srcColorInfo,
                               const SkColorInfo& dstColorInfo,
                               const std::vector<MipLevel>& levels,
                               const SkIRect& dstRect,
                               std::unique_ptr<ConditionalUploadContext> condContext) {
    // Our caller should have clipped to the bounds of the surface already.
    SkASSERT(targetProxy->isFullyLazy() ||
             SkIRect::MakeSize(targetProxy->dimensions()).contains(dstRect));
    return fPendingUploads->recordUpload(recorder,
                                         std::move(targetProxy),
                                         srcColorInfo,
                                         dstColorInfo,
                                         levels,
                                         dstRect,
                                         std::move(condContext));
}

void DrawContext::recordDependency(sk_sp<Task> task) {
    SkASSERT(task);
    // Adding `task` to the current DrawTask directly means that it will execute after any previous
    // dependent tasks and after any previous calls to flush(), but everything else that's being
    // collected on the DrawContext will execute after `task` once the next flush() is performed.
    fCurrentDrawTask->addTask(std::move(task));
}

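// Lazily creates this DrawContext's compute path atlas the first time it is requested and reuses
// it thereafter; flush() and discard() reset the atlas contents but keep the object alive.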
PathAtlas* DrawContext::getComputePathAtlas(Recorder* recorder) {
    if (!fComputePathAtlas) {
        fComputePathAtlas = recorder->priv().atlasProvider()->createComputePathAtlas(recorder);
    }
    return fComputePathAtlas.get();
}

void DrawContext::flush(Recorder* recorder) {
    if (fPendingUploads->size() > 0) {
        TRACE_EVENT_INSTANT1("skia.gpu", TRACE_FUNC, TRACE_EVENT_SCOPE_THREAD,
                             "# uploads", fPendingUploads->size());
        fCurrentDrawTask->addTask(UploadTask::Make(fPendingUploads.get()));
        // The UploadTask steals the collected upload instances, automatically resetting this list
        SkASSERT(fPendingUploads->size() == 0);
    }

    // Generate compute dispatches that render into the atlas texture used by pending draws.
    // TODO: Once compute atlas caching is implemented, DrawContext might not hold onto this
    // atlas directly. At that point a recordDispatch() could be added that appends to a pending
    // dispatch list, much like how uploads are handled. In that case, Device would be responsible
    // for triggering the recording of dispatches, but that may happen naturally in
    // AtlasProvider::recordUploads().
    if (fComputePathAtlas) {
        ComputeTask::DispatchGroupList dispatches;
        if (fComputePathAtlas->recordDispatches(recorder, &dispatches)) {
            // For now this check is valid as all coverage mask draws involve dispatches
            SkASSERT(fPendingDraws->hasCoverageMaskDraws());

            fCurrentDrawTask->addTask(ComputeTask::Make(std::move(dispatches)));
        }  // else no pending compute work needed to be recorded

        fComputePathAtlas->reset();
    }  // else platform doesn't support compute or atlas was never initialized.

    if (fPendingDraws->renderStepCount() == 0 && fPendingLoadOp != LoadOp::kClear) {
        // Nothing will be rasterized to the target that warrants a RenderPassTask, but we preserve
        // any added uploads or compute tasks since those could also affect the target w/o
        // rasterizing anything directly.
        return;
    }

    // Convert the pending draws and load/store ops into a DrawPass that will be executed after
    // the collected uploads and compute dispatches. Save the bounds required for a dst copy to
    // insert a copy task of sufficient size.
    // TODO: At this point, there's only ever one DrawPass in a RenderPassTask to a target. When
    // subpasses are implemented, they will either be collected alongside fPendingDraws or added
    // to the RenderPassTask separately.
    SkIRect dstReadPixelBounds = fPendingDraws->dstReadBounds().makeRoundOut().asSkIRect();
    std::unique_ptr<DrawPass> pass = DrawPass::Make(recorder,
                                                    std::move(fPendingDraws),
                                                    fTarget,
                                                    this->imageInfo(),
                                                    std::make_pair(fPendingLoadOp, fPendingStoreOp),
                                                    fPendingClearColor);
    fPendingDraws = std::make_unique<DrawList>();
    // Now that there is content drawn to the target, that content must be loaded on any subsequent
    // render pass.
    fPendingLoadOp = LoadOp::kLoad;
    fPendingStoreOp = StoreOp::kStore;

    if (pass) {
        SkASSERT(fTarget.get() == pass->target());

        // If any paint used within the DrawPass reads from the dst texture (indicated by nonempty
        // dstReadPixelBounds) and the dstReadStrategy is kTextureCopy, then add a CopyTask.
        sk_sp<TextureProxy> dstCopy;
        if (!dstReadPixelBounds.isEmpty() && fDstReadStrategy == DstReadStrategy::kTextureCopy) {
            TRACE_EVENT_INSTANT0("skia.gpu", "DrawPass requires dst copy",
                                 TRACE_EVENT_SCOPE_THREAD);

            // TODO: Right now this assert is ensuring that the dstCopy will be texturable since it
            // uses the same texture info as fTarget. Ideally, if fTarget were not texturable but
            // still readable, we would perform a fallback to a compatible texturable info. We also
            // should decide whether or not a copy-as-draw fallback is necessary here too. All of
            // this is handled inside Image::Copy() except we would need it to expose the task in
            // order to link it correctly.
            SkASSERT(recorder->priv().caps()->isTexturable(fTarget->textureInfo()));
            // Use approx size for better reuse.
            SkISize dstCopyTextureSize = GetApproxSize(dstReadPixelBounds.size());
            dstCopy = TextureProxy::Make(recorder->priv().caps(),
                                         recorder->priv().resourceProvider(),
                                         dstCopyTextureSize,
                                         fTarget->textureInfo(),
                                         "DstCopyTexture",
                                         skgpu::Budgeted::kYes);
            SkASSERT(dstCopy);

            // Add the copy task to initialize dstCopy before the render pass task.
            fCurrentDrawTask->addTask(CopyTextureToTextureTask::Make(
                    fTarget, dstReadPixelBounds, dstCopy, /*dstPoint=*/{0, 0}));
        }

        const Caps* caps = recorder->priv().caps();
        auto [loadOp, storeOp] = pass->ops();
        auto writeSwizzle = caps->getWriteSwizzle(this->colorInfo().colorType(),
                                                  fTarget->textureInfo());

        RenderPassDesc desc = RenderPassDesc::Make(caps, fTarget->textureInfo(), loadOp, storeOp,
                                                   pass->depthStencilFlags(),
                                                   pass->clearColor(),
                                                   pass->requiresMSAA(),
                                                   writeSwizzle,
                                                   fDstReadStrategy);

        RenderPassTask::DrawPassList passes;
        passes.emplace_back(std::move(pass));
        fCurrentDrawTask->addTask(RenderPassTask::Make(std::move(passes), desc, fTarget,
                                                       std::move(dstCopy), dstReadPixelBounds));
    }
    // else pass creation failed, DrawPass will have logged why. Don't discard the previously
    // accumulated tasks, however, since they may represent operations on an atlas that other
    // DrawContexts now implicitly depend on.
}

sk_sp<Task> DrawContext::snapDrawTask(Recorder* recorder) {
    // If flush() was explicitly called earlier and no new work was recorded, this call to flush()
    // is a no-op and shouldn't hurt performance.
    this->flush(recorder);

    if (!fCurrentDrawTask->hasTasks()) {
        return nullptr;
    }

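    // Hand off the accumulated DrawTask and start a fresh one targeting the same proxy, so that
    // subsequent recording is independent of the snapped work.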
    sk_sp<Task> snappedTask = std::move(fCurrentDrawTask);
    fCurrentDrawTask = sk_make_sp<DrawTask>(fTarget);
    return snappedTask;
}

}  // namespace skgpu::graphite