• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2021 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "fuzz/Fuzz.h"
9 #include "fuzz/FuzzCommon.h"
10 
11 #include "include/core/SkCanvas.h"
12 #include "include/core/SkDeferredDisplayList.h"
13 #include "include/core/SkDeferredDisplayListRecorder.h"
14 #include "include/core/SkExecutor.h"
15 #include "include/core/SkPromiseImageTexture.h"
16 #include "include/core/SkSize.h"
17 #include "include/core/SkSurface.h"
18 #include "include/gpu/GrDirectContext.h"
19 #include "include/private/SkDeque.h"
20 #include "include/private/SkMutex.h"
21 #include "include/private/SkNoncopyable.h"
22 #include "include/private/SkTemplates.h"
23 #include "include/private/SkThreadID.h"
24 #include "src/core/SkTaskGroup.h"
25 #include "src/image/SkImage_Gpu.h"
26 #include "tools/gpu/GrContextFactory.h"
27 
28 #include <atomic>
29 #include <memory>
30 #include <queue>
31 
32 using ContextType = sk_gpu_test::GrContextFactory::ContextType;
33 
34 // be careful: `foo(make_fuzz_t<T>(f), make_fuzz_t<U>(f))` is undefined.
35 // In fact, all make_fuzz_foo() functions have this potential problem.
36 // Use sequence points!
37 template <typename T>
make_fuzz_t(Fuzz * fuzz)38 inline T make_fuzz_t(Fuzz* fuzz) {
39     T t;
40     fuzz->next(&t);
41     return t;
42 }
43 
44 class DDLFuzzer;
45 
46 // This class stores the state of a given promise image owned by the fuzzer. It acts as the
47 // context for the callback procs of the promise image.
48 class PromiseImageInfo : public SkNVRefCnt<PromiseImageInfo>, SkNoncopyable {
49 public:
50     enum class State : int {
51         kInitial,
52         kTriedToFulfill,
53         kDone
54     };
~PromiseImageInfo()55     ~PromiseImageInfo() {
56         // If we hit this, then the image or the texture will outlive this object which is bad.
57         SkASSERT_RELEASE(fImage->unique());
58         SkASSERT_RELEASE(!fTexture || fTexture->unique());
59         fImage.reset();
60         fTexture.reset();
61         State s = fState;
62         SkASSERT_RELEASE(s == State::kDone);
63     }
64     DDLFuzzer* fFuzzer = nullptr;
65     sk_sp<SkImage> fImage;
66     // At the moment, the atomicity of this isn't used because all our promise image callbacks
67     // happen on the same thread. See the TODO below about them unreffing them off the GPU thread.
68     std::atomic<State> fState{State::kInitial};
69     sk_sp<SkPromiseImageTexture> fTexture;
70 };
71 
72 static constexpr int kPromiseImageCount = 8;
73 static constexpr SkISize kPromiseImageSize{16, 16};
74 static constexpr int kPromiseImagesPerDDL = 4;
75 static constexpr int kRecordingThreadCount = 4;
76 static constexpr int kIterationCount = 10000;
77 
78 // A one-shot runner object for fuzzing our DDL threading. It creates an array of promise images,
79 // and concurrently records DDLs that reference them, playing each DDL back on the GPU thread.
80 // The backing textures for promise images may be recycled into a pool, or not, for each case
81 // as determined by the fuzzing data.
82 class DDLFuzzer : SkNoncopyable {
83 public:
84     DDLFuzzer(Fuzz*, ContextType);
85     void run();
86 
87     sk_sp<SkPromiseImageTexture> fulfillPromiseImage(PromiseImageInfo&);
88     void releasePromiseImage(PromiseImageInfo&);
89 private:
90     void initPromiseImage(int index);
91     void recordAndPlayDDL();
isOnGPUThread() const92     bool isOnGPUThread() const { return SkGetThreadID() == fGpuThread; }
isOnMainThread() const93     bool isOnMainThread() const { return SkGetThreadID() == fMainThread; }
94 
95     Fuzz* fFuzz = nullptr;
96     GrDirectContext* fContext = nullptr;
97     SkAutoTArray<PromiseImageInfo> fPromiseImages{kPromiseImageCount};
98     sk_sp<SkSurface> fSurface;
99     SkSurfaceCharacterization fSurfaceCharacterization;
100     std::unique_ptr<SkExecutor> fGpuExecutor = SkExecutor::MakeFIFOThreadPool(1, false);
101     std::unique_ptr<SkExecutor> fRecordingExecutor =
102         SkExecutor::MakeFIFOThreadPool(kRecordingThreadCount, false);
103     SkTaskGroup fGpuTaskGroup{*fGpuExecutor};
104     SkTaskGroup fRecordingTaskGroup{*fRecordingExecutor};
105     SkThreadID fGpuThread = kIllegalThreadID;
106     SkThreadID fMainThread = SkGetThreadID();
107     std::queue<sk_sp<SkPromiseImageTexture>> fReusableTextures;
108     sk_gpu_test::GrContextFactory fContextFactory;
109 };
110 
DDLFuzzer(Fuzz * fuzz,ContextType contextType)111 DDLFuzzer::DDLFuzzer(Fuzz* fuzz, ContextType contextType) : fFuzz(fuzz) {
112     sk_gpu_test::ContextInfo ctxInfo = fContextFactory.getContextInfo(contextType);
113     sk_gpu_test::TestContext* testCtx = ctxInfo.testContext();
114     fContext = ctxInfo.directContext();
115     if (!fContext) {
116         return;
117     }
118     SkISize canvasSize = kPromiseImageSize;
119     canvasSize.fWidth *= kPromiseImagesPerDDL;
120     SkImageInfo ii = SkImageInfo::Make(canvasSize, kRGBA_8888_SkColorType, kPremul_SkAlphaType);
121     fSurface = SkSurface::MakeRenderTarget(fContext, SkBudgeted::kNo, ii);
122     if (!fSurface || !fSurface->characterize(&fSurfaceCharacterization)) {
123         return;
124     }
125 
126     testCtx->makeNotCurrent();
127     fGpuTaskGroup.add([&]{
128         testCtx->makeCurrent();
129         fGpuThread = SkGetThreadID();
130     });
131     fGpuTaskGroup.wait();
132     for (int i = 0; i < kPromiseImageCount; ++i) {
133         this->initPromiseImage(i);
134     }
135 }
136 
fulfillPromiseImage(PromiseImageInfo & promiseImage)137 sk_sp<SkPromiseImageTexture> DDLFuzzer::fulfillPromiseImage(PromiseImageInfo& promiseImage) {
138     using State = PromiseImageInfo::State;
139     if (!this->isOnGPUThread()) {
140         fFuzz->signalBug();
141     }
142     bool success = make_fuzz_t<bool>(fFuzz);
143     State prior = promiseImage.fState.exchange(State::kTriedToFulfill, std::memory_order_relaxed);
144     if (prior != State::kInitial || promiseImage.fTexture != nullptr) {
145         fFuzz->signalBug();
146     }
147     if (!success) {
148         return nullptr;
149     }
150 
151     // Try reusing an existing texture if we can and if the fuzzer wills it.
152     if (!fReusableTextures.empty() && make_fuzz_t<bool>(fFuzz)) {
153         promiseImage.fTexture = std::move(fReusableTextures.front());
154         fReusableTextures.pop();
155         return promiseImage.fTexture;
156     }
157 
158     bool finishedBECreate = false;
159     auto markFinished = [](void* context) {
160         *(bool*)context = true;
161     };
162 
163     GrBackendTexture backendTex = fContext->createBackendTexture(kPromiseImageSize.width(),
164                                                                  kPromiseImageSize.height(),
165                                                                  kRGBA_8888_SkColorType,
166                                                                  SkColors::kRed,
167                                                                  GrMipMapped::kNo,
168                                                                  GrRenderable::kYes,
169                                                                  GrProtected::kNo,
170                                                                  markFinished,
171                                                                  &finishedBECreate);
172     SkASSERT_RELEASE(backendTex.isValid());
173     while (!finishedBECreate) {
174         fContext->checkAsyncWorkCompletion();
175     }
176 
177     promiseImage.fTexture = SkPromiseImageTexture::Make(backendTex);
178 
179     return promiseImage.fTexture;
180 }
181 
releasePromiseImage(PromiseImageInfo & promiseImage)182 void DDLFuzzer::releasePromiseImage(PromiseImageInfo& promiseImage) {
183     using State = PromiseImageInfo::State;
184     // TODO: This requirement will go away when we unref promise images off the GPU thread.
185     if (!this->isOnGPUThread()) {
186         fFuzz->signalBug();
187     }
188     State old = promiseImage.fState.exchange(State::kInitial, std::memory_order_relaxed);
189     if (old != State::kTriedToFulfill) {
190         fFuzz->signalBug();
191     }
192 
193     // If we failed to fulfill, then nothing to be done.
194     if (!promiseImage.fTexture) {
195         return;
196     }
197 
198     bool reuse = make_fuzz_t<bool>(fFuzz);
199     if (reuse) {
200         fReusableTextures.push(std::move(promiseImage.fTexture));
201     } else {
202         fContext->deleteBackendTexture(promiseImage.fTexture->backendTexture());
203     }
204     promiseImage.fTexture = nullptr;
205 }
206 
fuzz_promise_image_fulfill(void * ctxIn)207 static sk_sp<SkPromiseImageTexture> fuzz_promise_image_fulfill(void* ctxIn) {
208     PromiseImageInfo& fuzzPromiseImage = *(PromiseImageInfo*)ctxIn;
209     return fuzzPromiseImage.fFuzzer->fulfillPromiseImage(fuzzPromiseImage);
210 }
211 
fuzz_promise_image_release(void * ctxIn)212 static void fuzz_promise_image_release(void* ctxIn) {
213     PromiseImageInfo& fuzzPromiseImage = *(PromiseImageInfo*)ctxIn;
214     fuzzPromiseImage.fFuzzer->releasePromiseImage(fuzzPromiseImage);
215 }
216 
initPromiseImage(int index)217 void DDLFuzzer::initPromiseImage(int index) {
218     PromiseImageInfo& promiseImage = fPromiseImages[index];
219     promiseImage.fFuzzer = this;
220     GrBackendFormat backendFmt = fContext->defaultBackendFormat(kRGBA_8888_SkColorType,
221                                                                 GrRenderable::kYes);
222     promiseImage.fImage = SkImage::MakePromiseTexture(fContext->threadSafeProxy(),
223                                                       backendFmt,
224                                                       kPromiseImageSize,
225                                                       GrMipMapped::kNo,
226                                                       kTopLeft_GrSurfaceOrigin,
227                                                       kRGBA_8888_SkColorType,
228                                                       kUnpremul_SkAlphaType,
229                                                       SkColorSpace::MakeSRGB(),
230                                                       &fuzz_promise_image_fulfill,
231                                                       &fuzz_promise_image_release,
232                                                       &promiseImage);
233 }
234 
recordAndPlayDDL()235 void DDLFuzzer::recordAndPlayDDL() {
236     SkASSERT(!this->isOnGPUThread() && !this->isOnMainThread());
237     SkDeferredDisplayListRecorder recorder(fSurfaceCharacterization);
238     SkCanvas* canvas = recorder.getCanvas();
239     // Draw promise images in a strip
240     for (int i = 0; i < kPromiseImagesPerDDL; i++) {
241         int xOffset = i * kPromiseImageSize.width();
242         int j;
243         // Pick random promise images to draw.
244         fFuzz->nextRange(&j, 0, kPromiseImageCount - 1);
245         canvas->drawImage(fPromiseImages[j].fImage, xOffset, 0);
246     }
247     sk_sp<SkDeferredDisplayList> ddl = recorder.detach();
248     fGpuTaskGroup.add([=, ddl{std::move(ddl)}]{
249         bool success = fSurface->draw(std::move(ddl));
250         if (!success) {
251             fFuzz->signalBug();
252         }
253     });
254 }
255 
run()256 void DDLFuzzer::run() {
257     if (!fSurface) {
258         return;
259     }
260     fRecordingTaskGroup.batch(kIterationCount, [=](int i) {
261         this->recordAndPlayDDL();
262     });
263     fRecordingTaskGroup.wait();
264     fGpuTaskGroup.add([=] {
265         while (!fReusableTextures.empty()) {
266             sk_sp<SkPromiseImageTexture> gpuTexture = std::move(fReusableTextures.front());
267             fContext->deleteBackendTexture(gpuTexture->backendTexture());
268             fReusableTextures.pop();
269         }
270         fContextFactory.destroyContexts();
271         // TODO: Release promise images not on the GPU thread.
272         fPromiseImages.reset(0);
273     });
274     fGpuTaskGroup.wait();
275 }
276 
DEF_FUZZ(DDLThreadingGL,fuzz)277 DEF_FUZZ(DDLThreadingGL, fuzz) {
278     DDLFuzzer(fuzz, ContextType::kGL_ContextType).run();
279 }
280