1 /*
2 * Copyright 2020 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "include/core/SkBitmap.h"
9 #include "include/core/SkCanvas.h"
10 #include "include/core/SkSurface.h"
11 #include "include/core/SkTextBlob.h"
12 #include "src/core/SkSurfacePriv.h"
13 #include "src/gpu/text/GrTextBlob.h"
14 #include "tests/Test.h"
15 #include "tools/ToolUtils.h"
16
rasterize_blob(SkTextBlob * blob,const SkPaint & paint,GrRecordingContext * rContext,const SkMatrix & matrix)17 SkBitmap rasterize_blob(SkTextBlob* blob,
18 const SkPaint& paint,
19 GrRecordingContext* rContext,
20 const SkMatrix& matrix) {
21 const SkImageInfo info =
22 SkImageInfo::Make(500, 500, kN32_SkColorType, kPremul_SkAlphaType);
23 auto surface = SkSurface::MakeRenderTarget(rContext, SkBudgeted::kNo, info);
24 auto canvas = surface->getCanvas();
25 canvas->drawColor(SK_ColorWHITE);
26 canvas->concat(matrix);
27 canvas->drawTextBlob(blob, 10, 250, paint);
28 SkBitmap bitmap;
29 bitmap.allocN32Pixels(500, 500);
30 surface->readPixels(bitmap, 0, 0);
31 return bitmap;
32 }
33
check_for_black(const SkBitmap & bm)34 bool check_for_black(const SkBitmap& bm) {
35 for (int y = 0; y < bm.height(); y++) {
36 for (int x = 0; x < bm.width(); x++) {
37 if (bm.getColor(x, y) == SK_ColorBLACK) {
38 return true;
39 }
40 }
41 }
42 return false;
43 }
44
// Draws the same blob under power-of-two scales from 2^-13 through 2^4 and
// checks that at least one of the renderings contains a black pixel.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrTextBlobScaleAnimation, reporter, ctxInfo) {
    auto typeface = ToolUtils::create_portable_typeface("Mono", SkFontStyle());
    SkFont font{typeface};
    font.setHinting(SkFontHinting::kNormal);
    font.setSize(12);
    font.setEdging(SkFont::Edging::kAntiAlias);
    font.setSubpixel(true);

    // Build a horizontally positioned run of 30 glyphs, one scalar unit apart.
    constexpr int kGlyphCount = 30;
    SkTextBlobBuilder builder;
    const auto& runBuffer = builder.allocRunPosH(font, kGlyphCount, 0, nullptr);
    for (int index = 0; index < kGlyphCount; ++index) {
        runBuffer.glyphs[index] = static_cast<SkGlyphID>(index);
        runBuffer.pos[index] = SkIntToScalar(index);
    }
    auto blob = builder.make();

    auto dContext = ctxInfo.directContext();
    bool sawBlack = false;
    for (int power = -13; power < 5; ++power) {
        const SkMatrix scaleMatrix = SkMatrix::Scale(std::exp2(power), std::exp2(power));
        const SkBitmap rendering = rasterize_blob(blob.get(), SkPaint(), dContext, scaleMatrix);
        sawBlack = check_for_black(rendering) || sawBlack;
    }
    REPORTER_ASSERT(reporter, sawBlack);
}
71
72 // Test extreme positions for all combinations of positions, origins, and translation matrices.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrTextBlobMoveAround,reporter,ctxInfo)73 DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrTextBlobMoveAround, reporter, ctxInfo) {
74 auto tf = ToolUtils::create_portable_typeface("Mono", SkFontStyle());
75 SkFont font{tf};
76 font.setHinting(SkFontHinting::kNormal);
77 font.setSize(12);
78 font.setEdging(SkFont::Edging::kAntiAlias);
79 font.setSubpixel(true);
80
81 auto makeBlob = [&](SkPoint delta) {
82 SkTextBlobBuilder builder;
83 const auto& runBuffer = builder.allocRunPos(font, 30, nullptr);
84
85 for (int i = 0; i < 30; i++) {
86 runBuffer.glyphs[i] = static_cast<SkGlyphID>(i);
87 runBuffer.points()[i] = SkPoint::Make(SkIntToScalar(i*10) + delta.x(), 50 + delta.y());
88 }
89 return builder.make();
90 };
91
92 auto dContext = ctxInfo.directContext();
93 auto rasterizeBlob = [&](SkTextBlob* blob, SkPoint origin, const SkMatrix& matrix) {
94 SkPaint paint;
95 const SkImageInfo info =
96 SkImageInfo::Make(350, 80, kN32_SkColorType, kPremul_SkAlphaType);
97 auto surface = SkSurface::MakeRenderTarget(dContext, SkBudgeted::kNo, info);
98 auto canvas = surface->getCanvas();
99 canvas->drawColor(SK_ColorWHITE);
100 canvas->concat(matrix);
101 canvas->drawTextBlob(blob, 10 + origin.x(), 40 + origin.y(), paint);
102 SkBitmap bitmap;
103 bitmap.allocN32Pixels(350, 80);
104 surface->readPixels(bitmap, 0, 0);
105 return bitmap;
106 };
107
108 SkBitmap benchMark;
109 {
110 auto blob = makeBlob({0, 0});
111 benchMark = rasterizeBlob(blob.get(), {0,0}, SkMatrix::I());
112 }
113
114 auto checkBitmap = [&](const SkBitmap& bitmap) {
115 REPORTER_ASSERT(reporter, benchMark.width() == bitmap.width());
116 REPORTER_ASSERT(reporter, benchMark.width() == bitmap.width());
117
118 for (int y = 0; y < benchMark.height(); y++) {
119 for (int x = 0; x < benchMark.width(); x++) {
120 if (benchMark.getColor(x, y) != bitmap.getColor(x, y)) {
121 return false;
122 }
123 }
124 }
125 return true;
126 };
127
128 SkScalar interestingNumbers[] = {-10'000'000, -1'000'000, -1, 0, +1, +1'000'000, +10'000'000};
129 for (auto originX : interestingNumbers) {
130 for (auto originY : interestingNumbers) {
131 for (auto translateX : interestingNumbers) {
132 for (auto translateY : interestingNumbers) {
133 // Make sure everything adds to zero.
134 SkScalar deltaPosX = -(originX + translateX);
135 SkScalar deltaPosY = -(originY + translateY);
136 auto blob = makeBlob({deltaPosX, deltaPosY});
137 SkMatrix t = SkMatrix::Translate(translateX, translateY);
138 auto bitmap = rasterizeBlob(blob.get(), {originX, originY}, t);
139 REPORTER_ASSERT(reporter, checkBitmap(bitmap));
140 }
141 }
142 }
143 }
144 }
145
// Exercises GrBagOfBytes size arithmetic and block allocation. The
// commented-out calls document inputs that are expected to assert; they are
// kept as documentation rather than run.
DEF_TEST(GrBagOfBytesBasic, r) {
    const int k4K = 1 << 12;
    {
        // Boundary checks on the minimum-size-with-overhead calculation.
        // GrBagOfBytes::MinimumSizeWithOverhead(-1); // This should fail
        GrBagOfBytes::PlatformMinimumSizeWithOverhead(0, 16);
        GrBagOfBytes::PlatformMinimumSizeWithOverhead(
                std::numeric_limits<int>::max() - k4K - 1, 16);
        // GrBagOfBytes::MinimumSizeWithOverhead(std::numeric_limits<int>::max() - k4K); // Fail
        // Expected values for (size, alignment, blockSize, maxAlignment) combos.
        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(0, 1, 16, 16) == 31);
        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(1, 1, 16, 16) == 32);
        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(63, 1, 16, 16) == 94);
        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(0, 8, 16, 16) == 24);
        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(1, 8, 16, 16) == 32);
        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(63, 8, 16, 16) == 88);
        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(0, 16, 16, 16) == 16);
        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(1, 16, 16, 16) == 32);
        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(63, 16, 16, 16) == 80);

        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(0, 1, 8, 16) == 23);
        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(1, 1, 8, 16) == 24);
        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(63, 1, 8, 16) == 86);
        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(0, 8, 8, 16) == 16);
        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(1, 8, 8, 16) == 24);
        REPORTER_ASSERT(r, GrBagOfBytes::MinimumSizeWithOverhead(63, 8, 8, 16) == 80);
    }

    {
        // Parameter validation for alignedBytes; invalid sizes/alignments assert.
        GrBagOfBytes bob;
        // bob.alignedBytes(0, 1); // This should fail
        // bob.alignedBytes(1, 0); // This should fail
        // bob.alignedBytes(1, 3); // This should fail

        struct Big {
            char stuff[std::numeric_limits<int>::max()];
        };
        // bob.alignedBytes(sizeof(Big), 1); // this should fail
        // bob.allocateBytesFor<Big>(); // this should not compile
        // The following should run, but should not be regularly tested.
        // bob.allocateBytesFor<int>((std::numeric_limits<int>::max() - (1<<12)) / sizeof(int) - 1);
        // The following should fail
        // bob.allocateBytesFor<int>((std::numeric_limits<int>::max() - (1<<12)) / sizeof(int));
        bob.alignedBytes(1, 1); // To avoid unused variable problems.
    }

    // Force multiple block allocation
    {
        GrBagOfBytes bob;
        const int k64K = 1 << 16;
        // By default allocation block sizes start at 1K and go up with fib. This should allocate
        // 10 individual blocks.
        for (int i = 0; i < 10; i++) {
            bob.alignedBytes(k64K, 1);
        }
    }
}
201
202 // Helper for defining allocators with inline/reserved storage.
203 // For argument declarations, stick to the base type (GrSubRunAllocator).
204 // Note: Inheriting from the storage first means the storage will outlive the
205 // GrSubRunAllocator, letting ~GrSubRunAllocator read it as it calls destructors.
206 // (This is mostly only relevant for strict tools like MSAN.)
207
// A GrSubRunAllocator that carries |inlineSize| bytes of reserved storage
// inline, only falling back to the heap once that storage is exhausted.
template <size_t inlineSize>
class GrSTSubRunAllocator : private GrBagOfBytes::Storage<inlineSize>, public GrSubRunAllocator {
public:
    // The default first heap block is sized to hold at least inlineSize bytes
    // plus the allocator's per-block overhead.
    explicit GrSTSubRunAllocator(int firstHeapAllocation =
                                         GrBagOfBytes::PlatformMinimumSizeWithOverhead(inlineSize, 1))
            : GrSubRunAllocator{this->data(), SkTo<int>(this->size()), firstHeapAllocation} {}
};
215
// Exercises GrSubRunAllocator: POD and non-POD allocation, arrays, alignment,
// unique_ptr-style ownership, and destructor ordering across several backing
// storage configurations. File-static counters track Foo construction and
// destruction so the test can verify destructors run when the arena scope ends.
DEF_TEST(GrSubRunAllocator, r) {
    static int created = 0;
    static int destroyed = 0;
    struct Foo {
        Foo() : fI{-2}, fX{-3} { created++; }
        Foo(int i, float x) : fI{i}, fX{x} { created++; }
        ~Foo() { destroyed++; }
        int fI;
        float fX;
    };

    // 10-byte payload with 8-byte alignment to exercise padding in the arena.
    struct alignas(8) OddAlignment {
        char buf[10];
    };

    // Runs the same allocation pattern against any GrSubRunAllocator; callers
    // vary only the backing storage.
    auto exercise = [&](GrSubRunAllocator* alloc) {
        created = 0;
        destroyed = 0;
        {
            int* p = alloc->makePOD<int>(3);
            REPORTER_ASSERT(r, *p == 3);
            int* q = alloc->makePOD<int>(7);
            REPORTER_ASSERT(r, *q == 7);

            REPORTER_ASSERT(r, *alloc->makePOD<int>(3) == 3);
            auto foo = alloc->makeUnique<Foo>(3, 4.0f);
            REPORTER_ASSERT(r, foo->fI == 3);
            REPORTER_ASSERT(r, foo->fX == 4.0f);
            REPORTER_ASSERT(r, created == 1);
            REPORTER_ASSERT(r, destroyed == 0);

            alloc->makePODArray<int>(10);

            // Array elements are default-constructed: fI == -2, fX == -3.
            auto fooArray = alloc->makeUniqueArray<Foo>(10);
            REPORTER_ASSERT(r, fooArray[3].fI == -2);
            REPORTER_ASSERT(r, fooArray[4].fX == -3.0f);
            REPORTER_ASSERT(r, created == 11);
            REPORTER_ASSERT(r, destroyed == 0);
            alloc->makePOD<OddAlignment>();
        }

        // Leaving the scope destroys the unique handles: all 11 Foos die.
        REPORTER_ASSERT(r, created == 11);
        REPORTER_ASSERT(r, destroyed == 11);
    };

    // Exercise default arena
    {
        GrSubRunAllocator arena{0};
        exercise(&arena);
    }

    // Exercise on stack arena
    {
        GrSTSubRunAllocator<64> arena;
        exercise(&arena);
    }

    // Exercise arena with a heap allocated starting block
    {
        std::unique_ptr<char[]> block{new char[1024]};
        GrSubRunAllocator arena{block.get(), 1024, 0};
        exercise(&arena);
    }

    // Exercise the singly-link list of unique_ptrs use case
    {
        created = 0;
        destroyed = 0;
        GrSubRunAllocator arena;

        struct Node {
            Node(std::unique_ptr<Node, GrSubRunAllocator::Destroyer> next)
                    : fNext{std::move(next)} { created++; }
            ~Node() { destroyed++; }
            std::unique_ptr<Node, GrSubRunAllocator::Destroyer> fNext;
        };

        std::unique_ptr<Node, GrSubRunAllocator::Destroyer> current = nullptr;
        for (int i = 0; i < 128; i++) {
            current = arena.makeUnique<Node>(std::move(current));
        }
        REPORTER_ASSERT(r, created == 128);
        REPORTER_ASSERT(r, destroyed == 0);
    }
    // Destroying |current| unwinds the whole chain recursively via fNext.
    REPORTER_ASSERT(r, created == 128);
    REPORTER_ASSERT(r, destroyed == 128);

    // Exercise the array ctor w/ a mapping function
    {
        struct I {
            I(int v) : i{v} {}
            ~I() {}
            int i;
        };
        GrSTSubRunAllocator<64> arena;
        // Element i is constructed from the mapping function's value at i.
        auto a = arena.makeUniqueArray<I>(8, [](size_t i) { return i; });
        for (size_t i = 0; i < 8; i++) {
            REPORTER_ASSERT(r, a[i].i == (int)i);
        }
    }

    {
        // A near-block-size request must still come back correctly aligned.
        GrSubRunAllocator arena(4096);
        void* ptr = arena.alignedBytes(4081, 8);
        REPORTER_ASSERT(r, ((intptr_t)ptr & 7) == 0);
    }
}
323