/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "Test.h"
// This is a GPU-backend specific test
#if SK_SUPPORT_GPU
#include "GrMemoryPool.h"
#include "SkRandom.h"
#include "SkTArray.h"
#include "SkTDArray.h"
#include "SkTemplates.h"

#include <memory>

// A is the top of an inheritance tree of classes that overload op new and
// delete to use a GrMemoryPool. The objects have values of different types
// that can be set and checked.
class A {
public:
    A() {}
    virtual void setValues(int v) {
        fChar = static_cast<char>(v);
    }
    virtual bool checkValues(int v) {
        return fChar == static_cast<char>(v);
    }
    virtual ~A() {}

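    // When a pool has been installed via SetAllocator(), allocations and frees are routed
    // through it; otherwise they fall back to the global heap.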
    void* operator new(size_t size) {
        if (!gPool.get()) {
            return ::operator new(size);
        } else {
            return gPool->allocate(size);
        }
    }

    void operator delete(void* p) {
        if (!gPool.get()) {
            ::operator delete(p);
        } else {
            gPool->release(p);
        }
    }

    static A* Create(SkRandom* r);

    static void SetAllocator(size_t preallocSize, size_t minAllocSize) {
        GrMemoryPool* pool = new GrMemoryPool(preallocSize, minAllocSize);
        gPool.reset(pool);
    }

    static void ResetAllocator() {
        gPool.reset(nullptr);
    }

private:
    static std::unique_ptr<GrMemoryPool> gPool;
    char fChar;
};

std::unique_ptr<GrMemoryPool> A::gPool;

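// B adds a double to A's char.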
class B : public A {
public:
    B() {}
    virtual void setValues(int v) {
        fDouble = static_cast<double>(v);
        this->INHERITED::setValues(v);
    }
    virtual bool checkValues(int v) {
        return fDouble == static_cast<double>(v) &&
               this->INHERITED::checkValues(v);
    }
    virtual ~B() {}

private:
    double fDouble;

    typedef A INHERITED;
};

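// C adds an int64_t to A's char.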
class C : public A {
public:
    C() {}
    virtual void setValues(int v) {
        fInt64 = static_cast<int64_t>(v);
        this->INHERITED::setValues(v);
    }
    virtual bool checkValues(int v) {
        return fInt64 == static_cast<int64_t>(v) &&
               this->INHERITED::checkValues(v);
    }
    virtual ~C() {}

private:
    int64_t fInt64;

    typedef A INHERITED;
};

// D derives from C and owns a dynamically created B
class D : public C {
public:
    D() {
        fB = new B();
    }
    virtual void setValues(int v) {
        fVoidStar = reinterpret_cast<void*>(static_cast<intptr_t>(v));
        this->INHERITED::setValues(v);
        fB->setValues(v);
    }
    virtual bool checkValues(int v) {
        return fVoidStar == reinterpret_cast<void*>(static_cast<intptr_t>(v)) &&
               fB->checkValues(v) &&
               this->INHERITED::checkValues(v);
    }
    virtual ~D() {
        delete fB;
    }
private:
    void* fVoidStar;
    B* fB;

    typedef C INHERITED;
};

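// E adds an array of 20 ints, making it noticeably larger than the other classes.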
class E : public A {
public:
    E() {}
    virtual void setValues(int v) {
        for (size_t i = 0; i < SK_ARRAY_COUNT(fIntArray); ++i) {
            fIntArray[i] = v;
        }
        this->INHERITED::setValues(v);
    }
    virtual bool checkValues(int v) {
        bool ok = true;
        for (size_t i = 0; ok && i < SK_ARRAY_COUNT(fIntArray); ++i) {
            if (fIntArray[i] != v) {
                ok = false;
            }
        }
        return ok && this->INHERITED::checkValues(v);
    }
    virtual ~E() {}
private:
    int fIntArray[20];

    typedef A INHERITED;
};

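// Randomly creates one of the A-derived types so that pool allocations vary in size.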
A* A::Create(SkRandom* r) {
    switch (r->nextRangeU(0, 4)) {
        case 0:
            return new A;
        case 1:
            return new B;
        case 2:
            return new C;
        case 3:
            return new D;
        case 4:
            return new E;
        default:
            // suppress warning
            return nullptr;
    }
}

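// Pairs a live instance with the value it was initialized with.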
struct Rec {
    A* fInstance;
    int fValue;
};

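// Stress test: randomly creates and deletes pool-backed objects under several pool
// configurations and create/destroy ratios, periodically verifying that every live
// object still holds the value it was given.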
DEF_TEST(GrMemoryPool, reporter) {
    // prealloc and min alloc sizes for the pool
    static const size_t gSizes[][2] = {
        {0, 0},
        {10 * sizeof(A), 20 * sizeof(A)},
        {100 * sizeof(A), 100 * sizeof(A)},
        {500 * sizeof(A), 500 * sizeof(A)},
        {10000 * sizeof(A), 0},
        {1, 100 * sizeof(A)},
    };
    // different percentages of creation vs deletion
    static const float gCreateFraction[] = {1.f, .95f, 0.75f, .5f};
    // number of create/destroys per test
    static const int kNumIters = 20000;
    // check that all the values stored in A objects are correct after this
    // number of iterations
    static const int kCheckPeriod = 500;

    SkRandom r;
    for (size_t s = 0; s < SK_ARRAY_COUNT(gSizes); ++s) {
        A::SetAllocator(gSizes[s][0], gSizes[s][1]);
        for (size_t c = 0; c < SK_ARRAY_COUNT(gCreateFraction); ++c) {
            SkTDArray<Rec> instanceRecs;
            for (int i = 0; i < kNumIters; ++i) {
                float createOrDestroy = r.nextUScalar1();
                if (createOrDestroy < gCreateFraction[c] ||
                    0 == instanceRecs.count()) {
                    Rec* rec = instanceRecs.append();
                    rec->fInstance = A::Create(&r);
                    rec->fValue = static_cast<int>(r.nextU());
                    rec->fInstance->setValues(rec->fValue);
                } else {
                    int d = r.nextRangeU(0, instanceRecs.count() - 1);
                    Rec& rec = instanceRecs[d];
                    REPORTER_ASSERT(reporter, rec.fInstance->checkValues(rec.fValue));
                    delete rec.fInstance;
                    instanceRecs.removeShuffle(d);
                }
                if (0 == i % kCheckPeriod) {
                    for (int j = 0; j < instanceRecs.count(); ++j) {
                        Rec& rec = instanceRecs[j];
                        REPORTER_ASSERT(reporter, rec.fInstance->checkValues(rec.fValue));
                    }
                }
            }
            for (int i = 0; i < instanceRecs.count(); ++i) {
                Rec& rec = instanceRecs[i];
                REPORTER_ASSERT(reporter, rec.fInstance->checkValues(rec.fValue));
                delete rec.fInstance;
            }
        }
    }
}

// GrMemoryPool requires that it is empty at the point of destruction. This helper
// achieves that by releasing all added allocations in its destructor.
class AutoPoolReleaser {
public:
    AutoPoolReleaser(GrMemoryPool& pool): fPool(pool) {
    }
    ~AutoPoolReleaser() {
        for (void* ptr: fAllocated) {
            fPool.release(ptr);
        }
    }
    void add(void* ptr) {
        fAllocated.push_back(ptr);
    }
private:
    GrMemoryPool& fPool;
    SkTArray<void*> fAllocated;
};

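// Exercises GrMemoryPool's sizing guarantees directly: preallocSize() and how size()
// grows as blocks are added.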
DEF_TEST(GrMemoryPoolAPI, reporter) {
    constexpr size_t kSmallestMinAllocSize = GrMemoryPool::kSmallestMinAllocSize;

    // Allocates memory until the pool adds a new block (pool.size() changes).
    auto allocateMemory = [](GrMemoryPool& pool, AutoPoolReleaser& r) {
        size_t origPoolSize = pool.size();
        while (pool.size() == origPoolSize) {
            r.add(pool.allocate(31));
        }
    };

    // Effective prealloc space capacity is >= kSmallestMinAllocSize.
    {
        GrMemoryPool pool(0, 0);
        REPORTER_ASSERT(reporter, pool.preallocSize() == kSmallestMinAllocSize);
    }

    // Effective prealloc space capacity is >= minAllocSize.
    {
        constexpr size_t kMinAllocSize = kSmallestMinAllocSize * 2;
        GrMemoryPool pool(kSmallestMinAllocSize, kMinAllocSize);
        REPORTER_ASSERT(reporter, pool.preallocSize() == kMinAllocSize);
    }

    // Effective block size capacity is >= kSmallestMinAllocSize.
    {
        GrMemoryPool pool(kSmallestMinAllocSize, kSmallestMinAllocSize / 2);
        AutoPoolReleaser r(pool);

        allocateMemory(pool, r);
        REPORTER_ASSERT(reporter, pool.size() == kSmallestMinAllocSize);
    }

    // Pool allocates exactly preallocSize on creation.
    {
        constexpr size_t kPreallocSize = kSmallestMinAllocSize * 5;
        GrMemoryPool pool(kPreallocSize, 0);
        REPORTER_ASSERT(reporter, pool.preallocSize() == kPreallocSize);
    }

    // Pool allocates exactly minAllocSize when it expands.
    {
        constexpr size_t kMinAllocSize = kSmallestMinAllocSize * 7;
        GrMemoryPool pool(0, kMinAllocSize);
        AutoPoolReleaser r(pool);

        allocateMemory(pool, r);
        REPORTER_ASSERT(reporter, pool.size() == kMinAllocSize);

        allocateMemory(pool, r);
        REPORTER_ASSERT(reporter, pool.size() == 2 * kMinAllocSize);
    }

    // When asked to allocate an amount > minAllocSize, the pool allocates a larger block
    // to accommodate the request plus its internal structures.
    {
        constexpr size_t kMinAllocSize = kSmallestMinAllocSize * 2;
        GrMemoryPool pool(kSmallestMinAllocSize, kMinAllocSize);
        AutoPoolReleaser r(pool);

        REPORTER_ASSERT(reporter, pool.size() == 0);

        constexpr size_t hugeSize = 10 * kMinAllocSize;
        r.add(pool.allocate(hugeSize));
        REPORTER_ASSERT(reporter, pool.size() > hugeSize);

        // The block allocated to accommodate the huge request doesn't include any extra
        // space, so the next allocation request allocates a new block.
        size_t hugeBlockSize = pool.size();
        r.add(pool.allocate(0));
        REPORTER_ASSERT(reporter, pool.size() == hugeBlockSize + kMinAllocSize);
    }
}

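// The same sizing checks, but for GrObjectMemoryPool, which is parameterized by an object
// count rather than a byte size.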
DEF_TEST(GrObjectMemoryPoolAPI, reporter) {
    struct Data {
        int value[5];
    };
    using DataObjectPool = GrObjectMemoryPool<Data>;
    constexpr size_t kSmallestMinAllocCount = DataObjectPool::kSmallestMinAllocCount;

    // Allocates objects until the pool adds a new block (pool.size() changes).
    // Returns the number of objects that fit into the current block (i.e. before pool.size()
    // changed; the newly allocated block always ends up with one object allocated from it).
    auto allocateObjects = [](DataObjectPool& pool, AutoPoolReleaser& r) -> size_t {
        size_t count = 0;
        size_t origPoolSize = pool.size();
        while (pool.size() == origPoolSize) {
            r.add(pool.allocate());
            count++;
        }
        return count - 1;
    };

    // Effective prealloc space capacity is >= kSmallestMinAllocCount.
    {
        DataObjectPool pool(kSmallestMinAllocCount / 3, 0);
        AutoPoolReleaser r(pool);

        size_t preallocCount = allocateObjects(pool, r);
        REPORTER_ASSERT(reporter, preallocCount == kSmallestMinAllocCount);
    }

    // Effective prealloc space capacity is >= minAllocCount.
    {
        DataObjectPool pool(kSmallestMinAllocCount, 2 * kSmallestMinAllocCount);
        AutoPoolReleaser r(pool);

        size_t preallocCount = allocateObjects(pool, r);
        REPORTER_ASSERT(reporter, preallocCount == 2 * kSmallestMinAllocCount);
    }

    // Effective block capacity is >= kSmallestMinAllocCount.
    {
        DataObjectPool pool(kSmallestMinAllocCount, kSmallestMinAllocCount / 2);
        AutoPoolReleaser r(pool);

        // Fill the prealloc space.
        allocateObjects(pool, r);

        size_t minAllocCount = 1 + allocateObjects(pool, r);
        REPORTER_ASSERT(reporter, minAllocCount == kSmallestMinAllocCount);
    }

    // Pool allocates space for exactly preallocCount objects on creation.
    {
        constexpr size_t kPreallocCount = kSmallestMinAllocCount * 7 / 3;
        DataObjectPool pool(kPreallocCount, 0);
        AutoPoolReleaser r(pool);

        size_t preallocCount = allocateObjects(pool, r);
        REPORTER_ASSERT(reporter, preallocCount == kPreallocCount);
    }

    // Pool allocates space for minAllocCount objects when it adds a new block.
    {
        constexpr size_t kMinAllocCount = kSmallestMinAllocCount * 11 / 3;
        DataObjectPool pool(0, kMinAllocCount);
        AutoPoolReleaser r(pool);

        // Fill the prealloc space.
        allocateObjects(pool, r);

        size_t firstBlockCount = 1 + allocateObjects(pool, r);
        REPORTER_ASSERT(reporter, firstBlockCount == kMinAllocCount);

        size_t secondBlockCount = 1 + allocateObjects(pool, r);
        REPORTER_ASSERT(reporter, secondBlockCount == kMinAllocCount);
    }
}


#endif