/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include "SkBenchmark.h"
#include "SkMatrix.h"
#include "SkRandom.h"
#include "SkString.h"

13 class MatrixBench : public SkBenchmark {
14 SkString fName;
15 enum { N = 100000 };
16 public:
MatrixBench(void * param,const char name[])17 MatrixBench(void* param, const char name[]) : INHERITED(param) {
18 fName.printf("matrix_%s", name);
19 fIsRendering = false;
20 }
21
22 virtual void performTest() = 0;
23
24 protected:
mulLoopCount() const25 virtual int mulLoopCount() const { return 1; }
26
onGetName()27 virtual const char* onGetName() {
28 return fName.c_str();
29 }
30
onDraw(SkCanvas * canvas)31 virtual void onDraw(SkCanvas* canvas) {
32 int n = SkBENCHLOOP(N * this->mulLoopCount());
33 for (int i = 0; i < n; i++) {
34 this->performTest();
35 }
36 }
37
38 private:
39 typedef SkBenchmark INHERITED;
40 };
41
// we want to stop the compiler from eliminating code that it thinks is a no-op
// so we have a non-static global we increment, hoping that will convince the
// compiler to execute everything
int gMatrixBench_NonStaticGlobal;

// Evaluates pred and, when true, touches the global above so the compiler
// cannot discard the evaluation as dead code.
#define always_do(pred)                         \
    do {                                        \
        if (pred) {                             \
            ++gMatrixBench_NonStaticGlobal;     \
        }                                       \
    } while (0)

54 class EqualsMatrixBench : public MatrixBench {
55 public:
EqualsMatrixBench(void * param)56 EqualsMatrixBench(void* param) : INHERITED(param, "equals") {}
57 protected:
performTest()58 virtual void performTest() {
59 SkMatrix m0, m1, m2;
60
61 m0.reset();
62 m1.reset();
63 m2.reset();
64 always_do(m0 == m1);
65 always_do(m1 == m2);
66 always_do(m2 == m0);
67 }
68 private:
69 typedef MatrixBench INHERITED;
70 };
71
72 class ScaleMatrixBench : public MatrixBench {
73 public:
ScaleMatrixBench(void * param)74 ScaleMatrixBench(void* param) : INHERITED(param, "scale") {
75 fSX = fSY = SkFloatToScalar(1.5f);
76 fM0.reset();
77 fM1.setScale(fSX, fSY);
78 fM2.setTranslate(fSX, fSY);
79 }
80 protected:
performTest()81 virtual void performTest() {
82 SkMatrix m;
83 m = fM0; m.preScale(fSX, fSY);
84 m = fM1; m.preScale(fSX, fSY);
85 m = fM2; m.preScale(fSX, fSY);
86 }
87 private:
88 SkMatrix fM0, fM1, fM2;
89 SkScalar fSX, fSY;
90 typedef MatrixBench INHERITED;
91 };
92
93 // having unknown values in our arrays can throw off the timing a lot, perhaps
94 // handling NaN values is a lot slower. Anyway, this guy is just meant to put
95 // reasonable values in our arrays.
init9(T array[9])96 template <typename T> void init9(T array[9]) {
97 SkRandom rand;
98 for (int i = 0; i < 9; i++) {
99 array[i] = rand.nextSScalar1();
100 }
101 }
102
103 // Test the performance of setConcat() non-perspective case:
104 // using floating point precision only.
105 class FloatConcatMatrixBench : public MatrixBench {
106 public:
FloatConcatMatrixBench(void * p)107 FloatConcatMatrixBench(void* p) : INHERITED(p, "concat_floatfloat") {
108 init9(mya);
109 init9(myb);
110 init9(myr);
111 }
112 protected:
mulLoopCount() const113 virtual int mulLoopCount() const { return 4; }
114
muladdmul(float a,float b,float c,float d,float * result)115 static inline void muladdmul(float a, float b, float c, float d,
116 float* result) {
117 *result = a * b + c * d;
118 }
performTest()119 virtual void performTest() {
120 const float* a = mya;
121 const float* b = myb;
122 float* r = myr;
123 muladdmul(a[0], b[0], a[1], b[3], &r[0]);
124 muladdmul(a[0], b[1], a[1], b[4], &r[1]);
125 muladdmul(a[0], b[2], a[1], b[5], &r[2]);
126 r[2] += a[2];
127 muladdmul(a[3], b[0], a[4], b[3], &r[3]);
128 muladdmul(a[3], b[1], a[4], b[4], &r[4]);
129 muladdmul(a[3], b[2], a[4], b[5], &r[5]);
130 r[5] += a[5];
131 r[6] = r[7] = 0.0f;
132 r[8] = 1.0f;
133 }
134 private:
135 float mya [9];
136 float myb [9];
137 float myr [9];
138 typedef MatrixBench INHERITED;
139 };
140
// Narrowing helper: converts a double to float with an explicit cast so the
// intent (and the precision loss) is visible at call sites.
static inline float SkDoubleToFloat(double x) {
    return static_cast<float>(x);
}

145 // Test the performance of setConcat() non-perspective case:
146 // using floating point precision but casting up to float for
147 // intermediate results during computations.
148 class FloatDoubleConcatMatrixBench : public MatrixBench {
149 public:
FloatDoubleConcatMatrixBench(void * p)150 FloatDoubleConcatMatrixBench(void* p) : INHERITED(p, "concat_floatdouble") {
151 init9(mya);
152 init9(myb);
153 init9(myr);
154 }
155 protected:
mulLoopCount() const156 virtual int mulLoopCount() const { return 4; }
157
muladdmul(float a,float b,float c,float d,float * result)158 static inline void muladdmul(float a, float b, float c, float d,
159 float* result) {
160 *result = SkDoubleToFloat((double)a * b + (double)c * d);
161 }
performTest()162 virtual void performTest() {
163 const float* a = mya;
164 const float* b = myb;
165 float* r = myr;
166 muladdmul(a[0], b[0], a[1], b[3], &r[0]);
167 muladdmul(a[0], b[1], a[1], b[4], &r[1]);
168 muladdmul(a[0], b[2], a[1], b[5], &r[2]);
169 r[2] += a[2];
170 muladdmul(a[3], b[0], a[4], b[3], &r[3]);
171 muladdmul(a[3], b[1], a[4], b[4], &r[4]);
172 muladdmul(a[3], b[2], a[4], b[5], &r[5]);
173 r[5] += a[5];
174 r[6] = r[7] = 0.0f;
175 r[8] = 1.0f;
176 }
177 private:
178 float mya [9];
179 float myb [9];
180 float myr [9];
181 typedef MatrixBench INHERITED;
182 };
183
184 // Test the performance of setConcat() non-perspective case:
185 // using double precision only.
186 class DoubleConcatMatrixBench : public MatrixBench {
187 public:
DoubleConcatMatrixBench(void * p)188 DoubleConcatMatrixBench(void* p) : INHERITED(p, "concat_double") {
189 init9(mya);
190 init9(myb);
191 init9(myr);
192 }
193 protected:
mulLoopCount() const194 virtual int mulLoopCount() const { return 4; }
195
muladdmul(double a,double b,double c,double d,double * result)196 static inline void muladdmul(double a, double b, double c, double d,
197 double* result) {
198 *result = a * b + c * d;
199 }
performTest()200 virtual void performTest() {
201 const double* a = mya;
202 const double* b = myb;
203 double* r = myr;
204 muladdmul(a[0], b[0], a[1], b[3], &r[0]);
205 muladdmul(a[0], b[1], a[1], b[4], &r[1]);
206 muladdmul(a[0], b[2], a[1], b[5], &r[2]);
207 r[2] += a[2];
208 muladdmul(a[3], b[0], a[4], b[3], &r[3]);
209 muladdmul(a[3], b[1], a[4], b[4], &r[4]);
210 muladdmul(a[3], b[2], a[4], b[5], &r[5]);
211 r[5] += a[5];
212 r[6] = r[7] = 0.0;
213 r[8] = 1.0;
214 }
215 private:
216 double mya [9];
217 double myb [9];
218 double myr [9];
219 typedef MatrixBench INHERITED;
220 };
221
222 class GetTypeMatrixBench : public MatrixBench {
223 public:
GetTypeMatrixBench(void * param)224 GetTypeMatrixBench(void* param)
225 : INHERITED(param, "gettype") {
226 fArray[0] = (float) fRnd.nextS();
227 fArray[1] = (float) fRnd.nextS();
228 fArray[2] = (float) fRnd.nextS();
229 fArray[3] = (float) fRnd.nextS();
230 fArray[4] = (float) fRnd.nextS();
231 fArray[5] = (float) fRnd.nextS();
232 fArray[6] = (float) fRnd.nextS();
233 fArray[7] = (float) fRnd.nextS();
234 fArray[8] = (float) fRnd.nextS();
235 }
236 protected:
237 // Putting random generation of the matrix inside performTest()
238 // would help us avoid anomalous runs, but takes up 25% or
239 // more of the function time.
performTest()240 virtual void performTest() {
241 fMatrix.setAll(fArray[0], fArray[1], fArray[2],
242 fArray[3], fArray[4], fArray[5],
243 fArray[6], fArray[7], fArray[8]);
244 always_do(fMatrix.getType());
245 fMatrix.dirtyMatrixTypeCache();
246 always_do(fMatrix.getType());
247 fMatrix.dirtyMatrixTypeCache();
248 always_do(fMatrix.getType());
249 fMatrix.dirtyMatrixTypeCache();
250 always_do(fMatrix.getType());
251 fMatrix.dirtyMatrixTypeCache();
252 always_do(fMatrix.getType());
253 fMatrix.dirtyMatrixTypeCache();
254 always_do(fMatrix.getType());
255 fMatrix.dirtyMatrixTypeCache();
256 always_do(fMatrix.getType());
257 fMatrix.dirtyMatrixTypeCache();
258 always_do(fMatrix.getType());
259 }
260 private:
261 SkMatrix fMatrix;
262 float fArray[9];
263 SkRandom fRnd;
264 typedef MatrixBench INHERITED;
265 };
266
267 class ScaleTransMixedMatrixBench : public MatrixBench {
268 public:
ScaleTransMixedMatrixBench(void * p)269 ScaleTransMixedMatrixBench(void* p) : INHERITED(p, "scaletrans_mixed"), fCount (16) {
270 fMatrix.setAll(fRandom.nextSScalar1(), fRandom.nextSScalar1(), fRandom.nextSScalar1(),
271 fRandom.nextSScalar1(), fRandom.nextSScalar1(), fRandom.nextSScalar1(),
272 fRandom.nextSScalar1(), fRandom.nextSScalar1(), fRandom.nextSScalar1());
273 int i;
274 for (i = 0; i < SkBENCHLOOP(fCount); i++) {
275 fSrc[i].fX = fRandom.nextSScalar1();
276 fSrc[i].fY = fRandom.nextSScalar1();
277 fDst[i].fX = fRandom.nextSScalar1();
278 fDst[i].fY = fRandom.nextSScalar1();
279 }
280 }
281 protected:
performTest()282 virtual void performTest() {
283 SkPoint* dst = fDst;
284 const SkPoint* src = fSrc;
285 int count = SkBENCHLOOP(fCount);
286 float mx = fMatrix[SkMatrix::kMScaleX];
287 float my = fMatrix[SkMatrix::kMScaleY];
288 float tx = fMatrix[SkMatrix::kMTransX];
289 float ty = fMatrix[SkMatrix::kMTransY];
290 do {
291 dst->fY = SkScalarMulAdd(src->fY, my, ty);
292 dst->fX = SkScalarMulAdd(src->fX, mx, tx);
293 src += 1;
294 dst += 1;
295 } while (--count);
296 }
297 private:
298 SkMatrix fMatrix;
299 SkPoint fSrc [16];
300 SkPoint fDst [16];
301 int fCount;
302 SkRandom fRandom;
303 typedef MatrixBench INHERITED;
304 };
305
306 class ScaleTransDoubleMatrixBench : public MatrixBench {
307 public:
ScaleTransDoubleMatrixBench(void * p)308 ScaleTransDoubleMatrixBench(void* p) : INHERITED(p, "scaletrans_double"), fCount (16) {
309 init9(fMatrix);
310 int i;
311 for (i = 0; i < SkBENCHLOOP(fCount); i++) {
312 fSrc[i].fX = fRandom.nextSScalar1();
313 fSrc[i].fY = fRandom.nextSScalar1();
314 fDst[i].fX = fRandom.nextSScalar1();
315 fDst[i].fY = fRandom.nextSScalar1();
316 }
317 }
318 protected:
performTest()319 virtual void performTest() {
320 SkPoint* dst = fDst;
321 const SkPoint* src = fSrc;
322 int count = SkBENCHLOOP(fCount);
323 // As doubles, on Z600 Linux systems this is 2.5x as expensive as mixed mode
324 float mx = (float) fMatrix[SkMatrix::kMScaleX];
325 float my = (float) fMatrix[SkMatrix::kMScaleY];
326 float tx = (float) fMatrix[SkMatrix::kMTransX];
327 float ty = (float) fMatrix[SkMatrix::kMTransY];
328 do {
329 dst->fY = src->fY * my + ty;
330 dst->fX = src->fX * mx + tx;
331 src += 1;
332 dst += 1;
333 } while (--count);
334 }
335 private:
336 double fMatrix [9];
337 SkPoint fSrc [16];
338 SkPoint fDst [16];
339 int fCount;
340 SkRandom fRandom;
341 typedef MatrixBench INHERITED;
342 };
343
344 class InvertMapRectMatrixBench : public MatrixBench {
345 public:
InvertMapRectMatrixBench(void * param,const char * name,int flags)346 InvertMapRectMatrixBench(void* param, const char* name, int flags)
347 : INHERITED(param, name)
348 , fFlags(flags) {
349 fMatrix.reset();
350 fIteration = 0;
351 if (flags & kScale_Flag) {
352 fMatrix.postScale(SkFloatToScalar(1.5f), SkFloatToScalar(2.5f));
353 }
354 if (flags & kTranslate_Flag) {
355 fMatrix.postTranslate(SkFloatToScalar(1.5f), SkFloatToScalar(2.5f));
356 }
357 if (flags & kRotate_Flag) {
358 fMatrix.postRotate(SkFloatToScalar(45.0f));
359 }
360 if (flags & kPerspective_Flag) {
361 fMatrix.setPerspX(SkFloatToScalar(1.5f));
362 fMatrix.setPerspY(SkFloatToScalar(2.5f));
363 }
364 if (0 == (flags & kUncachedTypeMask_Flag)) {
365 fMatrix.getType();
366 }
367 }
368 enum Flag {
369 kScale_Flag = 0x01,
370 kTranslate_Flag = 0x02,
371 kRotate_Flag = 0x04,
372 kPerspective_Flag = 0x08,
373 kUncachedTypeMask_Flag = 0x10,
374 };
375 protected:
performTest()376 virtual void performTest() {
377 if (fFlags & kUncachedTypeMask_Flag) {
378 // This will invalidate the typemask without
379 // changing the matrix.
380 fMatrix.setPerspX(fMatrix.getPerspX());
381 }
382 SkMatrix inv;
383 bool invertible = fMatrix.invert(&inv);
384 SkASSERT(invertible);
385 SkRect transformedRect;
386 // an arbitrary, small, non-zero rect to transform
387 SkRect srcRect = SkRect::MakeWH(SkIntToScalar(10), SkIntToScalar(10));
388 if (invertible) {
389 inv.mapRect(&transformedRect, srcRect);
390 }
391 }
392 private:
393 SkMatrix fMatrix;
394 int fFlags;
395 unsigned fIteration;
396 typedef MatrixBench INHERITED;
397 };
398
399 ///////////////////////////////////////////////////////////////////////////////
400
401 DEF_BENCH( return new EqualsMatrixBench(p); )
402 DEF_BENCH( return new ScaleMatrixBench(p); )
403 DEF_BENCH( return new FloatConcatMatrixBench(p); )
404 DEF_BENCH( return new FloatDoubleConcatMatrixBench(p); )
405 DEF_BENCH( return new DoubleConcatMatrixBench(p); )
406 DEF_BENCH( return new GetTypeMatrixBench(p); )
407 DEF_BENCH( return new InvertMapRectMatrixBench(p, "invert_maprect_identity", 0); )
408
409 DEF_BENCH(return new InvertMapRectMatrixBench(p,
410 "invert_maprect_rectstaysrect",
411 InvertMapRectMatrixBench::kScale_Flag |
412 InvertMapRectMatrixBench::kTranslate_Flag); )
413
414 DEF_BENCH(return new InvertMapRectMatrixBench(p,
415 "invert_maprect_translate",
416 InvertMapRectMatrixBench::kTranslate_Flag); )
417
418 DEF_BENCH(return new InvertMapRectMatrixBench(p,
419 "invert_maprect_nonpersp",
420 InvertMapRectMatrixBench::kScale_Flag |
421 InvertMapRectMatrixBench::kRotate_Flag |
422 InvertMapRectMatrixBench::kTranslate_Flag); )
423
424 DEF_BENCH( return new InvertMapRectMatrixBench(p,
425 "invert_maprect_persp",
426 InvertMapRectMatrixBench::kPerspective_Flag); )
427
428 DEF_BENCH( return new InvertMapRectMatrixBench(p,
429 "invert_maprect_typemask_rectstaysrect",
430 InvertMapRectMatrixBench::kUncachedTypeMask_Flag |
431 InvertMapRectMatrixBench::kScale_Flag |
432 InvertMapRectMatrixBench::kTranslate_Flag); )
433
434 DEF_BENCH( return new InvertMapRectMatrixBench(p,
435 "invert_maprect_typemask_nonpersp",
436 InvertMapRectMatrixBench::kUncachedTypeMask_Flag |
437 InvertMapRectMatrixBench::kScale_Flag |
438 InvertMapRectMatrixBench::kRotate_Flag |
439 InvertMapRectMatrixBench::kTranslate_Flag); )
440
441 DEF_BENCH( return new ScaleTransMixedMatrixBench(p); )
442 DEF_BENCH( return new ScaleTransDoubleMatrixBench(p); )
443