1 #include "precomp.hpp"
2 
3 #include <map>
4 #include <iostream>
5 #include <fstream>
6 
7 #if defined WIN32 || defined _WIN32 || defined WIN64 || defined _WIN64
8 #ifndef NOMINMAX
9 #define NOMINMAX
10 #endif
11 #include <windows.h>
12 #endif
13 
14 #ifdef HAVE_CUDA
15 #include "opencv2/core/cuda.hpp"
16 #endif
17 
18 #ifdef ANDROID
19 # include <sys/time.h>
20 #endif
21 
22 using namespace perf;
23 
24 int64 TestBase::timeLimitDefault = 0;
25 unsigned int TestBase::iterationsLimitDefault = (unsigned int)(-1);
26 int64 TestBase::_timeadjustment = 0;
27 
28 // Item [0] will be considered the default implementation.
29 static std::vector<std::string> available_impls;
30 
31 static std::string  param_impl;
32 
33 static enum PERF_STRATEGY strategyForce = PERF_STRATEGY_DEFAULT;
34 static enum PERF_STRATEGY strategyModule = PERF_STRATEGY_SIMPLE;
35 
36 static double       param_max_outliers;
37 static double       param_max_deviation;
38 static unsigned int param_min_samples;
39 static unsigned int param_force_samples;
40 static uint64       param_seed;
41 static double       param_time_limit;
42 static int          param_threads;
43 static bool         param_write_sanity;
44 static bool         param_verify_sanity;
45 #ifdef CV_COLLECT_IMPL_DATA
46 static bool         param_collect_impl;
47 #endif
48 extern bool         test_ipp_check;
49 
50 #ifdef HAVE_CUDA
51 static int          param_cuda_device;
52 #endif
53 
54 #ifdef ANDROID
55 static int          param_affinity_mask;
56 static bool         log_power_checkpoints;
57 
58 #include <sys/syscall.h>
59 #include <pthread.h>
60 static void setCurrentThreadAffinityMask(int mask)
61 {
62     pid_t pid=gettid();
63     int syscallres=syscall(__NR_sched_setaffinity, pid, sizeof(mask), &mask);
64     if (syscallres)
65     {
66         int err=errno;
67         (void)err; // keep err referenced, so builds where LOGE expands to nothing see no unused-variable warning
68         LOGE("Error in the syscall setaffinity: mask=%d=0x%x err=%d=0x%x", mask, mask, err, err);
69     }
70 }
71 #endif
72 
73 static double perf_stability_criteria = 0.03; // 3%
74 
75 namespace {
76 
77 class PerfEnvironment: public ::testing::Environment
78 {
79 public:
80     void TearDown()
81     {
82         cv::setNumThreads(-1);
83     }
84 };
85 
86 } // namespace
87 
88 static void randu(cv::Mat& m)
89 {
90     const int bigValue = 0x00000FFF;
91     if (m.depth() < CV_32F)
92     {
93         int minmax[] = {0, 256};
94         cv::Mat mr = cv::Mat(m.rows, (int)(m.cols * m.elemSize()), CV_8U, m.ptr(), m.step[0]);
95         cv::randu(mr, cv::Mat(1, 1, CV_32S, minmax), cv::Mat(1, 1, CV_32S, minmax + 1));
96     }
97     else if (m.depth() == CV_32F)
98     {
99         //float minmax[] = {-FLT_MAX, FLT_MAX};
100         float minmax[] = {-bigValue, bigValue};
101         cv::Mat mr = m.reshape(1);
102         cv::randu(mr, cv::Mat(1, 1, CV_32F, minmax), cv::Mat(1, 1, CV_32F, minmax + 1));
103     }
104     else
105     {
106         //double minmax[] = {-DBL_MAX, DBL_MAX};
107         double minmax[] = {-bigValue, bigValue};
108         cv::Mat mr = m.reshape(1);
109         cv::randu(mr, cv::Mat(1, 1, CV_64F, minmax), cv::Mat(1, 1, CV_64F, minmax + 1));
110     }
111 }
112 
113 /*****************************************************************************************\
114 *                       inner exception class for early termination
115 \*****************************************************************************************/
116 
117 class PerfEarlyExitException: public cv::Exception {};
118 
119 /*****************************************************************************************\
120 *                                   ::perf::Regression
121 \*****************************************************************************************/
122 
123 Regression& Regression::instance()
124 {
125     static Regression single;
126     return single;
127 }
128 
129 Regression& Regression::add(TestBase* test, const std::string& name, cv::InputArray array, double eps, ERROR_TYPE err)
130 {
131     if(test) test->setVerified();
132     return instance()(name, array, eps, err);
133 }
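// Typical call sites: perf tests normally reach these entry points through
// the SANITY_CHECK* macros from ts_perf.hpp, which stringify the checked
// argument and forward to the add* functions. A minimal sketch (fixture and
// parameters are illustrative only):
//
//     PERF_TEST_P(Size_MatType, blur3x3, testing::Combine(...))
//     {
//         cv::Mat src(sz, type), dst(sz, type);
//         declare.in(src, WARMUP_RNG).out(dst);
//         TEST_CYCLE() cv::blur(src, dst, cv::Size(3, 3));
//         SANITY_CHECK(dst, 1e-6); // ~ Regression::add(this, "dst", dst, 1e-6)
//     }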
134 
135 Regression& Regression::addMoments(TestBase* test, const std::string& name, const cv::Moments& array, double eps, ERROR_TYPE err)
136 {
137     int len = (int)sizeof(cv::Moments) / sizeof(double);
138     cv::Mat m(1, len, CV_64F, (void*)&array);
139 
140     return Regression::add(test, name, m, eps, err);
141 }
142 
143 Regression& Regression::addKeypoints(TestBase* test, const std::string& name, const std::vector<cv::KeyPoint>& array, double eps, ERROR_TYPE err)
144 {
145     int len = (int)array.size();
146     cv::Mat pt      (len, 1, CV_32FC2, len ? (void*)&array[0].pt : 0,       sizeof(cv::KeyPoint));
147     cv::Mat size    (len, 1, CV_32FC1, len ? (void*)&array[0].size : 0,     sizeof(cv::KeyPoint));
148     cv::Mat angle   (len, 1, CV_32FC1, len ? (void*)&array[0].angle : 0,    sizeof(cv::KeyPoint));
149     cv::Mat response(len, 1, CV_32FC1, len ? (void*)&array[0].response : 0, sizeof(cv::KeyPoint));
150     cv::Mat octave  (len, 1, CV_32SC1, len ? (void*)&array[0].octave : 0,   sizeof(cv::KeyPoint));
151     cv::Mat class_id(len, 1, CV_32SC1, len ? (void*)&array[0].class_id : 0, sizeof(cv::KeyPoint));
152 
153     return Regression::add(test, name + "-pt",       pt,       eps, ERROR_ABSOLUTE)
154                                 (name + "-size",     size,     eps, ERROR_ABSOLUTE)
155                                 (name + "-angle",    angle,    eps, ERROR_ABSOLUTE)
156                                 (name + "-response", response, eps, err)
157                                 (name + "-octave",   octave,   eps, ERROR_ABSOLUTE)
158                                 (name + "-class_id", class_id, eps, ERROR_ABSOLUTE);
159 }
160 
161 Regression& Regression::addMatches(TestBase* test, const std::string& name, const std::vector<cv::DMatch>& array, double eps, ERROR_TYPE err)
162 {
163     int len = (int)array.size();
164     cv::Mat queryIdx(len, 1, CV_32SC1, len ? (void*)&array[0].queryIdx : 0, sizeof(cv::DMatch));
165     cv::Mat trainIdx(len, 1, CV_32SC1, len ? (void*)&array[0].trainIdx : 0, sizeof(cv::DMatch));
166     cv::Mat imgIdx  (len, 1, CV_32SC1, len ? (void*)&array[0].imgIdx : 0,   sizeof(cv::DMatch));
167     cv::Mat distance(len, 1, CV_32FC1, len ? (void*)&array[0].distance : 0, sizeof(cv::DMatch));
168 
169     return Regression::add(test, name + "-queryIdx", queryIdx, DBL_EPSILON, ERROR_ABSOLUTE)
170                                 (name + "-trainIdx", trainIdx, DBL_EPSILON, ERROR_ABSOLUTE)
171                                 (name + "-imgIdx",   imgIdx,   DBL_EPSILON, ERROR_ABSOLUTE)
172                                 (name + "-distance", distance, eps, err);
173 }
174 
175 void Regression::Init(const std::string& testSuitName, const std::string& ext)
176 {
177     instance().init(testSuitName, ext);
178 }
179 
180 void Regression::init(const std::string& testSuitName, const std::string& ext)
181 {
182     if (!storageInPath.empty())
183     {
184         LOGE("Subsequent initialization of Regression utility is not allowed.");
185         return;
186     }
187 
188     const char *data_path_dir = getenv("OPENCV_TEST_DATA_PATH");
189     const char *path_separator = "/";
190 
191     if (data_path_dir)
192     {
193         int len = (int)strlen(data_path_dir)-1;
194         if (len < 0) len = 0;
195         std::string path_base = (data_path_dir[0] == 0 ? std::string(".") : std::string(data_path_dir))
196                 + (data_path_dir[len] == '/' || data_path_dir[len] == '\\' ? "" : path_separator)
197                 + "perf"
198                 + path_separator;
199 
200         storageInPath = path_base + testSuitName + ext;
201         storageOutPath = path_base + testSuitName;
202     }
203     else
204     {
205         storageInPath = testSuitName + ext;
206         storageOutPath = testSuitName;
207     }
208 
209     suiteName = testSuitName;
210 
211     try
212     {
213         if (storageIn.open(storageInPath, cv::FileStorage::READ))
214         {
215             rootIn = storageIn.root();
216             if (storageInPath.length() > 3 && storageInPath.substr(storageInPath.length()-3) == ".gz")
217                 storageOutPath += "_new";
218             storageOutPath += ext;
219         }
220     }
221     catch(cv::Exception&)
222     {
223         LOGE("Failed to open sanity data for reading: %s", storageInPath.c_str());
224     }
225 
226     if(!storageIn.isOpened())
227         storageOutPath = storageInPath;
228 }
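// Worked example of the path resolution above: with
// OPENCV_TEST_DATA_PATH=/opt/testdata, testSuitName="core" and ext=".xml",
// storageInPath becomes "/opt/testdata/perf/core.xml" and storageOutPath
// resolves to the same file (or ".../core_new.xml" when the input is a .gz
// archive). Without the environment variable, the files are looked up in the
// current directory ("core.xml").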
229 
230 Regression::Regression() : regRNG(cv::getTickCount()) // this RNG should be really random
231 {
232 }
233 
234 Regression::~Regression()
235 {
236     if (storageIn.isOpened())
237         storageIn.release();
238     if (storageOut.isOpened())
239     {
240         if (!currentTestNodeName.empty())
241             storageOut << "}";
242         storageOut.release();
243     }
244 }
245 
246 cv::FileStorage& Regression::write()
247 {
248     if (!storageOut.isOpened() && !storageOutPath.empty())
249     {
250         int mode = (storageIn.isOpened() && storageInPath == storageOutPath)
251                 ? cv::FileStorage::APPEND : cv::FileStorage::WRITE;
252         storageOut.open(storageOutPath, mode);
253         if (!storageOut.isOpened())
254         {
255             LOGE("Could not open \"%s\" file for writing", storageOutPath.c_str());
256             storageOutPath.clear();
257         }
258         else if (mode == cv::FileStorage::WRITE && !rootIn.empty())
259         {
260             //TODO: write content of rootIn node into the storageOut
261         }
262     }
263     return storageOut;
264 }
265 
266 std::string Regression::getCurrentTestNodeName()
267 {
268     const ::testing::TestInfo* const test_info =
269       ::testing::UnitTest::GetInstance()->current_test_info();
270 
271     if (test_info == 0)
272         return "undefined";
273 
274     std::string nodename = std::string(test_info->test_case_name()) + "--" + test_info->name();
275     size_t idx = nodename.find_first_of('/');
276     if (idx != std::string::npos)
277         nodename.erase(idx);
278 
279     const char* type_param = test_info->type_param();
280     if (type_param != 0)
281         (nodename += "--") += type_param;
282 
283     const char* value_param = test_info->value_param();
284     if (value_param != 0)
285         (nodename += "--") += value_param;
286 
287     for(size_t i = 0; i < nodename.length(); ++i)
288         if (!isalnum(nodename[i]) && '_' != nodename[i])
289             nodename[i] = '-';
290 
291     return nodename;
292 }
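// Example of the mangling above: for test_case_name
// "Size_MatType_Blur/BlurFixture" and test name "blur/0", the combined
// "Size_MatType_Blur/BlurFixture--blur/0" is truncated at the first '/',
// the value_param (say "(640x480, CV_8UC1)") is appended after "--", and
// every character that is neither alphanumeric nor '_' becomes '-', giving
// "Size_MatType_Blur---640x480--CV_8UC1-".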
293 
294 bool Regression::isVector(cv::InputArray a)
295 {
296     return a.kind() == cv::_InputArray::STD_VECTOR_MAT || a.kind() == cv::_InputArray::STD_VECTOR_VECTOR ||
297            a.kind() == cv::_InputArray::STD_VECTOR_UMAT;
298 }
299 
300 double Regression::getElem(cv::Mat& m, int y, int x, int cn)
301 {
302     switch (m.depth())
303     {
304     case CV_8U: return *(m.ptr<unsigned char>(y, x) + cn);
305     case CV_8S: return *(m.ptr<signed char>(y, x) + cn);
306     case CV_16U: return *(m.ptr<unsigned short>(y, x) + cn);
307     case CV_16S: return *(m.ptr<signed short>(y, x) + cn);
308     case CV_32S: return *(m.ptr<signed int>(y, x) + cn);
309     case CV_32F: return *(m.ptr<float>(y, x) + cn);
310     case CV_64F: return *(m.ptr<double>(y, x) + cn);
311     default: return 0;
312     }
313 }
314 
315 void Regression::write(cv::Mat m)
316 {
317     if (!m.empty() && m.dims < 2) return;
318 
319     double min, max;
320     cv::minMaxIdx(m, &min, &max);
321     write() << "min" << min << "max" << max;
322 
323     write() << "last" << "{" << "x" << m.size.p[1] - 1 << "y" << m.size.p[0] - 1
324         << "val" << getElem(m, m.size.p[0] - 1, m.size.p[1] - 1, m.channels() - 1) << "}";
325 
326     int x, y, cn;
327     x = regRNG.uniform(0, m.size.p[1]);
328     y = regRNG.uniform(0, m.size.p[0]);
329     cn = regRNG.uniform(0, m.channels());
330     write() << "rng1" << "{" << "x" << x << "y" << y;
331     if(cn > 0) write() << "cn" << cn;
332     write() << "val" << getElem(m, y, x, cn) << "}";
333 
334     x = regRNG.uniform(0, m.size.p[1]);
335     y = regRNG.uniform(0, m.size.p[0]);
336     cn = regRNG.uniform(0, m.channels());
337     write() << "rng2" << "{" << "x" << x << "y" << y;
338     if (cn > 0) write() << "cn" << cn;
339     write() << "val" << getElem(m, y, x, cn) << "}";
340 }
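// A record produced by the writers above looks roughly like this in the
// sanity-check storage (YAML form, values illustrative):
//
//     min: 0.
//     max: 4094.
//     last: { x: 639, y: 479, val: 17. }
//     rng1: { x: 21, y: 350, cn: 2, val: 42. }
//     rng2: { x: 500, y: 3, val: 7. }
//
// The "cn" entry is omitted whenever the randomly sampled channel is 0.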
341 
342 void Regression::verify(cv::FileNode node, cv::Mat actual, double eps, std::string argname, ERROR_TYPE err)
343 {
344     if (!actual.empty() && actual.dims < 2) return;
345 
346     double expect_min = (double)node["min"];
347     double expect_max = (double)node["max"];
348 
349     if (err == ERROR_RELATIVE)
350         eps *= std::max(std::abs(expect_min), std::abs(expect_max));
351 
352     double actual_min, actual_max;
353     cv::minMaxIdx(actual, &actual_min, &actual_max);
354 
355     ASSERT_NEAR(expect_min, actual_min, eps)
356             << argname << " has unexpected minimal value" << std::endl;
357     ASSERT_NEAR(expect_max, actual_max, eps)
358             << argname << " has unexpected maximal value" << std::endl;
359 
360     cv::FileNode last = node["last"];
361     double actual_last = getElem(actual, actual.size.p[0] - 1, actual.size.p[1] - 1, actual.channels() - 1);
362     int expect_cols = (int)last["x"] + 1;
363     int expect_rows = (int)last["y"] + 1;
364     ASSERT_EQ(expect_cols, actual.size.p[1])
365             << argname << " has unexpected number of columns" << std::endl;
366     ASSERT_EQ(expect_rows, actual.size.p[0])
367             << argname << " has unexpected number of rows" << std::endl;
368 
369     double expect_last = (double)last["val"];
370     ASSERT_NEAR(expect_last, actual_last, eps)
371             << argname << " has unexpected value of the last element" << std::endl;
372 
373     cv::FileNode rng1 = node["rng1"];
374     int x1 = rng1["x"];
375     int y1 = rng1["y"];
376     int cn1 = rng1["cn"];
377 
378     double expect_rng1 = (double)rng1["val"];
379     // it is safe to use x1 and y1 without checks here because we have already
380     // verified that mat size is the same as recorded
381     double actual_rng1 = getElem(actual, y1, x1, cn1);
382 
383     ASSERT_NEAR(expect_rng1, actual_rng1, eps)
384             << argname << " has unexpected value of the ["<< x1 << ":" << y1 << ":" << cn1 <<"] element" << std::endl;
385 
386     cv::FileNode rng2 = node["rng2"];
387     int x2 = rng2["x"];
388     int y2 = rng2["y"];
389     int cn2 = rng2["cn"];
390 
391     double expect_rng2 = (double)rng2["val"];
392     double actual_rng2 = getElem(actual, y2, x2, cn2);
393 
394     ASSERT_NEAR(expect_rng2, actual_rng2, eps)
395             << argname << " has unexpected value of the ["<< x2 << ":" << y2 << ":" << cn2 <<"] element" << std::endl;
396 }
397 
398 void Regression::write(cv::InputArray array)
399 {
400     write() << "kind" << array.kind();
401     write() << "type" << array.type();
402     if (isVector(array))
403     {
404         int total = (int)array.total();
405         int idx = regRNG.uniform(0, total);
406         write() << "len" << total;
407         write() << "idx" << idx;
408 
409         cv::Mat m = array.getMat(idx);
410 
411         if (m.total() * m.channels() < 26) //5x5 or smaller
412             write() << "val" << m;
413         else
414             write(m);
415     }
416     else
417     {
418         if (array.total() * array.channels() < 26) //5x5 or smaller
419             write() << "val" << array.getMat();
420         else
421             write(array.getMat());
422     }
423 }
424 
425 static int countViolations(const cv::Mat& expected, const cv::Mat& actual, const cv::Mat& diff, double eps, double* max_violation = 0, double* max_allowed = 0)
426 {
427     cv::Mat diff64f;
428     diff.reshape(1).convertTo(diff64f, CV_64F);
429 
430     cv::Mat expected_abs = cv::abs(expected.reshape(1));
431     cv::Mat actual_abs = cv::abs(actual.reshape(1));
432     cv::Mat maximum, mask;
433     cv::max(expected_abs, actual_abs, maximum);
434     cv::multiply(maximum, cv::Vec<double, 1>(eps), maximum, CV_64F);
435     cv::compare(diff64f, maximum, mask, cv::CMP_GT);
436 
437     int v = cv::countNonZero(mask);
438 
439     if (v > 0 && max_violation != 0 && max_allowed != 0)
440     {
441         int loc[10] = {0};
442         cv::minMaxIdx(maximum, 0, max_allowed, 0, loc, mask);
443         *max_violation = diff64f.at<double>(loc[0], loc[1]);
444     }
445 
446     return v;
447 }
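// countViolations() implements the relative-error criterion: an element is a
// violation when |expected - actual| > eps * max(|expected|, |actual|).
// When violations exist, it also reports, among the violating elements, the
// one with the largest allowed bound (max_allowed) together with its actual
// difference (max_violation).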
448 
449 void Regression::verify(cv::FileNode node, cv::InputArray array, double eps, ERROR_TYPE err)
450 {
451     int expected_kind = (int)node["kind"];
452     int expected_type = (int)node["type"];
453     ASSERT_EQ(expected_kind, array.kind()) << "  Argument \"" << node.name() << "\" has unexpected kind";
454     ASSERT_EQ(expected_type, array.type()) << "  Argument \"" << node.name() << "\" has unexpected type";
455 
456     cv::FileNode valnode = node["val"];
457     if (isVector(array))
458     {
459         int expected_length = (int)node["len"];
460         ASSERT_EQ(expected_length, (int)array.total()) << "  Vector \"" << node.name() << "\" has unexpected length";
461         int idx = node["idx"];
462 
463         cv::Mat actual = array.getMat(idx);
464 
465         if (valnode.isNone())
466         {
467             ASSERT_LE((size_t)26, actual.total() * (size_t)actual.channels())
468                     << "  \"" << node.name() << "[" <<  idx << "]\" has unexpected number of elements";
469             verify(node, actual, eps, cv::format("%s[%d]", node.name().c_str(), idx), err);
470         }
471         else
472         {
473             cv::Mat expected;
474             valnode >> expected;
475 
476             if(expected.empty())
477             {
478                 ASSERT_TRUE(actual.empty())
479                     << "  expected empty " << node.name() << "[" <<  idx<< "]";
480             }
481             else
482             {
483                 ASSERT_EQ(expected.size(), actual.size())
484                         << "  " << node.name() << "[" <<  idx<< "] has unexpected size";
485 
486                 cv::Mat diff;
487                 cv::absdiff(expected, actual, diff);
488 
489                 if (err == ERROR_ABSOLUTE)
490                 {
491                     if (!cv::checkRange(diff, true, 0, 0, eps))
492                     {
493                         if(expected.total() * expected.channels() < 12)
494                             std::cout << " Expected: " << std::endl << expected << std::endl << " Actual:" << std::endl << actual << std::endl;
495 
496                         double max;
497                         cv::minMaxIdx(diff.reshape(1), 0, &max);
498 
499                         FAIL() << "  Absolute difference (=" << max << ") between argument \""
500                                << node.name() << "[" <<  idx << "]\" and expected value is greater than " << eps;
501                     }
502                 }
503                 else if (err == ERROR_RELATIVE)
504                 {
505                     double maxv, maxa;
506                     int violations = countViolations(expected, actual, diff, eps, &maxv, &maxa);
507                     if (violations > 0)
508                     {
509                         if(expected.total() * expected.channels() < 12)
510                             std::cout << " Expected: " << std::endl << expected << std::endl << " Actual:" << std::endl << actual << std::endl;
511 
512                         FAIL() << "  Relative difference (" << maxv << " of " << maxa << " allowed) between argument \""
513                                << node.name() << "[" <<  idx << "]\" and expected value is greater than " << eps << " in " << violations << " points";
514                     }
515                 }
516             }
517         }
518     }
519     else
520     {
521         if (valnode.isNone())
522         {
523             ASSERT_LE((size_t)26, array.total() * (size_t)array.channels())
524                     << "  Argument \"" << node.name() << "\" has unexpected number of elements";
525             verify(node, array.getMat(), eps, "Argument \"" + node.name() + "\"", err);
526         }
527         else
528         {
529             cv::Mat expected;
530             valnode >> expected;
531             cv::Mat actual = array.getMat();
532 
533             if(expected.empty())
534             {
535                 ASSERT_TRUE(actual.empty())
536                     << "  expected empty " << node.name();
537             }
538             else
539             {
540                 ASSERT_EQ(expected.size(), actual.size())
541                         << "  Argument \"" << node.name() << "\" has unexpected size";
542 
543                 cv::Mat diff;
544                 cv::absdiff(expected, actual, diff);
545 
546                 if (err == ERROR_ABSOLUTE)
547                 {
548                     if (!cv::checkRange(diff, true, 0, 0, eps))
549                     {
550                         if(expected.total() * expected.channels() < 12)
551                             std::cout << " Expected: " << std::endl << expected << std::endl << " Actual:" << std::endl << actual << std::endl;
552 
553                         double max;
554                         cv::minMaxIdx(diff.reshape(1), 0, &max);
555 
556                         FAIL() << "  Absolute difference (=" << max << ") between argument \"" << node.name()
557                                << "\" and expected value is greater than " << eps;
558                     }
559                 }
560                 else if (err == ERROR_RELATIVE)
561                 {
562                     double maxv, maxa;
563                     int violations = countViolations(expected, actual, diff, eps, &maxv, &maxa);
564                     if (violations > 0)
565                     {
566                         if(expected.total() * expected.channels() < 12)
567                             std::cout << " Expected: " << std::endl << expected << std::endl << " Actual:" << std::endl << actual << std::endl;
568 
569                         FAIL() << "  Relative difference (" << maxv << " of " << maxa << " allowed) between argument \"" << node.name()
570                                << "\" and expected value is greater than " << eps << " in " << violations << " points";
571                     }
572                 }
573             }
574         }
575     }
576 }
577 
578 Regression& Regression::operator() (const std::string& name, cv::InputArray array, double eps, ERROR_TYPE err)
579 {
580     // exit if current test is already failed
581     if(::testing::UnitTest::GetInstance()->current_test_info()->result()->Failed()) return *this;
582 
583     if(!array.empty() && array.depth() == CV_USRTYPE1)
584     {
585         ADD_FAILURE() << "  Can not check regression for CV_USRTYPE1 data type for " << name;
586         return *this;
587     }
588 
589     std::string nodename = getCurrentTestNodeName();
590 
591     cv::FileNode n = rootIn[nodename];
592     if(n.isNone())
593     {
594         if(param_write_sanity)
595         {
596             if (nodename != currentTestNodeName)
597             {
598                 if (!currentTestNodeName.empty())
599                     write() << "}";
600                 currentTestNodeName = nodename;
601 
602                 write() << nodename << "{";
603             }
604             // TODO: verify that name is alphanumeric, current error message is useless
605             write() << name << "{";
606             write(array);
607             write() << "}";
608         }
609         else if(param_verify_sanity)
610         {
611             ADD_FAILURE() << "  No regression data for " << name << " argument";
612         }
613     }
614     else
615     {
616         cv::FileNode this_arg = n[name];
617         if (!this_arg.isMap())
618             ADD_FAILURE() << "  No regression data for " << name << " argument";
619         else
620             verify(this_arg, array, eps, err);
621     }
622 
623     return *this;
624 }
625 
626 
627 /*****************************************************************************************\
628 *                                ::perf::performance_metrics
629 \*****************************************************************************************/
630 performance_metrics::performance_metrics()
631 {
632     clear();
633 }
634 
635 void performance_metrics::clear()
636 {
637     bytesIn = 0;
638     bytesOut = 0;
639     samples = 0;
640     outliers = 0;
641     gmean = 0;
642     gstddev = 0;
643     mean = 0;
644     stddev = 0;
645     median = 0;
646     min = 0;
647     frequency = 0;
648     terminationReason = TERM_UNKNOWN;
649 }
650 
651 /*****************************************************************************************\
652 *                                   Performance validation results
653 \*****************************************************************************************/
654 
655 static bool perf_validation_enabled = false;
656 
657 static std::string perf_validation_results_directory;
658 static std::map<std::string, float> perf_validation_results;
659 static std::string perf_validation_results_outfile;
660 
661 static double perf_validation_criteria = 0.03; // 3 %
662 static double perf_validation_time_threshold_ms = 0.1;
663 static int perf_validation_idle_delay_ms = 3000; // 3 sec
664 
665 static void loadPerfValidationResults(const std::string& fileName)
666 {
667     perf_validation_results.clear();
668     std::ifstream infile(fileName.c_str());
669     while (!infile.eof())
670     {
671         std::string name;
672         float value = 0;
673         if (!(infile >> value))
674         {
675             if (infile.eof())
676                 break; // it is OK
677             std::cout << "ERROR: Can't load performance validation results from " << fileName << "!" << std::endl;
678             return;
679         }
680         infile.ignore(1);
681         if (!(std::getline(infile, name)))
682         {
683             std::cout << "ERROR: Can't load performance validation results from " << fileName << "!" << std::endl;
684             return;
685         }
686         if (!name.empty() && name[name.size() - 1] == '\r') // CRLF processing on Linux
687             name.resize(name.size() - 1);
688         perf_validation_results[name] = value;
689     }
690     std::cout << "Performance validation results loaded from " << fileName << " (" << perf_validation_results.size() << " entries)" << std::endl;
691 }
692 
693 static void savePerfValidationResult(const std::string& name, float value)
694 {
695     perf_validation_results[name] = value;
696 }
697 
698 static void savePerfValidationResults()
699 {
700     if (!perf_validation_results_outfile.empty())
701     {
702         std::ofstream outfile((perf_validation_results_directory + perf_validation_results_outfile).c_str());
703         std::map<std::string, float>::const_iterator i;
704         for (i = perf_validation_results.begin(); i != perf_validation_results.end(); ++i)
705         {
706             outfile << i->second << ';';
707             outfile << i->first << std::endl;
708         }
709         outfile.close();
710         std::cout << "Performance validation results saved (" << perf_validation_results.size() << " entries)" << std::endl;
711     }
712 }
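// The validation-results file handled by the two functions above is plain
// text, one "<median_ms>;<test name>" record per line, e.g. (illustrative):
//
//     1.25;Size_MatType_Blur--blur3x3
//     0.07;Size_MatType_Add--add
//
// loadPerfValidationResults() parses exactly this shape: a float, one
// separator character, then the rest of the line as the test name.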
713 
714 class PerfValidationEnvironment : public ::testing::Environment
715 {
716 public:
717     virtual ~PerfValidationEnvironment() {}
718     virtual void SetUp() {}
719 
720     virtual void TearDown()
721     {
722         savePerfValidationResults();
723     }
724 };
725 
726 
727 
728 /*****************************************************************************************\
729 *                                   ::perf::TestBase
730 \*****************************************************************************************/
731 
732 
733 void TestBase::Init(int argc, const char* const argv[])
734 {
735     std::vector<std::string> plain_only;
736     plain_only.push_back("plain");
737     TestBase::Init(plain_only, argc, argv);
738 }
739 
740 void TestBase::Init(const std::vector<std::string> & availableImpls,
741                  int argc, const char* const argv[])
742 {
743     available_impls = availableImpls;
744 
745     const std::string command_line_keys =
746         "{   perf_max_outliers           |8        |percent of allowed outliers}"
747         "{   perf_min_samples            |10       |minimal required number of samples}"
748         "{   perf_force_samples          |100      |force set maximum number of samples for all tests}"
749         "{   perf_seed                   |809564   |seed for random numbers generator}"
750         "{   perf_threads                |-1       |the number of worker threads, if parallel execution is enabled}"
751         "{   perf_write_sanity           |false    |create new records for sanity checks}"
752         "{   perf_verify_sanity          |false    |fail tests having no regression data for sanity checks}"
753         "{   perf_impl                   |" + available_impls[0] +
754                                                   "|the implementation variant of functions under test}"
755         "{   perf_list_impls             |false    |list available implementation variants and exit}"
756         "{   perf_run_cpu                |false    |deprecated, equivalent to --perf_impl=plain}"
757         "{   perf_strategy               |default  |specifies performance measuring strategy: default, base or simple (weak restrictions)}"
758         "{   perf_read_validation_results |        |specifies file name with performance results from previous run}"
759         "{   perf_write_validation_results |       |specifies file name to write performance validation results}"
760 #ifdef ANDROID
761         "{   perf_time_limit             |6.0      |default time limit for a single test (in seconds)}"
762         "{   perf_affinity_mask          |0        |set affinity mask for the main thread}"
763         "{   perf_log_power_checkpoints  |         |additional xml logging for power measurement}"
764 #else
765         "{   perf_time_limit             |3.0      |default time limit for a single test (in seconds)}"
766 #endif
767         "{   perf_max_deviation          |1.0      |}"
768 #ifdef HAVE_IPP
769         "{   perf_ipp_check              |false    |check whether IPP works without failures}"
770 #endif
771 #ifdef CV_COLLECT_IMPL_DATA
772         "{   perf_collect_impl           |false    |collect info about executed implementations}"
773 #endif
774         "{   help h                      |false    |print help info}"
775 #ifdef HAVE_CUDA
776         "{   perf_cuda_device            |0        |run CUDA test suite onto specific CUDA capable device}"
777         "{   perf_cuda_info_only         |false    |print information about the system and available CUDA devices, then exit.}"
778 #endif
779     ;
780 
781     cv::CommandLineParser args(argc, argv, command_line_keys);
782     if (args.has("help"))
783     {
784         args.printMessage();
785         return;
786     }
787 
788     ::testing::AddGlobalTestEnvironment(new PerfEnvironment);
789 
790     param_impl          = args.has("perf_run_cpu") ? "plain" : args.get<std::string>("perf_impl");
791     std::string perf_strategy = args.get<std::string>("perf_strategy");
792     if (perf_strategy == "default")
793     {
794         // nothing
795     }
796     else if (perf_strategy == "base")
797     {
798         strategyForce = PERF_STRATEGY_BASE;
799     }
800     else if (perf_strategy == "simple")
801     {
802         strategyForce = PERF_STRATEGY_SIMPLE;
803     }
804     else
805     {
806         printf("No such strategy: %s\n", perf_strategy.c_str());
807         exit(1);
808     }
809     param_max_outliers  = std::min(100., std::max(0., args.get<double>("perf_max_outliers")));
810     param_min_samples   = std::max(1u, args.get<unsigned int>("perf_min_samples"));
811     param_max_deviation = std::max(0., args.get<double>("perf_max_deviation"));
812     param_seed          = args.get<unsigned int>("perf_seed");
813     param_time_limit    = std::max(0., args.get<double>("perf_time_limit"));
814     param_force_samples = args.get<unsigned int>("perf_force_samples");
815     param_write_sanity  = args.has("perf_write_sanity");
816     param_verify_sanity = args.has("perf_verify_sanity");
817     test_ipp_check      = !args.has("perf_ipp_check") ? getenv("OPENCV_IPP_CHECK") != NULL : true;
818     param_threads       = args.get<int>("perf_threads");
819 #ifdef CV_COLLECT_IMPL_DATA
820     param_collect_impl  = args.has("perf_collect_impl");
821 #endif
822 #ifdef ANDROID
823     param_affinity_mask   = args.get<int>("perf_affinity_mask");
824     log_power_checkpoints = args.has("perf_log_power_checkpoints");
825 #endif
826 
827     bool param_list_impls = args.has("perf_list_impls");
828 
829     if (param_list_impls)
830     {
831         fputs("Available implementation variants:", stdout);
832         for (size_t i = 0; i < available_impls.size(); ++i) {
833             putchar(' ');
834             fputs(available_impls[i].c_str(), stdout);
835         }
836         putchar('\n');
837         exit(0);
838     }
839 
840     if (std::find(available_impls.begin(), available_impls.end(), param_impl) == available_impls.end())
841     {
842         printf("No such implementation: %s\n", param_impl.c_str());
843         exit(1);
844     }
845 
846 #ifdef CV_COLLECT_IMPL_DATA
847     if(param_collect_impl)
848         cv::setUseCollection(1);
849     else
850         cv::setUseCollection(0);
851 #endif
852 
853 #ifdef HAVE_CUDA
854 
855     bool printOnly        = args.has("perf_cuda_info_only");
856 
857     if (printOnly)
858         exit(0);
859 #endif
860 
861     if (available_impls.size() > 1)
862         printf("[----------]\n[   INFO   ] \tImplementation variant: %s.\n[----------]\n", param_impl.c_str()), fflush(stdout);
863 
864 #ifdef HAVE_CUDA
865 
866     param_cuda_device      = std::max(0, std::min(cv::cuda::getCudaEnabledDeviceCount(), args.get<int>("perf_cuda_device")));
867 
868     if (param_impl == "cuda")
869     {
870         cv::cuda::DeviceInfo info(param_cuda_device);
871         if (!info.isCompatible())
872         {
873             printf("[----------]\n[ FAILURE  ] \tDevice %s is NOT compatible with current CUDA module build.\n[----------]\n", info.name()), fflush(stdout);
874             exit(-1);
875         }
876 
877         cv::cuda::setDevice(param_cuda_device);
878 
879         printf("[----------]\n[ GPU INFO ] \tRun test suite on %s GPU.\n[----------]\n", info.name()), fflush(stdout);
880     }
881 #endif
882 
883     {
884         const char* path = getenv("OPENCV_PERF_VALIDATION_DIR");
885         if (path)
886             perf_validation_results_directory = path;
887     }
888 
889     std::string fileName_perf_validation_results_src = args.get<std::string>("perf_read_validation_results");
890     if (!fileName_perf_validation_results_src.empty())
891     {
892         perf_validation_enabled = true;
893         loadPerfValidationResults(perf_validation_results_directory + fileName_perf_validation_results_src);
894     }
895 
896     perf_validation_results_outfile = args.get<std::string>("perf_write_validation_results");
897     if (!perf_validation_results_outfile.empty())
898     {
899         perf_validation_enabled = true;
900         ::testing::AddGlobalTestEnvironment(new PerfValidationEnvironment());
901     }
902 
903     if (!args.check())
904     {
905         args.printErrors();
906         return;
907     }
908 
909     timeLimitDefault = param_time_limit == 0.0 ? 1 : (int64)(param_time_limit * cv::getTickFrequency());
910     iterationsLimitDefault = param_force_samples == 0 ? (unsigned)(-1) : param_force_samples;
911     _timeadjustment = _calibrate();
912 }
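// A sketch of a typical invocation of a perf binary built on this harness
// (binary name illustrative; the options are those declared in
// command_line_keys above):
//
//     ./opencv_perf_core --perf_impl=plain --perf_min_samples=20 \
//         --perf_force_samples=100 --perf_time_limit=5.0 \
//         --perf_write_validation_results=core_results.txt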
913 
914 void TestBase::RecordRunParameters()
915 {
916     ::testing::Test::RecordProperty("cv_implementation", param_impl);
917     ::testing::Test::RecordProperty("cv_num_threads", param_threads);
918 
919 #ifdef HAVE_CUDA
920     if (param_impl == "cuda")
921     {
922         cv::cuda::DeviceInfo info(param_cuda_device);
923         ::testing::Test::RecordProperty("cv_cuda_gpu", info.name());
924     }
925 #endif
926 }
927 
928 std::string TestBase::getSelectedImpl()
929 {
930     return param_impl;
931 }
932 
933 enum PERF_STRATEGY TestBase::setModulePerformanceStrategy(enum PERF_STRATEGY strategy)
934 {
935     enum PERF_STRATEGY ret = strategyModule;
936     strategyModule = strategy;
937     return ret;
938 }
939 
940 enum PERF_STRATEGY TestBase::getCurrentModulePerformanceStrategy()
941 {
942     return strategyForce == PERF_STRATEGY_DEFAULT ? strategyModule : strategyForce;
943 }
944 
945 
946 int64 TestBase::_calibrate()
947 {
948     class _helper : public ::perf::TestBase
949     {
950         public:
951         performance_metrics& getMetrics() { return calcMetrics(); }
952         virtual void TestBody() {}
953         virtual void PerfTestBody()
954         {
955             //the whole system warmup
956             SetUp();
957             cv::Mat a(2048, 2048, CV_32S, cv::Scalar(1));
958             cv::Mat b(2048, 2048, CV_32S, cv::Scalar(2));
959             declare.time(30);
960             double s = 0;
961             for(declare.iterations(20); startTimer(), next(); stopTimer())
962                 s+=a.dot(b);
963             declare.time(s);
964 
965             //self calibration
966             SetUp();
967             for(declare.iterations(1000); startTimer(), next(); stopTimer()){}
968         }
969     };
970 
971     _timeadjustment = 0;
972     _helper h;
973     h.PerfTestBody();
974     double compensation = h.getMetrics().min;
975     if (getCurrentModulePerformanceStrategy() == PERF_STRATEGY_SIMPLE)
976     {
977         CV_Assert(compensation < 0.01 * cv::getTickFrequency());
978         compensation = 0.0f; // simple strategy doesn't require any compensation
979     }
980     LOGD("Time compensation is %.0f", compensation);
981     return (int64)compensation;
982 }
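// The compensation returned here (the minimal observed time of an empty
// measured iteration) is stored in _timeadjustment and subtracted from every
// sample in stopTimer(), so reported times approximate the payload cost
// without the timing-loop overhead.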
983 
984 #ifdef _MSC_VER
985 # pragma warning(push)
986 # pragma warning(disable:4355)  // 'this' : used in base member initializer list
987 #endif
988 TestBase::TestBase(): testStrategy(PERF_STRATEGY_DEFAULT), declare(this)
989 {
990     lastTime = totalTime = timeLimit = 0;
991     nIters = currentIter = runsPerIteration = 0;
992     minIters = param_min_samples;
993     verified = false;
994     perfValidationStage = 0;
995 }
996 #ifdef _MSC_VER
997 # pragma warning(pop)
998 #endif
999 
1000 
1001 void TestBase::declareArray(SizeVector& sizes, cv::InputOutputArray a, WarmUpType wtype)
1002 {
1003     if (!a.empty())
1004     {
1005         sizes.push_back(std::pair<int, cv::Size>(getSizeInBytes(a), getSize(a)));
1006         warmup(a, wtype);
1007     }
1008     else if (a.kind() != cv::_InputArray::NONE)
1009         ADD_FAILURE() << "  Uninitialized input/output parameters are not allowed for performance tests";
1010 }
1011 
1012 void TestBase::warmup(cv::InputOutputArray a, WarmUpType wtype)
1013 {
1014     if (a.empty())
1015         return;
1016     else if (a.isUMat())
1017     {
1018         if (wtype == WARMUP_RNG || wtype == WARMUP_WRITE)
1019         {
1020             int depth = a.depth();
1021             if (depth == CV_8U)
1022                 cv::randu(a, 0, 256);
1023             else if (depth == CV_8S)
1024                 cv::randu(a, -128, 128);
1025             else if (depth == CV_16U)
1026                 cv::randu(a, 0, 1024);
1027             else if (depth == CV_32F || depth == CV_64F)
1028                 cv::randu(a, -1.0, 1.0);
1029             else if (depth == CV_16S || depth == CV_32S)
1030                 cv::randu(a, -4096, 4096);
1031             else
1032                 CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported format");
1033         }
1034         return;
1035     }
1036     else if (a.kind() != cv::_InputArray::STD_VECTOR_MAT && a.kind() != cv::_InputArray::STD_VECTOR_VECTOR)
1037         warmup_impl(a.getMat(), wtype);
1038     else
1039     {
1040         size_t total = a.total();
1041         for (size_t i = 0; i < total; ++i)
1042             warmup_impl(a.getMat((int)i), wtype);
1043     }
1044 }
1045 
1046 int TestBase::getSizeInBytes(cv::InputArray a)
1047 {
1048     if (a.empty()) return 0;
1049     int total = (int)a.total();
1050     if (a.kind() != cv::_InputArray::STD_VECTOR_MAT && a.kind() != cv::_InputArray::STD_VECTOR_VECTOR)
1051         return total * CV_ELEM_SIZE(a.type());
1052 
1053     int size = 0;
1054     for (int i = 0; i < total; ++i)
1055         size += (int)a.total(i) * CV_ELEM_SIZE(a.type(i));
1056 
1057     return size;
1058 }
1059 
1060 cv::Size TestBase::getSize(cv::InputArray a)
1061 {
1062     if (a.kind() != cv::_InputArray::STD_VECTOR_MAT && a.kind() != cv::_InputArray::STD_VECTOR_VECTOR)
1063         return a.size();
1064     return cv::Size();
1065 }
1066 
1067 PERF_STRATEGY TestBase::getCurrentPerformanceStrategy() const
1068 {
1069     if (strategyForce == PERF_STRATEGY_DEFAULT)
1070         return (testStrategy == PERF_STRATEGY_DEFAULT) ? strategyModule : testStrategy;
1071     else
1072         return strategyForce;
1073 }
1074 
1075 bool TestBase::next()
1076 {
1077     static int64 lastActivityPrintTime = 0;
1078 
1079     if (currentIter != (unsigned int)-1)
1080     {
1081         if (currentIter + 1 != times.size())
1082             ADD_FAILURE() << "  next() is called before stopTimer()";
1083     }
1084     else
1085     {
1086         lastActivityPrintTime = 0;
1087         metrics.clear();
1088     }
1089 
1090     cv::theRNG().state = param_seed; // this RNG should generate the same numbers on each run
1091     ++currentIter;
1092 
1093     bool has_next = false;
1094 
1095     do {
1096         assert(currentIter == times.size());
1097         if (currentIter == 0)
1098         {
1099             has_next = true;
1100             break;
1101         }
1102 
1103         if (getCurrentPerformanceStrategy() == PERF_STRATEGY_BASE)
1104         {
1105             has_next = currentIter < nIters && totalTime < timeLimit;
1106         }
1107         else
1108         {
1109             assert(getCurrentPerformanceStrategy() == PERF_STRATEGY_SIMPLE);
1110             if (totalTime - lastActivityPrintTime >= cv::getTickFrequency() * 10)
1111             {
1112                 std::cout << '.' << std::endl;
1113                 lastActivityPrintTime = totalTime;
1114             }
1115             if (currentIter >= nIters)
1116             {
1117                 has_next = false;
1118                 break;
1119             }
1120             if (currentIter < minIters)
1121             {
1122                 has_next = true;
1123                 break;
1124             }
1125 
1126             calcMetrics();
1127 
1128             if (fabs(metrics.mean) > 1e-6)
1129                 has_next = metrics.stddev > perf_stability_criteria * fabs(metrics.mean);
1130             else
1131                 has_next = true;
1132         }
1133     } while (false);
1134 
1135     if (perf_validation_enabled && !has_next)
1136     {
1137         calcMetrics();
1138         double median_ms = metrics.median * 1000.0f / metrics.frequency;
1139 
1140         const ::testing::TestInfo* const test_info = ::testing::UnitTest::GetInstance()->current_test_info();
1141         std::string name = (test_info == 0) ? "" :
1142                 std::string(test_info->test_case_name()) + "--" + test_info->name();
1143 
1144         if (!perf_validation_results.empty() && !name.empty())
1145         {
1146             std::map<std::string, float>::iterator i = perf_validation_results.find(name);
1147             bool isSame = false;
1148             bool found = false;
1149             bool grow = false;
1150             if (i != perf_validation_results.end())
1151             {
1152                 found = true;
1153                 double prev_result = i->second;
1154                 grow = median_ms > prev_result;
1155                 isSame = fabs(median_ms - prev_result) <= perf_validation_criteria * fabs(median_ms);
1156                 if (!isSame)
1157                 {
1158                     if (perfValidationStage == 0)
1159                     {
1160                         printf("Performance is changed (samples = %d, median):\n    %.2f ms (current)\n    %.2f ms (previous)\n", (int)times.size(), median_ms, prev_result);
1161                     }
1162                 }
1163             }
1164             else
1165             {
1166                 if (perfValidationStage == 0)
1167                     printf("New performance result is detected\n");
1168             }
1169             if (!isSame)
1170             {
1171                 if (perfValidationStage < 2)
1172                 {
1173                     if (perfValidationStage == 0 && currentIter <= minIters * 3 && currentIter < nIters)
1174                     {
1175                         unsigned int new_minIters = std::max(minIters * 5, currentIter * 3);
1176                         printf("Increase minIters from %u to %u\n", minIters, new_minIters);
1177                         minIters = new_minIters;
1178                         has_next = true;
1179                         perfValidationStage++;
1180                     }
1181                     else if (found && currentIter >= nIters &&
1182                             median_ms > perf_validation_time_threshold_ms &&
1183                             (grow || metrics.stddev > perf_stability_criteria * fabs(metrics.mean)))
1184                     {
1185                         printf("Performance is unstable, it may be a result of overheat problems\n");
1186                         printf("Idle delay for %d ms... \n", perf_validation_idle_delay_ms);
1187 #if defined WIN32 || defined _WIN32 || defined WIN64 || defined _WIN64
1188                         Sleep(perf_validation_idle_delay_ms);
1189 #else
1190                         usleep(perf_validation_idle_delay_ms * 1000);
1191 #endif
1192                         has_next = true;
1193                         minIters = std::min(minIters * 5, nIters);
1194                         // reset collected samples
1195                         currentIter = 0;
1196                         times.clear();
1197                         metrics.clear();
1198                         perfValidationStage += 2;
1199                     }
1200                     if (!has_next)
1201                     {
1202                         printf("Assume that current result is valid\n");
1203                     }
1204                 }
1205                 else
1206                 {
1207                     printf("Re-measured performance result: %.2f ms\n", median_ms);
1208                 }
1209             }
1210         }
1211 
1212         if (!has_next && !name.empty())
1213         {
1214             savePerfValidationResult(name, (float)median_ms);
1215         }
1216     }
1217 
1218 #ifdef ANDROID
1219     if (log_power_checkpoints)
1220     {
1221         timeval tim;
1222         gettimeofday(&tim, NULL);
1223         unsigned long long t1 = tim.tv_sec * 1000LLU + (unsigned long long)(tim.tv_usec / 1000.f);
1224 
1225         if (currentIter == 1) RecordProperty("test_start", cv::format("%llu",t1).c_str());
1226         if (!has_next) RecordProperty("test_complete", cv::format("%llu",t1).c_str());
1227     }
1228 #endif
1229 
1230     if (has_next)
1231         startTimer(); // really we should measure activity from this moment, so reset start time
1232     return has_next;
1233 }
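// next() is the loop condition of the standard measurement cycle. Tests
// normally spell the loop via the TEST_CYCLE macros (ts_perf.hpp), which
// expand to approximately:
//
//     for (; startTimer(), next(); stopTimer())
//         <payload under measurement>;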
1234 
1235 void TestBase::warmup_impl(cv::Mat m, WarmUpType wtype)
1236 {
1237     switch(wtype)
1238     {
1239     case WARMUP_READ:
1240         cv::sum(m.reshape(1));
1241         return;
1242     case WARMUP_WRITE:
1243         m.reshape(1).setTo(cv::Scalar::all(0));
1244         return;
1245     case WARMUP_RNG:
1246         randu(m);
1247         return;
1248     default:
1249         return;
1250     }
1251 }
1252 
1253 unsigned int TestBase::getTotalInputSize() const
1254 {
1255     unsigned int res = 0;
1256     for (SizeVector::const_iterator i = inputData.begin(); i != inputData.end(); ++i)
1257         res += i->first;
1258     return res;
1259 }
1260 
1261 unsigned int TestBase::getTotalOutputSize() const
1262 {
1263     unsigned int res = 0;
1264     for (SizeVector::const_iterator i = outputData.begin(); i != outputData.end(); ++i)
1265         res += i->first;
1266     return res;
1267 }
1268 
1269 void TestBase::startTimer()
1270 {
1271     lastTime = cv::getTickCount();
1272 }
1273 
1274 void TestBase::stopTimer()
1275 {
1276     int64 time = cv::getTickCount();
1277     if (lastTime == 0)
1278         ADD_FAILURE() << "  stopTimer() is called before startTimer()/next()";
1279     lastTime = time - lastTime;
1280     totalTime += lastTime;
1281     lastTime -= _timeadjustment;
1282     if (lastTime < 0) lastTime = 0;
1283     times.push_back(lastTime);
1284     lastTime = 0;
1285 }
1286 
1287 performance_metrics& TestBase::calcMetrics()
1288 {
1289     CV_Assert(metrics.samples <= (unsigned int)currentIter);
1290     if ((metrics.samples == (unsigned int)currentIter) || times.size() == 0)
1291         return metrics;
1292 
1293     metrics.bytesIn = getTotalInputSize();
1294     metrics.bytesOut = getTotalOutputSize();
1295     metrics.frequency = cv::getTickFrequency();
1296     metrics.samples = (unsigned int)times.size();
1297     metrics.outliers = 0;
1298 
1299     if (metrics.terminationReason != performance_metrics::TERM_INTERRUPT && metrics.terminationReason != performance_metrics::TERM_EXCEPTION)
1300     {
1301         if (currentIter == nIters)
1302             metrics.terminationReason = performance_metrics::TERM_ITERATIONS;
1303         else if (totalTime >= timeLimit)
1304             metrics.terminationReason = performance_metrics::TERM_TIME;
1305         else
1306             metrics.terminationReason = performance_metrics::TERM_UNKNOWN;
1307     }
1308 
1309     std::sort(times.begin(), times.end());
1310 
1311     TimeVector::const_iterator start = times.begin();
1312     TimeVector::const_iterator end = times.end();
1313 
1314     if (getCurrentPerformanceStrategy() == PERF_STRATEGY_BASE)
1315     {
1316         //estimate mean and stddev for log(time)
1317         double gmean = 0;
1318         double gstddev = 0;
1319         int n = 0;
1320         for(TimeVector::const_iterator i = times.begin(); i != times.end(); ++i)
1321         {
1322             double x = static_cast<double>(*i)/runsPerIteration;
1323             if (x < DBL_EPSILON) continue;
1324             double lx = log(x);
1325 
1326             ++n;
1327             double delta = lx - gmean;
1328             gmean += delta / n;
1329             gstddev += delta * (lx - gmean);
1330         }
1331 
1332         gstddev = n > 1 ? sqrt(gstddev / (n - 1)) : 0;
1333 
1334         //filter outliers assuming log-normal distribution
1335         //http://stackoverflow.com/questions/1867426/modeling-distribution-of-performance-measurements
1336         if (gstddev > DBL_EPSILON)
1337         {
1338             double minout = exp(gmean - 3 * gstddev) * runsPerIteration;
1339             double maxout = exp(gmean + 3 * gstddev) * runsPerIteration;
1340             while(*start < minout) ++start, ++metrics.outliers;
1341             do --end, ++metrics.outliers; while(*end > maxout);
1342             ++end, --metrics.outliers;
1343         }
1344     }
1345     else if (getCurrentPerformanceStrategy() == PERF_STRATEGY_SIMPLE)
1346     {
1347         metrics.outliers = static_cast<int>(times.size() * param_max_outliers / 100);
1348         for (unsigned int i = 0; i < metrics.outliers; i++)
1349             --end;
1350     }
1351     else
1352     {
1353         assert(false);
1354     }
1355 
1356     int offset = static_cast<int>(start - times.begin());
1357 
1358     metrics.min = static_cast<double>(*start)/runsPerIteration;
1359     //calc final metrics
1360     unsigned int n = 0;
1361     double gmean = 0;
1362     double gstddev = 0;
1363     double mean = 0;
1364     double stddev = 0;
1365     unsigned int m = 0;
1366     for(; start != end; ++start)
1367     {
1368         double x = static_cast<double>(*start)/runsPerIteration;
1369         if (x > DBL_EPSILON)
1370         {
1371             double lx = log(x);
1372             ++m;
1373             double gdelta = lx - gmean;
1374             gmean += gdelta / m;
1375             gstddev += gdelta * (lx - gmean);
1376         }
1377         ++n;
1378         double delta = x - mean;
1379         mean += delta / n;
1380         stddev += delta * (x - mean);
1381     }
1382 
1383     metrics.mean = mean;
1384     metrics.gmean = exp(gmean);
1385     metrics.gstddev = m > 1 ? sqrt(gstddev / (m - 1)) : 0;
1386     metrics.stddev = n > 1 ? sqrt(stddev / (n - 1)) : 0;
1387     metrics.median = (n % 2
1388             ? (double)times[offset + n / 2]
1389             : 0.5 * (times[offset + n / 2] + times[offset + n / 2 - 1])
1390             ) / runsPerIteration;
1391 
1392     return metrics;
1393 }
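// Under PERF_STRATEGY_BASE the outlier filtering above assumes log-normally
// distributed samples: with gmean and gstddev the running mean and standard
// deviation of log(t/runsPerIteration), samples outside
// [exp(gmean - 3*gstddev), exp(gmean + 3*gstddev)] * runsPerIteration are
// counted as outliers. PERF_STRATEGY_SIMPLE instead trims the slowest
// param_max_outliers percent of the sorted samples.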
1394 
1395 void TestBase::validateMetrics()
1396 {
1397     performance_metrics& m = calcMetrics();
1398 
1399     if (HasFailure()) return;
1400 
1401     ASSERT_GE(m.samples, 1u)
1402       << "  No time measurements were performed.\nstartTimer() and stopTimer() calls are required for performance tests.";
1403 
1404     if (getCurrentPerformanceStrategy() == PERF_STRATEGY_BASE)
1405     {
1406         EXPECT_GE(m.samples, param_min_samples)
1407           << "  Only a few samples are collected.\nPlease increase number of iterations or/and time limit to get reliable performance measurements.";
1408 
1409         if (m.gstddev > DBL_EPSILON)
1410         {
            EXPECT_GT(/*m.gmean * */1., /*m.gmean * */ 2 * sinh(m.gstddev * param_max_deviation))
              << "  Test results are not reliable ((mean-sigma, mean+sigma) deviation interval is greater than the measured time interval).";
        }

        EXPECT_LE(m.outliers, std::max((unsigned int)cvCeil(m.samples * param_max_outliers / 100.), 1u))
          << "  Test results are not reliable (too many outliers).";
    }
    else if (getCurrentPerformanceStrategy() == PERF_STRATEGY_SIMPLE)
    {
        double mean = m.mean * 1000.0 / m.frequency;
        double median = m.median * 1000.0 / m.frequency;
        double stddev = m.stddev * 1000.0 / m.frequency;
        double percents = stddev / mean * 100.0;
        printf("[ PERFSTAT ]    (samples = %d, mean = %.2f, median = %.2f, stddev = %.2f (%.1f%%))\n", (int)m.samples, mean, median, stddev, percents);
    }
    else
    {
        assert(false);
    }
}

void TestBase::reportMetrics(bool toJUnitXML)
{
    performance_metrics& m = calcMetrics();

    if (m.terminationReason == performance_metrics::TERM_SKIP_TEST)
    {
        if (toJUnitXML)
        {
            RecordProperty("custom_status", "skipped");
        }
    }
    else if (toJUnitXML)
    {
        RecordProperty("bytesIn", (int)m.bytesIn);
        RecordProperty("bytesOut", (int)m.bytesOut);
        RecordProperty("term", m.terminationReason);
        RecordProperty("samples", (int)m.samples);
        RecordProperty("outliers", (int)m.outliers);
        RecordProperty("frequency", cv::format("%.0f", m.frequency).c_str());
        RecordProperty("min", cv::format("%.0f", m.min).c_str());
        RecordProperty("median", cv::format("%.0f", m.median).c_str());
        RecordProperty("gmean", cv::format("%.0f", m.gmean).c_str());
        RecordProperty("gstddev", cv::format("%.6f", m.gstddev).c_str());
        RecordProperty("mean", cv::format("%.0f", m.mean).c_str());
        RecordProperty("stddev", cv::format("%.0f", m.stddev).c_str());
#ifdef CV_COLLECT_IMPL_DATA
        if (param_collect_impl)
        {
            RecordProperty("impl_ipp", (int)(implConf.ipp || implConf.icv));
            RecordProperty("impl_ocl", (int)implConf.ocl);
            RecordProperty("impl_plain", (int)implConf.plain);

            std::string rec_line;
            std::vector<cv::String> rec;
            rec = implConf.GetCallsForImpl(CV_IMPL_IPP|CV_IMPL_MT);
            for (size_t i = 0; i < rec.size(); i++) { rec_line += rec[i].c_str(); rec_line += " "; }
            rec = implConf.GetCallsForImpl(CV_IMPL_IPP);
            for (size_t i = 0; i < rec.size(); i++) { rec_line += rec[i].c_str(); rec_line += " "; }
            RecordProperty("impl_rec_ipp", rec_line.c_str());

            rec_line.clear();
            rec = implConf.GetCallsForImpl(CV_IMPL_OCL);
            for (size_t i = 0; i < rec.size(); i++) { rec_line += rec[i].c_str(); rec_line += " "; }
            RecordProperty("impl_rec_ocl", rec_line.c_str());
        }
#endif
    }
    else
    {
        const ::testing::TestInfo* const test_info = ::testing::UnitTest::GetInstance()->current_test_info();
        const char* type_param = test_info->type_param();
        const char* value_param = test_info->value_param();

#if defined(ANDROID) && defined(USE_ANDROID_LOGGING)
        LOGD("[ FAILED   ] %s.%s", test_info->test_case_name(), test_info->name());
#endif

        if (type_param)  LOGD("type      = %11s", type_param);
        if (value_param) LOGD("params    = %11s", value_param);

        switch (m.terminationReason)
        {
        case performance_metrics::TERM_ITERATIONS:
            LOGD("termination reason:  reached maximum number of iterations");
            break;
        case performance_metrics::TERM_TIME:
            LOGD("termination reason:  reached time limit");
            break;
        case performance_metrics::TERM_INTERRUPT:
            LOGD("termination reason:  aborted by the performance testing framework");
            break;
        case performance_metrics::TERM_EXCEPTION:
            LOGD("termination reason:  unhandled exception");
            break;
        case performance_metrics::TERM_UNKNOWN:
        default:
            LOGD("termination reason:  unknown");
            break;
        }

#ifdef CV_COLLECT_IMPL_DATA
        if (param_collect_impl)
        {
            LOGD("impl_ipp =%11d", (int)(implConf.ipp || implConf.icv));
            LOGD("impl_ocl =%11d", (int)implConf.ocl);
            LOGD("impl_plain =%11d", (int)implConf.plain);

            std::string rec_line;
            std::vector<cv::String> rec;
            rec = implConf.GetCallsForImpl(CV_IMPL_IPP|CV_IMPL_MT);
            for (size_t i = 0; i < rec.size(); i++) { rec_line += rec[i].c_str(); rec_line += " "; }
            rec = implConf.GetCallsForImpl(CV_IMPL_IPP);
            for (size_t i = 0; i < rec.size(); i++) { rec_line += rec[i].c_str(); rec_line += " "; }
            LOGD("impl_rec_ipp =%s", rec_line.c_str());

            rec_line.clear();
            rec = implConf.GetCallsForImpl(CV_IMPL_OCL);
            for (size_t i = 0; i < rec.size(); i++) { rec_line += rec[i].c_str(); rec_line += " "; }
            LOGD("impl_rec_ocl =%s", rec_line.c_str());
        }
#endif

        LOGD("bytesIn   =%11lu", (unsigned long)m.bytesIn);
        LOGD("bytesOut  =%11lu", (unsigned long)m.bytesOut);
        if (nIters == (unsigned int)-1 || m.terminationReason == performance_metrics::TERM_ITERATIONS)
            LOGD("samples   =%11u",  m.samples);
        else
            LOGD("samples   =%11u of %u", m.samples, nIters);
        LOGD("outliers  =%11u", m.outliers);
        LOGD("frequency =%11.0f", m.frequency);
        if (m.samples > 0)
        {
            LOGD("min       =%11.0f = %.2fms", m.min, m.min * 1e3 / m.frequency);
            LOGD("median    =%11.0f = %.2fms", m.median, m.median * 1e3 / m.frequency);
            LOGD("gmean     =%11.0f = %.2fms", m.gmean, m.gmean * 1e3 / m.frequency);
            LOGD("gstddev   =%11.8f = %.2fms for 97%% dispersion interval", m.gstddev, m.gmean * 2 * sinh(m.gstddev * 3) * 1e3 / m.frequency);
            LOGD("mean      =%11.0f = %.2fms", m.mean, m.mean * 1e3 / m.frequency);
            LOGD("stddev    =%11.0f = %.2fms", m.stddev, m.stddev * 1e3 / m.frequency);
        }
    }
}

void TestBase::SetUp()
{
    cv::theRNG().state = param_seed; // this rng should generate the same numbers for each run

    if (param_threads >= 0)
        cv::setNumThreads(param_threads);

#ifdef ANDROID
    if (param_affinity_mask)
        setCurrentThreadAffinityMask(param_affinity_mask);
#endif

    verified = false;
    lastTime = 0;
    totalTime = 0;
    runsPerIteration = 1;
    nIters = iterationsLimitDefault;
    currentIter = (unsigned int)-1;
    timeLimit = timeLimitDefault;
    times.clear();
}

void TestBase::TearDown()
{
    if (metrics.terminationReason == performance_metrics::TERM_SKIP_TEST)
    {
        LOGI("\tTest was skipped");
        GTEST_SUCCEED() << "Test was skipped";
    }
    else
    {
        if (!HasFailure() && !verified)
            ADD_FAILURE() << "The test has no sanity checks. There should be at least one check at the end of a performance test.";
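        // Tests normally satisfy this with the SANITY_CHECK family of macros (e.g.
        // SANITY_CHECK(dst)), which verify the output against stored regression data
        // and mark the test as verified.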

        validateMetrics();
        if (HasFailure())
        {
            reportMetrics(false);
            return;
        }
    }

    const ::testing::TestInfo* const test_info = ::testing::UnitTest::GetInstance()->current_test_info();
    const char* type_param = test_info->type_param();
    const char* value_param = test_info->value_param();
    if (value_param) printf("[ VALUE    ] \t%s\n", value_param), fflush(stdout);
    if (type_param)  printf("[ TYPE     ] \t%s\n", type_param), fflush(stdout);

#ifdef CV_COLLECT_IMPL_DATA
    if (param_collect_impl)
    {
        implConf.ShapeUp();
        printf("[ I. FLAGS ] \t");
        if (implConf.ipp_mt)
        {
            if (implConf.icv) { printf("ICV_MT "); std::vector<cv::String> fun = implConf.GetCallsForImpl(CV_IMPL_IPP|CV_IMPL_MT); printf("("); for (size_t i = 0; i < fun.size(); i++) printf("%s ", fun[i].c_str()); printf(") "); }
            if (implConf.ipp) { printf("IPP_MT "); std::vector<cv::String> fun = implConf.GetCallsForImpl(CV_IMPL_IPP|CV_IMPL_MT); printf("("); for (size_t i = 0; i < fun.size(); i++) printf("%s ", fun[i].c_str()); printf(") "); }
        }
        else
        {
            if (implConf.icv) { printf("ICV "); std::vector<cv::String> fun = implConf.GetCallsForImpl(CV_IMPL_IPP); printf("("); for (size_t i = 0; i < fun.size(); i++) printf("%s ", fun[i].c_str()); printf(") "); }
            if (implConf.ipp) { printf("IPP "); std::vector<cv::String> fun = implConf.GetCallsForImpl(CV_IMPL_IPP); printf("("); for (size_t i = 0; i < fun.size(); i++) printf("%s ", fun[i].c_str()); printf(") "); }
        }
        if (implConf.ocl) { printf("OCL "); std::vector<cv::String> fun = implConf.GetCallsForImpl(CV_IMPL_OCL); printf("("); for (size_t i = 0; i < fun.size(); i++) printf("%s ", fun[i].c_str()); printf(") "); }
        if (implConf.plain) printf("PLAIN ");
        if (!(implConf.ipp_mt || implConf.icv || implConf.ipp || implConf.ocl || implConf.plain))
            printf("ERROR ");
        printf("\n");
        fflush(stdout);
    }
#endif
    reportMetrics(true);
}

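// Resolves a relative test-resource path against the OPENCV_TEST_DATA_PATH environment
// variable (falling back to the current directory) and aborts the test early when the
// file does not exist. Illustrative use (the file name is hypothetical):
//
//     cv::Mat img = cv::imread(getDataPath("cv/shared/lena.png"));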
std::string TestBase::getDataPath(const std::string& relativePath)
{
    if (relativePath.empty())
    {
        ADD_FAILURE() << "  Bad path to test resource";
        throw PerfEarlyExitException();
    }

    const char *data_path_dir = getenv("OPENCV_TEST_DATA_PATH");
    const char *path_separator = "/";

    std::string path;
    if (data_path_dir)
    {
        int len = (int)strlen(data_path_dir) - 1;
        if (len < 0) len = 0;
        path = (data_path_dir[0] == 0 ? std::string(".") : std::string(data_path_dir))
                + (data_path_dir[len] == '/' || data_path_dir[len] == '\\' ? "" : path_separator);
    }
    else
    {
        path = ".";
        path += path_separator;
    }

    if (relativePath[0] == '/' || relativePath[0] == '\\')
        path += relativePath.substr(1);
    else
        path += relativePath;

    FILE* fp = fopen(path.c_str(), "r");
    if (fp)
        fclose(fp);
    else
    {
        ADD_FAILURE() << "  Requested file \"" << path << "\" does not exist.";
        throw PerfEarlyExitException();
    }
    return path;
}

void TestBase::RunPerfTestBody()
{
    try
    {
#ifdef CV_COLLECT_IMPL_DATA
        if (param_collect_impl)
            implConf.Reset();
#endif
        this->PerfTestBody();
#ifdef CV_COLLECT_IMPL_DATA
        if (param_collect_impl)
            implConf.GetImpl();
#endif
    }
    catch(PerfSkipTestException&)
    {
        metrics.terminationReason = performance_metrics::TERM_SKIP_TEST;
        return;
    }
    catch(PerfEarlyExitException&)
    {
        metrics.terminationReason = performance_metrics::TERM_INTERRUPT;
        return; // no additional failure logging
    }
    catch(cv::Exception& e)
    {
        metrics.terminationReason = performance_metrics::TERM_EXCEPTION;
        #ifdef HAVE_CUDA
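            // A failed GPU API call can leave the CUDA context unusable; reset the
            // device so subsequent tests start from a clean state.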
            if (e.code == cv::Error::GpuApiCallError)
                cv::cuda::resetDevice();
        #endif
        FAIL() << "Expected: PerfTestBody() doesn't throw an exception.\n  Actual: it throws cv::Exception:\n  " << e.what();
    }
    catch(std::exception& e)
    {
        metrics.terminationReason = performance_metrics::TERM_EXCEPTION;
        FAIL() << "Expected: PerfTestBody() doesn't throw an exception.\n  Actual: it throws std::exception:\n  " << e.what();
    }
    catch(...)
    {
        metrics.terminationReason = performance_metrics::TERM_EXCEPTION;
        FAIL() << "Expected: PerfTestBody() doesn't throw an exception.\n  Actual: it throws...";
    }
}

/*****************************************************************************************\
*                          ::perf::TestBase::_declareHelper
\*****************************************************************************************/
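// These chainable setters implement the 'declare' helper used inside perf test bodies
// to describe inputs/outputs and to override iteration, time, and run limits.
// A minimal sketch of typical usage (the suite/case names and the tested call are
// illustrative, not part of this file):
//
//     PERF_TEST(Sketch, blur3x3)
//     {
//         cv::Mat src(720, 1280, CV_8UC1), dst(720, 1280, CV_8UC1);
//         declare.in(src, WARMUP_RNG).out(dst).time(1.0);
//         TEST_CYCLE() cv::blur(src, dst, cv::Size(3, 3));
//         SANITY_CHECK(dst);
//     }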
TestBase::_declareHelper& TestBase::_declareHelper::iterations(unsigned int n)
{
    test->times.clear();
    test->times.reserve(n);
    test->nIters = std::min(n, TestBase::iterationsLimitDefault);
    test->currentIter = (unsigned int)-1;
    test->metrics.clear();
    return *this;
}

TestBase::_declareHelper& TestBase::_declareHelper::time(double timeLimitSecs)
{
    test->times.clear();
    test->currentIter = (unsigned int)-1;
    test->timeLimit = (int64)(timeLimitSecs * cv::getTickFrequency());
    test->metrics.clear();
    return *this;
}

TestBase::_declareHelper& TestBase::_declareHelper::tbb_threads(int n)
{
    cv::setNumThreads(n);
    return *this;
}

TestBase::_declareHelper& TestBase::_declareHelper::runs(unsigned int runsNumber)
{
    test->runsPerIteration = runsNumber;
    return *this;
}

TestBase::_declareHelper& TestBase::_declareHelper::in(cv::InputOutputArray a1, WarmUpType wtype)
{
    if (!test->times.empty()) return *this;
    TestBase::declareArray(test->inputData, a1, wtype);
    return *this;
}

TestBase::_declareHelper& TestBase::_declareHelper::in(cv::InputOutputArray a1, cv::InputOutputArray a2, WarmUpType wtype)
{
    if (!test->times.empty()) return *this;
    TestBase::declareArray(test->inputData, a1, wtype);
    TestBase::declareArray(test->inputData, a2, wtype);
    return *this;
}

TestBase::_declareHelper& TestBase::_declareHelper::in(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, WarmUpType wtype)
{
    if (!test->times.empty()) return *this;
    TestBase::declareArray(test->inputData, a1, wtype);
    TestBase::declareArray(test->inputData, a2, wtype);
    TestBase::declareArray(test->inputData, a3, wtype);
    return *this;
}

TestBase::_declareHelper& TestBase::_declareHelper::in(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, cv::InputOutputArray a4, WarmUpType wtype)
{
    if (!test->times.empty()) return *this;
    TestBase::declareArray(test->inputData, a1, wtype);
    TestBase::declareArray(test->inputData, a2, wtype);
    TestBase::declareArray(test->inputData, a3, wtype);
    TestBase::declareArray(test->inputData, a4, wtype);
    return *this;
}

TestBase::_declareHelper& TestBase::_declareHelper::out(cv::InputOutputArray a1, WarmUpType wtype)
{
    if (!test->times.empty()) return *this;
    TestBase::declareArray(test->outputData, a1, wtype);
    return *this;
}

TestBase::_declareHelper& TestBase::_declareHelper::out(cv::InputOutputArray a1, cv::InputOutputArray a2, WarmUpType wtype)
{
    if (!test->times.empty()) return *this;
    TestBase::declareArray(test->outputData, a1, wtype);
    TestBase::declareArray(test->outputData, a2, wtype);
    return *this;
}

TestBase::_declareHelper& TestBase::_declareHelper::out(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, WarmUpType wtype)
{
    if (!test->times.empty()) return *this;
    TestBase::declareArray(test->outputData, a1, wtype);
    TestBase::declareArray(test->outputData, a2, wtype);
    TestBase::declareArray(test->outputData, a3, wtype);
    return *this;
}

TestBase::_declareHelper& TestBase::_declareHelper::out(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, cv::InputOutputArray a4, WarmUpType wtype)
{
    if (!test->times.empty()) return *this;
    TestBase::declareArray(test->outputData, a1, wtype);
    TestBase::declareArray(test->outputData, a2, wtype);
    TestBase::declareArray(test->outputData, a3, wtype);
    TestBase::declareArray(test->outputData, a4, wtype);
    return *this;
}

TestBase::_declareHelper& TestBase::_declareHelper::strategy(enum PERF_STRATEGY s)
{
    test->testStrategy = s;
    return *this;
}

TestBase::_declareHelper::_declareHelper(TestBase* t) : test(t)
{
}

/*****************************************************************************************\
*                                  miscellaneous
\*****************************************************************************************/

namespace {
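// Compares two indices by the keypoints they refer to, enabling an indirect
// (index-based) sort so keypoints and descriptor rows can be permuted together.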
struct KeypointComparator
{
    std::vector<cv::KeyPoint>& pts_;
    comparators::KeypointGreater cmp;

    KeypointComparator(std::vector<cv::KeyPoint>& pts) : pts_(pts), cmp() {}

    bool operator()(int idx1, int idx2) const
    {
        return cmp(pts_[idx1], pts_[idx2]);
    }
private:
    const KeypointComparator& operator=(const KeypointComparator&); // quiet MSVC
};
} // namespace

void perf::sort(std::vector<cv::KeyPoint>& pts, cv::InputOutputArray descriptors)
{
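    // Sort an index array with KeypointComparator instead of the data itself, then use
    // the resulting permutation to reorder the keypoints and their descriptor rows in
    // lockstep.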
    cv::Mat desc = descriptors.getMat();

    CV_Assert(pts.size() == (size_t)desc.rows);
    cv::AutoBuffer<int> idxs(desc.rows);

    for (int i = 0; i < desc.rows; ++i)
        idxs[i] = i;

    std::sort((int*)idxs, (int*)idxs + desc.rows, KeypointComparator(pts));

    std::vector<cv::KeyPoint> spts(pts.size());
    cv::Mat sdesc(desc.size(), desc.type());

    for (int j = 0; j < desc.rows; ++j)
    {
        spts[j] = pts[idxs[j]];
        cv::Mat row = sdesc.row(j);
        desc.row(idxs[j]).copyTo(row);
    }

    spts.swap(pts);
    sdesc.copyTo(desc);
}

/*****************************************************************************************\
*                                  ::perf::GpuPerf
\*****************************************************************************************/
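// True when the CUDA implementation was selected on the command line
// (e.g. via --perf_impl=cuda); lets CUDA-aware perf tests pick the target device.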
bool perf::GpuPerf::targetDevice()
{
    return param_impl == "cuda";
}

/*****************************************************************************************\
*                                  ::perf::PrintTo
\*****************************************************************************************/
namespace perf
{

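// GTest value printer for parameterized mat types: emits depth and channel count,
// e.g. CV_32FC3 prints as "32FC3".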
void PrintTo(const MatType& t, ::std::ostream* os)
{
    switch( CV_MAT_DEPTH((int)t) )
    {
        case CV_8U:  *os << "8U";  break;
        case CV_8S:  *os << "8S";  break;
        case CV_16U: *os << "16U"; break;
        case CV_16S: *os << "16S"; break;
        case CV_32S: *os << "32S"; break;
        case CV_32F: *os << "32F"; break;
        case CV_64F: *os << "64F"; break;
        case CV_USRTYPE1: *os << "USRTYPE1"; break;
        default: *os << "INVALID_TYPE"; break;
    }
    *os << 'C' << CV_MAT_CN((int)t);
}

} // namespace perf

/*****************************************************************************************\
*                                  ::cv::PrintTo
\*****************************************************************************************/
namespace cv {

void PrintTo(const String& str, ::std::ostream* os)
{
    *os << "\"" << str << "\"";
}

void PrintTo(const Size& sz, ::std::ostream* os)
{
    *os << /*"Size:" << */sz.width << "x" << sz.height;
}

}  // namespace cv