/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

//==============================================================================
//
// Error handling helpers
//
//==============================================================================

namespace
{
    #define error_entry(entry)     { entry, #entry }
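    // e.g. error_entry(NCV_SUCCESS) expands to { NCV_SUCCESS, "NCV_SUCCESS" },
    // pairing each status code with its printable name for the lookup table below.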

    struct ErrorEntry
    {
        int code;
        const char* str;
    };

    struct ErrorEntryComparer
    {
        int code;
        ErrorEntryComparer(int code_) : code(code_) {}
        bool operator()(const ErrorEntry& e) const { return e.code == code; }
    };

    //////////////////////////////////////////////////////////////////////////
    // NCV errors

    const ErrorEntry ncv_errors [] =
    {
        error_entry( NCV_SUCCESS ),
        error_entry( NCV_UNKNOWN_ERROR ),
        error_entry( NCV_CUDA_ERROR ),
        error_entry( NCV_NPP_ERROR ),
        error_entry( NCV_FILE_ERROR ),
        error_entry( NCV_NULL_PTR ),
        error_entry( NCV_INCONSISTENT_INPUT ),
        error_entry( NCV_TEXTURE_BIND_ERROR ),
        error_entry( NCV_DIMENSIONS_INVALID ),
        error_entry( NCV_INVALID_ROI ),
        error_entry( NCV_INVALID_STEP ),
        error_entry( NCV_INVALID_SCALE ),
        error_entry( NCV_ALLOCATOR_NOT_INITIALIZED ),
        error_entry( NCV_ALLOCATOR_BAD_ALLOC ),
        error_entry( NCV_ALLOCATOR_BAD_DEALLOC ),
        error_entry( NCV_ALLOCATOR_INSUFFICIENT_CAPACITY ),
        error_entry( NCV_ALLOCATOR_DEALLOC_ORDER ),
        error_entry( NCV_ALLOCATOR_BAD_REUSE ),
        error_entry( NCV_MEM_COPY_ERROR ),
        error_entry( NCV_MEM_RESIDENCE_ERROR ),
        error_entry( NCV_MEM_INSUFFICIENT_CAPACITY ),
        error_entry( NCV_HAAR_INVALID_PIXEL_STEP ),
        error_entry( NCV_HAAR_TOO_MANY_FEATURES_IN_CLASSIFIER ),
        error_entry( NCV_HAAR_TOO_MANY_FEATURES_IN_CASCADE ),
        error_entry( NCV_HAAR_TOO_LARGE_FEATURES ),
        error_entry( NCV_HAAR_XML_LOADING_EXCEPTION ),
        error_entry( NCV_NOIMPL_HAAR_TILTED_FEATURES ),
        error_entry( NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW ),
        error_entry( NPPST_SUCCESS ),
        error_entry( NPPST_ERROR ),
        error_entry( NPPST_CUDA_KERNEL_EXECUTION_ERROR ),
        error_entry( NPPST_NULL_POINTER_ERROR ),
        error_entry( NPPST_TEXTURE_BIND_ERROR ),
        error_entry( NPPST_MEMCPY_ERROR ),
        error_entry( NPPST_MEM_ALLOC_ERR ),
        error_entry( NPPST_MEMFREE_ERR ),
        error_entry( NPPST_INVALID_ROI ),
        error_entry( NPPST_INVALID_STEP ),
        error_entry( NPPST_INVALID_SCALE ),
        error_entry( NPPST_MEM_INSUFFICIENT_BUFFER ),
        error_entry( NPPST_MEM_RESIDENCE_ERROR ),
        error_entry( NPPST_MEM_INTERNAL_ERROR )
    };

    const size_t ncv_error_num = sizeof(ncv_errors) / sizeof(ncv_errors[0]);
}

cv::String cv::cuda::getNcvErrorMessage(int code)
{
    size_t idx = std::find_if(ncv_errors, ncv_errors + ncv_error_num, ErrorEntryComparer(code)) - ncv_errors;

    const char* msg = (idx != ncv_error_num) ? ncv_errors[idx].str : "Unknown error code";
    String str = cv::format("%s [Code = %d]", msg, code);

    return str;
}
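// Example (sketch): turning an NCV status into a readable message.
// 'someNcvCall' stands in for any function that returns an NCVStatus.
//     NCVStatus st = someNcvCall();
//     if (st != NCV_SUCCESS)
//         ncvDebugOutput(cv::cuda::getNcvErrorMessage(st));   // prints e.g. "NCV_CUDA_ERROR [Code = <n>]"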


static void stdDebugOutput(const cv::String &msg)
{
    std::cout << msg.c_str() << std::endl;
}


static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput;


void ncvDebugOutput(const cv::String &msg)
{
    debugOutputHandler(msg);
}


void ncvSetDebugOutputHandler(NCVDebugOutputHandler *func)
{
    debugOutputHandler = func;
}
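// Example (sketch): routing NCV debug messages to a custom sink.
// 'myDebugSink' is a hypothetical handler supplied by the caller.
//     static void myDebugSink(const cv::String &msg) { std::cerr << msg << std::endl; }
//     ncvSetDebugOutputHandler(myDebugSink);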


//==============================================================================
//
// Memory wrappers and helpers
//
//==============================================================================


Ncv32u alignUp(Ncv32u what, Ncv32u alignment)
{
    Ncv32u alignMask = alignment - 1;
    Ncv32u inverseAlignMask = ~alignMask;
    Ncv32u res = (what + alignMask) & inverseAlignMask;
    return res;
}
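// Worked example: alignUp(13, 8) == 16 and alignUp(16, 8) == 16.
// The bitmask trick above is only valid for power-of-two alignments.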


void NCVMemPtr::clear()
{
    ptr = NULL;
    memtype = NCVMemoryTypeNone;
}


void NCVMemSegment::clear()
{
    begin.clear();
    size = 0;
}


NCVStatus memSegCopyHelper(void *dst, NCVMemoryType dstType, const void *src, NCVMemoryType srcType, size_t sz, cudaStream_t cuStream)
{
    NCVStatus ncvStat;
    switch (dstType)
    {
    case NCVMemoryTypeHostPageable:
    case NCVMemoryTypeHostPinned:
        switch (srcType)
        {
        case NCVMemoryTypeHostPageable:
        case NCVMemoryTypeHostPinned:
            memcpy(dst, src, sz);
            ncvStat = NCV_SUCCESS;
            break;
        case NCVMemoryTypeDevice:
            if (cuStream != 0)
            {
                ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
            }
            else
            {
                ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR);
            }
            ncvStat = NCV_SUCCESS;
            break;
        default:
            ncvStat = NCV_MEM_RESIDENCE_ERROR;
        }
        break;
    case NCVMemoryTypeDevice:
        switch (srcType)
        {
        case NCVMemoryTypeHostPageable:
        case NCVMemoryTypeHostPinned:
            if (cuStream != 0)
            {
                ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
            }
            else
            {
                ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyHostToDevice), NCV_CUDA_ERROR);
            }
            ncvStat = NCV_SUCCESS;
            break;
        case NCVMemoryTypeDevice:
            if (cuStream != 0)
            {
                ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
            }
            else
            {
                ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR);
            }
            ncvStat = NCV_SUCCESS;
            break;
        default:
            ncvStat = NCV_MEM_RESIDENCE_ERROR;
        }
        break;
    default:
        ncvStat = NCV_MEM_RESIDENCE_ERROR;
    }

    return ncvStat;
}
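// Example (sketch): synchronous download of a device buffer into pageable host memory.
// 'd_ptr' is assumed to be a valid device pointer holding at least 'sz' bytes.
//     std::vector<Ncv8u> hostBuf(sz);
//     NCVStatus st = memSegCopyHelper(hostBuf.data(), NCVMemoryTypeHostPageable,
//                                     d_ptr, NCVMemoryTypeDevice, sz, 0 /* no stream: blocking copy */);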


NCVStatus memSegCopyHelper2D(void *dst, Ncv32u dstPitch, NCVMemoryType dstType,
                             const void *src, Ncv32u srcPitch, NCVMemoryType srcType,
                             Ncv32u widthbytes, Ncv32u height, cudaStream_t cuStream)
{
    NCVStatus ncvStat;
    switch (dstType)
    {
    case NCVMemoryTypeHostPageable:
    case NCVMemoryTypeHostPinned:
        switch (srcType)
        {
        case NCVMemoryTypeHostPageable:
        case NCVMemoryTypeHostPinned:
            for (Ncv32u i = 0; i < height; i++)
            {
                memcpy((char*)dst + i * dstPitch, (char*)src + i * srcPitch, widthbytes);
            }
            ncvStat = NCV_SUCCESS;
            break;
        case NCVMemoryTypeDevice:
            if (cuStream != 0)
            {
                ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
            }
            else
            {
                ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR);
            }
            ncvStat = NCV_SUCCESS;
            break;
        default:
            ncvStat = NCV_MEM_RESIDENCE_ERROR;
        }
        break;
    case NCVMemoryTypeDevice:
        switch (srcType)
        {
        case NCVMemoryTypeHostPageable:
        case NCVMemoryTypeHostPinned:
            if (cuStream != 0)
            {
                ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
            }
            else
            {
                ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice), NCV_CUDA_ERROR);
            }
            ncvStat = NCV_SUCCESS;
            break;
        case NCVMemoryTypeDevice:
            if (cuStream != 0)
            {
                ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
            }
            else
            {
                ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR);
            }
            ncvStat = NCV_SUCCESS;
            break;
        default:
            ncvStat = NCV_MEM_RESIDENCE_ERROR;
        }
        break;
    default:
        ncvStat = NCV_MEM_RESIDENCE_ERROR;
    }

    return ncvStat;
}
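// Example (sketch): downloading a pitched device image into a tightly packed host buffer.
// 'd_img' and 'd_pitch' are assumed to come from a pitched device allocation (e.g. cudaMallocPitch).
//     std::vector<Ncv8u> hostImg(width * height);
//     memSegCopyHelper2D(hostImg.data(), width, NCVMemoryTypeHostPageable,
//                        d_img, d_pitch, NCVMemoryTypeDevice,
//                        width /* row size in bytes */, height, 0 /* blocking copy */);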


//===================================================================
//
// NCVMemStackAllocator class members implementation
//
//===================================================================


NCVMemStackAllocator::NCVMemStackAllocator(Ncv32u alignment_) :
    _memType(NCVMemoryTypeNone),
    _alignment(alignment_),
    allocBegin(NULL),
    begin(NULL),
    end(NULL),
    currentSize(0),
    _maxSize(0),
    bReusesMemory(false)
{
    NcvBool bProperAlignment = (alignment_ & (alignment_ - 1)) == 0;
    ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: alignment not power of 2");
}


NCVMemStackAllocator::NCVMemStackAllocator(NCVMemoryType memT, size_t capacity, Ncv32u alignment_, void *reusePtr) :
    _memType(memT),
    _alignment(alignment_),
    allocBegin(NULL),
    currentSize(0),
    _maxSize(0)
{
    NcvBool bProperAlignment = (alignment_ & (alignment_ - 1)) == 0;
    ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: _alignment not power of 2");
    ncvAssertPrintCheck(memT != NCVMemoryTypeNone, "NCVMemStackAllocator ctor:: Incorrect allocator type");

    allocBegin = NULL;

    if (reusePtr == NULL && capacity != 0)
    {
        bReusesMemory = false;
        switch (memT)
        {
        case NCVMemoryTypeDevice:
            ncvAssertCUDAReturn(cudaMalloc(&allocBegin, capacity), );
            break;
        case NCVMemoryTypeHostPinned:
            ncvAssertCUDAReturn(cudaMallocHost(&allocBegin, capacity), );
            break;
        case NCVMemoryTypeHostPageable:
            allocBegin = (Ncv8u *)malloc(capacity);
            break;
        default:;
        }
    }
    else
    {
        bReusesMemory = true;
        allocBegin = (Ncv8u *)reusePtr;
    }

    if (capacity == 0)
    {
        allocBegin = (Ncv8u *)(0x1);
    }

    if (!isCounting())
    {
        begin = allocBegin;
        end = begin + capacity;
    }
}
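// Typical two-pass usage (sketch): run once with a counting allocator to measure the peak
// requirement, then allocate that much once and replay with a real allocator.
//     NCVMemStackAllocator counter(static_cast<Ncv32u>(devProp.textureAlignment));    // counting: memType is None
//     ... drive the processing pipeline with 'counter' ...
//     NCVMemStackAllocator gpuStack(NCVMemoryTypeDevice, counter.maxSize(),
//                                   static_cast<Ncv32u>(devProp.textureAlignment));   // single real allocation
// 'devProp' is assumed to be the cudaDeviceProp of the target device.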


NCVMemStackAllocator::~NCVMemStackAllocator()
{
    if (allocBegin != NULL)
    {
        ncvAssertPrintCheck(currentSize == 0, "NCVMemStackAllocator dtor:: not all objects were deallocated properly, forcing destruction");

        if (!bReusesMemory && (allocBegin != (Ncv8u *)(0x1)))
        {
            switch (_memType)
            {
            case NCVMemoryTypeDevice:
                ncvAssertCUDAReturn(cudaFree(allocBegin), );
                break;
            case NCVMemoryTypeHostPinned:
                ncvAssertCUDAReturn(cudaFreeHost(allocBegin), );
                break;
            case NCVMemoryTypeHostPageable:
                free(allocBegin);
                break;
            default:;
            }
        }

        allocBegin = NULL;
    }
}


NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, size_t size)
{
    seg.clear();
    ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);

    size = alignUp(static_cast<Ncv32u>(size), this->_alignment);
    this->currentSize += size;
    this->_maxSize = std::max(this->_maxSize, this->currentSize);

    if (!isCounting())
    {
        size_t availSize = end - begin;
        ncvAssertReturn(size <= availSize, NCV_ALLOCATOR_INSUFFICIENT_CAPACITY);
    }

    seg.begin.ptr = begin;
    seg.begin.memtype = this->_memType;
    seg.size = size;
    begin += size;

    return NCV_SUCCESS;
}


NCVStatus NCVMemStackAllocator::dealloc(NCVMemSegment &seg)
{
    ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
    ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
    ncvAssertReturn(seg.begin.ptr != NULL || isCounting(), NCV_ALLOCATOR_BAD_DEALLOC);
    ncvAssertReturn(seg.begin.ptr == begin - seg.size, NCV_ALLOCATOR_DEALLOC_ORDER);

    currentSize -= seg.size;
    begin -= seg.size;

    seg.clear();

    ncvAssertReturn(allocBegin <= begin, NCV_ALLOCATOR_BAD_DEALLOC);

    return NCV_SUCCESS;
}
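// Deallocation must mirror allocation order (stack / LIFO discipline), e.g.
//     allocator.alloc(segA, szA); allocator.alloc(segB, szB);
//     allocator.dealloc(segB);    allocator.dealloc(segA);     // OK
// Releasing segA before segB would trip the NCV_ALLOCATOR_DEALLOC_ORDER check above.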


NcvBool NCVMemStackAllocator::isInitialized(void) const
{
    return (((this->_alignment & (this->_alignment - 1)) == 0) && isCounting()) || this->allocBegin != NULL;
}


NcvBool NCVMemStackAllocator::isCounting(void) const
{
    return this->_memType == NCVMemoryTypeNone;
}


NCVMemoryType NCVMemStackAllocator::memType(void) const
{
    return this->_memType;
}


Ncv32u NCVMemStackAllocator::alignment(void) const
{
    return this->_alignment;
}


size_t NCVMemStackAllocator::maxSize(void) const
{
    return this->_maxSize;
}


//===================================================================
//
// NCVMemNativeAllocator class members implementation
//
//===================================================================


NCVMemNativeAllocator::NCVMemNativeAllocator(NCVMemoryType memT, Ncv32u alignment_) :
    _memType(memT),
    _alignment(alignment_),
    currentSize(0),
    _maxSize(0)
{
    ncvAssertPrintReturn(memT != NCVMemoryTypeNone, "NCVMemNativeAllocator ctor:: counting not permitted for this allocator type", );
}
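// Example (sketch): the native allocator maps every request straight onto cudaMalloc/cudaFree
// (or malloc/free for pageable host memory), with no pooling.
//     NCVMemNativeAllocator gpuAlloc(NCVMemoryTypeDevice, static_cast<Ncv32u>(devProp.textureAlignment));
//     NCVMemSegment seg;
//     gpuAlloc.alloc(seg, 1024);   // one cudaMalloc
//     gpuAlloc.dealloc(seg);       // one cudaFree
// 'devProp' is assumed to be the cudaDeviceProp of the target device.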


NCVMemNativeAllocator::~NCVMemNativeAllocator()
{
    ncvAssertPrintCheck(currentSize == 0, "NCVMemNativeAllocator dtor:: detected memory leak");
}


NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, size_t size)
{
    seg.clear();
    ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);

    switch (this->_memType)
    {
    case NCVMemoryTypeDevice:
        ncvAssertCUDAReturn(cudaMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
        break;
    case NCVMemoryTypeHostPinned:
        ncvAssertCUDAReturn(cudaMallocHost(&seg.begin.ptr, size), NCV_CUDA_ERROR);
        break;
    case NCVMemoryTypeHostPageable:
        seg.begin.ptr = (Ncv8u *)malloc(size);
        break;
    default:;
    }

    this->currentSize += alignUp(static_cast<Ncv32u>(size), this->_alignment);
    this->_maxSize = std::max(this->_maxSize, this->currentSize);

    seg.begin.memtype = this->_memType;
    seg.size = size;

    return NCV_SUCCESS;
}


NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg)
{
    ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
    ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
    ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC);

    ncvAssertReturn(currentSize >= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC);
    currentSize -= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment);

    switch (this->_memType)
    {
    case NCVMemoryTypeDevice:
        ncvAssertCUDAReturn(cudaFree(seg.begin.ptr), NCV_CUDA_ERROR);
        break;
    case NCVMemoryTypeHostPinned:
        ncvAssertCUDAReturn(cudaFreeHost(seg.begin.ptr), NCV_CUDA_ERROR);
        break;
    case NCVMemoryTypeHostPageable:
        free(seg.begin.ptr);
        break;
    default:;
    }

    seg.clear();

    return NCV_SUCCESS;
}


NcvBool NCVMemNativeAllocator::isInitialized(void) const
{
    return (this->_alignment != 0);
}


NcvBool NCVMemNativeAllocator::isCounting(void) const
{
    return false;
}


NCVMemoryType NCVMemNativeAllocator::memType(void) const
{
    return this->_memType;
}


Ncv32u NCVMemNativeAllocator::alignment(void) const
{
    return this->_alignment;
}


size_t NCVMemNativeAllocator::maxSize(void) const
{
    return this->_maxSize;
}


//===================================================================
//
// Time and timer routines
//
//===================================================================


typedef struct _NcvTimeMoment NcvTimeMoment;

#if defined(_WIN32) || defined(_WIN64)

#include <Windows.h>

typedef struct _NcvTimeMoment
{
    LONGLONG moment, freq;
} NcvTimeMoment;


static void _ncvQueryMoment(NcvTimeMoment *t)
{
    QueryPerformanceFrequency((LARGE_INTEGER *)&(t->freq));
    QueryPerformanceCounter((LARGE_INTEGER *)&(t->moment));
}


double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
    return 1000000.0 * t->moment / t->freq;
}


double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
    return 1000000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}


double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
    return 1000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}

#elif defined(__GNUC__)

#include <sys/time.h>

typedef struct _NcvTimeMoment
{
    struct timeval tv;
    struct timezone tz;
} NcvTimeMoment;


void _ncvQueryMoment(NcvTimeMoment *t)
{
    gettimeofday(&t->tv, &t->tz);
}


double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
    return 1000000.0 * t->tv.tv_sec + (double)t->tv.tv_usec;
}


double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
    return (((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000000 + (double)t2->tv.tv_usec - (double)t1->tv.tv_usec);
}

double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
    return ((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000;
}

#endif //#if defined(_WIN32) || defined(_WIN64)


struct _NcvTimer
{
    NcvTimeMoment t1, t2;
};


NcvTimer ncvStartTimer(void)
{
    struct _NcvTimer *t;
    t = (struct _NcvTimer *)malloc(sizeof(struct _NcvTimer));
    _ncvQueryMoment(&t->t1);
    return t;
}


double ncvEndQueryTimerUs(NcvTimer t)
{
    double res;
    _ncvQueryMoment(&t->t2);
    res = _ncvMomentsDiffToMicroseconds(&t->t1, &t->t2);
    free(t);
    return res;
}


double ncvEndQueryTimerMs(NcvTimer t)
{
    double res;
    _ncvQueryMoment(&t->t2);
    res = _ncvMomentsDiffToMilliseconds(&t->t1, &t->t2);
    free(t);
    return res;
}
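// Example (sketch): timing a section of host code with the helpers above.
//     NcvTimer timer = ncvStartTimer();
//     ... work to be measured ...
//     double elapsedMs = ncvEndQueryTimerMs(timer);   // also releases the timer object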


//===================================================================
//
// Operations with rectangles
//
//===================================================================

struct RectConvert
{
    cv::Rect operator()(const NcvRect32u& nr) const { return cv::Rect(nr.x, nr.y, nr.width, nr.height); }
    NcvRect32u operator()(const cv::Rect& nr) const
    {
        NcvRect32u rect;
        rect.x = nr.x;
        rect.y = nr.y;
        rect.width = nr.width;
        rect.height = nr.height;
        return rect;
    }
};

static void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights)
{
#ifndef HAVE_OPENCV_OBJDETECT
    (void) hypotheses;
    (void) groupThreshold;
    (void) eps;
    (void) weights;
    CV_Error(cv::Error::StsNotImplemented, "This functionality requires objdetect module");
#else
    std::vector<cv::Rect> rects(hypotheses.size());
    std::transform(hypotheses.begin(), hypotheses.end(), rects.begin(), RectConvert());

    if (weights)
    {
        std::vector<int> weights_int;
        weights_int.assign(weights->begin(), weights->end());
        cv::groupRectangles(rects, weights_int, groupThreshold, eps);
    }
    else
    {
        cv::groupRectangles(rects, groupThreshold, eps);
    }
    std::transform(rects.begin(), rects.end(), hypotheses.begin(), RectConvert());
    hypotheses.resize(rects.size());
#endif
}



NCVStatus ncvGroupRectangles_host(NCVVector<NcvRect32u> &hypotheses,
                                  Ncv32u &numHypotheses,
                                  Ncv32u minNeighbors,
                                  Ncv32f intersectEps,
                                  NCVVector<Ncv32u> *hypothesesWeights)
{
    ncvAssertReturn(hypotheses.memType() == NCVMemoryTypeHostPageable ||
                    hypotheses.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
    if (hypothesesWeights != NULL)
    {
        ncvAssertReturn(hypothesesWeights->memType() == NCVMemoryTypeHostPageable ||
                        hypothesesWeights->memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
    }

    if (numHypotheses == 0)
    {
        return NCV_SUCCESS;
    }

    std::vector<NcvRect32u> rects(numHypotheses);
    memcpy(&rects[0], hypotheses.ptr(), numHypotheses * sizeof(NcvRect32u));

    std::vector<Ncv32u> weights;
    if (hypothesesWeights != NULL)
    {
        groupRectangles(rects, minNeighbors, intersectEps, &weights);
    }
    else
    {
        groupRectangles(rects, minNeighbors, intersectEps, NULL);
    }

    numHypotheses = (Ncv32u)rects.size();
    if (numHypotheses > 0)
    {
        memcpy(hypotheses.ptr(), &rects[0], numHypotheses * sizeof(NcvRect32u));
    }

    if (hypothesesWeights != NULL)
    {
        memcpy(hypothesesWeights->ptr(), &weights[0], numHypotheses * sizeof(Ncv32u));
    }

    return NCV_SUCCESS;
}
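// Example (sketch): merging raw per-window detections on the host.
// 'detections' is assumed to be an NCVVector<NcvRect32u> residing in host memory,
// with 'numDetections' valid rectangles at its front.
//     NCVStatus st = ncvGroupRectangles_host(detections, numDetections,
//                                            4 /* minNeighbors */, 0.2f /* eps */, NULL);
//     // on success, 'numDetections' holds the number of grouped rectangles kept in 'detections'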


template <class T>
static NCVStatus drawRectsWrapperHost(T *h_dst,
                                      Ncv32u dstStride,
                                      Ncv32u dstWidth,
                                      Ncv32u dstHeight,
                                      NcvRect32u *h_rects,
                                      Ncv32u numRects,
                                      T color)
{
    ncvAssertReturn(h_dst != NULL && h_rects != NULL, NCV_NULL_PTR);
    ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
    ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
    ncvAssertReturn(numRects != 0, NCV_SUCCESS);
    ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);

    for (Ncv32u i = 0; i < numRects; i++)
    {
        NcvRect32u rect = h_rects[i];

        if (rect.x < dstWidth)
        {
            for (Ncv32u each = rect.y; each < rect.y + rect.height && each < dstHeight; each++)
            {
                h_dst[each * dstStride + rect.x] = color;
            }
        }
        if (rect.x + rect.width - 1 < dstWidth)
        {
            for (Ncv32u each = rect.y; each < rect.y + rect.height && each < dstHeight; each++)
            {
                h_dst[each * dstStride + rect.x + rect.width - 1] = color;
            }
        }
        if (rect.y < dstHeight)
        {
            for (Ncv32u j = rect.x; j < rect.x + rect.width && j < dstWidth; j++)
            {
                h_dst[rect.y * dstStride + j] = color;
            }
        }
        if (rect.y + rect.height - 1 < dstHeight)
        {
            for (Ncv32u j = rect.x; j < rect.x + rect.width && j < dstWidth; j++)
            {
                h_dst[(rect.y + rect.height - 1) * dstStride + j] = color;
            }
        }
    }

    return NCV_SUCCESS;
}


NCVStatus ncvDrawRects_8u_host(Ncv8u *h_dst,
                               Ncv32u dstStride,
                               Ncv32u dstWidth,
                               Ncv32u dstHeight,
                               NcvRect32u *h_rects,
                               Ncv32u numRects,
                               Ncv8u color)
{
    return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}


NCVStatus ncvDrawRects_32u_host(Ncv32u *h_dst,
                                Ncv32u dstStride,
                                Ncv32u dstWidth,
                                Ncv32u dstHeight,
                                NcvRect32u *h_rects,
                                Ncv32u numRects,
                                Ncv32u color)
{
    return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color);
}
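// Example (sketch): outlining the grouped detections in an 8-bit grayscale host image.
// 'img' is assumed to be a CV_8UC1 cv::Mat, and 'detections'/'numDetections' the output of
// ncvGroupRectangles_host above.
//     ncvDrawRects_8u_host(img.data, static_cast<Ncv32u>(img.step), img.cols, img.rows,
//                          detections.ptr(), numDetections, 255 /* border intensity */);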