/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "opencv2/opencv_modules.hpp"

#ifndef HAVE_OPENCV_CUDEV

#error "opencv_cudev is required"

#else

#include "opencv2/cudaarithm.hpp"
#include "opencv2/cudev.hpp"
#include "opencv2/core/private.cuda.hpp"

using namespace cv;
using namespace cv::cuda;
using namespace cv::cudev;

namespace {

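// Device functor for NORM_MINMAX: linearly maps the source range
// [smin, smax] (read from device memory at kernel run time) onto the
// requested destination range [dmin, dmax].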
template <typename T, typename R, typename I>
struct ConvertorMinMax : unary_function<T, R>
{
    typedef typename LargerType<T, R>::type larger_type1;
    typedef typename LargerType<larger_type1, I>::type larger_type2;
    typedef typename LargerType<larger_type2, float>::type scalar_type;

    scalar_type dmin, dmax;
    const I* minMaxVals;

    __device__ R operator ()(typename TypeTraits<T>::parameter_type src) const
    {
        const scalar_type smin = minMaxVals[0];
        const scalar_type smax = minMaxVals[1];

        // Guard against a constant image: if smax - smin is (near) zero,
        // use a zero scale so every pixel maps to dmin.
        const scalar_type scale = (dmax - dmin) * (smax - smin > numeric_limits<scalar_type>::epsilon() ? 1.0 / (smax - smin) : 0.0);
        const scalar_type shift = dmin - smin * scale;

        return cudev::saturate_cast<R>(scale * src + shift);
    }
};

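// Host-side driver for NORM_MINMAX: computes the per-image min/max on the
// device (optionally under a mask), then rescales every pixel with
// ConvertorMinMax. The reduction result stays on the GPU, so no
// device-to-host synchronization is needed between the two passes.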
template <typename T, typename R, typename I>
void normalizeMinMax(const GpuMat& _src, GpuMat& _dst, double a, double b, const GpuMat& mask, Stream& stream)
{
    const GpuMat_<T>& src = (const GpuMat_<T>&)_src;
    GpuMat_<R>& dst = (GpuMat_<R>&)_dst;

    // 1x2 device buffer that receives {min, max} from the reduction.
    BufferPool pool(stream);
    GpuMat_<I> minMaxVals(1, 2, pool.getAllocator());

    if (mask.empty())
    {
        gridFindMinMaxVal(src, minMaxVals, stream);
    }
    else
    {
        gridFindMinMaxVal(src, minMaxVals, globPtr<uchar>(mask), stream);
    }

    ConvertorMinMax<T, R, I> cvt;
    cvt.dmin = std::min(a, b);
    cvt.dmax = std::max(a, b);
    cvt.minMaxVals = minMaxVals[0];

    if (mask.empty())
    {
        gridTransformUnary(src, dst, cvt, stream);
    }
    else
    {
        // Zero the destination first so pixels outside the mask are defined.
        dst.setTo(Scalar::all(0), stream);
        gridTransformUnary(src, dst, cvt, globPtr<uchar>(mask), stream);
    }
}

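// Device functor for NORM_INF / NORM_L1 / NORM_L2: scales each pixel by
// a / norm, where the norm is read from device memory at kernel run time.
// For L2 the reduction produces the sum of squares, so the square root is
// taken here; normL2 is a compile-time flag, so the unused branch costs nothing.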
template <typename T, typename R, typename I, bool normL2>
struct ConvertorNorm : unary_function<T, R>
{
    typedef typename LargerType<T, R>::type larger_type1;
    typedef typename LargerType<larger_type1, I>::type larger_type2;
    typedef typename LargerType<larger_type2, float>::type scalar_type;

    scalar_type a;
    const I* normVal;

    __device__ R operator ()(typename TypeTraits<T>::parameter_type src) const
    {
        sqrt_func<scalar_type> sqrt;

        // Guard against a (near) zero norm: map everything to zero instead of dividing.
        scalar_type scale = normL2 ? sqrt(*normVal) : *normVal;
        scale = scale > numeric_limits<scalar_type>::epsilon() ? a / scale : 0.0;

        return cudev::saturate_cast<R>(scale * src);
    }
};

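// Host-side driver for the norm-based modes: computes the requested norm on
// the device (optionally under a mask) in a first pass, then rescales every
// pixel with ConvertorNorm in a second pass. NORM_L2 accumulates the sum of
// squares; the square root is applied inside the functor.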
template <typename T, typename R, typename I>
void normalizeNorm(const GpuMat& _src, GpuMat& _dst, double a, int normType, const GpuMat& mask, Stream& stream)
{
    const GpuMat_<T>& src = (const GpuMat_<T>&)_src;
    GpuMat_<R>& dst = (GpuMat_<R>&)_dst;

    // 1x1 device buffer that receives the reduction result.
    BufferPool pool(stream);
    GpuMat_<I> normVal(1, 1, pool.getAllocator());

    if (normType == NORM_L1)
    {
        if (mask.empty())
        {
            gridCalcSum(abs_(cvt_<I>(src)), normVal, stream);
        }
        else
        {
            gridCalcSum(abs_(cvt_<I>(src)), normVal, globPtr<uchar>(mask), stream);
        }
    }
    else if (normType == NORM_L2)
    {
        if (mask.empty())
        {
            gridCalcSum(sqr_(cvt_<I>(src)), normVal, stream);
        }
        else
        {
            gridCalcSum(sqr_(cvt_<I>(src)), normVal, globPtr<uchar>(mask), stream);
        }
    }
    else // NORM_INF
    {
        if (mask.empty())
        {
            gridFindMaxVal(abs_(cvt_<I>(src)), normVal, stream);
        }
        else
        {
            gridFindMaxVal(abs_(cvt_<I>(src)), normVal, globPtr<uchar>(mask), stream);
        }
    }

    if (normType == NORM_L2)
    {
        ConvertorNorm<T, R, I, true> cvt;
        cvt.a = a;
        cvt.normVal = normVal[0];

        if (mask.empty())
        {
            gridTransformUnary(src, dst, cvt, stream);
        }
        else
        {
            // Zero the destination first so pixels outside the mask are defined.
            dst.setTo(Scalar::all(0), stream);
            gridTransformUnary(src, dst, cvt, globPtr<uchar>(mask), stream);
        }
    }
    else
    {
        ConvertorNorm<T, R, I, false> cvt;
        cvt.a = a;
        cvt.normVal = normVal[0];

        if (mask.empty())
        {
            gridTransformUnary(src, dst, cvt, stream);
        }
        else
        {
            // Zero the destination first so pixels outside the mask are defined.
            dst.setTo(Scalar::all(0), stream);
            gridTransformUnary(src, dst, cvt, globPtr<uchar>(mask), stream);
        }
    }
}

} // namespace

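// Public entry point. Dispatches on the source depth: the computation runs at
// CV_32F (CV_64F for double input) and the result is converted to the requested
// dtype at the end. Only single-channel sources are supported. Assuming the
// default mask/stream arguments declared in the public cudaarithm.hpp header,
// a typical call might look like:
//   cv::cuda::normalize(src, dst, 0, 255, cv::NORM_MINMAX, CV_8U);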
void cv::cuda::normalize(InputArray _src, OutputArray _dst, double a, double b, int normType, int dtype, InputArray _mask, Stream& stream)
{
    typedef void (*func_minmax_t)(const GpuMat& _src, GpuMat& _dst, double a, double b, const GpuMat& mask, Stream& stream);
    typedef void (*func_norm_t)(const GpuMat& _src, GpuMat& _dst, double a, int normType, const GpuMat& mask, Stream& stream);

    // One instantiation per source depth, indexed by src.depth() (CV_8U .. CV_64F).
    static const func_minmax_t funcs_minmax[] =
    {
        normalizeMinMax<uchar, float, float>,
        normalizeMinMax<schar, float, float>,
        normalizeMinMax<ushort, float, float>,
        normalizeMinMax<short, float, float>,
        normalizeMinMax<int, float, float>,
        normalizeMinMax<float, float, float>,
        normalizeMinMax<double, double, double>
    };

    static const func_norm_t funcs_norm[] =
    {
        normalizeNorm<uchar, float, float>,
        normalizeNorm<schar, float, float>,
        normalizeNorm<ushort, float, float>,
        normalizeNorm<short, float, float>,
        normalizeNorm<int, float, float>,
        normalizeNorm<float, float, float>,
        normalizeNorm<double, double, double>
    };

    CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_MINMAX );

    const GpuMat src = getInputMat(_src, stream);
    const GpuMat mask = getInputMat(_mask, stream);

    CV_Assert( src.channels() == 1 );
    CV_Assert( mask.empty() || (mask.size() == src.size() && mask.type() == CV_8U) );

    dtype = CV_MAT_DEPTH(dtype);

    // Do the arithmetic at float precision (or double for CV_64F input) and
    // convert to the requested dtype afterwards.
    const int src_depth = src.depth();
    const int tmp_depth = src_depth <= CV_32F ? CV_32F : src_depth;

    GpuMat dst;
    if (dtype == tmp_depth)
    {
        // The requested type matches the working type: write straight into _dst.
        _dst.create(src.size(), tmp_depth);
        dst = getOutputMat(_dst, src.size(), tmp_depth, stream);
    }
    else
    {
        // Otherwise normalize into a pooled temporary and convert at the end.
        BufferPool pool(stream);
        dst = pool.getBuffer(src.size(), tmp_depth);
    }

    if (normType == NORM_MINMAX)
    {
        const func_minmax_t func = funcs_minmax[src_depth];
        func(src, dst, a, b, mask, stream);
    }
    else
    {
        const func_norm_t func = funcs_norm[src_depth];
        func(src, dst, a, normType, mask, stream);
    }

    if (dtype == tmp_depth)
    {
        syncOutput(dst, _dst, stream);
    }
    else
    {
        dst.convertTo(_dst, dtype, stream);
    }
}

#endif