/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __OPENCV_PHOTO_HPP__
#define __OPENCV_PHOTO_HPP__

#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"

/**
@defgroup photo Computational Photography
@{
    @defgroup photo_denoise Denoising
    @defgroup photo_hdr HDR imaging

This section describes high dynamic range imaging algorithms, namely tonemapping, exposure alignment,
camera calibration with multiple exposures and exposure fusion.

    @defgroup photo_clone Seamless Cloning
    @defgroup photo_render Non-Photorealistic Rendering
    @defgroup photo_c C API
@}
  */

namespace cv
{

//! @addtogroup photo
//! @{

//! the inpainting algorithm
enum
{
    INPAINT_NS    = 0, // Navier-Stokes algorithm
    INPAINT_TELEA = 1 // A. Telea algorithm
};

enum
{
    NORMAL_CLONE = 1,
    MIXED_CLONE  = 2,
    MONOCHROME_TRANSFER = 3
};

enum
{
    RECURS_FILTER = 1,
    NORMCONV_FILTER = 2
};

/** @brief Restores the selected region in an image using the region neighborhood.

@param src Input 8-bit 1-channel or 3-channel image.
@param inpaintMask Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that
needs to be inpainted.
@param dst Output image with the same size and type as src .
@param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered
by the algorithm.
@param flags Inpainting method that could be one of the following:
-   **INPAINT_NS** Navier-Stokes based method [Navier01]
-   **INPAINT_TELEA** Method by Alexandru Telea @cite Telea04 .

The function reconstructs the selected image area from the pixels near the area boundary. The
function may be used to remove dust and scratches from a scanned photo, or to remove undesirable
objects from still images or video. See <http://en.wikipedia.org/wiki/Inpainting> for more details.

@note
   -   An example using the inpainting technique can be found at
        opencv_source_code/samples/cpp/inpaint.cpp
   -   (Python) An example using the inpainting technique can be found at
        opencv_source_code/samples/python2/inpaint.py
 */
CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,
        OutputArray dst, double inpaintRadius, int flags );
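/* Usage sketch (illustrative, not part of the upstream documentation): remove a scratch
   marked by a binary mask with the Telea method. The file names are placeholders, and
   reading the images additionally requires the image codecs module.

       cv::Mat damaged = cv::imread("damaged.png");                            // 8-bit 3-channel
       cv::Mat mask    = cv::imread("scratch_mask.png", cv::IMREAD_GRAYSCALE); // non-zero = inpaint
       cv::Mat restored;
       cv::inpaint(damaged, mask, restored, 3.0, cv::INPAINT_TELEA);
*/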

//! @addtogroup photo_denoise
//! @{

/** @brief Perform image denoising using the Non-local Means Denoising algorithm
<http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational
optimizations. The noise is expected to be Gaussian white noise.

@param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image.
@param dst Output image with the same size and type as src .
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength. A big h value perfectly removes noise but also
removes image details; a smaller h value preserves details but also preserves some noise

This function is expected to be applied to grayscale images. For color images look at
fastNlMeansDenoisingColored. An advanced use of this function is manual denoising of a color
image in a different colorspace. Such an approach is used in fastNlMeansDenoisingColored, which
converts the image to the CIELAB colorspace and then separately denoises the L and AB components
with different h parameters.
 */
CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst, float h = 3,
        int templateWindowSize = 7, int searchWindowSize = 21);
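/* Usage sketch (illustrative, not part of the upstream documentation): denoise a noisy
   grayscale photograph with a moderate filter strength. The file name is a placeholder.

       cv::Mat noisy = cv::imread("noisy_gray.png", cv::IMREAD_GRAYSCALE);
       cv::Mat clean;
       cv::fastNlMeansDenoising(noisy, clean, 10.0f, 7, 21);   // h = 10, default window sizes
*/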

/** @brief Perform image denoising using the Non-local Means Denoising algorithm
<http://www.ipol.im/pub/algo/bcm_non_local_means_denoising/> with several computational
optimizations. The noise is expected to be Gaussian white noise.

@param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel,
2-channel, 3-channel or 4-channel image.
@param dst Output image with the same size and type as src .
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Array of parameters regulating filter strength, either one
parameter applied to all channels or one per channel in dst. A big h value
perfectly removes noise but also removes image details; a smaller h
value preserves details but also preserves some noise
@param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1

This function is expected to be applied to grayscale images. For color images look at
fastNlMeansDenoisingColored. An advanced use of this function is manual denoising of a color
image in a different colorspace. Such an approach is used in fastNlMeansDenoisingColored, which
converts the image to the CIELAB colorspace and then separately denoises the L and AB components
with different h parameters.
 */
CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst,
                                        const std::vector<float>& h,
                                        int templateWindowSize = 7, int searchWindowSize = 21,
                                        int normType = NORM_L2);

/** @brief Modification of the fastNlMeansDenoising function for colored images

@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src .
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength for the luminance component. A bigger h value perfectly
removes noise but also removes image details; a smaller h value preserves details but also preserves
some noise
@param hColor The same as h but for color components. For most images a value of 10 will be
enough to remove colored noise and not distort colors

The function converts the image to the CIELAB colorspace and then separately denoises the L and AB
components with the given h parameters using the fastNlMeansDenoising function.
 */
CV_EXPORTS_W void fastNlMeansDenoisingColored( InputArray src, OutputArray dst,
        float h = 3, float hColor = 3,
        int templateWindowSize = 7, int searchWindowSize = 21);
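/* Usage sketch (illustrative, not part of the upstream documentation): denoise a color
   photograph, keeping the default 7x7 template and 21x21 search windows.

       cv::Mat noisy = cv::imread("noisy_color.png");   // 8-bit 3-channel
       cv::Mat clean;
       cv::fastNlMeansDenoisingColored(noisy, clean, 10.0f, 10.0f);
*/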

/** @brief Modification of the fastNlMeansDenoising function for image sequences where consecutive images have been
captured in a small period of time, for example, a video sequence. This version of the function is for grayscale
images or for manual manipulation with colorspaces. For more details see
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>

@param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or
4-channel images sequence. All images should have the same type and
size.
@param imgToDenoiseIndex Index of the target image to denoise in the srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise the
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength. A bigger h value
perfectly removes noise but also removes image details; a smaller h
value preserves details but also preserves some noise
 */
CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst,
        int imgToDenoiseIndex, int temporalWindowSize,
        float h = 3, int templateWindowSize = 7, int searchWindowSize = 21);
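/* Usage sketch (illustrative, not part of the upstream documentation): denoise the middle
   frame of a short grayscale sequence using its two temporal neighbours
   (temporalWindowSize = 3). Filling the frames vector is left out.

       std::vector<cv::Mat> frames;   // e.g. 5 consecutive grayscale frames
       cv::Mat denoised;
       cv::fastNlMeansDenoisingMulti(frames, denoised,
                                     2,      // imgToDenoiseIndex: denoise frames[2]
                                     3,      // temporalWindowSize: frames[1..3] are used
                                     4.0f, 7, 21);
*/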

/** @brief Modification of the fastNlMeansDenoising function for image sequences where consecutive images have been
captured in a small period of time, for example, a video sequence. This version of the function is for grayscale
images or for manual manipulation with colorspaces. For more details see
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.6394>

@param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel,
2-channel, 3-channel or 4-channel images sequence. All images should
have the same type and size.
@param imgToDenoiseIndex Index of the target image to denoise in the srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise the
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Array of parameters regulating filter strength, either one
parameter applied to all channels or one per channel in dst. A big h value
perfectly removes noise but also removes image details; a smaller h
value preserves details but also preserves some noise
@param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1
 */
CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst,
                                             int imgToDenoiseIndex, int temporalWindowSize,
                                             const std::vector<float>& h,
                                             int templateWindowSize = 7, int searchWindowSize = 21,
                                             int normType = NORM_L2);

/** @brief Modification of the fastNlMeansDenoisingMulti function for colored image sequences

@param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and
size.
@param imgToDenoiseIndex Index of the target image to denoise in the srcImgs sequence
@param temporalWindowSize Number of surrounding images to use for target image denoising. Should
be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to
imgToDenoiseIndex + temporalWindowSize / 2 from srcImgs will be used to denoise the
srcImgs[imgToDenoiseIndex] image.
@param dst Output image with the same size and type as srcImgs images.
@param templateWindowSize Size in pixels of the template patch that is used to compute weights.
Should be odd. Recommended value 7 pixels
@param searchWindowSize Size in pixels of the window that is used to compute a weighted average for
a given pixel. Should be odd. Affects performance linearly: greater searchWindowSize - greater
denoising time. Recommended value 21 pixels
@param h Parameter regulating filter strength for the luminance component. A bigger h value perfectly
removes noise but also removes image details; a smaller h value preserves details but also preserves
some noise.
@param hColor The same as h but for color components.

The function converts the images to the CIELAB colorspace and then separately denoises the L and AB
components with the given h parameters using the fastNlMeansDenoisingMulti function.
 */
CV_EXPORTS_W void fastNlMeansDenoisingColoredMulti( InputArrayOfArrays srcImgs, OutputArray dst,
        int imgToDenoiseIndex, int temporalWindowSize,
        float h = 3, float hColor = 3,
        int templateWindowSize = 7, int searchWindowSize = 21);

/** @brief The primal-dual algorithm is an algorithm for solving special types of variational problems (that is,
finding a function to minimize some functional). As image denoising, in particular, may be seen
as a variational problem, the primal-dual algorithm can be used to perform denoising, and this is
exactly what is implemented.

It should be noted that this implementation was taken from the July 2013 blog entry
@cite MA13 , which also contained (slightly more general) ready-to-use source code in Python.
Subsequently, that code was rewritten in C++ using OpenCV by Vadim Pisarevsky at the end
of July 2013 and finally it was slightly adapted by later authors.

Although a thorough discussion and justification of the algorithm involved may be found in
@cite ChambolleEtAl, it might make sense to skim over it here, following @cite MA13 . To begin
with, we consider a 1-byte gray-level image as a function from the rectangular domain of
pixels (it may be seen as the set
\f$\left\{(x,y)\in\mathbb{N}\times\mathbb{N}\mid 1\leq x\leq n,\;1\leq y\leq m\right\}\f$ for some
\f$m,\;n\in\mathbb{N}\f$) into \f$\{0,1,\dots,255\}\f$. We shall denote the noised images as \f$f_i\f$ and with
this view, given some image \f$x\f$ of the same size, we may measure how bad it is by the formula

\f[\left\|\left\|\nabla x\right\|\right\| + \lambda\sum_i\left\|\left\|x-f_i\right\|\right\|\f]

\f$\|\|\cdot\|\|\f$ here denotes the \f$L_2\f$-norm and, as you see, the first addend states that we want our
image to be smooth (ideally, having zero gradient, thus being constant) and the second states that
we want our result to be close to the observations we've got. If we treat \f$x\f$ as a function, this is
exactly the functional that we seek to minimize, and here the primal-dual algorithm comes into play.

@param observations This array should contain one or more noised versions of the image that is to
be restored.
@param result Here the denoised image will be stored. There is no need to do pre-allocation of
storage space, as it will be automatically allocated, if necessary.
@param lambda Corresponds to \f$\lambda\f$ in the formulas above. As it is enlarged, the smooth
(blurred) images are treated more favorably than detailed (but maybe more noised) ones. Roughly
speaking, as it becomes smaller, the result will be more blurred but more severe outliers will be
removed.
@param niters Number of iterations that the algorithm will run. Of course, the more iterations the
better, but it is hard to quantitatively refine this statement, so just use the default and
increase it if the results are poor.
 */
CV_EXPORTS_W void denoise_TVL1(const std::vector<Mat>& observations, Mat& result, double lambda=1.0, int niters=30);
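/* Usage sketch (illustrative, not part of the upstream documentation): restore an image
   from several noisy shots of the same static scene.

       std::vector<cv::Mat> observations;   // several noisy 8-bit grayscale shots
       cv::Mat restored;
       cv::denoise_TVL1(observations, restored, 1.0, 30);
*/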

//! @} photo_denoise

//! @addtogroup photo_hdr
//! @{

enum { LDR_SIZE = 256 };

/** @brief Base class for tonemapping algorithms - tools that are used to map an HDR image to an 8-bit range.
 */
class CV_EXPORTS_W Tonemap : public Algorithm
{
public:
    /** @brief Tonemaps an image

    @param src source image - 32-bit 3-channel Mat
    @param dst destination image - 32-bit 3-channel Mat with values in [0, 1] range
     */
    CV_WRAP virtual void process(InputArray src, OutputArray dst) = 0;

    CV_WRAP virtual float getGamma() const = 0;
    CV_WRAP virtual void setGamma(float gamma) = 0;
};

/** @brief Creates a simple linear mapper with gamma correction

@param gamma positive value for gamma correction. A gamma value of 1.0 implies no correction, a gamma
of 2.2f is suitable for most displays.
Generally gamma \> 1 brightens the image and gamma \< 1 darkens it.
 */
CV_EXPORTS_W Ptr<Tonemap> createTonemap(float gamma = 1.0f);

/** @brief Adaptive logarithmic mapping is a fast global tonemapping algorithm that scales the image in
the logarithmic domain.

Since it's a global operator the same function is applied to all pixels; it is controlled by the
bias parameter.

Optional saturation enhancement is possible as described in @cite FL02 .

For more information see @cite DM03 .
 */
class CV_EXPORTS_W TonemapDrago : public Tonemap
{
public:

    CV_WRAP virtual float getSaturation() const = 0;
    CV_WRAP virtual void setSaturation(float saturation) = 0;

    CV_WRAP virtual float getBias() const = 0;
    CV_WRAP virtual void setBias(float bias) = 0;
};

/** @brief Creates TonemapDrago object

@param gamma gamma value for gamma correction. See createTonemap
@param saturation positive saturation enhancement value. 1.0 preserves saturation, values greater
than 1 increase saturation and values less than 1 decrease it.
@param bias value for bias function in [0, 1] range. Values from 0.7 to 0.9 usually give the best
results, default value is 0.85.
 */
CV_EXPORTS_W Ptr<TonemapDrago> createTonemapDrago(float gamma = 1.0f, float saturation = 1.0f, float bias = 0.85f);
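/* Usage sketch (illustrative, not part of the upstream documentation): compress a 32-bit
   floating point HDR image to a displayable 8-bit image. The .hdr file name is a
   placeholder and loading it requires Radiance HDR support in the image codecs module.

       cv::Mat hdr = cv::imread("scene.hdr", -1);   // 32-bit 3-channel, loaded unchanged
       cv::Mat ldr;
       cv::createTonemapDrago(1.0f, 1.0f, 0.85f)->process(hdr, ldr);   // values in [0, 1]
       cv::Mat ldr8;
       ldr.convertTo(ldr8, CV_8UC3, 255.0);         // scale to 8-bit for saving or display
*/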

/** @brief This algorithm decomposes the image into two layers: a base layer and a detail layer using a bilateral filter,
and compresses the contrast of the base layer, thus preserving all the details.

This implementation uses the regular bilateral filter from OpenCV.

Saturation enhancement is possible as in TonemapDrago.

For more information see @cite DD02 .
 */
class CV_EXPORTS_W TonemapDurand : public Tonemap
{
public:

    CV_WRAP virtual float getSaturation() const = 0;
    CV_WRAP virtual void setSaturation(float saturation) = 0;

    CV_WRAP virtual float getContrast() const = 0;
    CV_WRAP virtual void setContrast(float contrast) = 0;

    CV_WRAP virtual float getSigmaSpace() const = 0;
    CV_WRAP virtual void setSigmaSpace(float sigma_space) = 0;

    CV_WRAP virtual float getSigmaColor() const = 0;
    CV_WRAP virtual void setSigmaColor(float sigma_color) = 0;
};

/** @brief Creates TonemapDurand object

@param gamma gamma value for gamma correction. See createTonemap
@param contrast resulting contrast on logarithmic scale, i.e. log(max / min), where max and min
are maximum and minimum luminance values of the resulting image.
@param saturation saturation enhancement value. See createTonemapDrago
@param sigma_space bilateral filter sigma in coordinate space
@param sigma_color bilateral filter sigma in color space
 */
CV_EXPORTS_W Ptr<TonemapDurand>
createTonemapDurand(float gamma = 1.0f, float contrast = 4.0f, float saturation = 1.0f, float sigma_space = 2.0f, float sigma_color = 2.0f);

/** @brief This is a global tonemapping operator that models the human visual system.

The mapping function is controlled by an adaptation parameter that is computed using light
adaptation and color adaptation.

For more information see @cite RD05 .
 */
class CV_EXPORTS_W TonemapReinhard : public Tonemap
{
public:
    CV_WRAP virtual float getIntensity() const = 0;
    CV_WRAP virtual void setIntensity(float intensity) = 0;

    CV_WRAP virtual float getLightAdaptation() const = 0;
    CV_WRAP virtual void setLightAdaptation(float light_adapt) = 0;

    CV_WRAP virtual float getColorAdaptation() const = 0;
    CV_WRAP virtual void setColorAdaptation(float color_adapt) = 0;
};

/** @brief Creates TonemapReinhard object

@param gamma gamma value for gamma correction. See createTonemap
@param intensity result intensity in [-8, 8] range. Greater intensity produces brighter results.
@param light_adapt light adaptation in [0, 1] range. If 1, adaptation is based only on pixel
value; if 0, it's global; otherwise it's a weighted mean of these two cases.
@param color_adapt chromatic adaptation in [0, 1] range. If 1, channels are treated independently;
if 0, the adaptation level is the same for each channel.
 */
CV_EXPORTS_W Ptr<TonemapReinhard>
createTonemapReinhard(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f);

/** @brief This algorithm transforms the image to a contrast representation using gradients on all levels of a
Gaussian pyramid, transforms the contrast values to HVS response and scales the response. After this the
image is reconstructed from the new contrast values.

For more information see @cite MM06 .
 */
class CV_EXPORTS_W TonemapMantiuk : public Tonemap
{
public:
    CV_WRAP virtual float getScale() const = 0;
    CV_WRAP virtual void setScale(float scale) = 0;

    CV_WRAP virtual float getSaturation() const = 0;
    CV_WRAP virtual void setSaturation(float saturation) = 0;
};

/** @brief Creates TonemapMantiuk object

@param gamma gamma value for gamma correction. See createTonemap
@param scale contrast scale factor. HVS response is multiplied by this parameter, thus compressing
dynamic range. Values from 0.6 to 0.9 produce the best results.
@param saturation saturation enhancement value. See createTonemapDrago
 */
CV_EXPORTS_W Ptr<TonemapMantiuk>
createTonemapMantiuk(float gamma = 1.0f, float scale = 0.7f, float saturation = 1.0f);

/** @brief The base class for algorithms that align images of the same scene with different exposures
 */
class CV_EXPORTS_W AlignExposures : public Algorithm
{
public:
    /** @brief Aligns images

    @param src vector of input images
    @param dst vector of aligned images
    @param times vector of exposure time values for each image
    @param response 256x1 matrix with inverse camera response function for each pixel value, it should
    have the same number of channels as images.
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst,
                                 InputArray times, InputArray response) = 0;
};

/** @brief This algorithm converts images to median threshold bitmaps (1 for pixels brighter than median
luminance and 0 otherwise) and then aligns the resulting bitmaps using bit operations.

It is invariant to exposure, so exposure values and camera response are not necessary.

In this implementation new image regions are filled with zeros.

For more information see @cite GW03 .
 */
class CV_EXPORTS_W AlignMTB : public AlignExposures
{
public:
    CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst,
                                 InputArray times, InputArray response) = 0;

    /** @brief Short version of process, that doesn't take extra arguments.

    @param src vector of input images
    @param dst vector of aligned images
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst) = 0;

    /** @brief Calculates the shift between two images, i.e. how the second image should be shifted to make it
    correspond with the first.

    @param img0 first image
    @param img1 second image
     */
    CV_WRAP virtual Point calculateShift(InputArray img0, InputArray img1) = 0;
    /** @brief Helper function that shifts a Mat, filling new regions with zeros.

    @param src input image
    @param dst result image
    @param shift shift value
     */
    CV_WRAP virtual void shiftMat(InputArray src, OutputArray dst, const Point shift) = 0;
    /** @brief Computes the median threshold and exclude bitmaps of the given image.

    @param img input image
    @param tb median threshold bitmap
    @param eb exclude bitmap
     */
    CV_WRAP virtual void computeBitmaps(InputArray img, OutputArray tb, OutputArray eb) = 0;

    CV_WRAP virtual int getMaxBits() const = 0;
    CV_WRAP virtual void setMaxBits(int max_bits) = 0;

    CV_WRAP virtual int getExcludeRange() const = 0;
    CV_WRAP virtual void setExcludeRange(int exclude_range) = 0;

    CV_WRAP virtual bool getCut() const = 0;
    CV_WRAP virtual void setCut(bool value) = 0;
};

/** @brief Creates AlignMTB object

@param max_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are
usually good enough (31 and 63 pixels shift respectively).
@param exclude_range range for exclusion bitmap that is constructed to suppress noise around the
median value.
@param cut if true, cuts images; otherwise fills the new regions with zeros.
 */
CV_EXPORTS_W Ptr<AlignMTB> createAlignMTB(int max_bits = 6, int exclude_range = 4, bool cut = true);
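/* Usage sketch (illustrative, not part of the upstream documentation): align a handheld
   bracketed exposure sequence before merging it into an HDR image. Loading the
   differently exposed 8-bit shots into "images" is left out.

       std::vector<cv::Mat> images;
       cv::Ptr<cv::AlignMTB> align = cv::createAlignMTB();
       align->process(images, images);   // aligns in place; MTB needs no exposure times
*/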

/** @brief The base class for camera response calibration algorithms.
 */
class CV_EXPORTS_W CalibrateCRF : public Algorithm
{
public:
    /** @brief Recovers inverse camera response.

    @param src vector of input images
    @param dst 256x1 matrix with inverse camera response function
    @param times vector of exposure time values for each image
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
};

/** @brief The inverse camera response function is extracted for each brightness value by minimizing an objective
function as a linear system. The objective function is constructed using pixel values at the same position
in all images; an extra term is added to make the result smoother.

For more information see @cite DM97 .
 */
class CV_EXPORTS_W CalibrateDebevec : public CalibrateCRF
{
public:
    CV_WRAP virtual float getLambda() const = 0;
    CV_WRAP virtual void setLambda(float lambda) = 0;

    CV_WRAP virtual int getSamples() const = 0;
    CV_WRAP virtual void setSamples(int samples) = 0;

    CV_WRAP virtual bool getRandom() const = 0;
    CV_WRAP virtual void setRandom(bool random) = 0;
};

/** @brief Creates CalibrateDebevec object

@param samples number of pixel locations to use
@param lambda smoothness term weight. Greater values produce smoother results, but can alter the
response.
@param random if true, sample pixel locations are chosen at random; otherwise they form a
rectangular grid.
 */
CV_EXPORTS_W Ptr<CalibrateDebevec> createCalibrateDebevec(int samples = 70, float lambda = 10.0f, bool random = false);
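/* Usage sketch (illustrative, not part of the upstream documentation): estimate the
   inverse camera response function from an aligned bracketed sequence. The exposure
   times below are placeholders.

       std::vector<cv::Mat> images;                              // aligned 8-bit exposures
       std::vector<float> times = { 1/30.0f, 1/8.0f, 0.5f };     // exposure times in seconds
       cv::Mat response;
       cv::createCalibrateDebevec()->process(images, response, times);
*/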

/** @brief The inverse camera response function is extracted for each brightness value by minimizing an objective
function as a linear system. This algorithm uses all image pixels.

For more information see @cite RB99 .
 */
class CV_EXPORTS_W CalibrateRobertson : public CalibrateCRF
{
public:
    CV_WRAP virtual int getMaxIter() const = 0;
    CV_WRAP virtual void setMaxIter(int max_iter) = 0;

    CV_WRAP virtual float getThreshold() const = 0;
    CV_WRAP virtual void setThreshold(float threshold) = 0;

    CV_WRAP virtual Mat getRadiance() const = 0;
};

/** @brief Creates CalibrateRobertson object

@param max_iter maximal number of Gauss-Seidel solver iterations.
@param threshold target difference between results of two successive steps of the minimization.
 */
CV_EXPORTS_W Ptr<CalibrateRobertson> createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f);

/** @brief The base class for algorithms that can merge an exposure sequence into a single image.
 */
class CV_EXPORTS_W MergeExposures : public Algorithm
{
public:
    /** @brief Merges images.

    @param src vector of input images
    @param dst result image
    @param times vector of exposure time values for each image
    @param response 256x1 matrix with inverse camera response function for each pixel value, it should
    have the same number of channels as images.
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
                                 InputArray times, InputArray response) = 0;
};

/** @brief The resulting HDR image is calculated as a weighted average of the exposures, considering exposure
values and camera response.

For more information see @cite DM97 .
 */
class CV_EXPORTS_W MergeDebevec : public MergeExposures
{
public:
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
                                 InputArray times, InputArray response) = 0;
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
};

/** @brief Creates MergeDebevec object
 */
CV_EXPORTS_W Ptr<MergeDebevec> createMergeDebevec();
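/* Usage sketch (illustrative, not part of the upstream documentation): merge an aligned,
   calibrated exposure sequence into a 32-bit HDR image and tonemap it for display.
   "images", "times" and "response" are assumed to be prepared as in the calibration
   sketch above.

       cv::Mat hdr;
       cv::createMergeDebevec()->process(images, hdr, times, response);

       cv::Mat ldr;
       cv::createTonemap(2.2f)->process(hdr, ldr);   // simple gamma tonemapping to [0, 1]
*/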

/** @brief Pixels are weighted using contrast, saturation and well-exposedness measures, then the images are
combined using Laplacian pyramids.

The resulting image weight is constructed as a weighted average of the contrast, saturation and
well-exposedness measures.

The resulting image doesn't require tonemapping and can be converted to an 8-bit image by multiplying
by 255, but it's recommended to apply gamma correction and/or linear tonemapping.

For more information see @cite MK07 .
 */
class CV_EXPORTS_W MergeMertens : public MergeExposures
{
public:
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
                                 InputArray times, InputArray response) = 0;
    /** @brief Short version of process, that doesn't take extra arguments.

    @param src vector of input images
    @param dst result image
     */
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst) = 0;

    CV_WRAP virtual float getContrastWeight() const = 0;
    CV_WRAP virtual void setContrastWeight(float contrast_weight) = 0;

    CV_WRAP virtual float getSaturationWeight() const = 0;
    CV_WRAP virtual void setSaturationWeight(float saturation_weight) = 0;

    CV_WRAP virtual float getExposureWeight() const = 0;
    CV_WRAP virtual void setExposureWeight(float exposure_weight) = 0;
};

/** @brief Creates MergeMertens object

@param contrast_weight contrast measure weight. See MergeMertens.
@param saturation_weight saturation measure weight
@param exposure_weight well-exposedness measure weight
 */
CV_EXPORTS_W Ptr<MergeMertens>
createMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f, float exposure_weight = 0.0f);
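/* Usage sketch (illustrative, not part of the upstream documentation): exposure fusion
   with Mertens' method needs neither exposure times nor a camera response. Loading the
   aligned 8-bit exposures into "images" is left out.

       std::vector<cv::Mat> images;
       cv::Mat fusion;
       cv::createMergeMertens()->process(images, fusion);
       cv::Mat fusion8;
       fusion.convertTo(fusion8, CV_8UC3, 255.0);   // ready for display or saving
*/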

/** @brief The resulting HDR image is calculated as a weighted average of the exposures, considering exposure
values and camera response.

For more information see @cite RB99 .
 */
class CV_EXPORTS_W MergeRobertson : public MergeExposures
{
public:
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
                                 InputArray times, InputArray response) = 0;
    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
};

/** @brief Creates MergeRobertson object
 */
CV_EXPORTS_W Ptr<MergeRobertson> createMergeRobertson();

//! @} photo_hdr

/** @brief Transforms a color image to a grayscale image. It is a basic tool in digital printing, stylized
black-and-white photograph rendering, and in many single channel image processing applications
@cite CL12 .

@param src Input 8-bit 3-channel image.
@param grayscale Output 8-bit 1-channel image.
@param color_boost Output 8-bit 3-channel image.

This function is to be applied on color images.
 */
CV_EXPORTS_W void decolor( InputArray src, OutputArray grayscale, OutputArray color_boost);
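/* Usage sketch (illustrative, not part of the upstream documentation): contrast-preserving
   decolorization of a color photograph. The file name is a placeholder.

       cv::Mat color = cv::imread("photo.png");   // 8-bit 3-channel
       cv::Mat gray, color_boost;
       cv::decolor(color, gray, color_boost);
*/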

//! @addtogroup photo_clone
//! @{

/** @brief Image editing tasks concern either global changes (color/intensity corrections, filters,
deformations) or local changes restricted to a selection. Here we are interested in achieving local
changes, ones that are restricted to a manually selected region (ROI), in a seamless and effortless
manner. The extent of the changes ranges from slight distortions to complete replacement by novel
content @cite PM03 .

@param src Input 8-bit 3-channel image.
@param dst Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param p Point in dst image where object is placed.
@param blend Output image with the same size and type as dst.
@param flags Cloning method that could be one of the following:
-   **NORMAL_CLONE** The power of the method is fully expressed when inserting objects with
complex outlines into a new background
-   **MIXED_CLONE** The classic method, color-based selection and alpha masking might be time
consuming and often leaves an undesirable halo. Seamless cloning, even averaged with the
original image, is not effective. Mixed seamless cloning based on a loose selection proves
effective.
-   **MONOCHROME_TRANSFER** Monochrome transfer allows the user to easily replace certain features of
one object by alternative features.
 */
CV_EXPORTS_W void seamlessClone( InputArray src, InputArray dst, InputArray mask, Point p,
        OutputArray blend, int flags);
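/* Usage sketch (illustrative, not part of the upstream documentation): paste an object
   into a new background so that the seam is invisible. The mask is white over the object;
   the file names are placeholders.

       cv::Mat object     = cv::imread("object.png");        // source, 8-bit 3-channel
       cv::Mat background = cv::imread("background.png");    // destination
       cv::Mat mask       = cv::imread("object_mask.png");   // 8-bit 1- or 3-channel
       cv::Point center(background.cols / 2, background.rows / 2);
       cv::Mat blended;
       cv::seamlessClone(object, background, mask, center, blended, cv::NORMAL_CLONE);
*/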

/** @brief Given an original color image, two differently colored versions of this image can be mixed
seamlessly.

@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src .
@param red_mul R-channel multiply factor.
@param green_mul G-channel multiply factor.
@param blue_mul B-channel multiply factor.

Multiplication factors are between 0.5 and 2.5.
 */
CV_EXPORTS_W void colorChange(InputArray src, InputArray mask, OutputArray dst, float red_mul = 1.0f,
        float green_mul = 1.0f, float blue_mul = 1.0f);

/** @brief Applying an appropriate non-linear transformation to the gradient field inside the selection and
then integrating back with a Poisson solver locally modifies the apparent illumination of an image.

@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src.
@param alpha Value ranges between 0-2.
@param beta Value ranges between 0-2.

This is useful to highlight under-exposed foreground objects or to reduce specular reflections.
 */
CV_EXPORTS_W void illuminationChange(InputArray src, InputArray mask, OutputArray dst,
        float alpha = 0.2f, float beta = 0.4f);

/** @brief By retaining only the gradients at edge locations, before integrating with the Poisson solver, one
washes out the texture of the selected region, giving its contents a flat aspect. Here the Canny edge
detector is used.

@param src Input 8-bit 3-channel image.
@param mask Input 8-bit 1 or 3-channel image.
@param dst Output image with the same size and type as src.
@param low_threshold Range from 0 to 100.
@param high_threshold Value \> 100.
@param kernel_size The size of the Sobel kernel to be used.

**NOTE:**

The algorithm assumes that the color of the source image is close to that of the destination. This
assumption means that when the colors don't match, the source image color gets tinted toward the
color of the destination image.
 */
CV_EXPORTS_W void textureFlattening(InputArray src, InputArray mask, OutputArray dst,
        float low_threshold = 30, float high_threshold = 45,
        int kernel_size = 3);

//! @} photo_clone

//! @addtogroup photo_render
//! @{

/** @brief Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing
filters are used in many different applications @cite EM11 .

@param src Input 8-bit 3-channel image.
@param dst Output 8-bit 3-channel image.
@param flags Edge preserving filters:
-   **RECURS_FILTER** = 1
-   **NORMCONV_FILTER** = 2
@param sigma_s Range between 0 and 200.
@param sigma_r Range between 0 and 1.
 */
CV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flags = 1,
        float sigma_s = 60, float sigma_r = 0.4f);
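/* Usage sketch (illustrative, not part of the upstream documentation): smooth a photo
   while keeping edges sharp, using the faster recursive filter variant.

       cv::Mat img = cv::imread("photo.png");
       cv::Mat smoothed;
       cv::edgePreservingFilter(img, smoothed, cv::RECURS_FILTER, 60.0f, 0.4f);
*/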

/** @brief This filter enhances the details of a particular image.

@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src.
@param sigma_s Range between 0 and 200.
@param sigma_r Range between 0 and 1.
 */
CV_EXPORTS_W void detailEnhance(InputArray src, OutputArray dst, float sigma_s = 10,
        float sigma_r = 0.15f);

/** @brief Pencil-like non-photorealistic line drawing

@param src Input 8-bit 3-channel image.
@param dst1 Output 8-bit 1-channel image.
@param dst2 Output image with the same size and type as src.
@param sigma_s Range between 0 and 200.
@param sigma_r Range between 0 and 1.
@param shade_factor Range between 0 and 0.1.
 */
CV_EXPORTS_W void pencilSketch(InputArray src, OutputArray dst1, OutputArray dst2,
        float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f);
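/* Usage sketch (illustrative, not part of the upstream documentation): produce both the
   grayscale and the color pencil-sketch renderings of a photo.

       cv::Mat img = cv::imread("photo.png");
       cv::Mat sketch_gray, sketch_color;
       cv::pencilSketch(img, sketch_gray, sketch_color, 60.0f, 0.07f, 0.02f);
*/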

/** @brief Stylization aims to produce digital imagery with a wide variety of effects not focused on
photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low
contrast while preserving, or enhancing, high-contrast features.

@param src Input 8-bit 3-channel image.
@param dst Output image with the same size and type as src.
@param sigma_s Range between 0 and 200.
@param sigma_r Range between 0 and 1.
 */
CV_EXPORTS_W void stylization(InputArray src, OutputArray dst, float sigma_s = 60,
        float sigma_r = 0.45f);
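/* Usage sketch (illustrative, not part of the upstream documentation): cartoon-like
   abstraction of a photo with the default parameters.

       cv::Mat img = cv::imread("photo.png");
       cv::Mat stylized;
       cv::stylization(img, stylized, 60.0f, 0.45f);
*/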

//! @} photo_render

//! @} photo

} // cv

#ifndef DISABLE_OPENCV_24_COMPATIBILITY
#include "opencv2/photo/photo_c.h"
#endif

#endif