• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.filterpacks.videoproc;
18 
19 import android.filterfw.core.Filter;
20 import android.filterfw.core.FilterContext;
21 import android.filterfw.core.GenerateFieldPort;
22 import android.filterfw.core.GenerateFinalPort;
23 import android.filterfw.core.Frame;
24 import android.filterfw.core.GLFrame;
25 import android.filterfw.core.FrameFormat;
26 import android.filterfw.core.MutableFrameFormat;
27 import android.filterfw.core.ShaderProgram;
28 import android.filterfw.format.ImageFormat;
29 import android.opengl.GLES20;
30 import android.os.SystemClock;
31 import android.os.SystemProperties;
32 import android.util.Log;
33 
34 import java.lang.Math;
35 import java.util.Arrays;
36 import java.nio.ByteBuffer;
37 
38 /**
39  * @hide
40  */
41 public class BackDropperFilter extends Filter {
42     /** User-visible parameters */
43 
44     private final int BACKGROUND_STRETCH   = 0;
45     private final int BACKGROUND_FIT       = 1;
46     private final int BACKGROUND_FILL_CROP = 2;
47 
48     @GenerateFieldPort(name = "backgroundFitMode", hasDefault = true)
49     private int mBackgroundFitMode = BACKGROUND_FILL_CROP;
50     @GenerateFieldPort(name = "learningDuration", hasDefault = true)
51     private int mLearningDuration = DEFAULT_LEARNING_DURATION;
52     @GenerateFieldPort(name = "learningVerifyDuration", hasDefault = true)
53     private int mLearningVerifyDuration = DEFAULT_LEARNING_VERIFY_DURATION;
54     @GenerateFieldPort(name = "acceptStddev", hasDefault = true)
55     private float mAcceptStddev = DEFAULT_ACCEPT_STDDEV;
56     @GenerateFieldPort(name = "hierLrgScale", hasDefault = true)
57     private float mHierarchyLrgScale = DEFAULT_HIER_LRG_SCALE;
58     @GenerateFieldPort(name = "hierMidScale", hasDefault = true)
59     private float mHierarchyMidScale = DEFAULT_HIER_MID_SCALE;
60     @GenerateFieldPort(name = "hierSmlScale", hasDefault = true)
61     private float mHierarchySmlScale = DEFAULT_HIER_SML_SCALE;
62 
63     // Dimensions of foreground / background mask. Optimum value should take into account only
64     // image contents, NOT dimensions of input video stream.
65     @GenerateFieldPort(name = "maskWidthExp", hasDefault = true)
66     private int mMaskWidthExp = DEFAULT_MASK_WIDTH_EXPONENT;
67     @GenerateFieldPort(name = "maskHeightExp", hasDefault = true)
68     private int mMaskHeightExp = DEFAULT_MASK_HEIGHT_EXPONENT;
69 
70     // Levels at which to compute foreground / background decision. Think of them as are deltas
71     // SUBTRACTED from maskWidthExp and maskHeightExp.
72     @GenerateFieldPort(name = "hierLrgExp", hasDefault = true)
73     private int mHierarchyLrgExp = DEFAULT_HIER_LRG_EXPONENT;
74     @GenerateFieldPort(name = "hierMidExp", hasDefault = true)
75     private int mHierarchyMidExp = DEFAULT_HIER_MID_EXPONENT;
76     @GenerateFieldPort(name = "hierSmlExp", hasDefault = true)
77     private int mHierarchySmlExp = DEFAULT_HIER_SML_EXPONENT;
78 
79     @GenerateFieldPort(name = "lumScale", hasDefault = true)
80     private float mLumScale = DEFAULT_Y_SCALE_FACTOR;
81     @GenerateFieldPort(name = "chromaScale", hasDefault = true)
82     private float mChromaScale = DEFAULT_UV_SCALE_FACTOR;
83     @GenerateFieldPort(name = "maskBg", hasDefault = true)
84     private float mMaskBg = DEFAULT_MASK_BLEND_BG;
85     @GenerateFieldPort(name = "maskFg", hasDefault = true)
86     private float mMaskFg = DEFAULT_MASK_BLEND_FG;
87     @GenerateFieldPort(name = "exposureChange", hasDefault = true)
88     private float mExposureChange = DEFAULT_EXPOSURE_CHANGE;
89     @GenerateFieldPort(name = "whitebalanceredChange", hasDefault = true)
90     private float mWhiteBalanceRedChange = DEFAULT_WHITE_BALANCE_RED_CHANGE;
91     @GenerateFieldPort(name = "whitebalanceblueChange", hasDefault = true)
92     private float mWhiteBalanceBlueChange = DEFAULT_WHITE_BALANCE_BLUE_CHANGE;
93     @GenerateFieldPort(name = "autowbToggle", hasDefault = true)
94     private int mAutoWBToggle = DEFAULT_WHITE_BALANCE_TOGGLE;
95 
96     // TODO: These are not updatable:
97     @GenerateFieldPort(name = "learningAdaptRate", hasDefault = true)
98     private float mAdaptRateLearning = DEFAULT_LEARNING_ADAPT_RATE;
99     @GenerateFieldPort(name = "adaptRateBg", hasDefault = true)
100     private float mAdaptRateBg = DEFAULT_ADAPT_RATE_BG;
101     @GenerateFieldPort(name = "adaptRateFg", hasDefault = true)
102     private float mAdaptRateFg = DEFAULT_ADAPT_RATE_FG;
103     @GenerateFieldPort(name = "maskVerifyRate", hasDefault = true)
104     private float mVerifyRate = DEFAULT_MASK_VERIFY_RATE;
105     @GenerateFieldPort(name = "learningDoneListener", hasDefault = true)
106     private LearningDoneListener mLearningDoneListener = null;
107 
108     @GenerateFieldPort(name = "useTheForce", hasDefault = true)
109     private boolean mUseTheForce = false;
110 
111     @GenerateFinalPort(name = "provideDebugOutputs", hasDefault = true)
112     private boolean mProvideDebugOutputs = false;
113 
114     // Whether to mirror the background or not. For ex, the Camera app
115     // would mirror the preview for the front camera
116     @GenerateFieldPort(name = "mirrorBg", hasDefault = true)
117     private boolean mMirrorBg = false;
118 
119     // The orientation of the display. This will change the flipping
120     // coordinates, if we were to mirror the background
121     @GenerateFieldPort(name = "orientation", hasDefault = true)
122     private int mOrientation = 0;
123 
    /** Default algorithm parameter values, for non-shader use */

    // Frame count for learning bg model
    private static final int DEFAULT_LEARNING_DURATION = 40;
    // Frame count for learning verification
    private static final int DEFAULT_LEARNING_VERIFY_DURATION = 10;
    // Maximum distance (in standard deviations) for considering a pixel as background
    private static final float DEFAULT_ACCEPT_STDDEV = 0.85f;
    // Variance threshold scale factor for large scale of hierarchy
    private static final float DEFAULT_HIER_LRG_SCALE = 0.7f;
    // Variance threshold scale factor for medium scale of hierarchy
    private static final float DEFAULT_HIER_MID_SCALE = 0.6f;
    // Variance threshold scale factor for small scale of hierarchy
    private static final float DEFAULT_HIER_SML_SCALE = 0.5f;
    // Width of foreground / background mask (power-of-two exponent; width = 2^exp)
    private static final int DEFAULT_MASK_WIDTH_EXPONENT = 8;
    // Height of foreground / background mask (power-of-two exponent; height = 2^exp)
    private static final int DEFAULT_MASK_HEIGHT_EXPONENT = 8;
    // Area over which to average for large scale (length in pixels = 2^HIERARCHY_*_EXPONENT)
    private static final int DEFAULT_HIER_LRG_EXPONENT = 3;
    // Area over which to average for medium scale
    private static final int DEFAULT_HIER_MID_EXPONENT = 2;
    // Area over which to average for small scale
    private static final int DEFAULT_HIER_SML_EXPONENT = 0;
    // Scale factor for luminance channel in distance calculations (larger = more significant)
    private static final float DEFAULT_Y_SCALE_FACTOR = 0.40f;
    // Scale factor for chroma channels in distance calculations
    private static final float DEFAULT_UV_SCALE_FACTOR = 1.35f;
    // Mask value to start blending away from background
    private static final float DEFAULT_MASK_BLEND_BG = 0.65f;
    // Mask value to start blending away from foreground
    private static final float DEFAULT_MASK_BLEND_FG = 0.95f;
    // Exposure stop number to change the brightness of foreground
    private static final float DEFAULT_EXPOSURE_CHANGE = 1.0f;
    // White balance change in Red channel for foreground
    private static final float DEFAULT_WHITE_BALANCE_RED_CHANGE = 0.0f;
    // White balance change in Blue channel for foreground
    private static final float DEFAULT_WHITE_BALANCE_BLUE_CHANGE = 0.0f;
    // Variable to control automatic white balance effect
    // 0.f -> Auto WB is off; 1.f-> Auto WB is on
    private static final int DEFAULT_WHITE_BALANCE_TOGGLE = 0;

    // Default rate at which to learn bg model during learning period
    private static final float DEFAULT_LEARNING_ADAPT_RATE = 0.2f;
    // Default rate at which to learn bg model from new background pixels
    private static final float DEFAULT_ADAPT_RATE_BG = 0.0f;
    // Default rate at which to learn bg model from new foreground pixels
    private static final float DEFAULT_ADAPT_RATE_FG = 0.0f;
    // Default rate at which to verify whether background is stable
    private static final float DEFAULT_MASK_VERIFY_RATE = 0.25f;
    // Threshold used when deciding that background learning is done.
    // NOTE(review): the original comment here was a copy-paste duplicate of the
    // verify-rate comment above; confirm exact semantics against the use site.
    private static final int   DEFAULT_LEARNING_DONE_THRESHOLD = 20;

    // Default 3x3 matrix, column major, for fitting background 1:1
    private static final float[] DEFAULT_BG_FIT_TRANSFORM = new float[] {
        1.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f,
        0.0f, 0.0f, 1.0f
    };
183 
    /** Default algorithm parameter values, for shader use */

    // These are Strings because they are spliced directly into GLSL source below.
    // Area over which to blur binary mask values (length in pixels = 2^MASK_SMOOTH_EXPONENT)
    private static final String MASK_SMOOTH_EXPONENT = "2.0";
    // Scale value for mapping variance distance to fit nicely to 0-1, 8-bit
    private static final String DISTANCE_STORAGE_SCALE = "0.6";
    // Scale value for mapping variance to fit nicely to 0-1, 8-bit
    private static final String VARIANCE_STORAGE_SCALE = "5.0";
    // Default scale of auto white balance parameters
    private static final String DEFAULT_AUTO_WB_SCALE = "0.25";
    // Minimum variance (0-255 scale)
    private static final String MIN_VARIANCE = "3.0";
    // Column-major array for 4x4 matrix converting RGB to YCbCr, JPEG definition (no pedestal)
    private static final String RGB_TO_YUV_MATRIX = "0.299, -0.168736,  0.5,      0.000, " +
                                                    "0.587, -0.331264, -0.418688, 0.000, " +
                                                    "0.114,  0.5,      -0.081312, 0.000, " +
                                                    "0.000,  0.5,       0.5,      1.000 ";
    /** Stream names */

    // NOTE(review): these are static finals, so the "m" instance-field prefix is
    // non-standard; left unchanged because they are referenced throughout the file.
    private static final String[] mInputNames = {"video",
                                                 "background"};

    private static final String[] mOutputNames = {"video"};

    private static final String[] mDebugOutputNames = {"debug1",
                                                       "debug2"};
210 
    /** Other private variables */

    // Frame formats, created lazily from the first input frame (see createMemoryFormat).
    private FrameFormat mOutputFormat;
    private MutableFrameFormat mMemoryFormat;
    private MutableFrameFormat mMaskFormat;
    private MutableFrameFormat mAverageFormat;

    // Cached result of Log.isLoggable(TAG, VERBOSE), set once in the constructor.
    private final boolean mLogVerbose;
    private static final String TAG = "BackDropperFilter";

    /** Shader source code */

    // Shared uniforms and utility functions, prepended to the per-stage shaders below.
    // NOTE(review): never reassigned in the visible code; could likely be final —
    // confirm against the rest of the file before changing.
    private static String mSharedUtilShader =
            "precision mediump float;\n" +
            "uniform float fg_adapt_rate;\n" +
            "uniform float bg_adapt_rate;\n" +
            "const mat4 coeff_yuv = mat4(" + RGB_TO_YUV_MATRIX + ");\n" +
            "const float dist_scale = " + DISTANCE_STORAGE_SCALE + ";\n" +
            "const float inv_dist_scale = 1. / dist_scale;\n" +
            "const float var_scale=" + VARIANCE_STORAGE_SCALE + ";\n" +
            "const float inv_var_scale = 1. / var_scale;\n" +
            "const float min_variance = inv_var_scale *" + MIN_VARIANCE + "/ 256.;\n" +
            "const float auto_wb_scale = " + DEFAULT_AUTO_WB_SCALE + ";\n" +
            "\n" +
            // Variance distance in luminance between current pixel and background model
            "float gauss_dist_y(float y, float mean, float variance) {\n" +
            "  float dist = (y - mean) * (y - mean) / variance;\n" +
            "  return dist;\n" +
            "}\n" +
            // Sum of variance distances in chroma between current pixel and background
            // model
            "float gauss_dist_uv(vec2 uv, vec2 mean, vec2 variance) {\n" +
            "  vec2 dist = (uv - mean) * (uv - mean) / variance;\n" +
            "  return dist.r + dist.g;\n" +
            "}\n" +
            // Select learning rate for pixel based on smoothed decision mask alpha
            "float local_adapt_rate(float alpha) {\n" +
            "  return mix(bg_adapt_rate, fg_adapt_rate, alpha);\n" +
            "}\n" +
            "\n";
252 
    // Distance calculation shader. Calculates a distance metric between the foreground and the
    //   current background model, in both luminance and in chroma (yuv space).  Distance is
    //   measured in variances from the mean background value. For chroma, the distance is the sum
    //   of the two individual color channel distances. The distances are output on the b and alpha
    //   channels, r and g are for debug information.
    // Inputs:
    //   tex_sampler_0: Mip-map for foreground (live) video frame.
    //   tex_sampler_1: Background mean mask.
    //   tex_sampler_2: Background variance mask.
    //   subsample_level: Level on foreground frame's mip-map.
    // Output channels: r,g = 0.5*fg luma/chroma (debug), b = scaled Y distance,
    //   a = scaled UV distance (both scaled by dist_scale for 8-bit storage).
    private static final String mBgDistanceShader =
            "uniform sampler2D tex_sampler_0;\n" +
            "uniform sampler2D tex_sampler_1;\n" +
            "uniform sampler2D tex_sampler_2;\n" +
            "uniform float subsample_level;\n" +
            "varying vec2 v_texcoord;\n" +
            "void main() {\n" +
            "  vec4 fg_rgb = texture2D(tex_sampler_0, v_texcoord, subsample_level);\n" +
            "  vec4 fg = coeff_yuv * vec4(fg_rgb.rgb, 1.);\n" +
            "  vec4 mean = texture2D(tex_sampler_1, v_texcoord);\n" +
            "  vec4 variance = inv_var_scale * texture2D(tex_sampler_2, v_texcoord);\n" +
            "\n" +
            "  float dist_y = gauss_dist_y(fg.r, mean.r, variance.r);\n" +
            "  float dist_uv = gauss_dist_uv(fg.gb, mean.gb, variance.gb);\n" +
            "  gl_FragColor = vec4(0.5*fg.rg, dist_scale*dist_y, dist_scale*dist_uv);\n" +
            "}\n";
279 
    // Foreground/background mask decision shader. Decides whether a frame is in the foreground or
    //   the background using a hierarchical threshold on the distance. Binary foreground/background
    //   mask is placed in the alpha channel. The RGB channels contain debug information.
    //   The three scales sample the distance texture at increasingly fine mip levels;
    //   a pixel flagged foreground at a coarser scale stays foreground at finer ones.
    private static final String mBgMaskShader =
            "uniform sampler2D tex_sampler_0;\n" +
            "uniform float accept_variance;\n" +
            "uniform vec2 yuv_weights;\n" +
            "uniform float scale_lrg;\n" +
            "uniform float scale_mid;\n" +
            "uniform float scale_sml;\n" +
            "uniform float exp_lrg;\n" +
            "uniform float exp_mid;\n" +
            "uniform float exp_sml;\n" +
            "varying vec2 v_texcoord;\n" +
            // Decide whether pixel is foreground or background based on Y and UV
            //   distance and maximum acceptable variance.
            // yuv_weights.x is smaller than yuv_weights.y to discount the influence of shadow
            "bool is_fg(vec2 dist_yc, float accept_variance) {\n" +
            "  return ( dot(yuv_weights, dist_yc) >= accept_variance );\n" +
            "}\n" +
            "void main() {\n" +
            "  vec4 dist_lrg_sc = texture2D(tex_sampler_0, v_texcoord, exp_lrg);\n" +
            "  vec4 dist_mid_sc = texture2D(tex_sampler_0, v_texcoord, exp_mid);\n" +
            "  vec4 dist_sml_sc = texture2D(tex_sampler_0, v_texcoord, exp_sml);\n" +
            "  vec2 dist_lrg = inv_dist_scale * dist_lrg_sc.ba;\n" +
            "  vec2 dist_mid = inv_dist_scale * dist_mid_sc.ba;\n" +
            "  vec2 dist_sml = inv_dist_scale * dist_sml_sc.ba;\n" +
            "  vec2 norm_dist = 0.75 * dist_sml / accept_variance;\n" + // For debug viz
            "  bool is_fg_lrg = is_fg(dist_lrg, accept_variance * scale_lrg);\n" +
            "  bool is_fg_mid = is_fg_lrg || is_fg(dist_mid, accept_variance * scale_mid);\n" +
            "  float is_fg_sml =\n" +
            "      float(is_fg_mid || is_fg(dist_sml, accept_variance * scale_sml));\n" +
            "  float alpha = 0.5 * is_fg_sml + 0.3 * float(is_fg_mid) + 0.2 * float(is_fg_lrg);\n" +
            "  gl_FragColor = vec4(alpha, norm_dist, is_fg_sml);\n" +
            "}\n";
315 
    // Automatic White Balance parameter decision shader
    // Use the Gray World assumption that in a white balance corrected image, the average of R, G, B
    //   channel will be a common gray value.
    // To match the white balance of foreground and background, the average of R, G, B channel of
    //   two videos should match.
    // Inputs:
    //   tex_sampler_0: Mip-map for foreground (live) video frame.
    //   tex_sampler_1: Mip-map for background (playback) video frame.
    //   pyramid_depth: Depth of input frames' mip-maps (sampling at full depth
    //     yields the per-frame average color).
    private static final String mAutomaticWhiteBalance =
            "uniform sampler2D tex_sampler_0;\n" +
            "uniform sampler2D tex_sampler_1;\n" +
            "uniform float pyramid_depth;\n" +
            "uniform bool autowb_toggle;\n" +
            "varying vec2 v_texcoord;\n" +
            "void main() {\n" +
            "   vec4 mean_video = texture2D(tex_sampler_0, v_texcoord, pyramid_depth);\n"+
            "   vec4 mean_bg = texture2D(tex_sampler_1, v_texcoord, pyramid_depth);\n" +
            // If Auto WB is toggled off, the return texture will be a unicolor texture of value 1
            // If Auto WB is toggled on, the return texture will be a unicolor texture with
            //   adjustment parameters for R and B channels stored in the corresponding channel
            // (both cases are pre-multiplied by auto_wb_scale to fit 8-bit storage;
            //  mBgSubtractShader divides the scale back out)
            "   float green_normalizer = mean_video.g / mean_bg.g;\n"+
            "   vec4 adjusted_value = vec4(mean_bg.r / mean_video.r * green_normalizer, 1., \n" +
            "                         mean_bg.b / mean_video.b * green_normalizer, 1.) * auto_wb_scale; \n" +
            "   gl_FragColor = autowb_toggle ? adjusted_value : vec4(auto_wb_scale);\n" +
            "}\n";
342 
343 
    // Background subtraction shader. Uses a mipmap of the binary mask map to blend smoothly between
    //   foreground and background
    // Inputs:
    //   tex_sampler_0: Foreground (live) video frame.
    //   tex_sampler_1: Background (playback) video frame.
    //   tex_sampler_2: Foreground/background mask.
    //   tex_sampler_3: Auto white-balance factors.
    // NOTE: this string deliberately does NOT close main() — it ends without the
    //   final "}". mBgSubtractForceShader below continues the same function body
    //   (it references fg_adjusted, bg_rgb, and mask defined here); presumably a
    //   plain "}" is appended instead when the force shader is not used — confirm
    //   at the shader-assembly site (not in view).
    private static final String mBgSubtractShader =
            "uniform mat3 bg_fit_transform;\n" +
            "uniform float mask_blend_bg;\n" +
            "uniform float mask_blend_fg;\n" +
            "uniform float exposure_change;\n" +
            "uniform float whitebalancered_change;\n" +
            "uniform float whitebalanceblue_change;\n" +
            "uniform sampler2D tex_sampler_0;\n" +
            "uniform sampler2D tex_sampler_1;\n" +
            "uniform sampler2D tex_sampler_2;\n" +
            "uniform sampler2D tex_sampler_3;\n" +
            "varying vec2 v_texcoord;\n" +
            "void main() {\n" +
            "  vec2 bg_texcoord = (bg_fit_transform * vec3(v_texcoord, 1.)).xy;\n" +
            "  vec4 bg_rgb = texture2D(tex_sampler_1, bg_texcoord);\n" +
            // The foreground texture is modified by multiplying both manual and auto white balance changes in R and B
            //   channel and multiplying exposure change in all R, G, B channels.
            "  vec4 wb_auto_scale = texture2D(tex_sampler_3, v_texcoord) * exposure_change / auto_wb_scale;\n" +
            "  vec4 wb_manual_scale = vec4(1. + whitebalancered_change, 1., 1. + whitebalanceblue_change, 1.);\n" +
            "  vec4 fg_rgb = texture2D(tex_sampler_0, v_texcoord);\n" +
            "  vec4 fg_adjusted = fg_rgb * wb_manual_scale * wb_auto_scale;\n"+
            "  vec4 mask = texture2D(tex_sampler_2, v_texcoord, \n" +
            "                      " + MASK_SMOOTH_EXPONENT + ");\n" +
            "  float alpha = smoothstep(mask_blend_bg, mask_blend_fg, mask.a);\n" +
            "  gl_FragColor = mix(bg_rgb, fg_adjusted, alpha);\n";

    // May the Force... Makes the foreground object translucent blue, with a bright
    // blue-white outline
    // (Continuation of mBgSubtractShader's main(); supplies the closing "}".)
    private static final String mBgSubtractForceShader =
            "  vec4 ghost_rgb = (fg_adjusted * 0.7 + vec4(0.3,0.3,0.4,0.))*0.65 + \n" +
            "                   0.35*bg_rgb;\n" +
            "  float glow_start = 0.75 * mask_blend_bg; \n"+
            "  float glow_max   = mask_blend_bg; \n"+
            "  gl_FragColor = mask.a < glow_start ? bg_rgb : \n" +
            "                 mask.a < glow_max ? mix(bg_rgb, vec4(0.9,0.9,1.0,1.0), \n" +
            "                                     (mask.a - glow_start) / (glow_max - glow_start) ) : \n" +
            "                 mask.a < mask_blend_fg ? mix(vec4(0.9,0.9,1.0,1.0), ghost_rgb, \n" +
            "                                    (mask.a - glow_max) / (mask_blend_fg - glow_max) ) : \n" +
            "                 ghost_rgb;\n" +
            "}\n";
391 
    // Background model mean update shader. Skews the current model mean toward the most recent pixel
    //   value for a pixel, weighted by the learning rate and by whether the pixel is classified as
    //   foreground or background.
    // Inputs:
    //   tex_sampler_0: Mip-map for foreground (live) video frame.
    //   tex_sampler_1: Background mean mask.
    //   tex_sampler_2: Foreground/background mask.
    //   subsample_level: Level on foreground frame's mip-map.
    // Output: the new YUV mean, blended via local_adapt_rate(mask.a).
    private static final String mUpdateBgModelMeanShader =
            "uniform sampler2D tex_sampler_0;\n" +
            "uniform sampler2D tex_sampler_1;\n" +
            "uniform sampler2D tex_sampler_2;\n" +
            "uniform float subsample_level;\n" +
            "varying vec2 v_texcoord;\n" +
            "void main() {\n" +
            "  vec4 fg_rgb = texture2D(tex_sampler_0, v_texcoord, subsample_level);\n" +
            "  vec4 fg = coeff_yuv * vec4(fg_rgb.rgb, 1.);\n" +
            "  vec4 mean = texture2D(tex_sampler_1, v_texcoord);\n" +
            "  vec4 mask = texture2D(tex_sampler_2, v_texcoord, \n" +
            "                      " + MASK_SMOOTH_EXPONENT + ");\n" +
            "\n" +
            "  float alpha = local_adapt_rate(mask.a);\n" +
            "  vec4 new_mean = mix(mean, fg, alpha);\n" +
            "  gl_FragColor = new_mean;\n" +
            "}\n";
417 
    // Background model variance update shader. Skews the current model variance toward the most
    //   recent variance for the pixel, weighted by the learning rate and by whether the pixel is
    //   classified as foreground or background.
    // Inputs:
    //   tex_sampler_0: Mip-map for foreground (live) video frame.
    //   tex_sampler_1: Background mean mask.
    //   tex_sampler_2: Background variance mask.
    //   tex_sampler_3: Foreground/background mask.
    //   subsample_level: Level on foreground frame's mip-map.
    // Output: the new variance, clamped to min_variance and re-scaled by var_scale
    //   for 8-bit storage.
    // TODO: to improve efficiency, use single mark for mean + variance, then merge this into
    // mUpdateBgModelMeanShader.
    private static final String mUpdateBgModelVarianceShader =
            "uniform sampler2D tex_sampler_0;\n" +
            "uniform sampler2D tex_sampler_1;\n" +
            "uniform sampler2D tex_sampler_2;\n" +
            "uniform sampler2D tex_sampler_3;\n" +
            "uniform float subsample_level;\n" +
            "varying vec2 v_texcoord;\n" +
            "void main() {\n" +
            "  vec4 fg_rgb = texture2D(tex_sampler_0, v_texcoord, subsample_level);\n" +
            "  vec4 fg = coeff_yuv * vec4(fg_rgb.rgb, 1.);\n" +
            "  vec4 mean = texture2D(tex_sampler_1, v_texcoord);\n" +
            "  vec4 variance = inv_var_scale * texture2D(tex_sampler_2, v_texcoord);\n" +
            "  vec4 mask = texture2D(tex_sampler_3, v_texcoord, \n" +
            "                      " + MASK_SMOOTH_EXPONENT + ");\n" +
            "\n" +
            "  float alpha = local_adapt_rate(mask.a);\n" +
            "  vec4 cur_variance = (fg-mean)*(fg-mean);\n" +
            "  vec4 new_variance = mix(variance, cur_variance, alpha);\n" +
            "  new_variance = max(new_variance, vec4(min_variance));\n" +
            "  gl_FragColor = var_scale * new_variance;\n" +
            "}\n";
450 
    // Background verification shader. Skews the current background verification mask towards the
    //   most recent frame, weighted by the learning rate.
    // Inputs:
    //   tex_sampler_0: Previous verification mask.
    //   tex_sampler_1: Current foreground/background mask.
    //   verify_rate: Blend weight toward the current mask.
    // Output: alpha holds the updated verification value; rgb are zeroed.
    private static final String mMaskVerifyShader =
            "uniform sampler2D tex_sampler_0;\n" +
            "uniform sampler2D tex_sampler_1;\n" +
            "uniform float verify_rate;\n" +
            "varying vec2 v_texcoord;\n" +
            "void main() {\n" +
            "  vec4 lastmask = texture2D(tex_sampler_0, v_texcoord);\n" +
            "  vec4 mask = texture2D(tex_sampler_1, v_texcoord);\n" +
            "  float newmask = mix(lastmask.a, mask.a, verify_rate);\n" +
            "  gl_FragColor = vec4(0., 0., 0., newmask);\n" +
            "}\n";
464 
    /** Shader program objects */

    // Handles to the compiled shader programs; presumably built from the source
    // strings above during GL setup (not in view).
    private ShaderProgram mBgDistProgram;
    private ShaderProgram mBgMaskProgram;
    private ShaderProgram mBgSubtractProgram;
    private ShaderProgram mBgUpdateMeanProgram;
    private ShaderProgram mBgUpdateVarianceProgram;
    private ShaderProgram mCopyOutProgram;
    private ShaderProgram mAutomaticWhiteBalanceProgram;
    private ShaderProgram mMaskVerifyProgram;
    // Identity copy program, created in prepare().
    // NOTE(review): name breaks the m-prefix convention; renaming would touch
    // code outside this view, so left unchanged.
    private ShaderProgram copyShaderProgram;

    /** Background model storage */

    // The two-element arrays below are double buffers; mPingPong presumably
    // selects the current read/write pair — confirm against the processing code.
    private boolean mPingPong;
    private GLFrame mBgMean[];
    private GLFrame mBgVariance[];
    private GLFrame mMaskVerify[];
    private GLFrame mDistance;
    private GLFrame mAutoWB;
    private GLFrame mMask;
    private GLFrame mVideoInput;
    private GLFrame mBgInput;
    private GLFrame mMaskAverage;

    /** Overall filter state */

    // NOTE(review): non-standard name (no m-prefix); left unchanged.
    private boolean isOpen;
    private int mFrameCount;
    private boolean mStartLearning;
    private boolean mBackgroundFitModeChanged;
    private float mRelativeAspect;
    // Mip-map depth of the memory-format frames (see createMemoryFormat).
    private int mPyramidDepth;
    // Mip level at which a memory frame matches the mask dimensions.
    private int mSubsampleLevel;
499 
    /** Learning listener object */

    /**
     * Callback interface for the "learningDoneListener" port; notified when
     * background-model learning finishes.
     */
    public interface LearningDoneListener {
        /** @param filter the filter whose learning phase has completed */
        public void onLearningDone(BackDropperFilter filter);
    }
505 
506     /** Public Filter methods */
507 
BackDropperFilter(String name)508     public BackDropperFilter(String name) {
509         super(name);
510 
511         mLogVerbose = Log.isLoggable(TAG, Log.VERBOSE);
512 
513         String adjStr = SystemProperties.get("ro.media.effect.bgdropper.adj");
514         if (adjStr.length() > 0) {
515             try {
516                 mAcceptStddev += Float.parseFloat(adjStr);
517                 if (mLogVerbose) {
518                     Log.v(TAG, "Adjusting accept threshold by " + adjStr +
519                             ", now " + mAcceptStddev);
520                 }
521             } catch (NumberFormatException e) {
522                 Log.e(TAG,
523                         "Badly formatted property ro.media.effect.bgdropper.adj: " + adjStr);
524             }
525         }
526     }
527 
528     @Override
setupPorts()529     public void setupPorts() {
530         // Inputs.
531         // TODO: Target should be GPU, but relaxed for now.
532         FrameFormat imageFormat = ImageFormat.create(ImageFormat.COLORSPACE_RGBA,
533                                                      FrameFormat.TARGET_UNSPECIFIED);
534         for (String inputName : mInputNames) {
535             addMaskedInputPort(inputName, imageFormat);
536         }
537         // Normal outputs
538         for (String outputName : mOutputNames) {
539             addOutputBasedOnInput(outputName, "video");
540         }
541 
542         // Debug outputs
543         if (mProvideDebugOutputs) {
544             for (String outputName : mDebugOutputNames) {
545                 addOutputBasedOnInput(outputName, "video");
546             }
547         }
548     }
549 
550     @Override
getOutputFormat(String portName, FrameFormat inputFormat)551     public FrameFormat getOutputFormat(String portName, FrameFormat inputFormat) {
552         // Create memory format based on video input.
553         MutableFrameFormat format = inputFormat.mutableCopy();
554         // Is this a debug output port? If so, leave dimensions unspecified.
555         if (!Arrays.asList(mOutputNames).contains(portName)) {
556             format.setDimensions(FrameFormat.SIZE_UNSPECIFIED, FrameFormat.SIZE_UNSPECIFIED);
557         }
558         return format;
559     }
560 
createMemoryFormat(FrameFormat inputFormat)561     private boolean createMemoryFormat(FrameFormat inputFormat) {
562         // We can't resize because that would require re-learning.
563         if (mMemoryFormat != null) {
564             return false;
565         }
566 
567         if (inputFormat.getWidth() == FrameFormat.SIZE_UNSPECIFIED ||
568             inputFormat.getHeight() == FrameFormat.SIZE_UNSPECIFIED) {
569             throw new RuntimeException("Attempting to process input frame with unknown size");
570         }
571 
572         mMaskFormat = inputFormat.mutableCopy();
573         int maskWidth = (int)Math.pow(2, mMaskWidthExp);
574         int maskHeight = (int)Math.pow(2, mMaskHeightExp);
575         mMaskFormat.setDimensions(maskWidth, maskHeight);
576 
577         mPyramidDepth = Math.max(mMaskWidthExp, mMaskHeightExp);
578         mMemoryFormat = mMaskFormat.mutableCopy();
579         int widthExp = Math.max(mMaskWidthExp, pyramidLevel(inputFormat.getWidth()));
580         int heightExp = Math.max(mMaskHeightExp, pyramidLevel(inputFormat.getHeight()));
581         mPyramidDepth = Math.max(widthExp, heightExp);
582         int memWidth = Math.max(maskWidth, (int)Math.pow(2, widthExp));
583         int memHeight = Math.max(maskHeight, (int)Math.pow(2, heightExp));
584         mMemoryFormat.setDimensions(memWidth, memHeight);
585         mSubsampleLevel = mPyramidDepth - Math.max(mMaskWidthExp, mMaskHeightExp);
586 
587         if (mLogVerbose) {
588             Log.v(TAG, "Mask frames size " + maskWidth + " x " + maskHeight);
589             Log.v(TAG, "Pyramid levels " + widthExp + " x " + heightExp);
590             Log.v(TAG, "Memory frames size " + memWidth + " x " + memHeight);
591         }
592 
593         mAverageFormat = inputFormat.mutableCopy();
594         mAverageFormat.setDimensions(1,1);
595         return true;
596     }
597 
    // Called once before processing starts: allocates the double-buffered
    // (ping-pong) background-model frame arrays and the identity shader used
    // to blit input frames into GL textures. The frames themselves are
    // allocated lazily in allocateFrames() once the input size is known.
    public void prepare(FilterContext context){
        if (mLogVerbose) Log.v(TAG, "Preparing BackDropperFilter!");

        // Index [0]/[1] alternate as read/write targets each frame.
        mBgMean = new GLFrame[2];
        mBgVariance = new GLFrame[2];
        mMaskVerify = new GLFrame[2];
        copyShaderProgram = ShaderProgram.createIdentity(context);
    }
606 
allocateFrames(FrameFormat inputFormat, FilterContext context)607     private void allocateFrames(FrameFormat inputFormat, FilterContext context) {
608         if (!createMemoryFormat(inputFormat)) {
609             return;  // All set.
610         }
611         if (mLogVerbose) Log.v(TAG, "Allocating BackDropperFilter frames");
612 
613         // Create initial background model values
614         int numBytes = mMaskFormat.getSize();
615         byte[] initialBgMean = new byte[numBytes];
616         byte[] initialBgVariance = new byte[numBytes];
617         byte[] initialMaskVerify = new byte[numBytes];
618         for (int i = 0; i < numBytes; i++) {
619             initialBgMean[i] = (byte)128;
620             initialBgVariance[i] = (byte)10;
621             initialMaskVerify[i] = (byte)0;
622         }
623 
624         // Get frames to store background model in
625         for (int i = 0; i < 2; i++) {
626             mBgMean[i] = (GLFrame)context.getFrameManager().newFrame(mMaskFormat);
627             mBgMean[i].setData(initialBgMean, 0, numBytes);
628 
629             mBgVariance[i] = (GLFrame)context.getFrameManager().newFrame(mMaskFormat);
630             mBgVariance[i].setData(initialBgVariance, 0, numBytes);
631 
632             mMaskVerify[i] = (GLFrame)context.getFrameManager().newFrame(mMaskFormat);
633             mMaskVerify[i].setData(initialMaskVerify, 0, numBytes);
634         }
635 
636         // Get frames to store other textures in
637         if (mLogVerbose) Log.v(TAG, "Done allocating texture for Mean and Variance objects!");
638 
639         mDistance = (GLFrame)context.getFrameManager().newFrame(mMaskFormat);
640         mMask = (GLFrame)context.getFrameManager().newFrame(mMaskFormat);
641         mAutoWB = (GLFrame)context.getFrameManager().newFrame(mAverageFormat);
642         mVideoInput = (GLFrame)context.getFrameManager().newFrame(mMemoryFormat);
643         mBgInput = (GLFrame)context.getFrameManager().newFrame(mMemoryFormat);
644         mMaskAverage = (GLFrame)context.getFrameManager().newFrame(mAverageFormat);
645 
646         // Create shader programs
647         mBgDistProgram = new ShaderProgram(context, mSharedUtilShader + mBgDistanceShader);
648         mBgDistProgram.setHostValue("subsample_level", (float)mSubsampleLevel);
649 
650         mBgMaskProgram = new ShaderProgram(context, mSharedUtilShader + mBgMaskShader);
651         mBgMaskProgram.setHostValue("accept_variance", mAcceptStddev * mAcceptStddev);
652         float[] yuvWeights = { mLumScale, mChromaScale };
653         mBgMaskProgram.setHostValue("yuv_weights", yuvWeights );
654         mBgMaskProgram.setHostValue("scale_lrg", mHierarchyLrgScale);
655         mBgMaskProgram.setHostValue("scale_mid", mHierarchyMidScale);
656         mBgMaskProgram.setHostValue("scale_sml", mHierarchySmlScale);
657         mBgMaskProgram.setHostValue("exp_lrg", (float)(mSubsampleLevel + mHierarchyLrgExp));
658         mBgMaskProgram.setHostValue("exp_mid", (float)(mSubsampleLevel + mHierarchyMidExp));
659         mBgMaskProgram.setHostValue("exp_sml", (float)(mSubsampleLevel + mHierarchySmlExp));
660 
661         if (mUseTheForce) {
662             mBgSubtractProgram = new ShaderProgram(context, mSharedUtilShader + mBgSubtractShader + mBgSubtractForceShader);
663         } else {
664             mBgSubtractProgram = new ShaderProgram(context, mSharedUtilShader + mBgSubtractShader + "}\n");
665         }
666         mBgSubtractProgram.setHostValue("bg_fit_transform", DEFAULT_BG_FIT_TRANSFORM);
667         mBgSubtractProgram.setHostValue("mask_blend_bg", mMaskBg);
668         mBgSubtractProgram.setHostValue("mask_blend_fg", mMaskFg);
669         mBgSubtractProgram.setHostValue("exposure_change", mExposureChange);
670         mBgSubtractProgram.setHostValue("whitebalanceblue_change", mWhiteBalanceBlueChange);
671         mBgSubtractProgram.setHostValue("whitebalancered_change", mWhiteBalanceRedChange);
672 
673 
674         mBgUpdateMeanProgram = new ShaderProgram(context, mSharedUtilShader + mUpdateBgModelMeanShader);
675         mBgUpdateMeanProgram.setHostValue("subsample_level", (float)mSubsampleLevel);
676 
677         mBgUpdateVarianceProgram = new ShaderProgram(context, mSharedUtilShader + mUpdateBgModelVarianceShader);
678         mBgUpdateVarianceProgram.setHostValue("subsample_level", (float)mSubsampleLevel);
679 
680         mCopyOutProgram = ShaderProgram.createIdentity(context);
681 
682         mAutomaticWhiteBalanceProgram = new ShaderProgram(context, mSharedUtilShader + mAutomaticWhiteBalance);
683         mAutomaticWhiteBalanceProgram.setHostValue("pyramid_depth", (float)mPyramidDepth);
684         mAutomaticWhiteBalanceProgram.setHostValue("autowb_toggle", mAutoWBToggle);
685 
686         mMaskVerifyProgram = new ShaderProgram(context, mSharedUtilShader + mMaskVerifyShader);
687         mMaskVerifyProgram.setHostValue("verify_rate", mVerifyRate);
688 
689         if (mLogVerbose) Log.v(TAG, "Shader width set to " + mMemoryFormat.getWidth());
690 
691         mRelativeAspect = 1.f;
692 
693         mFrameCount = 0;
694         mStartLearning = true;
695     }
696 
    /**
     * Per-frame processing. Pulls the "video" and "background" inputs, runs
     * the learning / verification state machine on the background model, and
     * pushes either the raw video (while learning) or the
     * background-substituted frame (after learning). Also updates the running
     * background mean/variance model and emits optional debug outputs.
     */
    public void process(FilterContext context) {
        // Grab inputs and ready intermediate frames and outputs.
        Frame video = pullInput("video");
        Frame background = pullInput("background");
        allocateFrames(video.getFormat(), context);

        // Update learning rate after initial learning period
        if (mStartLearning) {
            if (mLogVerbose) Log.v(TAG, "Starting learning");
            // Fast adaptation rates while the initial model is learned.
            mBgUpdateMeanProgram.setHostValue("bg_adapt_rate", mAdaptRateLearning);
            mBgUpdateMeanProgram.setHostValue("fg_adapt_rate", mAdaptRateLearning);
            mBgUpdateVarianceProgram.setHostValue("bg_adapt_rate", mAdaptRateLearning);
            mBgUpdateVarianceProgram.setHostValue("fg_adapt_rate", mAdaptRateLearning);
            mFrameCount = 0;
        }

        // Select correct pingpong buffers
        int inputIndex = mPingPong ? 0 : 1;
        int outputIndex = mPingPong ? 1 : 0;
        mPingPong = !mPingPong;

        // Check relative aspect ratios
        updateBgScaling(video, background, mBackgroundFitModeChanged);
        mBackgroundFitModeChanged = false;

        // Make copies for input frames to GLFrames

        copyShaderProgram.process(video, mVideoInput);
        copyShaderProgram.process(background, mBgInput);

        // Mipmaps let the shaders sample subsampled pyramid levels of each
        // input texture.
        mVideoInput.generateMipMap();
        mVideoInput.setTextureParameter(GLES20.GL_TEXTURE_MIN_FILTER,
                                        GLES20.GL_LINEAR_MIPMAP_NEAREST);

        mBgInput.generateMipMap();
        mBgInput.setTextureParameter(GLES20.GL_TEXTURE_MIN_FILTER,
                                     GLES20.GL_LINEAR_MIPMAP_NEAREST);

        if (mStartLearning) {
            // Seed the background mean with the first video frame.
            copyShaderProgram.process(mVideoInput, mBgMean[inputIndex]);
            mStartLearning = false;
        }

        // Process shaders
        Frame[] distInputs = { mVideoInput, mBgMean[inputIndex], mBgVariance[inputIndex] };
        mBgDistProgram.process(distInputs, mDistance);
        mDistance.generateMipMap();
        mDistance.setTextureParameter(GLES20.GL_TEXTURE_MIN_FILTER,
                                      GLES20.GL_LINEAR_MIPMAP_NEAREST);

        mBgMaskProgram.process(mDistance, mMask);
        mMask.generateMipMap();
        mMask.setTextureParameter(GLES20.GL_TEXTURE_MIN_FILTER,
                                  GLES20.GL_LINEAR_MIPMAP_NEAREST);

        Frame[] autoWBInputs = { mVideoInput, mBgInput };
        mAutomaticWhiteBalanceProgram.process(autoWBInputs, mAutoWB);

        if (mFrameCount <= mLearningDuration) {
            // During learning
            pushOutput("video", video);

            if (mFrameCount == mLearningDuration - mLearningVerifyDuration) {
                // Entering the verification stage: snapshot the current mask
                // and switch to the normal (slower) adaptation rates.
                copyShaderProgram.process(mMask, mMaskVerify[outputIndex]);

                mBgUpdateMeanProgram.setHostValue("bg_adapt_rate", mAdaptRateBg);
                mBgUpdateMeanProgram.setHostValue("fg_adapt_rate", mAdaptRateFg);
                mBgUpdateVarianceProgram.setHostValue("bg_adapt_rate", mAdaptRateBg);
                mBgUpdateVarianceProgram.setHostValue("fg_adapt_rate", mAdaptRateFg);


            } else if (mFrameCount > mLearningDuration - mLearningVerifyDuration) {
                // In the learning verification stage, compute background masks and a weighted average
                //   with weights grow exponentially with time
                Frame[] maskVerifyInputs = {mMaskVerify[inputIndex], mMask};
                mMaskVerifyProgram.process(maskVerifyInputs, mMaskVerify[outputIndex]);
                mMaskVerify[outputIndex].generateMipMap();
                mMaskVerify[outputIndex].setTextureParameter(GLES20.GL_TEXTURE_MIN_FILTER,
                                                             GLES20.GL_LINEAR_MIPMAP_NEAREST);
            }

            if (mFrameCount == mLearningDuration) {
                // In the last verification frame, verify if the verification mask is almost blank
                // If not, restart learning
                copyShaderProgram.process(mMaskVerify[outputIndex], mMaskAverage);
                ByteBuffer mMaskAverageByteBuffer = mMaskAverage.getData();
                // NOTE(review): assumes the frame's ByteBuffer is array-backed;
                // confirm GLFrame.getData() guarantees this, otherwise array()
                // throws UnsupportedOperationException.
                byte[] mask_average = mMaskAverageByteBuffer.array();
                // Fourth byte of the 1x1 averaged pixel, read as unsigned.
                int bi = (int)(mask_average[3] & 0xFF);

                if (mLogVerbose) {
                    Log.v(TAG,
                            String.format("Mask_average is %d, threshold is %d",
                                    bi, DEFAULT_LEARNING_DONE_THRESHOLD));
                }

                if (bi >= DEFAULT_LEARNING_DONE_THRESHOLD) {
                    mStartLearning = true;                                      // Restart learning
                } else {
                  if (mLogVerbose) Log.v(TAG, "Learning done");
                  if (mLearningDoneListener != null) {
                      mLearningDoneListener.onLearningDone(this);
                   }
                }
            }
        } else {
            // Learning complete: substitute the background behind the subject.
            Frame output = context.getFrameManager().newFrame(video.getFormat());
            Frame[] subtractInputs = { video, background, mMask, mAutoWB };
            mBgSubtractProgram.process(subtractInputs, output);
            pushOutput("video", output);
            output.release();
        }

        // Compute mean and variance of the background
        if (mFrameCount < mLearningDuration - mLearningVerifyDuration ||
            mAdaptRateBg > 0.0 || mAdaptRateFg > 0.0) {
            Frame[] meanUpdateInputs = { mVideoInput, mBgMean[inputIndex], mMask };
            mBgUpdateMeanProgram.process(meanUpdateInputs, mBgMean[outputIndex]);
            mBgMean[outputIndex].generateMipMap();
            mBgMean[outputIndex].setTextureParameter(GLES20.GL_TEXTURE_MIN_FILTER,
                                                     GLES20.GL_LINEAR_MIPMAP_NEAREST);

            Frame[] varianceUpdateInputs = {
              mVideoInput, mBgMean[inputIndex], mBgVariance[inputIndex], mMask
            };
            mBgUpdateVarianceProgram.process(varianceUpdateInputs, mBgVariance[outputIndex]);
            mBgVariance[outputIndex].generateMipMap();
            mBgVariance[outputIndex].setTextureParameter(GLES20.GL_TEXTURE_MIN_FILTER,
                                                         GLES20.GL_LINEAR_MIPMAP_NEAREST);
        }

        // Provide debug output to two smaller viewers
        if (mProvideDebugOutputs) {
            Frame dbg1 = context.getFrameManager().newFrame(video.getFormat());
            mCopyOutProgram.process(video, dbg1);
            pushOutput("debug1", dbg1);
            dbg1.release();

            Frame dbg2 = context.getFrameManager().newFrame(mMemoryFormat);
            mCopyOutProgram.process(mMask, dbg2);
            pushOutput("debug2", dbg2);
            dbg2.release();
        }

        mFrameCount++;

        // Verbose-only FPS measurement over 30-frame windows; glFinish()
        // ensures GPU work is complete before taking timestamps.
        if (mLogVerbose) {
            if (mFrameCount % 30 == 0) {
                if (startTime == -1) {
                    context.getGLEnvironment().activate();
                    GLES20.glFinish();
                    startTime = SystemClock.elapsedRealtime();
                } else {
                    context.getGLEnvironment().activate();
                    GLES20.glFinish();
                    long endTime = SystemClock.elapsedRealtime();
                    Log.v(TAG, "Avg. frame duration: " + String.format("%.2f",(endTime-startTime)/30.) +
                          " ms. Avg. fps: " + String.format("%.2f", 1000./((endTime-startTime)/30.)) );
                    startTime = endTime;
                }
            }
        }
    }
859 
    // Start timestamp (SystemClock.elapsedRealtime, ms) of the current
    // 30-frame FPS measurement window used by process(); -1 until the first
    // window begins.
    private long startTime = -1;
861 
close(FilterContext context)862     public void close(FilterContext context) {
863         if (mMemoryFormat == null) {
864             return;
865         }
866 
867         if (mLogVerbose) Log.v(TAG, "Filter Closing!");
868         for (int i = 0; i < 2; i++) {
869             mBgMean[i].release();
870             mBgVariance[i].release();
871             mMaskVerify[i].release();
872         }
873         mDistance.release();
874         mMask.release();
875         mAutoWB.release();
876         mVideoInput.release();
877         mBgInput.release();
878         mMaskAverage.release();
879 
880         mMemoryFormat = null;
881     }
882 
883     // Relearn background model
relearn()884     synchronized public void relearn() {
885         // Let the processing thread know about learning restart
886         mStartLearning = true;
887     }
888 
889     @Override
fieldPortValueUpdated(String name, FilterContext context)890     public void fieldPortValueUpdated(String name, FilterContext context) {
891         // TODO: Many of these can be made ProgramPorts!
892         if (name.equals("backgroundFitMode")) {
893             mBackgroundFitModeChanged = true;
894         } else if (name.equals("acceptStddev")) {
895             mBgMaskProgram.setHostValue("accept_variance", mAcceptStddev * mAcceptStddev);
896         } else if (name.equals("hierLrgScale")) {
897             mBgMaskProgram.setHostValue("scale_lrg", mHierarchyLrgScale);
898         } else if (name.equals("hierMidScale")) {
899             mBgMaskProgram.setHostValue("scale_mid", mHierarchyMidScale);
900         } else if (name.equals("hierSmlScale")) {
901             mBgMaskProgram.setHostValue("scale_sml", mHierarchySmlScale);
902         } else if (name.equals("hierLrgExp")) {
903             mBgMaskProgram.setHostValue("exp_lrg", (float)(mSubsampleLevel + mHierarchyLrgExp));
904         } else if (name.equals("hierMidExp")) {
905             mBgMaskProgram.setHostValue("exp_mid", (float)(mSubsampleLevel + mHierarchyMidExp));
906         } else if (name.equals("hierSmlExp")) {
907             mBgMaskProgram.setHostValue("exp_sml", (float)(mSubsampleLevel + mHierarchySmlExp));
908         } else if (name.equals("lumScale") || name.equals("chromaScale")) {
909             float[] yuvWeights = { mLumScale, mChromaScale };
910             mBgMaskProgram.setHostValue("yuv_weights", yuvWeights );
911         } else if (name.equals("maskBg")) {
912             mBgSubtractProgram.setHostValue("mask_blend_bg", mMaskBg);
913         } else if (name.equals("maskFg")) {
914             mBgSubtractProgram.setHostValue("mask_blend_fg", mMaskFg);
915         } else if (name.equals("exposureChange")) {
916             mBgSubtractProgram.setHostValue("exposure_change", mExposureChange);
917         } else if (name.equals("whitebalanceredChange")) {
918             mBgSubtractProgram.setHostValue("whitebalancered_change", mWhiteBalanceRedChange);
919         } else if (name.equals("whitebalanceblueChange")) {
920             mBgSubtractProgram.setHostValue("whitebalanceblue_change", mWhiteBalanceBlueChange);
921         } else if (name.equals("autowbToggle")){
922             mAutomaticWhiteBalanceProgram.setHostValue("autowb_toggle", mAutoWBToggle);
923         }
924     }
925 
    /**
     * Recomputes the background-fit transform whenever the relative aspect
     * ratio between the video and background frames changes, or the fit mode
     * was changed. The resulting 2D transform (stored transposed) is written
     * to the subtract shader's "bg_fit_transform" uniform.
     */
    private void updateBgScaling(Frame video, Frame background, boolean fitModeChanged) {
        float foregroundAspect = (float)video.getFormat().getWidth() / video.getFormat().getHeight();
        float backgroundAspect = (float)background.getFormat().getWidth() / background.getFormat().getHeight();
        float currentRelativeAspect = foregroundAspect/backgroundAspect;
        if (currentRelativeAspect != mRelativeAspect || fitModeChanged) {
            mRelativeAspect = currentRelativeAspect;
            // Sampling window into the background texture: origin and extent.
            float xMin = 0.f, xWidth = 1.f, yMin = 0.f, yWidth = 1.f;
            switch (mBackgroundFitMode) {
                case BACKGROUND_STRETCH:
                    // Just map 1:1
                    break;
                case BACKGROUND_FIT:
                    if (mRelativeAspect > 1.0f) {
                        // Foreground is wider than background, scale down
                        // background in X
                        xMin = 0.5f - 0.5f * mRelativeAspect;
                        xWidth = 1.f * mRelativeAspect;
                    } else {
                        // Foreground is taller than background, scale down
                        // background in Y
                        yMin = 0.5f - 0.5f / mRelativeAspect;
                        yWidth = 1 / mRelativeAspect;
                    }
                    break;
                case BACKGROUND_FILL_CROP:
                    if (mRelativeAspect > 1.0f) {
                        // Foreground is wider than background, crop
                        // background in Y
                        yMin = 0.5f - 0.5f / mRelativeAspect;
                        yWidth = 1.f / mRelativeAspect;
                    } else {
                        // Foreground is taller than background, crop
                        // background in X
                        xMin = 0.5f - 0.5f * mRelativeAspect;
                        xWidth = mRelativeAspect;
                    }
                    break;
            }
            // If mirroring is required (for ex. the camera mirrors the preview
            // in the front camera)
            // TODO: Backdropper does not attempt to apply any other transformation
            // than just flipping. However, in the current state, it's "x-axis" is always aligned
            // with the Camera's width. Hence, we need to define the mirroring based on the camera
            // orientation. In the future, a cleaner design would be to cast away all the rotation
            // in a separate place.
            if (mMirrorBg) {
                if (mLogVerbose) Log.v(TAG, "Mirroring the background!");
                // Mirroring in portrait
                if (mOrientation == 0 || mOrientation == 180) {
                    xWidth = -xWidth;
                    xMin = 1.0f - xMin;
                } else {
                    // Mirroring in landscape
                    yWidth = -yWidth;
                    yMin = 1.0f - yMin;
                }
            }
            if (mLogVerbose) Log.v(TAG, "bgTransform: xMin, yMin, xWidth, yWidth : " +
                    xMin + ", " + yMin + ", " + xWidth + ", " + yWidth +
                    ", mRelAspRatio = " + mRelativeAspect);
            // The following matrix is the transpose of the actual matrix
            float[] bgTransform = {xWidth, 0.f, 0.f,
                                   0.f, yWidth, 0.f,
                                   xMin, yMin,  1.f};
            mBgSubtractProgram.setHostValue("bg_fit_transform", bgTransform);
        }
    }
993 
pyramidLevel(int size)994     private int pyramidLevel(int size) {
995         return (int)Math.floor(Math.log10(size) / Math.log10(2)) - 1;
996     }
997 
998 }
999