/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "encodemb.h"
#include "encodemv.h"
#include "vp8/common/common.h"
#include "onyx_int.h"
#include "vp8/common/extend.h"
#include "vp8/common/entropymode.h"
#include "vp8/common/quant_common.h"
#include "segmentation.h"
#include "vp8/common/setupintrarecon.h"
#include "encodeintra.h"
#include "vp8/common/reconinter.h"
#include "rdopt.h"
#include "pickinter.h"
#include "vp8/common/findnearmv.h"
#include <stdio.h>
#include <limits.h>
#include "vp8/common/invtrans.h"
#include "vpx_ports/vpx_timer.h"
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
#include "bitstream.h"
#endif
#include "encodeframe.h"

extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
extern void vp8_calc_ref_frame_costs(int *ref_frame_cost, int prob_intra,
                                     int prob_last, int prob_garf);
extern void vp8_convert_rfct_to_prob(VP8_COMP *const cpi);
extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
extern void vp8_auto_select_speed(VP8_COMP *cpi);
extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi, MACROBLOCK *x,
                                      MB_ROW_COMP *mbr_ei, int count);
static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x);

#ifdef MODE_STATS
unsigned int inter_y_modes[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
unsigned int inter_uv_modes[4] = { 0, 0, 0, 0 };
unsigned int inter_b_modes[15] = {
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
unsigned int y_modes[5] = { 0, 0, 0, 0, 0 };
unsigned int uv_modes[4] = { 0, 0, 0, 0 };
unsigned int b_modes[14] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
#endif

/* activity_avg must be positive, or flat regions could get a zero weight
 *  (infinite lambda), which confounds analysis.
 * This also avoids the need for divide by zero checks in
 *  vp8_activity_masking().
 */
#define VP8_ACTIVITY_AVG_MIN (64)

/* This is used as a reference when computing the source variance for the
 *  purposes of activity masking.
 * Eventually this should be replaced by custom no-reference routines,
 *  which will be faster.
 */
static const unsigned char VP8_VAR_OFFS[16] = {
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};

/* Original activity measure from Tim T's code. */
static unsigned int tt_activity_measure(VP8_COMP *cpi, MACROBLOCK *x) {
  unsigned int act;
  unsigned int sse;
  (void)cpi;
  /* TODO: This could also be done over smaller areas (8x8), but that would
   *  require extensive changes elsewhere, as lambda is assumed to be fixed
   *  over an entire MB in most of the code.
   * Another option is to compute four 8x8 variances, and pick a single
   *  lambda using a non-linear combination (e.g., the smallest, or second
   *  smallest, etc.).
   */
  act = vpx_variance16x16(x->src.y_buffer, x->src.y_stride, VP8_VAR_OFFS, 0,
                          &sse);
  act = act << 4;

  /* If the region is flat, lower the activity some more. */
  if (act < 8 << 12) act = act < 5 << 12 ? act : 5 << 12;
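  /* i.e., moderately flat blocks (act below 8 << 12) are clamped to at
   * most 5 << 12, so nearly-flat regions are not treated as active.
   */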

  return act;
}

/* Stub for alternative experimental activity measures. */
static unsigned int alt_activity_measure(VP8_COMP *cpi, MACROBLOCK *x,
                                         int use_dc_pred) {
  return vp8_encode_intra(cpi, x, use_dc_pred);
}

/* Measure the activity of the current macroblock.
 * What we measure here is TBD, so it is abstracted into this function.
 */
#define ALT_ACT_MEASURE 1
static unsigned int mb_activity_measure(VP8_COMP *cpi, MACROBLOCK *x,
                                        int mb_row, int mb_col) {
  unsigned int mb_activity;

  if (ALT_ACT_MEASURE) {
    int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
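    /* (True exactly when the MB lies on the top row or the left column,
     * but not at the top-left corner where neither neighbour exists.)
     */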

    /* Or use an alternative. */
    mb_activity = alt_activity_measure(cpi, x, use_dc_pred);
  } else {
    /* Original activity measure from Tim T's code. */
    mb_activity = tt_activity_measure(cpi, x);
  }

  if (mb_activity < VP8_ACTIVITY_AVG_MIN) mb_activity = VP8_ACTIVITY_AVG_MIN;

  return mb_activity;
}

/* Calculate an "average" mb activity value for the frame */
#define ACT_MEDIAN 0
static void calc_av_activity(VP8_COMP *cpi, int64_t activity_sum) {
#if ACT_MEDIAN
  /* Find median: Simple n^2 algorithm for experimentation */
  {
    unsigned int median;
    unsigned int i, j;
    unsigned int *sortlist;
    unsigned int tmp;

    /* Create a list to sort to */
    CHECK_MEM_ERROR(sortlist,
                    vpx_calloc(sizeof(unsigned int), cpi->common.MBs));

    /* Copy map to sort list */
    memcpy(sortlist, cpi->mb_activity_map,
           sizeof(unsigned int) * cpi->common.MBs);

    /* Ripple each value down to its correct position */
    for (i = 1; i < cpi->common.MBs; ++i) {
      for (j = i; j > 0; j--) {
        if (sortlist[j] < sortlist[j - 1]) {
          /* Swap values */
          tmp = sortlist[j - 1];
          sortlist[j - 1] = sortlist[j];
          sortlist[j] = tmp;
        } else
          break;
      }
    }

    /* Even number of MBs, so estimate the median as the mean of the two
     * values either side of the midpoint. */
    median = (1 + sortlist[cpi->common.MBs >> 1] +
              sortlist[(cpi->common.MBs >> 1) + 1]) >>
             1;

    cpi->activity_avg = median;

    vpx_free(sortlist);
  }
#else
  /* Simple mean for now */
  cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
#endif

  if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN) {
    cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
  }

  /* Experimental code: return fixed value normalized for several clips */
  if (ALT_ACT_MEASURE) cpi->activity_avg = 100000;
}

#define USE_ACT_INDEX 0
#define OUTPUT_NORM_ACT_STATS 0

#if USE_ACT_INDEX
/* Calculate an activity index for each mb */
static void calc_activity_index(VP8_COMP *cpi, MACROBLOCK *x) {
  VP8_COMMON *const cm = &cpi->common;
  int mb_row, mb_col;

  int64_t act;
  int64_t a;
  int64_t b;

#if OUTPUT_NORM_ACT_STATS
  FILE *f = fopen("norm_act.stt", "a");
  fprintf(f, "\n%12d\n", cpi->activity_avg);
#endif

  /* Reset pointers to start of activity map */
  x->mb_activity_ptr = cpi->mb_activity_map;

  /* Calculate normalized mb activity number. */
  for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
    /* for each macroblock col in image */
    for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
      /* Read activity from the map */
      act = *(x->mb_activity_ptr);

      /* Calculate a normalized activity number */
      a = act + 4 * cpi->activity_avg;
      b = 4 * act + cpi->activity_avg;

      if (b >= a)
        *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
      else
        *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
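      /* The index is 0 for an MB of average activity, positive for MBs
       * more active than the frame average, and negative for flatter
       * ones; the 4:1 weighting bounds it to roughly [-3, 3].
       */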

#if OUTPUT_NORM_ACT_STATS
      fprintf(f, " %6d", *(x->mb_activity_ptr));
#endif
      /* Increment activity map pointers */
      x->mb_activity_ptr++;
    }

#if OUTPUT_NORM_ACT_STATS
    fprintf(f, "\n");
#endif
  }

#if OUTPUT_NORM_ACT_STATS
  fclose(f);
#endif
}
#endif

/* Loop through all MBs: note the activity of each, compute the frame
 * average, and optionally calculate a normalized activity index for each
 * MB.
 */
static void build_activity_map(VP8_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *xd = &x->e_mbd;
  VP8_COMMON *const cm = &cpi->common;

#if ALT_ACT_MEASURE
  YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
  int recon_yoffset;
  int recon_y_stride = new_yv12->y_stride;
#endif

  int mb_row, mb_col;
  unsigned int mb_activity;
  int64_t activity_sum = 0;

  /* for each macroblock row in image */
  for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
#if ALT_ACT_MEASURE
    /* reset above block coeffs */
    xd->up_available = (mb_row != 0);
    recon_yoffset = (mb_row * recon_y_stride * 16);
#endif
    /* for each macroblock col in image */
    for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
#if ALT_ACT_MEASURE
      xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
      xd->left_available = (mb_col != 0);
      recon_yoffset += 16;
#endif
      /* Copy current mb to a buffer */
      vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);

      /* measure activity */
      mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);

      /* Keep frame sum */
      activity_sum += mb_activity;

      /* Store MB level activity details. */
      *x->mb_activity_ptr = mb_activity;

      /* Increment activity map pointer */
      x->mb_activity_ptr++;

      /* adjust to the next column of source macroblocks */
      x->src.y_buffer += 16;
    }

    /* adjust to the next row of mbs */
    x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;

#if ALT_ACT_MEASURE
    /* extend the recon for intra prediction */
    vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8,
                      xd->dst.v_buffer + 8);
#endif
  }

  /* Calculate an "average" MB activity */
  calc_av_activity(cpi, activity_sum);

#if USE_ACT_INDEX
  /* Calculate an activity index number for each mb */
  calc_activity_index(cpi, x);
#endif
}

/* Macroblock activity masking */
void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
  x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
  x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
#else
  int64_t a;
  int64_t b;
  int64_t act = *(x->mb_activity_ptr);

  /* Apply the masking to the RD multiplier. */
  a = act + (2 * cpi->activity_avg);
  b = (2 * act) + cpi->activity_avg;
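  /* The scaling factor b / a = (2 * act + avg) / (act + 2 * avg) lies in
   * (1/2, 2): rdmult shrinks for MBs flatter than the frame average
   * (spending relatively more bits there) and grows for busier MBs.
   */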

  x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a);
  x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
  x->errorperbit += (x->errorperbit == 0);
#endif

  /* Activity based Zbin adjustment */
  adjust_act_zbin(cpi, x);
}

static void encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row,
                          MACROBLOCK *x, MACROBLOCKD *xd, TOKENEXTRA **tp,
                          int *segment_counts, int *totalrate) {
  int recon_yoffset, recon_uvoffset;
  int mb_col;
  int ref_fb_idx = cm->lst_fb_idx;
  int dst_fb_idx = cm->new_fb_idx;
  int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
  int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
  int map_index = (mb_row * cpi->common.mb_cols);

#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
  const int num_part = (1 << cm->multi_token_partition);
  TOKENEXTRA *tp_start = cpi->tok;
  vp8_writer *w;
#endif

#if CONFIG_MULTITHREAD
  const int nsync = cpi->mt_sync_range;
  const int rightmost_col = cm->mb_cols + nsync;
  const int *last_row_current_mb_col;
  int *current_mb_col = &cpi->mt_current_mb_col[mb_row];

  if ((cpi->b_multi_threaded != 0) && (mb_row != 0)) {
    last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
  } else {
    last_row_current_mb_col = &rightmost_col;
  }
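
  /* Row synchronization for threaded encoding: before encoding a column,
   * this row waits (via mt_current_mb_col) until the row above has
   * advanced far enough that the above and above-right context is ready.
   * Pointing row 0, or a single-threaded run, at rightmost_col makes the
   * wait a no-op.
   */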
#endif

#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
  if (num_part > 1)
    w = &cpi->bc[1 + (mb_row % num_part)];
  else
    w = &cpi->bc[1];
#endif

  /* reset above block coeffs */
  xd->above_context = cm->above_context;

  xd->up_available = (mb_row != 0);
  recon_yoffset = (mb_row * recon_y_stride * 16);
  recon_uvoffset = (mb_row * recon_uv_stride * 8);

  cpi->tplist[mb_row].start = *tp;
  /* printf("Main mb_row = %d\n", mb_row); */

  /* Distance of Mb to the top & bottom edges, specified in 1/8th pel
   * units as they are always compared to values that are in 1/8th pel
   * units
   */
  xd->mb_to_top_edge = -((mb_row * 16) << 3);
  xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;

  /* Set up limit values for vertical motion vector components
   * to prevent them extending beyond the UMV borders
   */
  x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
  x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);

  /* Set the mb activity pointer to the start of the row. */
  x->mb_activity_ptr = &cpi->mb_activity_map[map_index];

  /* for each macroblock col in image */
  for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
    *tp = cpi->tok;
#endif
    /* Distance of Mb to the left & right edges, specified in
     * 1/8th pel units as they are always compared to values
     * that are in 1/8th pel units
     */
    xd->mb_to_left_edge = -((mb_col * 16) << 3);
    xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;

    /* Set up limit values for horizontal motion vector components
     * to prevent them extending beyond the UMV borders
     */
    x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
    x->mv_col_max =
        ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16);

    xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
    xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
    xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
    xd->left_available = (mb_col != 0);

    x->rddiv = cpi->RDDIV;
    x->rdmult = cpi->RDMULT;

    /* Copy current mb to a buffer */
    vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);

#if CONFIG_MULTITHREAD
    if (cpi->b_multi_threaded != 0) {
      if (((mb_col - 1) % nsync) == 0) {
        pthread_mutex_t *mutex = &cpi->pmutex[mb_row];
        protected_write(mutex, current_mb_col, mb_col - 1);
      }

      if (mb_row && !(mb_col & (nsync - 1))) {
        pthread_mutex_t *mutex = &cpi->pmutex[mb_row - 1];
        sync_read(mutex, mb_col, last_row_current_mb_col, nsync);
      }
    }
#endif

    if (cpi->oxcf.tuning == VP8_TUNE_SSIM) vp8_activity_masking(cpi, x);

    /* Is segmentation enabled */
    /* MB level adjustment to quantizer */
    if (xd->segmentation_enabled) {
      /* Code to set segment id in xd->mbmi.segment_id for current MB
       * (with range checking)
       */
      if (cpi->segmentation_map[map_index + mb_col] <= 3) {
        xd->mode_info_context->mbmi.segment_id =
            cpi->segmentation_map[map_index + mb_col];
      } else {
        xd->mode_info_context->mbmi.segment_id = 0;
      }

      vp8cx_mb_init_quantizer(cpi, x, 1);
    } else {
      /* Set to Segment 0 by default */
      xd->mode_info_context->mbmi.segment_id = 0;
    }

    x->active_ptr = cpi->active_map + map_index + mb_col;

    if (cm->frame_type == KEY_FRAME) {
      *totalrate += vp8cx_encode_intra_macroblock(cpi, x, tp);
#ifdef MODE_STATS
      y_modes[xd->mbmi.mode]++;
#endif
    } else {
      *totalrate += vp8cx_encode_inter_macroblock(
          cpi, x, tp, recon_yoffset, recon_uvoffset, mb_row, mb_col);

#ifdef MODE_STATS
      inter_y_modes[xd->mbmi.mode]++;

      if (xd->mbmi.mode == SPLITMV) {
        int b;

        for (b = 0; b < xd->mbmi.partition_count; ++b) {
          inter_b_modes[x->partition->bmi[b].mode]++;
        }
      }

#endif

      // Keep track of how many (consecutive) times a block is coded
      // as ZEROMV_LASTREF, for base layer frames.
      // Reset to 0 if it is coded as anything else.
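      // (These counters are consumed elsewhere, e.g. by the real-time mode
      // selection and the temporal denoiser, to bias decisions on blocks
      // that have been static for a long time.)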
      if (cpi->current_layer == 0) {
        if (xd->mode_info_context->mbmi.mode == ZEROMV &&
            xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
          // Increment, check for wrap-around.
          if (cpi->consec_zero_last[map_index + mb_col] < 255) {
            cpi->consec_zero_last[map_index + mb_col] += 1;
          }
          if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255) {
            cpi->consec_zero_last_mvbias[map_index + mb_col] += 1;
          }
        } else {
          cpi->consec_zero_last[map_index + mb_col] = 0;
          cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
        }
        if (x->zero_last_dot_suppress) {
          cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
        }
      }

      /* Special case code for cyclic refresh
       * If cyclic update enabled then copy xd->mbmi.segment_id; (which
       * may have been updated based on mode during
       * vp8cx_encode_inter_macroblock()) back into the global
       * segmentation map
       */
      if ((cpi->current_layer == 0) &&
          (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)) {
        cpi->segmentation_map[map_index + mb_col] =
            xd->mode_info_context->mbmi.segment_id;

        /* If the block has been refreshed mark it as clean (the
         * magnitude of the -ve influences how long it will be before
         * we consider another refresh):
         * Else if it was coded (last frame 0,0) and has not already
         * been refreshed then mark it as a candidate for cleanup
         * next time (marked 0) else mark it as dirty (1).
         */
        if (xd->mode_info_context->mbmi.segment_id) {
          cpi->cyclic_refresh_map[map_index + mb_col] = -1;
        } else if ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
                   (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)) {
          if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) {
            cpi->cyclic_refresh_map[map_index + mb_col] = 0;
          }
        } else {
          cpi->cyclic_refresh_map[map_index + mb_col] = 1;
        }
      }
    }

    cpi->tplist[mb_row].stop = *tp;

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
    /* pack tokens for this MB */
    {
      int tok_count = *tp - tp_start;
      vp8_pack_tokens(w, tp_start, tok_count);
    }
#endif
    /* Increment pointer into gf usage flags structure. */
    x->gf_active_ptr++;

    /* Increment the activity mask pointers. */
    x->mb_activity_ptr++;

    /* adjust to the next column of macroblocks */
    x->src.y_buffer += 16;
    x->src.u_buffer += 8;
    x->src.v_buffer += 8;

    recon_yoffset += 16;
    recon_uvoffset += 8;

    /* Keep track of segment usage */
    segment_counts[xd->mode_info_context->mbmi.segment_id]++;

    /* skip to next mb */
    xd->mode_info_context++;
    x->partition_info++;
    xd->above_context++;
  }

  /* extend the recon for intra prediction */
  vp8_extend_mb_row(&cm->yv12_fb[dst_fb_idx], xd->dst.y_buffer + 16,
                    xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);

#if CONFIG_MULTITHREAD
  if (cpi->b_multi_threaded != 0) {
    protected_write(&cpi->pmutex[mb_row], current_mb_col, rightmost_col);
  }
#endif

  /* this is to account for the border */
  xd->mode_info_context++;
  x->partition_info++;
}

static void init_encode_frame_mb_context(VP8_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  VP8_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;

  /* GF active flags data structure */
  x->gf_active_ptr = (signed char *)cpi->gf_active_flags;

  /* Activity map pointer */
  x->mb_activity_ptr = cpi->mb_activity_map;

  x->act_zbin_adj = 0;

  x->partition_info = x->pi;

  xd->mode_info_context = cm->mi;
  xd->mode_info_stride = cm->mode_info_stride;

  xd->frame_type = cm->frame_type;

  /* reset intra mode contexts */
  if (cm->frame_type == KEY_FRAME) vp8_init_mbmode_probs(cm);

  /* Copy data over into macro block data structures. */
  x->src = *cpi->Source;
  xd->pre = cm->yv12_fb[cm->lst_fb_idx];
  xd->dst = cm->yv12_fb[cm->new_fb_idx];

  /* set up frame for intra coded blocks */
  vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);

  vp8_build_block_offsets(x);

  xd->mode_info_context->mbmi.mode = DC_PRED;
  xd->mode_info_context->mbmi.uv_mode = DC_PRED;

  xd->left_context = &cm->left_context;

  x->mvc = cm->fc.mvc;

  memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);

  /* Special case treatment when GF and ARF are not sensible options
   * for reference
   */
  if (cpi->ref_frame_flags == VP8_LAST_FRAME) {
    vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 255,
                             128);
  } else if ((cpi->oxcf.number_of_layers > 1) &&
             (cpi->ref_frame_flags == VP8_GOLD_FRAME)) {
    vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 255);
  } else if ((cpi->oxcf.number_of_layers > 1) &&
             (cpi->ref_frame_flags == VP8_ALTR_FRAME)) {
    vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 1);
  } else {
    vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded,
                             cpi->prob_last_coded, cpi->prob_gf_coded);
  }

  xd->fullpixel_mask = 0xffffffff;
  if (cm->full_pixel) xd->fullpixel_mask = 0xfffffff8;
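  /* (Motion vector components are stored in 1/8th-pel units; masking off
   * the low three bits snaps them to full-pel positions when the frame is
   * coded in full-pixel mode.)
   */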

  vp8_zero(x->coef_counts);
  vp8_zero(x->ymode_count);
  vp8_zero(x->uv_mode_count);
  x->prediction_error = 0;
  x->intra_error = 0;
  vp8_zero(x->count_mb_ref_frame_usage);
}

#if CONFIG_MULTITHREAD
static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread) {
  int i = 0;
  do {
    int j = 0;
    do {
      int k = 0;
      do {
        /* at every context */

        /* calc probs and branch cts for this frame only */
        int t = 0; /* token/prob index */

        do {
          x->coef_counts[i][j][k][t] += x_thread->coef_counts[i][j][k][t];
        } while (++t < ENTROPY_NODES);
      } while (++k < PREV_COEF_CONTEXTS);
    } while (++j < COEF_BANDS);
  } while (++i < BLOCK_TYPES);
}
#endif  // CONFIG_MULTITHREAD

void vp8_encode_frame(VP8_COMP *cpi) {
  int mb_row;
  MACROBLOCK *const x = &cpi->mb;
  VP8_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  TOKENEXTRA *tp = cpi->tok;
  int segment_counts[MAX_MB_SEGMENTS];
  int totalrate;
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  BOOL_CODER *bc = &cpi->bc[1]; /* bc[0] is for control partition */
  const int num_part = (1 << cm->multi_token_partition);
#endif

  memset(segment_counts, 0, sizeof(segment_counts));
  totalrate = 0;

  if (cpi->compressor_speed == 2) {
    if (cpi->oxcf.cpu_used < 0) {
      cpi->Speed = -(cpi->oxcf.cpu_used);
    } else {
      vp8_auto_select_speed(cpi);
    }
  }

  /* Functions setup for all frame types so we can use MC in AltRef */
  if (!cm->use_bilinear_mc_filter) {
    xd->subpixel_predict = vp8_sixtap_predict4x4;
    xd->subpixel_predict8x4 = vp8_sixtap_predict8x4;
    xd->subpixel_predict8x8 = vp8_sixtap_predict8x8;
    xd->subpixel_predict16x16 = vp8_sixtap_predict16x16;
  } else {
    xd->subpixel_predict = vp8_bilinear_predict4x4;
    xd->subpixel_predict8x4 = vp8_bilinear_predict8x4;
    xd->subpixel_predict8x8 = vp8_bilinear_predict8x8;
    xd->subpixel_predict16x16 = vp8_bilinear_predict16x16;
  }

  cpi->mb.skip_true_count = 0;
  cpi->tok_count = 0;

#if 0
    /* Experimental code */
    cpi->frame_distortion = 0;
    cpi->last_mb_distortion = 0;
#endif

  xd->mode_info_context = cm->mi;

  vp8_zero(cpi->mb.MVcount);

  vp8cx_frame_init_quantizer(cpi);

  vp8_initialize_rd_consts(cpi, x,
                           vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));

  vp8cx_initialize_me_consts(cpi, cm->base_qindex);

  if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    /* Initialize encode frame context. */
    init_encode_frame_mb_context(cpi);

    /* Build a frame level activity map */
    build_activity_map(cpi);
  }

  /* re-init encode frame context. */
  init_encode_frame_mb_context(cpi);

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  {
    int i;
    for (i = 0; i < num_part; ++i) {
      vp8_start_encode(&bc[i], cpi->partition_d[i + 1],
                       cpi->partition_d_end[i + 1]);
      bc[i].error = &cm->error;
    }
  }

#endif

  {
    struct vpx_usec_timer emr_timer;
    vpx_usec_timer_start(&emr_timer);

#if CONFIG_MULTITHREAD
    if (cpi->b_multi_threaded) {
      int i;

      vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei,
                                cpi->encoding_thread_count);

      for (i = 0; i < cm->mb_rows; ++i) cpi->mt_current_mb_col[i] = -1;

      for (i = 0; i < cpi->encoding_thread_count; ++i) {
        sem_post(&cpi->h_event_start_encoding[i]);
      }

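      /* Rows are interleaved across threads: this loop encodes every
       * (encoding_thread_count + 1)-th row on the main thread, while the
       * worker threads (released above) handle the rows in between.
       */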
      for (mb_row = 0; mb_row < cm->mb_rows;
           mb_row += (cpi->encoding_thread_count + 1)) {
        vp8_zero(cm->left_context);

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
        tp = cpi->tok;
#else
        tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
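        /* (Rows are spaced 16 * 24 tokens apart, which appears to budget 16
         * coefficient tokens for each of the MB's 24 Y/U/V subblocks, so
         * concurrently encoded rows write disjoint regions of the shared
         * token buffer.)
         */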
#endif

        encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);

        /* adjust to the next row of mbs */
        x->src.y_buffer +=
            16 * x->src.y_stride * (cpi->encoding_thread_count + 1) -
            16 * cm->mb_cols;
        x->src.u_buffer +=
            8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) -
            8 * cm->mb_cols;
        x->src.v_buffer +=
            8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) -
            8 * cm->mb_cols;

        xd->mode_info_context +=
            xd->mode_info_stride * cpi->encoding_thread_count;
        x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
        x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count;
      }
      /* Wait for all the threads to finish. */
      for (i = 0; i < cpi->encoding_thread_count; ++i) {
        sem_wait(&cpi->h_event_end_encoding[i]);
      }

      for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
        cpi->tok_count += (unsigned int)(cpi->tplist[mb_row].stop -
                                         cpi->tplist[mb_row].start);
      }

      if (xd->segmentation_enabled) {
        int j;

        for (i = 0; i < cpi->encoding_thread_count; ++i) {
          for (j = 0; j < 4; ++j) {
            segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
          }
        }
      }

      for (i = 0; i < cpi->encoding_thread_count; ++i) {
        int mode_count;
        int c_idx;
        totalrate += cpi->mb_row_ei[i].totalrate;

        cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count;

        for (mode_count = 0; mode_count < VP8_YMODES; ++mode_count) {
          cpi->mb.ymode_count[mode_count] +=
              cpi->mb_row_ei[i].mb.ymode_count[mode_count];
        }

        for (mode_count = 0; mode_count < VP8_UV_MODES; ++mode_count) {
          cpi->mb.uv_mode_count[mode_count] +=
              cpi->mb_row_ei[i].mb.uv_mode_count[mode_count];
        }

        for (c_idx = 0; c_idx < MVvals; ++c_idx) {
          cpi->mb.MVcount[0][c_idx] += cpi->mb_row_ei[i].mb.MVcount[0][c_idx];
          cpi->mb.MVcount[1][c_idx] += cpi->mb_row_ei[i].mb.MVcount[1][c_idx];
        }

        cpi->mb.prediction_error += cpi->mb_row_ei[i].mb.prediction_error;
        cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error;

        for (c_idx = 0; c_idx < MAX_REF_FRAMES; ++c_idx) {
          cpi->mb.count_mb_ref_frame_usage[c_idx] +=
              cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx];
        }

        for (c_idx = 0; c_idx < MAX_ERROR_BINS; ++c_idx) {
          cpi->mb.error_bins[c_idx] += cpi->mb_row_ei[i].mb.error_bins[c_idx];
        }

        /* add up counts for each thread */
        sum_coef_counts(x, &cpi->mb_row_ei[i].mb);
      }

    } else
#endif  // CONFIG_MULTITHREAD
    {
      /* for each macroblock row in image */
      for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
        vp8_zero(cm->left_context);

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
        tp = cpi->tok;
#endif

        encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);

        /* adjust to the next row of mbs */
        x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
        x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
        x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
      }

      cpi->tok_count = (unsigned int)(tp - cpi->tok);
    }

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
    {
      int i;
      for (i = 0; i < num_part; ++i) {
        vp8_stop_encode(&bc[i]);
        cpi->partition_sz[i + 1] = bc[i].pos;
      }
    }
#endif

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
  }

  // Work out the segment probabilities if segmentation is enabled
  // and needs to be updated
  if (xd->segmentation_enabled && xd->update_mb_segmentation_map) {
    int tot_count;
    int i;

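    /* The segment map is coded with a three-node binary tree: probs[0]
     * splits segments {0,1} from {2,3}, probs[1] splits 0 from 1, and
     * probs[2] splits 2 from 3; each probability is the observed branch
     * frequency scaled to 8 bits.
     */
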
    /* Set to defaults */
    memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));

    tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] +
                segment_counts[3];

    if (tot_count) {
      xd->mb_segment_tree_probs[0] =
          ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;

      tot_count = segment_counts[0] + segment_counts[1];

      if (tot_count > 0) {
        xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;
      }

      tot_count = segment_counts[2] + segment_counts[3];

      if (tot_count > 0) {
        xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
      }

      /* Zero probabilities not allowed */
      for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) {
        if (xd->mb_segment_tree_probs[i] == 0) xd->mb_segment_tree_probs[i] = 1;
      }
    }
  }

  /* projected_frame_size in units of BYTES */
  cpi->projected_frame_size = totalrate >> 8;

  /* Make a note of the percentage MBs coded Intra. */
  if (cm->frame_type == KEY_FRAME) {
    cpi->this_frame_percent_intra = 100;
  } else {
    int tot_modes;

    tot_modes = cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] +
                cpi->mb.count_mb_ref_frame_usage[LAST_FRAME] +
                cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME] +
                cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];

    if (tot_modes) {
      cpi->this_frame_percent_intra =
          cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
    }
  }

#if !CONFIG_REALTIME_ONLY
  /* Adjust the projected reference frame usage probability numbers to
   * reflect what we have just seen. This may be useful when we make
   * multiple iterations of the recode loop rather than continuing to use
   * values from the previous frame.
   */
  if ((cm->frame_type != KEY_FRAME) &&
      ((cpi->oxcf.number_of_layers > 1) ||
       (!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame))) {
    vp8_convert_rfct_to_prob(cpi);
  }
#endif
}

void vp8_setup_block_ptrs(MACROBLOCK *x) {
  int r, c;
  int i;

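  /* src_diff holds one macroblock of residual: the 16x16 luma block first,
   * then the 8x8 U plane at offset 256, the 8x8 V plane at offset 320, and
   * the 16 second-order (Y2) coefficients at offset 384 (block 24).
   */
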
  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c) {
      x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
    }
  }

  for (r = 0; r < 2; ++r) {
    for (c = 0; c < 2; ++c) {
      x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
    }
  }

  for (r = 0; r < 2; ++r) {
    for (c = 0; c < 2; ++c) {
      x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
    }
  }

  x->block[24].src_diff = x->src_diff + 384;

  for (i = 0; i < 25; ++i) {
    x->block[i].coeff = x->coeff + i * 16;
  }
}

void vp8_build_block_offsets(MACROBLOCK *x) {
  int block = 0;
  int br, bc;

  vp8_build_block_doffsets(&x->e_mbd);

  /* y blocks */
  x->thismb_ptr = &x->thismb[0];
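  /* (Y blocks read from the 16x16 copy of the source MB in x->thismb, with
   * a fixed stride of 16, rather than from the frame buffer; hence
   * base_src points at thismb_ptr.)
   */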
  for (br = 0; br < 4; ++br) {
    for (bc = 0; bc < 4; ++bc) {
      BLOCK *this_block = &x->block[block];
      this_block->base_src = &x->thismb_ptr;
      this_block->src_stride = 16;
      this_block->src = 4 * br * 16 + 4 * bc;
      ++block;
    }
  }

  /* u blocks */
  for (br = 0; br < 2; ++br) {
    for (bc = 0; bc < 2; ++bc) {
      BLOCK *this_block = &x->block[block];
      this_block->base_src = &x->src.u_buffer;
      this_block->src_stride = x->src.uv_stride;
      this_block->src = 4 * br * this_block->src_stride + 4 * bc;
      ++block;
    }
  }

  /* v blocks */
  for (br = 0; br < 2; ++br) {
    for (bc = 0; bc < 2; ++bc) {
      BLOCK *this_block = &x->block[block];
      this_block->base_src = &x->src.v_buffer;
      this_block->src_stride = x->src.uv_stride;
      this_block->src = 4 * br * this_block->src_stride + 4 * bc;
      ++block;
    }
  }
}

static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x) {
  const MACROBLOCKD *xd = &x->e_mbd;
  const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
  const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;

#ifdef MODE_STATS
  const int is_key = cpi->common.frame_type == KEY_FRAME;

  ++(is_key ? uv_modes : inter_uv_modes)[uvm];

  if (m == B_PRED) {
    unsigned int *const bct = is_key ? b_modes : inter_b_modes;

    int b = 0;

    do {
      ++bct[xd->block[b].bmi.mode];
    } while (++b < 16);
  }

#else
  (void)cpi;
#endif

  ++x->ymode_count[m];
  ++x->uv_mode_count[uvm];
}

/* Experimental stub function to create a per MB zbin adjustment based on
 * some previously calculated measure of MB activity.
 */
static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
  x->act_zbin_adj = *(x->mb_activity_ptr);
#else
  int64_t a;
  int64_t b;
  int64_t act = *(x->mb_activity_ptr);

  /* Derive the zbin adjustment from the MB activity. */
  a = act + 4 * cpi->activity_avg;
  b = 4 * act + cpi->activity_avg;
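  /* act_zbin_adj is the rounded ratio of the two 4:1 weighted blends,
   * offset so an MB of exactly average activity maps to 0: more active MBs
   * get a positive adjustment (a wider zero bin, hence coarser
   * quantization) and flatter MBs a negative one.
   */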

  if (act > cpi->activity_avg) {
    x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1;
  } else {
    x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b);
  }
#endif
}

int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
                                  TOKENEXTRA **t) {
  MACROBLOCKD *xd = &x->e_mbd;
  int rate;

  if (cpi->sf.RD && cpi->compressor_speed != 2) {
    vp8_rd_pick_intra_mode(x, &rate);
  } else {
    vp8_pick_intra_mode(x, &rate);
  }

  if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    adjust_act_zbin(cpi, x);
    vp8_update_zbin_extra(cpi, x);
  }

  if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED) {
    vp8_encode_intra4x4mby(x);
  } else {
    vp8_encode_intra16x16mby(x);
  }

  vp8_encode_intra16x16mbuv(x);

  sum_intra_stats(cpi, x);

  vp8_tokenize_mb(cpi, x, t);

  if (xd->mode_info_context->mbmi.mode != B_PRED) vp8_inverse_transform_mby(xd);

  vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
                                xd->dst.u_buffer, xd->dst.v_buffer,
                                xd->dst.uv_stride, xd->eobs + 16);
  return rate;
}
#ifdef SPEEDSTATS
extern int cnt_pm;
#endif

extern void vp8_fix_contexts(MACROBLOCKD *x);

int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
                                  int recon_yoffset, int recon_uvoffset,
                                  int mb_row, int mb_col) {
  MACROBLOCKD *const xd = &x->e_mbd;
  int intra_error = 0;
  int rate;
  int distortion;

  x->skip = 0;

  if (xd->segmentation_enabled) {
    x->encode_breakout =
        cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
  } else {
    x->encode_breakout = cpi->oxcf.encode_breakout;
  }

#if CONFIG_TEMPORAL_DENOISING
  /* Reset the best sse mode/mv for each macroblock. */
  x->best_reference_frame = INTRA_FRAME;
  x->best_zeromv_reference_frame = INTRA_FRAME;
  x->best_sse_inter_mode = 0;
  x->best_sse_mv.as_int = 0;
  x->need_to_clamp_best_mvs = 0;
#endif

  if (cpi->sf.RD) {
    int zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;

    /* Are we using the fast quantizer for the mode selection? */
    if (cpi->sf.use_fastquant_for_pick) {
      x->quantize_b = vp8_fast_quantize_b;

      /* the fast quantizer does not use zbin_extra, so
       * do not recalculate */
      x->zbin_mode_boost_enabled = 0;
    }
    vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
                           &distortion, &intra_error, mb_row, mb_col);

    /* switch back to the regular quantizer for the encode */
    if (cpi->sf.improved_quant) {
      x->quantize_b = vp8_regular_quantize_b;
    }

    /* restore x->zbin_mode_boost_enabled */
    x->zbin_mode_boost_enabled = zbin_mode_boost_enabled;

  } else {
    vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
                        &distortion, &intra_error, mb_row, mb_col);
  }

  x->prediction_error += distortion;
  x->intra_error += intra_error;

  if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    /* Adjust the zbin based on this MB rate. */
    adjust_act_zbin(cpi, x);
  }

#if 0
    /* Experimental RD code */
    cpi->frame_distortion += distortion;
    cpi->last_mb_distortion = distortion;
#endif

  /* MB level adjustment to quantizer setup */
  if (xd->segmentation_enabled) {
    /* If cyclic update enabled */
    if (cpi->current_layer == 0 && cpi->cyclic_refresh_mode_enabled) {
      /* Clear segment_id back to 0 if not coded (last frame 0,0) */
      if ((xd->mode_info_context->mbmi.segment_id == 1) &&
          ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) ||
           (xd->mode_info_context->mbmi.mode != ZEROMV))) {
        xd->mode_info_context->mbmi.segment_id = 0;

        /* segment_id changed, so update */
        vp8cx_mb_init_quantizer(cpi, x, 1);
      }
    }
  }

  {
    /* Experimental code.
     * Special case for gf and arf zeromv modes, for 1 temporal layer.
     * Increase zbin size to suppress noise.
     */
    x->zbin_mode_boost = 0;
    if (x->zbin_mode_boost_enabled) {
      if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
        if (xd->mode_info_context->mbmi.mode == ZEROMV) {
          if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME &&
              cpi->oxcf.number_of_layers == 1) {
            x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
          } else {
            x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
          }
        } else if (xd->mode_info_context->mbmi.mode == SPLITMV) {
          x->zbin_mode_boost = 0;
        } else {
          x->zbin_mode_boost = MV_ZBIN_BOOST;
        }
      }
    }

    /* The fast quantizer doesn't use zbin_extra, only do so with
     * the regular quantizer. */
    if (cpi->sf.improved_quant) vp8_update_zbin_extra(cpi, x);
  }

  x->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame]++;

  if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
    vp8_encode_intra16x16mbuv(x);

    if (xd->mode_info_context->mbmi.mode == B_PRED) {
      vp8_encode_intra4x4mby(x);
    } else {
      vp8_encode_intra16x16mby(x);
    }

    sum_intra_stats(cpi, x);
  } else {
    int ref_fb_idx;

    if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
      ref_fb_idx = cpi->common.lst_fb_idx;
    } else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) {
      ref_fb_idx = cpi->common.gld_fb_idx;
    } else {
      ref_fb_idx = cpi->common.alt_fb_idx;
    }

    xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
    xd->pre.u_buffer =
        cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
    xd->pre.v_buffer =
        cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;

    if (!x->skip) {
      vp8_encode_inter16x16(x);
    } else {
      vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, xd->dst.u_buffer,
                                         xd->dst.v_buffer, xd->dst.y_stride,
                                         xd->dst.uv_stride);
    }
  }

  if (!x->skip) {
    vp8_tokenize_mb(cpi, x, t);

    if (xd->mode_info_context->mbmi.mode != B_PRED) {
      vp8_inverse_transform_mby(xd);
    }

    vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
                                  xd->dst.u_buffer, xd->dst.v_buffer,
                                  xd->dst.uv_stride, xd->eobs + 16);
  } else {
    /* always set mb_skip_coeff as it is needed by the loopfilter */
    xd->mode_info_context->mbmi.mb_skip_coeff = 1;

    if (cpi->common.mb_no_coeff_skip) {
      x->skip_true_count++;
      vp8_fix_contexts(xd);
    } else {
      vp8_stuff_mb(cpi, x, t);
    }
  }

  return rate;
}