/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "vp8/common/header.h"
#include "encodemv.h"
#include "vp8/common/entropymode.h"
#include "vp8/common/findnearmv.h"
#include "mcomp.h"
#include "vp8/common/systemdependent.h"
#include <assert.h>
#include <stdio.h>
#include <limits.h>
#include "vpx/vpx_encoder.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/compiler_attributes.h"
#include "vpx_ports/system_state.h"
#include "bitstream.h"

#include "defaultcoefcounts.h"
#include "vp8/common/common.h"

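/* Baseline probability that a macroblock is not coded as skipped (i.e. has
 * at least one non-zero coefficient), indexed by quantizer index 0..127.
 */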
const int vp8cx_base_skip_false_prob[128] = {
  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 251, 248, 244, 240,
  236, 232, 229, 225, 221, 217, 213, 208, 204, 199, 194, 190, 187, 183, 179,
  175, 172, 168, 164, 160, 157, 153, 149, 145, 142, 138, 134, 130, 127, 124,
  120, 117, 114, 110, 107, 104, 101, 98,  95,  92,  89,  86,  83,  80,  77,
  74,  71,  68,  65,  62,  59,  56,  53,  50,  47,  44,  41,  38,  35,  32,
  30,  28,  26,  24,  22,  20,  18,  16,
};

#if defined(SECTIONBITS_OUTPUT)
unsigned __int64 Sectionbits[500];
#endif

#ifdef MODE_STATS
int count_mb_seg[4] = { 0, 0, 0, 0 };
#endif

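/* Compare the cost of the current mode probabilities against probabilities
 * derived from this frame's counts; write the new set (preceded by a flag
 * bit) only when doing so is cheaper overall.
 */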
static void update_mode(vp8_writer *const w, int n, vp8_token tok[/* n */],
                        vp8_tree tree, vp8_prob Pnew[/* n-1 */],
                        vp8_prob Pcur[/* n-1 */],
                        unsigned int bct[/* n-1 */][2],
                        const unsigned int num_events[/* n */]) {
  unsigned int new_b = 0, old_b = 0;
  int i = 0;

  vp8_tree_probs_from_distribution(n--, tok, tree, Pnew, bct, num_events, 256,
                                   1);

  do {
    new_b += vp8_cost_branch(bct[i], Pnew[i]);
    old_b += vp8_cost_branch(bct[i], Pcur[i]);
  } while (++i < n);

  if (new_b + (n << 8) < old_b) {
    int j = 0;

    vp8_write_bit(w, 1);

    do {
      const vp8_prob p = Pnew[j];

      vp8_write_literal(w, Pcur[j] = p ? p : 1, 8);
    } while (++j < n);
  } else
    vp8_write_bit(w, 0);
}

static void update_mbintra_mode_probs(VP8_COMP *cpi) {
  VP8_COMMON *const x = &cpi->common;

  vp8_writer *const w = cpi->bc;

  {
    vp8_prob Pnew[VP8_YMODES - 1];
    unsigned int bct[VP8_YMODES - 1][2];

    update_mode(w, VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree, Pnew,
                x->fc.ymode_prob, bct, (unsigned int *)cpi->mb.ymode_count);
  }
  {
    vp8_prob Pnew[VP8_UV_MODES - 1];
    unsigned int bct[VP8_UV_MODES - 1][2];

    update_mode(w, VP8_UV_MODES, vp8_uv_mode_encodings, vp8_uv_mode_tree, Pnew,
                x->fc.uv_mode_prob, bct, (unsigned int *)cpi->mb.uv_mode_count);
  }
}

static void write_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
  vp8_write_token(bc, vp8_ymode_tree, p, vp8_ymode_encodings + m);
}

static void kfwrite_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
  vp8_write_token(bc, vp8_kf_ymode_tree, p, vp8_kf_ymode_encodings + m);
}

static void write_uv_mode(vp8_writer *bc, int m, const vp8_prob *p) {
  vp8_write_token(bc, vp8_uv_mode_tree, p, vp8_uv_mode_encodings + m);
}

static void write_bmode(vp8_writer *bc, int m, const vp8_prob *p) {
  vp8_write_token(bc, vp8_bmode_tree, p, vp8_bmode_encodings + m);
}

static void write_split(vp8_writer *bc, int x) {
  vp8_write_token(bc, vp8_mbsplit_tree, vp8_mbsplit_probs,
                  vp8_mbsplit_encodings + x);
}

void VPX_NO_UNSIGNED_SHIFT_CHECK vp8_pack_tokens(vp8_writer *w,
                                                 const TOKENEXTRA *p,
                                                 int xcount) {
  const TOKENEXTRA *stop = p + xcount;
  unsigned int split;
  int shift;
  int count = w->count;
  unsigned int range = w->range;
  unsigned int lowvalue = w->lowvalue;

  while (p < stop) {
    const int t = p->Token;
    vp8_token *a = vp8_coef_encodings + t;
    const vp8_extra_bit_struct *b = vp8_extra_bits + t;
    int i = 0;
    const unsigned char *pp = p->context_tree;
    int v = a->value;
    int n = a->Len;

    if (p->skip_eob_node) {
      n--;
      i = 2;
    }

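    /* Arithmetic-code the token one tree branch at a time; the boolean
     * encoder is inlined here rather than calling out per bit.
     */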
    do {
      const int bb = (v >> --n) & 1;
      split = 1 + (((range - 1) * pp[i >> 1]) >> 8);
      i = vp8_coef_tree[i + bb];

      if (bb) {
        lowvalue += split;
        range = range - split;
      } else {
        range = split;
      }

      shift = vp8_norm[range];
      range <<= shift;
      count += shift;

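      /* A byte of output is ready: propagate any carry through trailing 0xff
       * bytes already in the buffer, then emit it.
       */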
      if (count >= 0) {
        int offset = shift - count;

        if ((lowvalue << (offset - 1)) & 0x80000000) {
          int x = w->pos - 1;

          while (x >= 0 && w->buffer[x] == 0xff) {
            w->buffer[x] = (unsigned char)0;
            x--;
          }

          w->buffer[x] += 1;
        }

        validate_buffer(w->buffer + w->pos, 1, w->buffer_end, w->error);

        w->buffer[w->pos++] = (lowvalue >> (24 - offset)) & 0xff;
        shift = count;
        lowvalue = (int)(((uint64_t)lowvalue << offset) & 0xffffff);
        count -= 8;
      }

      lowvalue <<= shift;
    } while (n);

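    /* Tokens with a base value carry extra information: the magnitude
     * residual (Extra >> 1) is coded with the per-category tree below, then
     * the sign bit (Extra & 1) at probability one half.
     */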
    if (b->base_val) {
      const int e = p->Extra, L = b->Len;

      if (L) {
        const unsigned char *proba = b->prob;
        const int v2 = e >> 1;
        int n2 = L; /* number of bits in v2, assumed nonzero */
        i = 0;

        do {
          const int bb = (v2 >> --n2) & 1;
          split = 1 + (((range - 1) * proba[i >> 1]) >> 8);
          i = b->tree[i + bb];

          if (bb) {
            lowvalue += split;
            range = range - split;
          } else {
            range = split;
          }

          shift = vp8_norm[range];
          range <<= shift;
          count += shift;

          if (count >= 0) {
            int offset = shift - count;

            if ((lowvalue << (offset - 1)) & 0x80000000) {
              int x = w->pos - 1;

              while (x >= 0 && w->buffer[x] == 0xff) {
                w->buffer[x] = (unsigned char)0;
                x--;
              }

              w->buffer[x] += 1;
            }

            validate_buffer(w->buffer + w->pos, 1, w->buffer_end, w->error);

            w->buffer[w->pos++] = (lowvalue >> (24 - offset)) & 0xff;
            shift = count;
            lowvalue = (int)(((uint64_t)lowvalue << offset) & 0xffffff);
            count -= 8;
          }

          lowvalue <<= shift;
        } while (n2);
      }

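      /* Code the sign bit (Extra & 1) with probability one half. */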
      {
        split = (range + 1) >> 1;

        if (e & 1) {
          lowvalue += split;
          range = range - split;
        } else {
          range = split;
        }

        range <<= 1;

        if ((lowvalue & 0x80000000)) {
          int x = w->pos - 1;

          while (x >= 0 && w->buffer[x] == 0xff) {
            w->buffer[x] = (unsigned char)0;
            x--;
          }

          w->buffer[x] += 1;
        }

        lowvalue <<= 1;

        if (!++count) {
          count = -8;

          validate_buffer(w->buffer + w->pos, 1, w->buffer_end, w->error);

          w->buffer[w->pos++] = (lowvalue >> 24);
          lowvalue &= 0xffffff;
        }
      }
    }

    ++p;
  }

  w->count = count;
  w->lowvalue = lowvalue;
  w->range = range;
}

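/* Store a partition size as a 24-bit little-endian value. */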
static void write_partition_size(unsigned char *cx_data, int size) {
  signed char csize;

  csize = size & 0xff;
  *cx_data = csize;
  csize = (size >> 8) & 0xff;
  *(cx_data + 1) = csize;
  csize = (size >> 16) & 0xff;
  *(cx_data + 2) = csize;
}

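/* Pack the coefficient tokens into num_part partitions: partition i takes
 * every num_part-th macroblock row, starting at row i.
 */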
static void pack_tokens_into_partitions(VP8_COMP *cpi, unsigned char *cx_data,
                                        unsigned char *cx_data_end,
                                        int num_part) {
  int i;
  unsigned char *ptr = cx_data;
  unsigned char *ptr_end = cx_data_end;
  vp8_writer *w;

  for (i = 0; i < num_part; ++i) {
    int mb_row;

    w = cpi->bc + i + 1;

    vp8_start_encode(w, ptr, ptr_end);

    for (mb_row = i; mb_row < cpi->common.mb_rows; mb_row += num_part) {
      const TOKENEXTRA *p = cpi->tplist[mb_row].start;
      const TOKENEXTRA *stop = cpi->tplist[mb_row].stop;
      int tokens = (int)(stop - p);

      vp8_pack_tokens(w, p, tokens);
    }

    vp8_stop_encode(w);
    ptr += w->pos;
  }
}

#if CONFIG_MULTITHREAD
static void pack_mb_row_tokens(VP8_COMP *cpi, vp8_writer *w) {
  int mb_row;

  for (mb_row = 0; mb_row < cpi->common.mb_rows; ++mb_row) {
    const TOKENEXTRA *p = cpi->tplist[mb_row].start;
    const TOKENEXTRA *stop = cpi->tplist[mb_row].stop;
    int tokens = (int)(stop - p);

    vp8_pack_tokens(w, p, tokens);
  }
}
#endif  // CONFIG_MULTITHREAD

static void write_mv_ref(vp8_writer *w, MB_PREDICTION_MODE m,
                         const vp8_prob *p) {
  assert(NEARESTMV <= m && m <= SPLITMV);
  vp8_write_token(w, vp8_mv_ref_tree, p,
                  vp8_mv_ref_encoding_array + (m - NEARESTMV));
}

static void write_sub_mv_ref(vp8_writer *w, B_PREDICTION_MODE m,
                             const vp8_prob *p) {
  assert(LEFT4X4 <= m && m <= NEW4X4);
  vp8_write_token(w, vp8_sub_mv_ref_tree, p,
                  vp8_sub_mv_ref_encoding_array + (m - LEFT4X4));
}

static void write_mv(vp8_writer *w, const MV *mv, const int_mv *ref,
                     const MV_CONTEXT *mvc) {
  MV e;
  e.row = mv->row - ref->as_mv.row;
  e.col = mv->col - ref->as_mv.col;

  vp8_encode_motion_vector(w, &e, mvc);
}

static void write_mb_features(vp8_writer *w, const MB_MODE_INFO *mi,
                              const MACROBLOCKD *x) {
  /* Encode the MB segment id. */
  if (x->segmentation_enabled && x->update_mb_segmentation_map) {
    switch (mi->segment_id) {
      case 0:
        vp8_write(w, 0, x->mb_segment_tree_probs[0]);
        vp8_write(w, 0, x->mb_segment_tree_probs[1]);
        break;
      case 1:
        vp8_write(w, 0, x->mb_segment_tree_probs[0]);
        vp8_write(w, 1, x->mb_segment_tree_probs[1]);
        break;
      case 2:
        vp8_write(w, 1, x->mb_segment_tree_probs[0]);
        vp8_write(w, 0, x->mb_segment_tree_probs[2]);
        break;
      case 3:
        vp8_write(w, 1, x->mb_segment_tree_probs[0]);
        vp8_write(w, 1, x->mb_segment_tree_probs[2]);
        break;

      /* TRAP.. This should not happen */
      default:
        vp8_write(w, 0, x->mb_segment_tree_probs[0]);
        vp8_write(w, 0, x->mb_segment_tree_probs[1]);
        break;
    }
  }
}

void vp8_convert_rfct_to_prob(VP8_COMP *const cpi) {
  const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
  const int rf_intra = rfct[INTRA_FRAME];
  const int rf_inter =
      rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];

  /* Calculate the probabilities used to code the ref frame based on usage */
  if (!(cpi->prob_intra_coded = rf_intra * 255 / (rf_intra + rf_inter))) {
    cpi->prob_intra_coded = 1;
  }

  cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;

  if (!cpi->prob_last_coded) cpi->prob_last_coded = 1;

  cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
                           ? (rfct[GOLDEN_FRAME] * 255) /
                                 (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
                           : 128;

  if (!cpi->prob_gf_coded) cpi->prob_gf_coded = 1;
}

static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
  VP8_COMMON *const pc = &cpi->common;
  vp8_writer *const w = cpi->bc;
  const MV_CONTEXT *mvc = pc->fc.mvc;

  MODE_INFO *m = pc->mi;
  const int mis = pc->mode_info_stride;
  int mb_row = -1;

  int prob_skip_false = 0;

  cpi->mb.partition_info = cpi->mb.pi;

  vp8_convert_rfct_to_prob(cpi);

  if (pc->mb_no_coeff_skip) {
    int total_mbs = pc->mb_rows * pc->mb_cols;

    prob_skip_false = (total_mbs - cpi->mb.skip_true_count) * 256 / total_mbs;

    if (prob_skip_false <= 1) prob_skip_false = 1;

    if (prob_skip_false > 255) prob_skip_false = 255;

    cpi->prob_skip_false = prob_skip_false;
    vp8_write_literal(w, prob_skip_false, 8);
  }

  vp8_write_literal(w, cpi->prob_intra_coded, 8);
  vp8_write_literal(w, cpi->prob_last_coded, 8);
  vp8_write_literal(w, cpi->prob_gf_coded, 8);

  update_mbintra_mode_probs(cpi);

  vp8_write_mvprobs(cpi);

  while (++mb_row < pc->mb_rows) {
    int mb_col = -1;

    while (++mb_col < pc->mb_cols) {
      const MB_MODE_INFO *const mi = &m->mbmi;
      const MV_REFERENCE_FRAME rf = mi->ref_frame;
      const MB_PREDICTION_MODE mode = mi->mode;

      MACROBLOCKD *xd = &cpi->mb.e_mbd;

      /* Distance of the MB to the various image edges.
       * These are specified in 1/8th-pel units as they are always compared
       * to MV values that are in 1/8th-pel units.
       */
      xd->mb_to_left_edge = -((mb_col * 16) << 3);
      xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
      xd->mb_to_top_edge = -((mb_row * 16) << 3);
      xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;

      if (cpi->mb.e_mbd.update_mb_segmentation_map) {
        write_mb_features(w, mi, &cpi->mb.e_mbd);
      }

      if (pc->mb_no_coeff_skip) {
        vp8_encode_bool(w, m->mbmi.mb_skip_coeff, prob_skip_false);
      }

      if (rf == INTRA_FRAME) {
        vp8_write(w, 0, cpi->prob_intra_coded);
        write_ymode(w, mode, pc->fc.ymode_prob);

        if (mode == B_PRED) {
          int j = 0;

          do {
            write_bmode(w, m->bmi[j].as_mode, pc->fc.bmode_prob);
          } while (++j < 16);
        }

        write_uv_mode(w, mi->uv_mode, pc->fc.uv_mode_prob);
      } else { /* inter coded */
        int_mv best_mv;
        vp8_prob mv_ref_p[VP8_MVREFS - 1];

        vp8_write(w, 1, cpi->prob_intra_coded);

        if (rf == LAST_FRAME)
          vp8_write(w, 0, cpi->prob_last_coded);
        else {
          vp8_write(w, 1, cpi->prob_last_coded);
          vp8_write(w, (rf == GOLDEN_FRAME) ? 0 : 1, cpi->prob_gf_coded);
        }

        {
          int_mv n1, n2;
          int ct[4];

          vp8_find_near_mvs(xd, m, &n1, &n2, &best_mv, ct, rf,
                            cpi->common.ref_frame_sign_bias);
          vp8_clamp_mv2(&best_mv, xd);

          vp8_mv_ref_probs(mv_ref_p, ct);
        }

        write_mv_ref(w, mode, mv_ref_p);

        switch (mode) /* new, split require MVs */
        {
          case NEWMV: write_mv(w, &mi->mv.as_mv, &best_mv, mvc); break;

          case SPLITMV: {
            int j = 0;

#ifdef MODE_STATS
            ++count_mb_seg[mi->partitioning];
#endif

            write_split(w, mi->partitioning);

            do {
              B_PREDICTION_MODE blockmode;
              int_mv blockmv;
              const int *const L = vp8_mbsplits[mi->partitioning];
              int k = -1; /* first block in subset j */
              int mv_contz;
              int_mv leftmv, abovemv;

              blockmode = cpi->mb.partition_info->bmi[j].mode;
              blockmv = cpi->mb.partition_info->bmi[j].mv;
              while (j != L[++k]) {
                assert(k < 16);
              }
              leftmv.as_int = left_block_mv(m, k);
              abovemv.as_int = above_block_mv(m, k, mis);
              mv_contz = vp8_mv_cont(&leftmv, &abovemv);

              write_sub_mv_ref(w, blockmode, vp8_sub_mv_ref_prob2[mv_contz]);

              if (blockmode == NEW4X4) {
                write_mv(w, &blockmv.as_mv, &best_mv, (const MV_CONTEXT *)mvc);
              }
            } while (++j < cpi->mb.partition_info->count);
            break;
          }
          default: break;
        }
      }

      ++m;
      cpi->mb.partition_info++;
    }

    ++m; /* skip L prediction border */
    cpi->mb.partition_info++;
  }
}

static void write_kfmodes(VP8_COMP *cpi) {
  vp8_writer *const bc = cpi->bc;
  const VP8_COMMON *const c = &cpi->common;
  /* const */
  MODE_INFO *m = c->mi;

  int mb_row = -1;
  int prob_skip_false = 0;

  if (c->mb_no_coeff_skip) {
    int total_mbs = c->mb_rows * c->mb_cols;

    prob_skip_false = (total_mbs - cpi->mb.skip_true_count) * 256 / total_mbs;

    if (prob_skip_false <= 1) prob_skip_false = 1;

    if (prob_skip_false >= 255) prob_skip_false = 255;

    cpi->prob_skip_false = prob_skip_false;
    vp8_write_literal(bc, prob_skip_false, 8);
  }

  while (++mb_row < c->mb_rows) {
    int mb_col = -1;

    while (++mb_col < c->mb_cols) {
      const int ym = m->mbmi.mode;

      if (cpi->mb.e_mbd.update_mb_segmentation_map) {
        write_mb_features(bc, &m->mbmi, &cpi->mb.e_mbd);
      }

      if (c->mb_no_coeff_skip) {
        vp8_encode_bool(bc, m->mbmi.mb_skip_coeff, prob_skip_false);
      }

      kfwrite_ymode(bc, ym, vp8_kf_ymode_prob);

      if (ym == B_PRED) {
        const int mis = c->mode_info_stride;
        int i = 0;

        do {
          const B_PREDICTION_MODE A = above_block_mode(m, i, mis);
          const B_PREDICTION_MODE L = left_block_mode(m, i);
          const int bm = m->bmi[i].as_mode;

          write_bmode(bc, bm, vp8_kf_bmode_prob[A][L]);
        } while (++i < 16);
      }

      write_uv_mode(bc, (m++)->mbmi.uv_mode, vp8_kf_uv_mode_prob);
    }

    m++; /* skip L prediction border */
  }
}

#if 0
/* This function is used for debugging probability trees. */
static void print_prob_tree(vp8_prob
     coef_probs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES])
{
    /* print coef probability tree */
    int i, j, k, l;
    FILE *f = fopen("enc_tree_probs.txt", "a");
    fprintf(f, "{\n");
    for (i = 0; i < BLOCK_TYPES; ++i)
    {
        fprintf(f, "  {\n");
        for (j = 0; j < COEF_BANDS; ++j)
        {
            fprintf(f, "    {\n");
            for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
            {
                fprintf(f, "      {");
                for (l = 0; l < ENTROPY_NODES; ++l)
                {
                    fprintf(f, "%3u, ",
                            (unsigned int)(coef_probs[i][j][k][l]));
                }
                fprintf(f, " }\n");
            }
            fprintf(f, "    }\n");
        }
        fprintf(f, "  }\n");
    }
    fprintf(f, "}\n");
    fclose(f);
}
#endif

static void sum_probs_over_prev_coef_context(
    const unsigned int probs[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS],
    unsigned int *out) {
  int i, j;
  for (i = 0; i < MAX_ENTROPY_TOKENS; ++i) {
    for (j = 0; j < PREV_COEF_CONTEXTS; ++j) {
      const unsigned int tmp = out[i];
      out[i] += probs[j][i];
      /* check for wrap */
      if (out[i] < tmp) out[i] = UINT_MAX;
    }
  }
}

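/* Estimated bit savings (possibly negative) from replacing branch probability
 * oldp with newp, including the cost of signalling the update itself.
 */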
static int prob_update_savings(const unsigned int *ct, const vp8_prob oldp,
                               const vp8_prob newp, const vp8_prob upd) {
  const int old_b = vp8_cost_branch(ct, oldp);
  const int new_b = vp8_cost_branch(ct, newp);
  const int update_b = 8 + ((vp8_cost_one(upd) - vp8_cost_zero(upd)) >> 8);

  return old_b - new_b - update_b;
}

static int independent_coef_context_savings(VP8_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  int savings = 0;
  int i = 0;
  do {
    int j = 0;
    do {
      int k = 0;
      unsigned int prev_coef_count_sum[MAX_ENTROPY_TOKENS] = { 0 };
      int prev_coef_savings[MAX_ENTROPY_TOKENS] = { 0 };
      const unsigned int(*probs)[MAX_ENTROPY_TOKENS];
      /* Calculate new probabilities given the constraint that
       * they must be equal over the prev coef contexts
       */

      probs = (const unsigned int(*)[MAX_ENTROPY_TOKENS])x->coef_counts[i][j];

      /* Reset to default probabilities at key frames */
      if (cpi->common.frame_type == KEY_FRAME) {
        probs = default_coef_counts[i][j];
      }

      sum_probs_over_prev_coef_context(probs, prev_coef_count_sum);

      do {
        /* at every context */

        /* calc probs and branch cts for this frame only */
        int t = 0; /* token/prob index */

        vp8_tree_probs_from_distribution(
            MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
            cpi->frame_coef_probs[i][j][k], cpi->frame_branch_ct[i][j][k],
            prev_coef_count_sum, 256, 1);

        do {
          const unsigned int *ct = cpi->frame_branch_ct[i][j][k][t];
          const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t];
          const vp8_prob oldp = cpi->common.fc.coef_probs[i][j][k][t];
          const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];
          const int s = prob_update_savings(ct, oldp, newp, upd);

          if (cpi->common.frame_type != KEY_FRAME ||
              (cpi->common.frame_type == KEY_FRAME && newp != oldp)) {
            prev_coef_savings[t] += s;
          }
        } while (++t < ENTROPY_NODES);
      } while (++k < PREV_COEF_CONTEXTS);
      k = 0;
      do {
        /* We only update probabilities if we can save bits, except
         * for key frames where we have to update all probabilities
         * to get the equal probabilities across the prev coef
         * contexts.
         */
        if (prev_coef_savings[k] > 0 || cpi->common.frame_type == KEY_FRAME) {
          savings += prev_coef_savings[k];
        }
      } while (++k < ENTROPY_NODES);
    } while (++j < COEF_BANDS);
  } while (++i < BLOCK_TYPES);
  return savings;
}

static int default_coef_context_savings(VP8_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  int savings = 0;
  int i = 0;
  do {
    int j = 0;
    do {
      int k = 0;
      do {
        /* at every context */

        /* calc probs and branch cts for this frame only */
        int t = 0; /* token/prob index */

        vp8_tree_probs_from_distribution(
            MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
            cpi->frame_coef_probs[i][j][k], cpi->frame_branch_ct[i][j][k],
            x->coef_counts[i][j][k], 256, 1);

        do {
          const unsigned int *ct = cpi->frame_branch_ct[i][j][k][t];
          const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t];
          const vp8_prob oldp = cpi->common.fc.coef_probs[i][j][k][t];
          const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];
          const int s = prob_update_savings(ct, oldp, newp, upd);

          if (s > 0) {
            savings += s;
          }
        } while (++t < ENTROPY_NODES);
      } while (++k < PREV_COEF_CONTEXTS);
    } while (++j < COEF_BANDS);
  } while (++i < BLOCK_TYPES);
  return savings;
}

void vp8_calc_ref_frame_costs(int *ref_frame_cost, int prob_intra,
                              int prob_last, int prob_garf) {
  assert(prob_intra >= 0);
  assert(prob_intra <= 255);
  assert(prob_last >= 0);
  assert(prob_last <= 255);
  assert(prob_garf >= 0);
  assert(prob_garf <= 255);
  ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(prob_intra);
  ref_frame_cost[LAST_FRAME] =
      vp8_cost_one(prob_intra) + vp8_cost_zero(prob_last);
  ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(prob_intra) +
                                 vp8_cost_one(prob_last) +
                                 vp8_cost_zero(prob_garf);
  ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(prob_intra) +
                                 vp8_cost_one(prob_last) +
                                 vp8_cost_one(prob_garf);
}

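/* Estimate the total bits saved by updating the reference frame and
 * coefficient probabilities for the current frame.
 */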
int vp8_estimate_entropy_savings(VP8_COMP *cpi) {
  int savings = 0;

  const int *const rfct = cpi->mb.count_mb_ref_frame_usage;
  const int rf_intra = rfct[INTRA_FRAME];
  const int rf_inter =
      rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
  int new_intra, new_last, new_garf, oldtotal, newtotal;
  int ref_frame_cost[MAX_REF_FRAMES];

  vpx_clear_system_state();

  if (cpi->common.frame_type != KEY_FRAME) {
    if (!(new_intra = rf_intra * 255 / (rf_intra + rf_inter))) new_intra = 1;

    new_last = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;

    new_garf = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
                   ? (rfct[GOLDEN_FRAME] * 255) /
                         (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
                   : 128;

    vp8_calc_ref_frame_costs(ref_frame_cost, new_intra, new_last, new_garf);

    newtotal = rfct[INTRA_FRAME] * ref_frame_cost[INTRA_FRAME] +
               rfct[LAST_FRAME] * ref_frame_cost[LAST_FRAME] +
               rfct[GOLDEN_FRAME] * ref_frame_cost[GOLDEN_FRAME] +
               rfct[ALTREF_FRAME] * ref_frame_cost[ALTREF_FRAME];

    /* old costs */
    vp8_calc_ref_frame_costs(ref_frame_cost, cpi->prob_intra_coded,
                             cpi->prob_last_coded, cpi->prob_gf_coded);

    oldtotal = rfct[INTRA_FRAME] * ref_frame_cost[INTRA_FRAME] +
               rfct[LAST_FRAME] * ref_frame_cost[LAST_FRAME] +
               rfct[GOLDEN_FRAME] * ref_frame_cost[GOLDEN_FRAME] +
               rfct[ALTREF_FRAME] * ref_frame_cost[ALTREF_FRAME];

    savings += (oldtotal - newtotal) / 256;
  }

  if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
    savings += independent_coef_context_savings(cpi);
  } else {
    savings += default_coef_context_savings(cpi);
  }

  return savings;
}

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
int vp8_update_coef_context(VP8_COMP *cpi) {
  int savings = 0;

  if (cpi->common.frame_type == KEY_FRAME) {
    /* Reset to default counts/probabilities at key frames */
    vp8_copy(cpi->mb.coef_counts, default_coef_counts);
  }

  if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS)
    savings += independent_coef_context_savings(cpi);
  else
    savings += default_coef_context_savings(cpi);

  return savings;
}
#endif

void vp8_update_coef_probs(VP8_COMP *cpi) {
  int i = 0;
#if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
  vp8_writer *const w = cpi->bc;
#endif

  vpx_clear_system_state();

  do {
    int j = 0;

    do {
      int k = 0;
      int prev_coef_savings[ENTROPY_NODES] = { 0 };
      if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
        for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
          int t; /* token/prob index */
          for (t = 0; t < ENTROPY_NODES; ++t) {
            const unsigned int *ct = cpi->frame_branch_ct[i][j][k][t];
            const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t];
            const vp8_prob oldp = cpi->common.fc.coef_probs[i][j][k][t];
            const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];

            prev_coef_savings[t] += prob_update_savings(ct, oldp, newp, upd);
          }
        }
        k = 0;
      }
      do {
        /* note: use result from vp8_estimate_entropy_savings, so no
         * need to call vp8_tree_probs_from_distribution here.
         */

        /* at every context */

        /* calc probs and branch cts for this frame only */
        int t = 0; /* token/prob index */

        do {
          const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t];

          vp8_prob *Pold = cpi->common.fc.coef_probs[i][j][k] + t;
          const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];

          int s = prev_coef_savings[t];
          int u = 0;

          if (!(cpi->oxcf.error_resilient_mode &
                VPX_ERROR_RESILIENT_PARTITIONS)) {
            s = prob_update_savings(cpi->frame_branch_ct[i][j][k][t], *Pold,
                                    newp, upd);
          }

          if (s > 0) u = 1;

          /* Force updates on key frames if the new is different,
           * so that we can be sure we end up with equal probabilities
           * over the prev coef contexts.
           */
          if ((cpi->oxcf.error_resilient_mode &
               VPX_ERROR_RESILIENT_PARTITIONS) &&
              cpi->common.frame_type == KEY_FRAME && newp != *Pold) {
            u = 1;
          }

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
          cpi->update_probs[i][j][k][t] = u;
#else
          vp8_write(w, u, upd);
#endif

          if (u) {
            /* send/use new probability */

            *Pold = newp;
#if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
            vp8_write_literal(w, newp, 8);
#endif
          }

        } while (++t < ENTROPY_NODES);

      } while (++k < PREV_COEF_CONTEXTS);
    } while (++j < COEF_BANDS);
  } while (++i < BLOCK_TYPES);
}

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
static void pack_coef_probs(VP8_COMP *cpi) {
  int i = 0;
  vp8_writer *const w = cpi->bc;

  do {
    int j = 0;

    do {
      int k = 0;

      do {
        int t = 0; /* token/prob index */

        do {
          const vp8_prob newp = cpi->common.fc.coef_probs[i][j][k][t];
          const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];

          const char u = cpi->update_probs[i][j][k][t];

          vp8_write(w, u, upd);

          if (u) {
            /* send/use new probability */
            vp8_write_literal(w, newp, 8);
          }
        } while (++t < ENTROPY_NODES);
      } while (++k < PREV_COEF_CONTEXTS);
    } while (++j < COEF_BANDS);
  } while (++i < BLOCK_TYPES);
}
#endif

#ifdef PACKET_TESTING
FILE *vpxlogc = 0;
#endif

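/* Write an optional quantizer delta: a presence flag, then a 4-bit magnitude
 * and a sign bit when the delta is non-zero.
 */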
static void put_delta_q(vp8_writer *bc, int delta_q) {
  if (delta_q != 0) {
    vp8_write_bit(bc, 1);
    vp8_write_literal(bc, abs(delta_q), 4);

    if (delta_q < 0)
      vp8_write_bit(bc, 1);
    else
      vp8_write_bit(bc, 0);
  } else
    vp8_write_bit(bc, 0);
}

void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
                        unsigned char *dest_end, size_t *size) {
  int i, j;
  VP8_HEADER oh;
  VP8_COMMON *const pc = &cpi->common;
  vp8_writer *const bc = cpi->bc;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  int extra_bytes_packed = 0;

  unsigned char *cx_data = dest;
  unsigned char *cx_data_end = dest_end;
  const int *mb_feature_data_bits;

  oh.show_frame = (int)pc->show_frame;
  oh.type = (int)pc->frame_type;
  oh.version = pc->version;
  oh.first_partition_length_in_bytes = 0;

  mb_feature_data_bits = vp8_mb_feature_data_bits;

  bc[0].error = &pc->error;

  validate_buffer(cx_data, 3, cx_data_end, &cpi->common.error);
  cx_data += 3;

#if defined(SECTIONBITS_OUTPUT)
  Sectionbits[active_section = 1] += sizeof(VP8_HEADER) * 8 * 256;
#endif

  /* every keyframe send startcode, width, height, scale factor, clamp
   * and color type
   */
  if (oh.type == KEY_FRAME) {
    int v;

    validate_buffer(cx_data, 7, cx_data_end, &cpi->common.error);

    /* Start / synch code */
    cx_data[0] = 0x9D;
    cx_data[1] = 0x01;
    cx_data[2] = 0x2a;

    /* Pack scale and frame size into 16 bits. Store it 8 bits at a time.
     * https://tools.ietf.org/html/rfc6386
     * 9.1. Uncompressed Data Chunk
     * 16 bits : (2 bits Horizontal Scale << 14) | Width (14 bits)
     * 16 bits : (2 bits Vertical Scale << 14) | Height (14 bits)
     */
    v = (pc->horiz_scale << 14) | pc->Width;
    cx_data[3] = v & 0xff;
    cx_data[4] = v >> 8;

    v = (pc->vert_scale << 14) | pc->Height;
    cx_data[5] = v & 0xff;
    cx_data[6] = v >> 8;

    extra_bytes_packed = 7;
    cx_data += extra_bytes_packed;

    vp8_start_encode(bc, cx_data, cx_data_end);

    /* signal clr type */
    vp8_write_bit(bc, 0);
    vp8_write_bit(bc, pc->clamp_type);

  } else {
    vp8_start_encode(bc, cx_data, cx_data_end);
  }

  /* Signal whether or not Segmentation is enabled */
  vp8_write_bit(bc, xd->segmentation_enabled);

  /* Indicate which features are enabled */
  if (xd->segmentation_enabled) {
    /* Signal whether or not the segmentation map is being updated. */
    vp8_write_bit(bc, xd->update_mb_segmentation_map);
    vp8_write_bit(bc, xd->update_mb_segmentation_data);

    if (xd->update_mb_segmentation_data) {
      signed char Data;

      vp8_write_bit(bc, xd->mb_segement_abs_delta);

      /* For each segmentation feature (Quant and loop filter level) */
      for (i = 0; i < MB_LVL_MAX; ++i) {
        /* For each of the segments */
        for (j = 0; j < MAX_MB_SEGMENTS; ++j) {
          Data = xd->segment_feature_data[i][j];

          /* Frame level data */
          if (Data) {
            vp8_write_bit(bc, 1);

            if (Data < 0) {
              Data = -Data;
              vp8_write_literal(bc, Data, mb_feature_data_bits[i]);
              vp8_write_bit(bc, 1);
            } else {
              vp8_write_literal(bc, Data, mb_feature_data_bits[i]);
              vp8_write_bit(bc, 0);
            }
          } else
            vp8_write_bit(bc, 0);
        }
      }
    }

    if (xd->update_mb_segmentation_map) {
      /* Write the probs used to decode the segment id for each mb */
      for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) {
        int Data = xd->mb_segment_tree_probs[i];

        if (Data != 255) {
          vp8_write_bit(bc, 1);
          vp8_write_literal(bc, Data, 8);
        } else
          vp8_write_bit(bc, 0);
      }
    }
  }

  vp8_write_bit(bc, pc->filter_type);
  vp8_write_literal(bc, pc->filter_level, 6);
  vp8_write_literal(bc, pc->sharpness_level, 3);

  /* Write out loop filter deltas applied at the MB level based on mode
   * or ref frame (if they are enabled).
   */
  vp8_write_bit(bc, xd->mode_ref_lf_delta_enabled);

  if (xd->mode_ref_lf_delta_enabled) {
    /* Do the deltas need to be updated */
    int send_update =
        xd->mode_ref_lf_delta_update || cpi->oxcf.error_resilient_mode;

    vp8_write_bit(bc, send_update);
    if (send_update) {
      int Data;

      /* Send update */
      for (i = 0; i < MAX_REF_LF_DELTAS; ++i) {
        Data = xd->ref_lf_deltas[i];

        /* Frame level data */
        if (xd->ref_lf_deltas[i] != xd->last_ref_lf_deltas[i] ||
            cpi->oxcf.error_resilient_mode) {
          xd->last_ref_lf_deltas[i] = xd->ref_lf_deltas[i];
          vp8_write_bit(bc, 1);

          if (Data > 0) {
            vp8_write_literal(bc, (Data & 0x3F), 6);
            vp8_write_bit(bc, 0); /* sign */
          } else {
            Data = -Data;
            vp8_write_literal(bc, (Data & 0x3F), 6);
            vp8_write_bit(bc, 1); /* sign */
          }
        } else
          vp8_write_bit(bc, 0);
      }

      /* Send update */
      for (i = 0; i < MAX_MODE_LF_DELTAS; ++i) {
        Data = xd->mode_lf_deltas[i];

        if (xd->mode_lf_deltas[i] != xd->last_mode_lf_deltas[i] ||
            cpi->oxcf.error_resilient_mode) {
          xd->last_mode_lf_deltas[i] = xd->mode_lf_deltas[i];
          vp8_write_bit(bc, 1);

          if (Data > 0) {
            vp8_write_literal(bc, (Data & 0x3F), 6);
            vp8_write_bit(bc, 0); /* sign */
          } else {
            Data = -Data;
            vp8_write_literal(bc, (Data & 0x3F), 6);
            vp8_write_bit(bc, 1); /* sign */
          }
        } else
          vp8_write_bit(bc, 0);
      }
    }
  }

  /* Signal whether multi-token partitioning is enabled */
  vp8_write_literal(bc, pc->multi_token_partition, 2);

  /* Frame Qbaseline quantizer index */
  vp8_write_literal(bc, pc->base_qindex, 7);

  /* Transmit Dc, Second order and Uv quantizer delta information */
  put_delta_q(bc, pc->y1dc_delta_q);
  put_delta_q(bc, pc->y2dc_delta_q);
  put_delta_q(bc, pc->y2ac_delta_q);
  put_delta_q(bc, pc->uvdc_delta_q);
  put_delta_q(bc, pc->uvac_delta_q);

  /* When there is a key frame all reference buffers are updated using
   * the new key frame
   */
  if (pc->frame_type != KEY_FRAME) {
    /* Should the GF or ARF be updated using the transmitted frame
     * or buffer
     */
    vp8_write_bit(bc, pc->refresh_golden_frame);
    vp8_write_bit(bc, pc->refresh_alt_ref_frame);

    /* If not being updated from current frame should either GF or ARF
     * be updated from another buffer
     */
    if (!pc->refresh_golden_frame)
      vp8_write_literal(bc, pc->copy_buffer_to_gf, 2);

    if (!pc->refresh_alt_ref_frame)
      vp8_write_literal(bc, pc->copy_buffer_to_arf, 2);

    /* Indicate reference frame sign bias for Golden and ARF frames
     * (always 0 for last frame buffer)
     */
    vp8_write_bit(bc, pc->ref_frame_sign_bias[GOLDEN_FRAME]);
    vp8_write_bit(bc, pc->ref_frame_sign_bias[ALTREF_FRAME]);
  }

#if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
  if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
    if (pc->frame_type == KEY_FRAME) {
      pc->refresh_entropy_probs = 1;
    } else {
      pc->refresh_entropy_probs = 0;
    }
  }
#endif

  vp8_write_bit(bc, pc->refresh_entropy_probs);

  if (pc->frame_type != KEY_FRAME) vp8_write_bit(bc, pc->refresh_last_frame);

  vpx_clear_system_state();

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  pack_coef_probs(cpi);
#else
  if (pc->refresh_entropy_probs == 0) {
    /* save a copy for later refresh */
    memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc));
  }

  vp8_update_coef_probs(cpi);
#endif

  /* Write out the mb_no_coeff_skip flag */
  vp8_write_bit(bc, pc->mb_no_coeff_skip);

  if (pc->frame_type == KEY_FRAME) {
    write_kfmodes(cpi);
  } else {
    pack_inter_mode_mvs(cpi);
  }

  vp8_stop_encode(bc);

  cx_data += bc->pos;

  oh.first_partition_length_in_bytes = cpi->bc->pos;

  /* update frame tag */
  {
    /* Pack partition size, show frame, version and frame type into 24 bits.
     * Store it 8 bits at a time.
     * https://tools.ietf.org/html/rfc6386
     * 9.1. Uncompressed Data Chunk
     * The uncompressed data chunk comprises a common (for key frames and
     * interframes) 3-byte frame tag that contains four fields, as follows:
     *
     * 1. A 1-bit frame type (0 for key frames, 1 for interframes).
     *
     * 2. A 3-bit version number (0 - 3 are defined as four different
     *    profiles with different decoding complexity; other values may be
     *    defined for future variants of the VP8 data format).
     *
     * 3. A 1-bit show_frame flag (0 when current frame is not for display,
     *    1 when current frame is for display).
     *
     * 4. A 19-bit field containing the size of the first data partition in
     *    bytes
     */
    int v = (oh.first_partition_length_in_bytes << 5) | (oh.show_frame << 4) |
            (oh.version << 1) | oh.type;

    dest[0] = v & 0xff;
    dest[1] = (v >> 8) & 0xff;
    dest[2] = v >> 16;
  }

  *size = VP8_HEADER_SIZE + extra_bytes_packed + cpi->bc->pos;

  cpi->partition_sz[0] = (unsigned int)*size;

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
  {
    const int num_part = (1 << pc->multi_token_partition);
    unsigned char *dp = cpi->partition_d[0] + cpi->partition_sz[0];

    if (num_part > 1) {
      /* write token part sizes (all but last) if more than 1 */
      validate_buffer(dp, 3 * (num_part - 1), cpi->partition_d_end[0],
                      &pc->error);

      cpi->partition_sz[0] += 3 * (num_part - 1);

      for (i = 1; i < num_part; ++i) {
        write_partition_size(dp, cpi->partition_sz[i]);
        dp += 3;
      }
    }

    if (!cpi->output_partition) {
      /* concatenate partition buffers */
      for (i = 0; i < num_part; ++i) {
        memmove(dp, cpi->partition_d[i + 1], cpi->partition_sz[i + 1]);
        cpi->partition_d[i + 1] = dp;
        dp += cpi->partition_sz[i + 1];
      }
    }

    /* update total size */
    *size = 0;
    for (i = 0; i < num_part + 1; ++i) {
      *size += cpi->partition_sz[i];
    }
  }
#else
  if (pc->multi_token_partition != ONE_PARTITION) {
    int num_part = 1 << pc->multi_token_partition;

    /* partition size table at the end of first partition */
    cpi->partition_sz[0] += 3 * (num_part - 1);
    *size += 3 * (num_part - 1);

    validate_buffer(cx_data, 3 * (num_part - 1), cx_data_end, &pc->error);

    for (i = 1; i < num_part + 1; ++i) {
      cpi->bc[i].error = &pc->error;
    }

    pack_tokens_into_partitions(cpi, cx_data + 3 * (num_part - 1), cx_data_end,
                                num_part);

    for (i = 1; i < num_part; ++i) {
      cpi->partition_sz[i] = cpi->bc[i].pos;
      write_partition_size(cx_data, cpi->partition_sz[i]);
      cx_data += 3;
      *size += cpi->partition_sz[i]; /* add to total */
    }

    /* add last partition to total size */
    cpi->partition_sz[i] = cpi->bc[i].pos;
    *size += cpi->partition_sz[i];
  } else {
    bc[1].error = &pc->error;

    vp8_start_encode(&cpi->bc[1], cx_data, cx_data_end);

#if CONFIG_MULTITHREAD
    if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) {
      pack_mb_row_tokens(cpi, &cpi->bc[1]);
    } else {
      vp8_pack_tokens(&cpi->bc[1], cpi->tok, cpi->tok_count);
    }
#else
    vp8_pack_tokens(&cpi->bc[1], cpi->tok, cpi->tok_count);
#endif  // CONFIG_MULTITHREAD

    vp8_stop_encode(&cpi->bc[1]);

    *size += cpi->bc[1].pos;
    cpi->partition_sz[1] = cpi->bc[1].pos;
  }
#endif
}