/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "./vpx_scale_rtcd.h"

#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/system_state.h"
#include "vpx_ports/vpx_once.h"
#include "vpx_ports/vpx_timer.h"
#include "vpx_scale/vpx_scale.h"
#include "vpx_util/vpx_thread.h"

#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_loopfilter.h"
#include "vp9/common/vp9_onyxc_int.h"
#if CONFIG_VP9_POSTPROC
#include "vp9/common/vp9_postproc.h"
#endif
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"

#include "vp9/decoder/vp9_decodeframe.h"
#include "vp9/decoder/vp9_decoder.h"
#include "vp9/decoder/vp9_detokenize.h"

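// One-time global initialization: fills in the run-time CPU dispatch (RTCD)
// tables and the intra-predictor function pointers. It is invoked through
// once() from vp9_decoder_create(), so it runs at most once per process.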
static void initialize_dec(void) {
  static volatile int init_done = 0;

  if (!init_done) {
    vp9_rtcd();
    vpx_dsp_rtcd();
    vpx_scale_rtcd();
    vp9_init_intra_predictors();
    init_done = 1;
  }
}

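// Points cm->mi and cm->mi_grid_visible one row and one column past the start
// of their allocations so that the border above and to the left of the visible
// frame can be addressed without going out of bounds, then clears the grid of
// MODE_INFO pointers.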
static void vp9_dec_setup_mi(VP9_COMMON *cm) {
  cm->mi = cm->mip + cm->mi_stride + 1;
  cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
  memset(cm->mi_grid_base, 0,
         cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
}

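// Allocates the shared scratch memory used by row-based multithreaded
// decoding: per-plane dequantized-coefficient and end-of-block buffers sized
// per superblock, per-superblock partition and reconstruction-progress maps,
// per-thread worker data, and (when CONFIG_MULTITHREAD is set) one
// mutex/condition-variable pair per job for row synchronization.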
void vp9_dec_alloc_row_mt_mem(RowMTWorkerData *row_mt_worker_data,
                              VP9_COMMON *cm, int num_sbs, int max_threads,
                              int num_jobs) {
  int plane;
  const size_t dqcoeff_size = (num_sbs << DQCOEFFS_PER_SB_LOG2) *
                              sizeof(*row_mt_worker_data->dqcoeff[0]);
  row_mt_worker_data->num_jobs = num_jobs;
#if CONFIG_MULTITHREAD
  {
    int i;
    CHECK_MEM_ERROR(
        &cm->error, row_mt_worker_data->recon_sync_mutex,
        vpx_malloc(sizeof(*row_mt_worker_data->recon_sync_mutex) * num_jobs));
    if (row_mt_worker_data->recon_sync_mutex) {
      for (i = 0; i < num_jobs; ++i) {
        pthread_mutex_init(&row_mt_worker_data->recon_sync_mutex[i], NULL);
      }
    }

    CHECK_MEM_ERROR(
        &cm->error, row_mt_worker_data->recon_sync_cond,
        vpx_malloc(sizeof(*row_mt_worker_data->recon_sync_cond) * num_jobs));
    if (row_mt_worker_data->recon_sync_cond) {
      for (i = 0; i < num_jobs; ++i) {
        pthread_cond_init(&row_mt_worker_data->recon_sync_cond[i], NULL);
      }
    }
  }
#endif
  row_mt_worker_data->num_sbs = num_sbs;
  for (plane = 0; plane < 3; ++plane) {
    CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->dqcoeff[plane],
                    vpx_memalign(32, dqcoeff_size));
    memset(row_mt_worker_data->dqcoeff[plane], 0, dqcoeff_size);
    CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->eob[plane],
                    vpx_calloc(num_sbs << EOBS_PER_SB_LOG2,
                               sizeof(*row_mt_worker_data->eob[plane])));
  }
  CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->partition,
                  vpx_calloc(num_sbs * PARTITIONS_PER_SB,
                             sizeof(*row_mt_worker_data->partition)));
  CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->recon_map,
                  vpx_calloc(num_sbs, sizeof(*row_mt_worker_data->recon_map)));

  // allocate memory for thread_data
  if (row_mt_worker_data->thread_data == NULL) {
    const size_t thread_size =
        max_threads * sizeof(*row_mt_worker_data->thread_data);
    CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->thread_data,
                    vpx_memalign(32, thread_size));
  }
}

void vp9_dec_free_row_mt_mem(RowMTWorkerData *row_mt_worker_data) {
  if (row_mt_worker_data != NULL) {
    int plane;
#if CONFIG_MULTITHREAD
    int i;
    if (row_mt_worker_data->recon_sync_mutex != NULL) {
      for (i = 0; i < row_mt_worker_data->num_jobs; ++i) {
        pthread_mutex_destroy(&row_mt_worker_data->recon_sync_mutex[i]);
      }
      vpx_free(row_mt_worker_data->recon_sync_mutex);
      row_mt_worker_data->recon_sync_mutex = NULL;
    }
    if (row_mt_worker_data->recon_sync_cond != NULL) {
      for (i = 0; i < row_mt_worker_data->num_jobs; ++i) {
        pthread_cond_destroy(&row_mt_worker_data->recon_sync_cond[i]);
      }
      vpx_free(row_mt_worker_data->recon_sync_cond);
      row_mt_worker_data->recon_sync_cond = NULL;
    }
#endif
    for (plane = 0; plane < 3; ++plane) {
      vpx_free(row_mt_worker_data->eob[plane]);
      row_mt_worker_data->eob[plane] = NULL;
      vpx_free(row_mt_worker_data->dqcoeff[plane]);
      row_mt_worker_data->dqcoeff[plane] = NULL;
    }
    vpx_free(row_mt_worker_data->partition);
    row_mt_worker_data->partition = NULL;
    vpx_free(row_mt_worker_data->recon_map);
    row_mt_worker_data->recon_map = NULL;
    vpx_free(row_mt_worker_data->thread_data);
    row_mt_worker_data->thread_data = NULL;
  }
}

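// Decoder-side hooks installed into VP9_COMMON (see vp9_decoder_create()) so
// that the shared allocation code in vp9_alloccommon.c can (re)allocate and
// reset the mode-info arrays whenever the frame size changes.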
static int vp9_dec_alloc_mi(VP9_COMMON *cm, int mi_size) {
  cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
  if (!cm->mip) return 1;
  cm->mi_alloc_size = mi_size;
  cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *));
  if (!cm->mi_grid_base) return 1;
  return 0;
}

static void vp9_dec_free_mi(VP9_COMMON *cm) {
#if CONFIG_VP9_POSTPROC
  // MFQE allocates an additional mip and swaps it with cm->mip.
  vpx_free(cm->postproc_state.prev_mip);
  cm->postproc_state.prev_mip = NULL;
#endif
  vpx_free(cm->mip);
  cm->mip = NULL;
  vpx_free(cm->mi_grid_base);
  cm->mi_grid_base = NULL;
  cm->mi_alloc_size = 0;
}

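// Allocates and initializes a decoder instance. setjmp() is armed so that an
// allocation failure reported through CHECK_MEM_ERROR() unwinds back here,
// tears the partially built decoder down, and returns NULL.
//
// Rough lifecycle (a simplified sketch; in practice the vpx_codec_* wrapper in
// vp9_dx_iface.c drives these calls):
//
//   VP9Decoder *pbi = vp9_decoder_create(pool);     // allocate + init
//   vp9_receive_compressed_data(pbi, size, &data);  // decode one frame
//   vp9_get_raw_frame(pbi, &sd, &flags);            // fetch the output
//   vp9_decoder_remove(pbi);                        // tear down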
VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
  VP9Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
  VP9_COMMON *volatile const cm = pbi ? &pbi->common : NULL;

  if (!cm) return NULL;

  vp9_zero(*pbi);

  if (setjmp(cm->error.jmp)) {
    cm->error.setjmp = 0;
    vp9_decoder_remove(pbi);
    return NULL;
  }

  cm->error.setjmp = 1;

  CHECK_MEM_ERROR(&cm->error, cm->fc,
                  (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
  CHECK_MEM_ERROR(
      &cm->error, cm->frame_contexts,
      (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));

  pbi->need_resync = 1;
  once(initialize_dec);

  // Initialize the references to not point to any frame buffers.
  memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
  memset(&cm->next_ref_frame_map, -1, sizeof(cm->next_ref_frame_map));

  init_frame_indexes(cm);
  pbi->ready_for_new_data = 1;
  pbi->common.buffer_pool = pool;

  cm->bit_depth = VPX_BITS_8;
  cm->dequant_bit_depth = VPX_BITS_8;

  cm->alloc_mi = vp9_dec_alloc_mi;
  cm->free_mi = vp9_dec_free_mi;
  cm->setup_mi = vp9_dec_setup_mi;

  vp9_loop_filter_init(cm);

  cm->error.setjmp = 0;

  vpx_get_worker_interface()->init(&pbi->lf_worker);

  return pbi;
}

void vp9_decoder_remove(VP9Decoder *pbi) {
  int i;

  if (!pbi) return;

  vpx_get_worker_interface()->end(&pbi->lf_worker);
  vpx_free(pbi->lf_worker.data1);

  for (i = 0; i < pbi->num_tile_workers; ++i) {
    VPxWorker *const worker = &pbi->tile_workers[i];
    vpx_get_worker_interface()->end(worker);
  }

  vpx_free(pbi->tile_worker_data);
  vpx_free(pbi->tile_workers);

  if (pbi->num_tile_workers > 0) {
    vp9_loop_filter_dealloc(&pbi->lf_row_sync);
  }

  if (pbi->row_mt == 1) {
    vp9_dec_free_row_mt_mem(pbi->row_mt_worker_data);
    if (pbi->row_mt_worker_data != NULL) {
      vp9_jobq_deinit(&pbi->row_mt_worker_data->jobq);
      vpx_free(pbi->row_mt_worker_data->jobq_buf);
#if CONFIG_MULTITHREAD
      pthread_mutex_destroy(&pbi->row_mt_worker_data->recon_done_mutex);
#endif
    }
    vpx_free(pbi->row_mt_worker_data);
  }

  vp9_remove_common(&pbi->common);
  vpx_free(pbi);
}

static int equal_dimensions(const YV12_BUFFER_CONFIG *a,
                            const YV12_BUFFER_CONFIG *b) {
  return a->y_height == b->y_height && a->y_width == b->y_width &&
         a->uv_height == b->uv_height && a->uv_width == b->uv_width;
}

vpx_codec_err_t vp9_copy_reference_dec(VP9Decoder *pbi,
                                       VP9_REFFRAME ref_frame_flag,
                                       YV12_BUFFER_CONFIG *sd) {
  VP9_COMMON *cm = &pbi->common;

  /* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
   * encoder is using the frame buffers for. This is just a stub to keep the
   * vpxenc --test-decode functionality working, and will be replaced in a
   * later commit that adds VP9-specific controls for this functionality.
   */
  if (ref_frame_flag == VP9_LAST_FLAG) {
    const YV12_BUFFER_CONFIG *const cfg = get_ref_frame(cm, 0);
    if (cfg == NULL) {
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "No 'last' reference frame");
      return VPX_CODEC_ERROR;
    }
    if (!equal_dimensions(cfg, sd))
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "Incorrect buffer dimensions");
    else
      vpx_yv12_copy_frame(cfg, sd);
  } else {
    vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
  }

  return cm->error.error_code;
}

vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm,
                                      VP9_REFFRAME ref_frame_flag,
                                      YV12_BUFFER_CONFIG *sd) {
  int idx;
  YV12_BUFFER_CONFIG *ref_buf = NULL;

  // TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
  // encoder is using the frame buffers for. This is just a stub to keep the
  // vpxenc --test-decode functionality working, and will be replaced in a
  // later commit that adds VP9-specific controls for this functionality.
  // (Yunqing) The set_reference control depends on the following setting in
  // encoder.
  // cpi->lst_fb_idx = 0;
  // cpi->gld_fb_idx = 1;
  // cpi->alt_fb_idx = 2;
  if (ref_frame_flag == VP9_LAST_FLAG) {
    idx = cm->ref_frame_map[0];
  } else if (ref_frame_flag == VP9_GOLD_FLAG) {
    idx = cm->ref_frame_map[1];
  } else if (ref_frame_flag == VP9_ALT_FLAG) {
    idx = cm->ref_frame_map[2];
  } else {
    vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
    return cm->error.error_code;
  }

  if (idx < 0 || idx >= FRAME_BUFFERS) {
    vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                       "Invalid reference frame map");
    return cm->error.error_code;
  }

  // Get the destination reference buffer.
  ref_buf = &cm->buffer_pool->frame_bufs[idx].buf;

  if (!equal_dimensions(ref_buf, sd)) {
    vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                       "Incorrect buffer dimensions");
  } else {
    // Overwrite the reference frame buffer.
    vpx_yv12_copy_frame(sd, ref_buf);
  }

  return cm->error.error_code;
}

/* If any buffer updating is signaled it should be done here. */
static void swap_frame_buffers(VP9Decoder *pbi) {
  int ref_index = 0, mask;
  VP9_COMMON *const cm = &pbi->common;
  BufferPool *const pool = cm->buffer_pool;
  RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;

  for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
    const int old_idx = cm->ref_frame_map[ref_index];
    // The current thread releases its hold on the reference frame.
    decrease_ref_count(old_idx, frame_bufs, pool);

    // Release the reference frame held in the reference map.
    if (mask & 1) {
      decrease_ref_count(old_idx, frame_bufs, pool);
    }
    cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
    ++ref_index;
  }

  // The current thread releases its hold on the remaining reference frames.
  for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
    const int old_idx = cm->ref_frame_map[ref_index];
    decrease_ref_count(old_idx, frame_bufs, pool);
    cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
  }
  pbi->hold_ref_buf = 0;
  cm->frame_to_show = get_frame_new_buffer(cm);

  --frame_bufs[cm->new_fb_idx].ref_count;

  // Invalidate these references until the next frame starts.
  for (ref_index = 0; ref_index < 3; ref_index++)
    cm->frame_refs[ref_index].idx = -1;
}

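// Error-path cleanup: waits for all worker threads to finish and drops any
// reference-buffer counts still held for the frame that failed to decode.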
static void release_fb_on_decoder_exit(VP9Decoder *pbi) {
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  VP9_COMMON *volatile const cm = &pbi->common;
  BufferPool *volatile const pool = cm->buffer_pool;
  RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
  int i;

  // Synchronize all threads immediately as a subsequent decode call may
  // cause a resize invalidating some allocations.
  winterface->sync(&pbi->lf_worker);
  for (i = 0; i < pbi->num_tile_workers; ++i) {
    winterface->sync(&pbi->tile_workers[i]);
  }

  // Release all the reference buffers if a worker thread is holding them.
  if (pbi->hold_ref_buf == 1) {
    int ref_index = 0, mask;
    for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
      const int old_idx = cm->ref_frame_map[ref_index];
      // The current thread releases its hold on the reference frame.
      decrease_ref_count(old_idx, frame_bufs, pool);

      // Release the reference frame held in the reference map.
      if (mask & 1) {
        decrease_ref_count(old_idx, frame_bufs, pool);
      }
      ++ref_index;
    }

    // The current thread releases its hold on the remaining reference frames.
    for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
      const int old_idx = cm->ref_frame_map[ref_index];
      decrease_ref_count(old_idx, frame_bufs, pool);
    }
    pbi->hold_ref_buf = 0;
  }
}

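// Decodes one compressed frame (a single frame of a superframe once the index
// has been parsed). A zero-size input signals a missing frame. setjmp() is
// armed so that errors raised through vpx_internal_error() during decoding
// release the held frame buffers and return -1 instead of aborting the
// process.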
int vp9_receive_compressed_data(VP9Decoder *pbi, size_t size,
                                const uint8_t **psource) {
  VP9_COMMON *volatile const cm = &pbi->common;
  BufferPool *volatile const pool = cm->buffer_pool;
  RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
  const uint8_t *source = *psource;
  int retcode = 0;
  cm->error.error_code = VPX_CODEC_OK;

  if (size == 0) {
    // This is used to signal that we are missing frames.
    // We do not know if the missing frame(s) were supposed to update
    // any of the reference buffers, but we act conservatively and
    // mark only the last buffer as corrupted.
    //
    // TODO(jkoleszar): Error concealment is undefined and non-normative
    // at this point, but if it becomes so, [0] may not always be the correct
    // thing to do here.
    if (cm->frame_refs[0].idx > 0) {
      assert(cm->frame_refs[0].buf != NULL);
      cm->frame_refs[0].buf->corrupted = 1;
    }
  }

  pbi->ready_for_new_data = 0;

  // Check if the previous frame was a frame without any references to it.
  if (cm->new_fb_idx >= 0 && frame_bufs[cm->new_fb_idx].ref_count == 0 &&
      !frame_bufs[cm->new_fb_idx].released) {
    pool->release_fb_cb(pool->cb_priv,
                        &frame_bufs[cm->new_fb_idx].raw_frame_buffer);
    frame_bufs[cm->new_fb_idx].released = 1;
  }

  // Find a free frame buffer. Return an error if none can be found.
  cm->new_fb_idx = get_free_fb(cm);
  if (cm->new_fb_idx == INVALID_IDX) {
    pbi->ready_for_new_data = 1;
    release_fb_on_decoder_exit(pbi);
    vpx_clear_system_state();
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Unable to find free frame buffer");
    return cm->error.error_code;
  }

  // Assign a MV array to the frame buffer.
  cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];

  pbi->hold_ref_buf = 0;
  pbi->cur_buf = &frame_bufs[cm->new_fb_idx];

  if (setjmp(cm->error.jmp)) {
    cm->error.setjmp = 0;
    pbi->ready_for_new_data = 1;
    release_fb_on_decoder_exit(pbi);
    // Release the current frame.
    decrease_ref_count(cm->new_fb_idx, frame_bufs, pool);
    vpx_clear_system_state();
    return -1;
  }

  cm->error.setjmp = 1;
  vp9_decode_frame(pbi, source, source + size, psource);

  swap_frame_buffers(pbi);

  vpx_clear_system_state();

  if (!cm->show_existing_frame) {
    cm->last_show_frame = cm->show_frame;
    cm->prev_frame = cm->cur_frame;
    if (cm->seg.enabled) vp9_swap_current_and_last_seg_map(cm);
  }

  if (cm->show_frame) cm->cur_show_frame_fb_idx = cm->new_fb_idx;

  // Update progress in frame parallel decode.
  cm->last_width = cm->width;
  cm->last_height = cm->height;
  if (cm->show_frame) {
    cm->current_video_frame++;
  }

  cm->error.setjmp = 0;
  return retcode;
}

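// Hands the most recently decoded, showable frame to the caller. Returns 0 on
// success and -1 when there is no new frame to show. With postprocessing
// enabled the frame may be filtered into *sd; otherwise *sd is a shallow copy
// that aliases the decoder's own frame buffer.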
int vp9_get_raw_frame(VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd,
                      vp9_ppflags_t *flags) {
  VP9_COMMON *const cm = &pbi->common;
  int ret = -1;
#if !CONFIG_VP9_POSTPROC
  (void)*flags;
#endif

  if (pbi->ready_for_new_data == 1) return ret;

  pbi->ready_for_new_data = 1;

  /* No raw frame to show. */
  if (!cm->show_frame) return ret;

  pbi->ready_for_new_data = 1;

#if CONFIG_VP9_POSTPROC
  if (!cm->show_existing_frame) {
    ret = vp9_post_proc_frame(cm, sd, flags, cm->width);
  } else {
    *sd = *cm->frame_to_show;
    ret = 0;
  }
#else
  *sd = *cm->frame_to_show;
  ret = 0;
#endif /* !CONFIG_VP9_POSTPROC */
  vpx_clear_system_state();
  return ret;
}

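// A VP9 superframe packs several coded frames into one chunk and appends an
// index at the end:
//
//   [frame 0][frame 1]...[marker][size 0]...[size frames-1][marker]
//
// The marker byte has the form 0b110mmfff: mm + 1 is the number of bytes per
// size field (mag) and fff + 1 is the number of frames, so the index occupies
// 2 + mag * frames bytes. The same marker byte must appear at both ends of
// the index, and the per-frame sizes are stored little-endian.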
vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, size_t data_sz,
                                           uint32_t sizes[8], int *count,
                                           vpx_decrypt_cb decrypt_cb,
                                           void *decrypt_state) {
  // A chunk ending with a byte matching 0xc0 is an invalid chunk unless
  // it is a superframe index. If the last byte of real video compression
  // data is 0xc0, the encoder must add a 0 byte. If we have the marker but
  // not the matching marker byte at the front of the index, we have an
  // invalid bitstream and need to return an error.

  uint8_t marker;

  assert(data_sz);
  marker = read_marker(decrypt_cb, decrypt_state, data + data_sz - 1);
  *count = 0;

  if ((marker & 0xe0) == 0xc0) {
    const uint32_t frames = (marker & 0x7) + 1;
    const uint32_t mag = ((marker >> 3) & 0x3) + 1;
    const size_t index_sz = 2 + mag * frames;

    // This chunk is marked as having a superframe index but doesn't have
    // enough data for it, thus it's an invalid superframe index.
    if (data_sz < index_sz) return VPX_CODEC_CORRUPT_FRAME;

    {
      const uint8_t marker2 =
          read_marker(decrypt_cb, decrypt_state, data + data_sz - index_sz);

      // This chunk is marked as having a superframe index but doesn't have
      // the matching marker byte at the front of the index, therefore it's
      // an invalid chunk.
      if (marker != marker2) return VPX_CODEC_CORRUPT_FRAME;
    }

    {
      // Found a valid superframe index.
      uint32_t i, j;
      const uint8_t *x = &data[data_sz - index_sz + 1];

      // frames has a maximum of 8 and mag has a maximum of 4.
      uint8_t clear_buffer[32];
      assert(sizeof(clear_buffer) >= frames * mag);
      if (decrypt_cb) {
        decrypt_cb(decrypt_state, x, clear_buffer, frames * mag);
        x = clear_buffer;
      }

      for (i = 0; i < frames; ++i) {
        uint32_t this_sz = 0;

        for (j = 0; j < mag; ++j) this_sz |= ((uint32_t)(*x++)) << (j * 8);
        sizes[i] = this_sz;
      }
      *count = frames;
    }
  }
  return VPX_CODEC_OK;
}