/*
 *
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include "config/aom_config.h"

#include "aom_mem/aom_mem.h"

#include "av1/common/alloccommon.h"
#include "av1/common/av1_common_int.h"
#include "av1/common/blockd.h"
#include "av1/common/cdef_block.h"
#include "av1/common/entropymode.h"
#include "av1/common/entropymv.h"
#include "av1/common/thread_common.h"

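// Returns the number of 16x16 macroblocks covered by a frame of the given
// dimensions. Worked example, assuming 4x4 mode-info units (MI_SIZE_LOG2 ==
// 2): a 1920x1080 frame is already aligned to 8 pixels, giving 480x270 MI
// units and hence (480 + 2) >> 2 = 120 by (270 + 2) >> 2 = 68 macroblocks,
// so av1_get_MBs(1920, 1080) returns 8160.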
int av1_get_MBs(int width, int height) {
  const int aligned_width = ALIGN_POWER_OF_TWO(width, 3);
  const int aligned_height = ALIGN_POWER_OF_TWO(height, 3);
  const int mi_cols = aligned_width >> MI_SIZE_LOG2;
  const int mi_rows = aligned_height >> MI_SIZE_LOG2;

  const int mb_cols = (mi_cols + 2) >> 2;
  const int mb_rows = (mi_rows + 2) >> 2;
  return mb_rows * mb_cols;
}

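// For every slot in the pool, releases any externally provided raw frame
// buffer that is still referenced via the pool's release callback, then frees
// the per-buffer motion vector array, segmentation map and internal frame
// buffer.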
void av1_free_ref_frame_buffers(BufferPool *pool) {
  int i;

  for (i = 0; i < FRAME_BUFFERS; ++i) {
    if (pool->frame_bufs[i].ref_count > 0 &&
        pool->frame_bufs[i].raw_frame_buffer.data != NULL) {
      pool->release_fb_cb(pool->cb_priv, &pool->frame_bufs[i].raw_frame_buffer);
      pool->frame_bufs[i].raw_frame_buffer.data = NULL;
      pool->frame_bufs[i].raw_frame_buffer.size = 0;
      pool->frame_bufs[i].raw_frame_buffer.priv = NULL;
      pool->frame_bufs[i].ref_count = 0;
    }
    aom_free(pool->frame_bufs[i].mvs);
    pool->frame_bufs[i].mvs = NULL;
    aom_free(pool->frame_bufs[i].seg_map);
    pool->frame_bufs[i].seg_map = NULL;
    aom_free_frame_buffer(&pool->frame_bufs[i].buf);
  }
}

static INLINE void free_cdef_linebuf_conditional(
    AV1_COMMON *const cm, const size_t *new_linebuf_size) {
  CdefInfo *cdef_info = &cm->cdef_info;
  for (int plane = 0; plane < MAX_MB_PLANE; plane++) {
    if (new_linebuf_size[plane] != cdef_info->allocated_linebuf_size[plane]) {
      aom_free(cdef_info->linebuf[plane]);
      cdef_info->linebuf[plane] = NULL;
    }
  }
}

static INLINE void free_cdef_bufs_conditional(AV1_COMMON *const cm,
                                              uint16_t **colbuf,
                                              uint16_t **srcbuf,
                                              const size_t *new_colbuf_size,
                                              const size_t new_srcbuf_size) {
  CdefInfo *cdef_info = &cm->cdef_info;
  if (new_srcbuf_size != cdef_info->allocated_srcbuf_size) {
    aom_free(*srcbuf);
    *srcbuf = NULL;
  }
  for (int plane = 0; plane < MAX_MB_PLANE; plane++) {
    if (new_colbuf_size[plane] != cdef_info->allocated_colbuf_size[plane]) {
      aom_free(colbuf[plane]);
      colbuf[plane] = NULL;
    }
  }
}

static INLINE void free_cdef_bufs(uint16_t **colbuf, uint16_t **srcbuf) {
  aom_free(*srcbuf);
  *srcbuf = NULL;
  for (int plane = 0; plane < MAX_MB_PLANE; plane++) {
    aom_free(colbuf[plane]);
    colbuf[plane] = NULL;
  }
}

static INLINE void free_cdef_row_sync(AV1CdefRowSync **cdef_row_mt,
                                      const int num_mi_rows) {
  if (*cdef_row_mt == NULL) return;
#if CONFIG_MULTITHREAD
  for (int row_idx = 0; row_idx < num_mi_rows; row_idx++) {
    pthread_mutex_destroy((*cdef_row_mt)[row_idx].row_mutex_);
    pthread_cond_destroy((*cdef_row_mt)[row_idx].row_cond_);
    aom_free((*cdef_row_mt)[row_idx].row_mutex_);
    aom_free((*cdef_row_mt)[row_idx].row_cond_);
  }
#else
  (void)num_mi_rows;
#endif  // CONFIG_MULTITHREAD
  aom_free(*cdef_row_mt);
  *cdef_row_mt = NULL;
}

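// Frees all CDEF scratch memory: the line buffers and the column/source
// buffers owned by worker 0 (stored in cm->cdef_info) and, when more than
// one worker was used, the per-worker column/source buffers together with
// the row-level synchronization data in cdef_sync.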
void av1_free_cdef_buffers(AV1_COMMON *const cm,
                           AV1CdefWorkerData **cdef_worker,
                           AV1CdefSync *cdef_sync, int num_workers) {
  CdefInfo *cdef_info = &cm->cdef_info;
  const int num_mi_rows = cdef_info->allocated_mi_rows;

  for (int plane = 0; plane < MAX_MB_PLANE; plane++) {
    aom_free(cdef_info->linebuf[plane]);
    cdef_info->linebuf[plane] = NULL;
  }
  // De-allocation of column buffer & source buffer (worker_0).
  free_cdef_bufs(cdef_info->colbuf, &cdef_info->srcbuf);

  if (num_workers < 2) return;
  if (*cdef_worker != NULL) {
    for (int idx = num_workers - 1; idx >= 1; idx--) {
      // De-allocation of column buffer & source buffer for remaining workers.
      free_cdef_bufs((*cdef_worker)[idx].colbuf, &(*cdef_worker)[idx].srcbuf);
    }
    aom_free(*cdef_worker);
    *cdef_worker = NULL;
  }
  free_cdef_row_sync(&cdef_sync->cdef_row_mt, num_mi_rows);
}

static INLINE void alloc_cdef_linebuf(AV1_COMMON *const cm, uint16_t **linebuf,
                                      const int num_planes) {
  CdefInfo *cdef_info = &cm->cdef_info;
  for (int plane = 0; plane < num_planes; plane++) {
    if (linebuf[plane] == NULL)
      CHECK_MEM_ERROR(cm, linebuf[plane],
                      aom_malloc(cdef_info->allocated_linebuf_size[plane]));
  }
}

static INLINE void alloc_cdef_bufs(AV1_COMMON *const cm, uint16_t **colbuf,
                                   uint16_t **srcbuf, const int num_planes) {
  CdefInfo *cdef_info = &cm->cdef_info;
  if (*srcbuf == NULL)
    CHECK_MEM_ERROR(cm, *srcbuf,
                    aom_memalign(16, cdef_info->allocated_srcbuf_size));

  for (int plane = 0; plane < num_planes; plane++) {
    if (colbuf[plane] == NULL)
      CHECK_MEM_ERROR(cm, colbuf[plane],
                      aom_malloc(cdef_info->allocated_colbuf_size[plane]));
  }
}

static INLINE void alloc_cdef_row_sync(AV1_COMMON *const cm,
                                       AV1CdefRowSync **cdef_row_mt,
                                       const int num_mi_rows) {
  if (*cdef_row_mt != NULL) return;

  CHECK_MEM_ERROR(cm, *cdef_row_mt,
                  aom_malloc(sizeof(**cdef_row_mt) * num_mi_rows));
#if CONFIG_MULTITHREAD
  for (int row_idx = 0; row_idx < num_mi_rows; row_idx++) {
    CHECK_MEM_ERROR(cm, (*cdef_row_mt)[row_idx].row_mutex_,
                    aom_malloc(sizeof(*(*cdef_row_mt)[row_idx].row_mutex_)));
    pthread_mutex_init((*cdef_row_mt)[row_idx].row_mutex_, NULL);

    CHECK_MEM_ERROR(cm, (*cdef_row_mt)[row_idx].row_cond_,
                    aom_malloc(sizeof(*(*cdef_row_mt)[row_idx].row_cond_)));
    pthread_cond_init((*cdef_row_mt)[row_idx].row_cond_, NULL);

    (*cdef_row_mt)[row_idx].is_row_done = 0;
  }
#endif  // CONFIG_MULTITHREAD
}

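// (Re)allocates the CDEF scratch buffers for the current frame configuration.
// The required line, column and source buffer sizes are computed first; any
// existing buffer whose size (or the worker count) has changed is freed, the
// new sizes are recorded in cm->cdef_info, and the buffers are then allocated
// only if CDEF is enabled. With num_workers > 1, per-worker column/source
// buffers and the row-level synchronization data are also allocated.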
void av1_alloc_cdef_buffers(AV1_COMMON *const cm,
                            AV1CdefWorkerData **cdef_worker,
                            AV1CdefSync *cdef_sync, int num_workers,
                            int init_worker) {
  const int num_planes = av1_num_planes(cm);
  size_t new_linebuf_size[MAX_MB_PLANE] = { 0 };
  size_t new_colbuf_size[MAX_MB_PLANE] = { 0 };
  size_t new_srcbuf_size = 0;
  CdefInfo *const cdef_info = &cm->cdef_info;
  // Check for configuration change
  const int num_mi_rows =
      (cm->mi_params.mi_rows + MI_SIZE_64X64 - 1) / MI_SIZE_64X64;
  const int is_num_workers_changed =
      cdef_info->allocated_num_workers != num_workers;
  const int is_cdef_enabled =
      cm->seq_params->enable_cdef && !cm->tiles.large_scale;

  // num_bufs = 3 represents ping-pong buffers for the top line buffer,
  // followed by the bottom line buffer. The ping-pong scheme avoids the top
  // line buffer being overwritten by a consecutive row.
  int num_bufs = 3;
  if (num_workers > 1)
    num_bufs = (cm->mi_params.mi_rows + MI_SIZE_64X64 - 1) / MI_SIZE_64X64;

  if (is_cdef_enabled) {
    // Calculate src buffer size
    new_srcbuf_size = sizeof(*cdef_info->srcbuf) * CDEF_INBUF_SIZE;
    for (int plane = 0; plane < num_planes; plane++) {
      const int shift =
          plane == AOM_PLANE_Y ? 0 : cm->seq_params->subsampling_x;
      // Calculate top and bottom line buffer size
      const int luma_stride =
          ALIGN_POWER_OF_TWO(cm->mi_params.mi_cols << MI_SIZE_LOG2, 4);
      new_linebuf_size[plane] = sizeof(*cdef_info->linebuf) * num_bufs *
                                (CDEF_VBORDER << 1) * (luma_stride >> shift);
      // Calculate column buffer size
      const int block_height =
          (CDEF_BLOCKSIZE << (MI_SIZE_LOG2 - shift)) * 2 * CDEF_VBORDER;
      new_colbuf_size[plane] =
          sizeof(*cdef_info->colbuf[plane]) * block_height * CDEF_HBORDER;
    }
  }

  // Free src, line and column buffers for worker 0 in case of reallocation
  free_cdef_linebuf_conditional(cm, new_linebuf_size);
  free_cdef_bufs_conditional(cm, cdef_info->colbuf, &cdef_info->srcbuf,
                             new_colbuf_size, new_srcbuf_size);

  // The flag init_worker indicates whether cdef_worker has to be allocated
  // for the frame. The decoder always passes 1. On the encoder side, it is 0
  // when called for parallel frames during FPMT (where cdef_worker is shared
  // across the parallel frames) and 1 otherwise.
  if (*cdef_worker != NULL && init_worker) {
    if (is_num_workers_changed) {
      // Free src and column buffers for remaining workers in case of change in
      // num_workers
      for (int idx = cdef_info->allocated_num_workers - 1; idx >= 1; idx--)
        free_cdef_bufs((*cdef_worker)[idx].colbuf, &(*cdef_worker)[idx].srcbuf);
    } else if (num_workers > 1) {
      // Free src and column buffers for remaining workers in case of
      // reallocation
      for (int idx = num_workers - 1; idx >= 1; idx--)
        free_cdef_bufs_conditional(cm, (*cdef_worker)[idx].colbuf,
                                   &(*cdef_worker)[idx].srcbuf, new_colbuf_size,
                                   new_srcbuf_size);
    }
  }

  if (cdef_info->allocated_mi_rows != num_mi_rows)
    free_cdef_row_sync(&cdef_sync->cdef_row_mt, cdef_info->allocated_mi_rows);

  // Store allocated sizes for reallocation
  cdef_info->allocated_srcbuf_size = new_srcbuf_size;
  av1_copy(cdef_info->allocated_colbuf_size, new_colbuf_size);
  av1_copy(cdef_info->allocated_linebuf_size, new_linebuf_size);
  // Store configuration to check change in configuration
  cdef_info->allocated_mi_rows = num_mi_rows;
  cdef_info->allocated_num_workers = num_workers;

  if (!is_cdef_enabled) return;

  // Memory allocation of column buffer & source buffer (worker_0).
  alloc_cdef_bufs(cm, cdef_info->colbuf, &cdef_info->srcbuf, num_planes);
  alloc_cdef_linebuf(cm, cdef_info->linebuf, num_planes);

  if (num_workers < 2) return;

  if (init_worker) {
    if (*cdef_worker == NULL)
      CHECK_MEM_ERROR(cm, *cdef_worker,
                      aom_calloc(num_workers, sizeof(**cdef_worker)));

    // Memory allocation of column buffer & source buffer for remaining
    // workers.
    for (int idx = num_workers - 1; idx >= 1; idx--)
      alloc_cdef_bufs(cm, (*cdef_worker)[idx].colbuf,
                      &(*cdef_worker)[idx].srcbuf, num_planes);
  }

  alloc_cdef_row_sync(cm, &cdef_sync->cdef_row_mt,
                      cdef_info->allocated_mi_rows);
}

#if !CONFIG_REALTIME_ONLY
// Assumes cm->rst_info[p].restoration_unit_size is already initialized
void av1_alloc_restoration_buffers(AV1_COMMON *cm) {
  const int num_planes = av1_num_planes(cm);
  for (int p = 0; p < num_planes; ++p)
    av1_alloc_restoration_struct(cm, &cm->rst_info[p], p > 0);

  if (cm->rst_tmpbuf == NULL) {
    CHECK_MEM_ERROR(cm, cm->rst_tmpbuf,
                    (int32_t *)aom_memalign(16, RESTORATION_TMPBUF_SIZE));
  }

  if (cm->rlbs == NULL) {
    CHECK_MEM_ERROR(cm, cm->rlbs, aom_malloc(sizeof(RestorationLineBuffers)));
  }

  // For striped loop restoration, we divide each row of tiles into "stripes",
  // of height 64 luma pixels but with an offset by RESTORATION_UNIT_OFFSET
  // luma pixels to match the output from CDEF. We will need to store 2 *
  // RESTORATION_CTX_VERT lines of data for each stripe, and also need to be
  // able to quickly answer the question "Where is the <n>'th stripe for tile
  // row <m>?" To make that efficient, we generate the rst_last_stripe array.
  int num_stripes = 0;
  for (int i = 0; i < cm->tiles.rows; ++i) {
    TileInfo tile_info;
    av1_tile_set_row(&tile_info, cm, i);
    const int mi_h = tile_info.mi_row_end - tile_info.mi_row_start;
    const int ext_h = RESTORATION_UNIT_OFFSET + (mi_h << MI_SIZE_LOG2);
    const int tile_stripes = (ext_h + 63) / 64;
    num_stripes += tile_stripes;
  }

  // Now we need to allocate enough space to store the line buffers for the
  // stripes
  const int frame_w = cm->superres_upscaled_width;
  const int use_highbd = cm->seq_params->use_highbitdepth;

  for (int p = 0; p < num_planes; ++p) {
    const int is_uv = p > 0;
    const int ss_x = is_uv && cm->seq_params->subsampling_x;
    const int plane_w = ((frame_w + ss_x) >> ss_x) + 2 * RESTORATION_EXTRA_HORZ;
    const int stride = ALIGN_POWER_OF_TWO(plane_w, 5);
    const int buf_size = num_stripes * stride * RESTORATION_CTX_VERT
                         << use_highbd;
    RestorationStripeBoundaries *boundaries = &cm->rst_info[p].boundaries;

    if (buf_size != boundaries->stripe_boundary_size ||
        boundaries->stripe_boundary_above == NULL ||
        boundaries->stripe_boundary_below == NULL) {
      aom_free(boundaries->stripe_boundary_above);
      aom_free(boundaries->stripe_boundary_below);

      CHECK_MEM_ERROR(cm, boundaries->stripe_boundary_above,
                      (uint8_t *)aom_memalign(32, buf_size));
      CHECK_MEM_ERROR(cm, boundaries->stripe_boundary_below,
                      (uint8_t *)aom_memalign(32, buf_size));

      boundaries->stripe_boundary_size = buf_size;
    }
    boundaries->stripe_boundary_stride = stride;
  }
}

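// Releases everything set up by av1_alloc_restoration_buffers: the per-plane
// restoration structs, the temporary working buffer (rst_tmpbuf), the
// restoration line-buffer struct (rlbs), the stripe boundary buffers and the
// temporary restoration frame (rst_frame).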
void av1_free_restoration_buffers(AV1_COMMON *cm) {
  int p;
  for (p = 0; p < MAX_MB_PLANE; ++p)
    av1_free_restoration_struct(&cm->rst_info[p]);
  aom_free(cm->rst_tmpbuf);
  cm->rst_tmpbuf = NULL;
  aom_free(cm->rlbs);
  cm->rlbs = NULL;
  for (p = 0; p < MAX_MB_PLANE; ++p) {
    RestorationStripeBoundaries *boundaries = &cm->rst_info[p].boundaries;
    aom_free(boundaries->stripe_boundary_above);
    aom_free(boundaries->stripe_boundary_below);
    boundaries->stripe_boundary_above = NULL;
    boundaries->stripe_boundary_below = NULL;
  }

  aom_free_frame_buffer(&cm->rst_frame);
}
#endif  // !CONFIG_REALTIME_ONLY

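// Frees the above-context arrays allocated by av1_alloc_above_context_buffers
// and resets the stored tile-row, MI-column and plane counts to zero.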
void av1_free_above_context_buffers(CommonContexts *above_contexts) {
  int i;
  const int num_planes = above_contexts->num_planes;

  for (int tile_row = 0; tile_row < above_contexts->num_tile_rows;
       tile_row++) {
    for (i = 0; i < num_planes; i++) {
      aom_free(above_contexts->entropy[i][tile_row]);
      above_contexts->entropy[i][tile_row] = NULL;
    }
    aom_free(above_contexts->partition[tile_row]);
    above_contexts->partition[tile_row] = NULL;

    aom_free(above_contexts->txfm[tile_row]);
    above_contexts->txfm[tile_row] = NULL;
  }
  for (i = 0; i < num_planes; i++) {
    aom_free(above_contexts->entropy[i]);
    above_contexts->entropy[i] = NULL;
  }
  aom_free(above_contexts->partition);
  above_contexts->partition = NULL;

  aom_free(above_contexts->txfm);
  above_contexts->txfm = NULL;

  above_contexts->num_tile_rows = 0;
  above_contexts->num_mi_cols = 0;
  above_contexts->num_planes = 0;
}

void av1_free_context_buffers(AV1_COMMON *cm) {
  cm->mi_params.free_mi(&cm->mi_params);

  av1_free_above_context_buffers(&cm->above_contexts);
}

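// Allocates the per-tile-row above-context arrays (entropy, partition and
// transform-size contexts), sized for num_mi_cols aligned up to the maximum
// superblock width. Returns 0 on success and 1 if any allocation fails.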
int av1_alloc_above_context_buffers(CommonContexts *above_contexts,
                                    int num_tile_rows, int num_mi_cols,
                                    int num_planes) {
  const int aligned_mi_cols =
      ALIGN_POWER_OF_TWO(num_mi_cols, MAX_MIB_SIZE_LOG2);

  // Allocate above context buffers
  above_contexts->num_tile_rows = num_tile_rows;
  above_contexts->num_mi_cols = aligned_mi_cols;
  above_contexts->num_planes = num_planes;
  for (int plane_idx = 0; plane_idx < num_planes; plane_idx++) {
    above_contexts->entropy[plane_idx] = (ENTROPY_CONTEXT **)aom_calloc(
        num_tile_rows, sizeof(above_contexts->entropy[0]));
    if (!above_contexts->entropy[plane_idx]) return 1;
  }

  above_contexts->partition = (PARTITION_CONTEXT **)aom_calloc(
      num_tile_rows, sizeof(above_contexts->partition));
  if (!above_contexts->partition) return 1;

  above_contexts->txfm =
      (TXFM_CONTEXT **)aom_calloc(num_tile_rows, sizeof(above_contexts->txfm));
  if (!above_contexts->txfm) return 1;

  for (int tile_row = 0; tile_row < num_tile_rows; tile_row++) {
    for (int plane_idx = 0; plane_idx < num_planes; plane_idx++) {
      above_contexts->entropy[plane_idx][tile_row] =
          (ENTROPY_CONTEXT *)aom_calloc(
              aligned_mi_cols, sizeof(*above_contexts->entropy[0][tile_row]));
      if (!above_contexts->entropy[plane_idx][tile_row]) return 1;
    }

    above_contexts->partition[tile_row] = (PARTITION_CONTEXT *)aom_calloc(
        aligned_mi_cols, sizeof(*above_contexts->partition[tile_row]));
    if (!above_contexts->partition[tile_row]) return 1;

    above_contexts->txfm[tile_row] = (TXFM_CONTEXT *)aom_calloc(
        aligned_mi_cols, sizeof(*above_contexts->txfm[tile_row]));
    if (!above_contexts->txfm[tile_row]) return 1;
  }

  return 0;
}

// Allocate the dynamically allocated arrays in 'mi_params' assuming
// 'mi_params->set_mb_mi()' was already called earlier to initialize the rest
// of the struct members.
static int alloc_mi(CommonModeInfoParams *mi_params) {
  const int aligned_mi_rows = calc_mi_size(mi_params->mi_rows);
  const int mi_grid_size = mi_params->mi_stride * aligned_mi_rows;
  const int alloc_size_1d = mi_size_wide[mi_params->mi_alloc_bsize];
  const int alloc_mi_size =
      mi_params->mi_alloc_stride * (aligned_mi_rows / alloc_size_1d);

  if (mi_params->mi_alloc_size < alloc_mi_size ||
      mi_params->mi_grid_size < mi_grid_size) {
    mi_params->free_mi(mi_params);

    mi_params->mi_alloc =
        aom_calloc(alloc_mi_size, sizeof(*mi_params->mi_alloc));
    if (!mi_params->mi_alloc) return 1;
    mi_params->mi_alloc_size = alloc_mi_size;

    mi_params->mi_grid_base = (MB_MODE_INFO **)aom_calloc(
        mi_grid_size, sizeof(*mi_params->mi_grid_base));
    if (!mi_params->mi_grid_base) return 1;
    mi_params->mi_grid_size = mi_grid_size;

    mi_params->tx_type_map =
        aom_calloc(mi_grid_size, sizeof(*mi_params->tx_type_map));
    if (!mi_params->tx_type_map) return 1;
  }

  return 0;
}

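// Sets the mode-info geometry for the given frame size and (re)allocates the
// mode-info arrays. Returns 0 on success. On failure, the mode-info sizes are
// reset and the context buffers are freed so that a later call performs a
// full reallocation, and 1 is returned.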
int av1_alloc_context_buffers(AV1_COMMON *cm, int width, int height) {
  CommonModeInfoParams *const mi_params = &cm->mi_params;
  mi_params->set_mb_mi(mi_params, width, height);
  if (alloc_mi(mi_params)) goto fail;
  return 0;

fail:
  // clear the mi_* values to force a realloc on resync
  mi_params->set_mb_mi(mi_params, 0, 0);
  av1_free_context_buffers(cm);
  return 1;
}

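// Frees the remaining allocations owned by AV1_COMMON: the mode-info and
// above-context buffers plus the frame contexts (cm->fc and
// cm->default_frame_context).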
void av1_remove_common(AV1_COMMON *cm) {
  av1_free_context_buffers(cm);

  aom_free(cm->fc);
  cm->fc = NULL;
  aom_free(cm->default_frame_context);
  cm->default_frame_context = NULL;
}

void av1_init_mi_buffers(CommonModeInfoParams *mi_params) {
  mi_params->setup_mi(mi_params);
}