/*
 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "vp9/common/vp9_thread_common.h"
#include "vp9/encoder/vp9_bitstream.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_firstpass.h"
#include "vp9/encoder/vp9_multi_thread.h"
#include "vp9/encoder/vp9_temporal_filter.h"
#include "vpx_dsp/vpx_dsp_common.h"

static void accumulate_rd_opt(ThreadData *td, ThreadData *td_t) {
  int i, j, k, l, m, n;

  for (i = 0; i < REFERENCE_MODES; i++)
    td->rd_counts.comp_pred_diff[i] += td_t->rd_counts.comp_pred_diff[i];

  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
    td->rd_counts.filter_diff[i] += td_t->rd_counts.filter_diff[i];

  for (i = 0; i < TX_SIZES; i++)
    for (j = 0; j < PLANE_TYPES; j++)
      for (k = 0; k < REF_TYPES; k++)
        for (l = 0; l < COEF_BANDS; l++)
          for (m = 0; m < COEFF_CONTEXTS; m++)
            for (n = 0; n < ENTROPY_TOKENS; n++)
              td->rd_counts.coef_counts[i][j][k][l][m][n] +=
                  td_t->rd_counts.coef_counts[i][j][k][l][m][n];
}

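// Tile-based multithreading worker: tiles are assigned to workers in an
// interleaved fashion, so the worker starting at tile index 'start' processes
// tiles start, start + num_workers, start + 2 * num_workers, and so on.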
static int enc_worker_hook(void *arg1, void *unused) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  VP9_COMP *const cpi = thread_data->cpi;
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  int t;

  (void)unused;

  for (t = thread_data->start; t < tile_rows * tile_cols;
       t += cpi->num_workers) {
    int tile_row = t / tile_cols;
    int tile_col = t % tile_cols;

    vp9_encode_tile(cpi, thread_data->td, tile_row, tile_col);
  }

  return 0;
}

static int get_max_tile_cols(VP9_COMP *cpi) {
  const int aligned_width = ALIGN_POWER_OF_TWO(cpi->oxcf.width, MI_SIZE_LOG2);
  int mi_cols = aligned_width >> MI_SIZE_LOG2;
  int min_log2_tile_cols, max_log2_tile_cols;
  int log2_tile_cols;

  vp9_get_tile_n_bits(mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
  log2_tile_cols =
      clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
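  // With an automatically selected target level, cap the number of tile
  // columns at what the level allows for this picture size, but never go
  // below the minimum required by the frame width.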
  if (cpi->oxcf.target_level == LEVEL_AUTO) {
    const int level_tile_cols =
        log_tile_cols_from_picsize_level(cpi->common.width, cpi->common.height);
    if (log2_tile_cols > level_tile_cols) {
      log2_tile_cols = VPXMAX(level_tile_cols, min_log2_tile_cols);
    }
  }
  return (1 << log2_tile_cols);
}

static void create_enc_workers(VP9_COMP *cpi, int num_workers) {
  VP9_COMMON *const cm = &cpi->common;
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  int i;
  // When using SVC, allocate threads according to the highest resolution.
  // When row-based multithreading is enabled, it is OK to allocate more
  // threads than the maximum number of tile columns.
  if (cpi->use_svc && !cpi->row_mt) {
    int max_tile_cols = get_max_tile_cols(cpi);
    num_workers = VPXMIN(cpi->oxcf.max_threads, max_tile_cols);
  }
  assert(num_workers > 0);
  if (num_workers == cpi->num_workers) return;
  vp9_loop_filter_dealloc(&cpi->lf_row_sync);
  vp9_bitstream_encode_tiles_buffer_dealloc(cpi);
  vp9_encode_free_mt_data(cpi);

  CHECK_MEM_ERROR(cm, cpi->workers,
                  vpx_malloc(num_workers * sizeof(*cpi->workers)));

  CHECK_MEM_ERROR(cm, cpi->tile_thr_data,
                  vpx_calloc(num_workers, sizeof(*cpi->tile_thr_data)));

  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *thread_data = &cpi->tile_thr_data[i];

    ++cpi->num_workers;
    winterface->init(worker);

    if (i < num_workers - 1) {
      thread_data->cpi = cpi;

      // Allocate thread data.
      CHECK_MEM_ERROR(cm, thread_data->td,
                      vpx_memalign(32, sizeof(*thread_data->td)));
      vp9_zero(*thread_data->td);

      // Set up pc_tree.
      thread_data->td->leaf_tree = NULL;
      thread_data->td->pc_tree = NULL;
      vp9_setup_pc_tree(cm, thread_data->td);

      // Allocate frame counters in thread data.
      CHECK_MEM_ERROR(cm, thread_data->td->counts,
                      vpx_calloc(1, sizeof(*thread_data->td->counts)));

      // Create threads.
      if (!winterface->reset(worker))
        vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                           "Tile encoder thread creation failed");
    } else {
      // Main thread acts as a worker and uses the thread data in cpi.
      thread_data->cpi = cpi;
      thread_data->td = &cpi->td;
    }
    winterface->sync(worker);
  }
}

static void launch_enc_workers(VP9_COMP *cpi, VPxWorkerHook hook, void *data2,
                               int num_workers) {
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  int i;

  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    worker->hook = hook;
    worker->data1 = &cpi->tile_thr_data[i];
    worker->data2 = data2;
  }

  // Encode a frame
  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;

    // Set the starting tile for each thread.
    thread_data->start = i;

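    // The last worker slot is the main thread: execute() runs its hook
    // synchronously on the calling thread, while the remaining workers run
    // asynchronously via launch() and are waited on below via sync().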
    if (i == cpi->num_workers - 1)
      winterface->execute(worker);
    else
      winterface->launch(worker);
  }

  // Encoding ends.
  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    winterface->sync(worker);
  }
}

void vp9_encode_free_mt_data(struct VP9_COMP *cpi) {
  int t;
  for (t = 0; t < cpi->num_workers; ++t) {
    VPxWorker *const worker = &cpi->workers[t];
    EncWorkerData *const thread_data = &cpi->tile_thr_data[t];

    // Deallocate allocated threads.
    vpx_get_worker_interface()->end(worker);

    // Deallocate allocated thread data.
    if (t < cpi->num_workers - 1) {
      vpx_free(thread_data->td->counts);
      vp9_free_pc_tree(thread_data->td);
      vpx_free(thread_data->td);
    }
  }
  vpx_free(cpi->tile_thr_data);
  vpx_free(cpi->workers);
  cpi->num_workers = 0;
}

void vp9_encode_tiles_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int num_workers = VPXMIN(cpi->oxcf.max_threads, tile_cols);
  int i;

  vp9_init_tile_data(cpi);

  create_enc_workers(cpi, num_workers);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];

    // Before encoding a frame, copy the thread data from cpi.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
      thread_data->td->rd_counts = cpi->td.rd_counts;
    }
    if (thread_data->td->counts != &cpi->common.counts) {
      memcpy(thread_data->td->counts, &cpi->common.counts,
             sizeof(cpi->common.counts));
    }

    // Handle use_nonrd_pick_mode case.
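    // Point each thread's macroblock plane buffers at the
    // coeff/qcoeff/dqcoeff/eob buffers of its own root PICK_MODE_CONTEXT so
    // that non-RD mode selection operates on per-thread storage.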
    if (cpi->sf.use_nonrd_pick_mode) {
      MACROBLOCK *const x = &thread_data->td->mb;
      MACROBLOCKD *const xd = &x->e_mbd;
      struct macroblock_plane *const p = x->plane;
      struct macroblockd_plane *const pd = xd->plane;
      PICK_MODE_CONTEXT *ctx = &thread_data->td->pc_root->none;
      int j;

      for (j = 0; j < MAX_MB_PLANE; ++j) {
        p[j].coeff = ctx->coeff_pbuf[j][0];
        p[j].qcoeff = ctx->qcoeff_pbuf[j][0];
        pd[j].dqcoeff = ctx->dqcoeff_pbuf[j][0];
        p[j].eobs = ctx->eobs_pbuf[j][0];
      }
    }
  }

  launch_enc_workers(cpi, enc_worker_hook, NULL, num_workers);

  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;

    // Accumulate counters.
    if (i < cpi->num_workers - 1) {
      vp9_accumulate_frame_counts(&cm->counts, thread_data->td->counts, 0);
      accumulate_rd_opt(&cpi->td, thread_data->td);
    }
  }
}

#if !CONFIG_REALTIME_ONLY
static void accumulate_fp_tile_stat(TileDataEnc *tile_data,
                                    TileDataEnc *tile_data_t) {
  tile_data->fp_data.intra_factor += tile_data_t->fp_data.intra_factor;
  tile_data->fp_data.brightness_factor +=
      tile_data_t->fp_data.brightness_factor;
  tile_data->fp_data.coded_error += tile_data_t->fp_data.coded_error;
  tile_data->fp_data.sr_coded_error += tile_data_t->fp_data.sr_coded_error;
  tile_data->fp_data.frame_noise_energy +=
      tile_data_t->fp_data.frame_noise_energy;
  tile_data->fp_data.intra_error += tile_data_t->fp_data.intra_error;
  tile_data->fp_data.intercount += tile_data_t->fp_data.intercount;
  tile_data->fp_data.second_ref_count += tile_data_t->fp_data.second_ref_count;
  tile_data->fp_data.neutral_count += tile_data_t->fp_data.neutral_count;
  tile_data->fp_data.intra_count_low += tile_data_t->fp_data.intra_count_low;
  tile_data->fp_data.intra_count_high += tile_data_t->fp_data.intra_count_high;
  tile_data->fp_data.intra_skip_count += tile_data_t->fp_data.intra_skip_count;
  tile_data->fp_data.mvcount += tile_data_t->fp_data.mvcount;
  tile_data->fp_data.sum_mvr += tile_data_t->fp_data.sum_mvr;
  tile_data->fp_data.sum_mvr_abs += tile_data_t->fp_data.sum_mvr_abs;
  tile_data->fp_data.sum_mvc += tile_data_t->fp_data.sum_mvc;
  tile_data->fp_data.sum_mvc_abs += tile_data_t->fp_data.sum_mvc_abs;
  tile_data->fp_data.sum_mvrs += tile_data_t->fp_data.sum_mvrs;
  tile_data->fp_data.sum_mvcs += tile_data_t->fp_data.sum_mvcs;
  tile_data->fp_data.sum_in_vectors += tile_data_t->fp_data.sum_in_vectors;
  tile_data->fp_data.intra_smooth_count +=
      tile_data_t->fp_data.intra_smooth_count;
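  // image_data_start_row tracks the first row containing image data
  // (INVALID_ROW means none found yet): keep the smaller valid row, falling
  // back to whichever one is valid when the other is INVALID_ROW.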
  tile_data->fp_data.image_data_start_row =
      VPXMIN(tile_data->fp_data.image_data_start_row,
             tile_data_t->fp_data.image_data_start_row) == INVALID_ROW
          ? VPXMAX(tile_data->fp_data.image_data_start_row,
                   tile_data_t->fp_data.image_data_start_row)
          : VPXMIN(tile_data->fp_data.image_data_start_row,
                   tile_data_t->fp_data.image_data_start_row);
}
#endif  // !CONFIG_REALTIME_ONLY

// Allocate memory for row synchronization
void vp9_row_mt_sync_mem_alloc(VP9RowMTSync *row_mt_sync, VP9_COMMON *cm,
                               int rows) {
  row_mt_sync->rows = rows;
#if CONFIG_MULTITHREAD
  {
    int i;

    CHECK_MEM_ERROR(cm, row_mt_sync->mutex,
                    vpx_malloc(sizeof(*row_mt_sync->mutex) * rows));
    if (row_mt_sync->mutex) {
      for (i = 0; i < rows; ++i) {
        pthread_mutex_init(&row_mt_sync->mutex[i], NULL);
      }
    }

    CHECK_MEM_ERROR(cm, row_mt_sync->cond,
                    vpx_malloc(sizeof(*row_mt_sync->cond) * rows));
    if (row_mt_sync->cond) {
      for (i = 0; i < rows; ++i) {
        pthread_cond_init(&row_mt_sync->cond[i], NULL);
      }
    }
  }
#endif  // CONFIG_MULTITHREAD

  CHECK_MEM_ERROR(cm, row_mt_sync->cur_col,
                  vpx_malloc(sizeof(*row_mt_sync->cur_col) * rows));

  // Set up nsync.
  row_mt_sync->sync_range = 1;
}

// Deallocate row based multi-threading synchronization related mutex and data.
void vp9_row_mt_sync_mem_dealloc(VP9RowMTSync *row_mt_sync) {
  if (row_mt_sync != NULL) {
#if CONFIG_MULTITHREAD
    int i;

    if (row_mt_sync->mutex != NULL) {
      for (i = 0; i < row_mt_sync->rows; ++i) {
        pthread_mutex_destroy(&row_mt_sync->mutex[i]);
      }
      vpx_free(row_mt_sync->mutex);
    }
    if (row_mt_sync->cond != NULL) {
      for (i = 0; i < row_mt_sync->rows; ++i) {
        pthread_cond_destroy(&row_mt_sync->cond[i]);
      }
      vpx_free(row_mt_sync->cond);
    }
#endif  // CONFIG_MULTITHREAD
    vpx_free(row_mt_sync->cur_col);
    // Clear the structure: this call may be triggered by a dynamic change in
    // the tile configuration, in which case it will be followed by an
    // _alloc() that may fail.
    vp9_zero(*row_mt_sync);
  }
}

void vp9_row_mt_sync_read(VP9RowMTSync *const row_mt_sync, int r, int c) {
#if CONFIG_MULTITHREAD
  const int nsync = row_mt_sync->sync_range;

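  // Before processing column c of row r, wait until row r - 1 has reported
  // progress of at least c + nsync - 1 columns, i.e. until
  // cur_col[r - 1] >= c + nsync - 1. The check is only performed every nsync
  // columns to limit locking overhead.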
  if (r && !(c & (nsync - 1))) {
    pthread_mutex_t *const mutex = &row_mt_sync->mutex[r - 1];
    pthread_mutex_lock(mutex);

    while (c > row_mt_sync->cur_col[r - 1] - nsync + 1) {
      pthread_cond_wait(&row_mt_sync->cond[r - 1], mutex);
    }
    pthread_mutex_unlock(mutex);
  }
#else
  (void)row_mt_sync;
  (void)r;
  (void)c;
#endif  // CONFIG_MULTITHREAD
}

void vp9_row_mt_sync_read_dummy(VP9RowMTSync *const row_mt_sync, int r, int c) {
  (void)row_mt_sync;
  (void)r;
  (void)c;
  return;
}

void vp9_row_mt_sync_write(VP9RowMTSync *const row_mt_sync, int r, int c,
                           const int cols) {
#if CONFIG_MULTITHREAD
  const int nsync = row_mt_sync->sync_range;
  int cur;
  // Only signal when there are enough encoded blocks for the next row to run.
  int sig = 1;

  if (c < cols - 1) {
    cur = c;
    if (c % nsync != nsync - 1) sig = 0;
  } else {
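    // Row finished: advance the column counter past the end so that every
    // remaining waiter on this row is released.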
    cur = cols + nsync;
  }

  if (sig) {
    pthread_mutex_lock(&row_mt_sync->mutex[r]);

    row_mt_sync->cur_col[r] = cur;

    pthread_cond_signal(&row_mt_sync->cond[r]);
    pthread_mutex_unlock(&row_mt_sync->mutex[r]);
  }
#else
  (void)row_mt_sync;
  (void)r;
  (void)c;
  (void)cols;
#endif  // CONFIG_MULTITHREAD
}

void vp9_row_mt_sync_write_dummy(VP9RowMTSync *const row_mt_sync, int r, int c,
                                 const int cols) {
  (void)row_mt_sync;
  (void)r;
  (void)c;
  (void)cols;
  return;
}

#if !CONFIG_REALTIME_ONLY
static int first_pass_worker_hook(void *arg1, void *arg2) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  MultiThreadHandle *multi_thread_ctxt = (MultiThreadHandle *)arg2;
  VP9_COMP *const cpi = thread_data->cpi;
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  int tile_row, tile_col;
  TileDataEnc *this_tile;
  int end_of_frame;
  int thread_id = thread_data->thread_id;
  int cur_tile_id = multi_thread_ctxt->thread_id_to_tile_id[thread_id];
  JobNode *proc_job = NULL;
  FIRSTPASS_DATA fp_acc_data;
  MV zero_mv = { 0, 0 };
  MV best_ref_mv;
  int mb_row;

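  // Pull MB-row jobs from this thread's assigned tile until its queue is
  // empty, then poll the remaining tiles for outstanding work; the loop ends
  // once every tile in the frame has been fully processed.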
  end_of_frame = 0;
  while (0 == end_of_frame) {
    // Get the next job in the queue
    proc_job =
        (JobNode *)vp9_enc_grp_get_next_job(multi_thread_ctxt, cur_tile_id);
    if (NULL == proc_job) {
      // Query for the status of other tiles
      end_of_frame = vp9_get_tiles_proc_status(
          multi_thread_ctxt, thread_data->tile_completion_status, &cur_tile_id,
          tile_cols);
    } else {
      tile_col = proc_job->tile_col_id;
      tile_row = proc_job->tile_row_id;

      this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
      mb_row = proc_job->vert_unit_row_num;

      best_ref_mv = zero_mv;
      vp9_zero(fp_acc_data);
      fp_acc_data.image_data_start_row = INVALID_ROW;
      vp9_first_pass_encode_tile_mb_row(cpi, thread_data->td, &fp_acc_data,
                                        this_tile, &best_ref_mv, mb_row);
    }
  }
  return 0;
}

void vp9_encode_fp_row_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  TileDataEnc *first_tile_col;
  int num_workers = VPXMAX(cpi->oxcf.max_threads, 1);
  int i;

  if (multi_thread_ctxt->allocated_tile_cols < tile_cols ||
      multi_thread_ctxt->allocated_tile_rows < tile_rows ||
      multi_thread_ctxt->allocated_vert_unit_rows < cm->mb_rows) {
    vp9_row_mt_mem_dealloc(cpi);
    vp9_init_tile_data(cpi);
    vp9_row_mt_mem_alloc(cpi);
  } else {
    vp9_init_tile_data(cpi);
  }

  create_enc_workers(cpi, num_workers);

  vp9_assign_tile_to_thread(multi_thread_ctxt, tile_cols, cpi->num_workers);

  vp9_prepare_job_queue(cpi, FIRST_PASS_JOB);

  vp9_multi_thread_tile_init(cpi);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];

    // Before encoding a frame, copy the thread data from cpi.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
    }
  }

  launch_enc_workers(cpi, first_pass_worker_hook, multi_thread_ctxt,
                     num_workers);

  first_tile_col = &cpi->tile_data[0];
  for (i = 1; i < tile_cols; i++) {
    TileDataEnc *this_tile = &cpi->tile_data[i];
    accumulate_fp_tile_stat(first_tile_col, this_tile);
  }
}

static int temporal_filter_worker_hook(void *arg1, void *arg2) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  MultiThreadHandle *multi_thread_ctxt = (MultiThreadHandle *)arg2;
  VP9_COMP *const cpi = thread_data->cpi;
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  int tile_row, tile_col;
  int mb_col_start, mb_col_end;
  TileDataEnc *this_tile;
  int end_of_frame;
  int thread_id = thread_data->thread_id;
  int cur_tile_id = multi_thread_ctxt->thread_id_to_tile_id[thread_id];
  JobNode *proc_job = NULL;
  int mb_row;

  end_of_frame = 0;
  while (0 == end_of_frame) {
    // Get the next job in the queue
    proc_job =
        (JobNode *)vp9_enc_grp_get_next_job(multi_thread_ctxt, cur_tile_id);
    if (NULL == proc_job) {
      // Query for the status of other tiles
      end_of_frame = vp9_get_tiles_proc_status(
          multi_thread_ctxt, thread_data->tile_completion_status, &cur_tile_id,
          tile_cols);
    } else {
      tile_col = proc_job->tile_col_id;
      tile_row = proc_job->tile_row_id;
      this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
      mb_col_start = (this_tile->tile_info.mi_col_start) >> TF_SHIFT;
      mb_col_end = (this_tile->tile_info.mi_col_end + TF_ROUND) >> TF_SHIFT;
      mb_row = proc_job->vert_unit_row_num;

      vp9_temporal_filter_iterate_row_c(cpi, thread_data->td, mb_row,
                                        mb_col_start, mb_col_end);
    }
  }
  return 0;
}

void vp9_temporal_filter_row_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  int num_workers = cpi->num_workers ? cpi->num_workers : 1;
  int i;

  if (multi_thread_ctxt->allocated_tile_cols < tile_cols ||
      multi_thread_ctxt->allocated_tile_rows < tile_rows ||
      multi_thread_ctxt->allocated_vert_unit_rows < cm->mb_rows) {
    vp9_row_mt_mem_dealloc(cpi);
    vp9_init_tile_data(cpi);
    vp9_row_mt_mem_alloc(cpi);
  } else {
    vp9_init_tile_data(cpi);
  }

  create_enc_workers(cpi, num_workers);

  vp9_assign_tile_to_thread(multi_thread_ctxt, tile_cols, cpi->num_workers);

  vp9_prepare_job_queue(cpi, ARNR_JOB);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];

    // Before encoding a frame, copy the thread data from cpi.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
    }
  }

  launch_enc_workers(cpi, temporal_filter_worker_hook, multi_thread_ctxt,
                     num_workers);
}
#endif  // !CONFIG_REALTIME_ONLY

static int enc_row_mt_worker_hook(void *arg1, void *arg2) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  MultiThreadHandle *multi_thread_ctxt = (MultiThreadHandle *)arg2;
  VP9_COMP *const cpi = thread_data->cpi;
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  int tile_row, tile_col;
  int end_of_frame;
  int thread_id = thread_data->thread_id;
  int cur_tile_id = multi_thread_ctxt->thread_id_to_tile_id[thread_id];
  JobNode *proc_job = NULL;
  int mi_row;

  end_of_frame = 0;
  while (0 == end_of_frame) {
    // Get the next job in the queue
    proc_job =
        (JobNode *)vp9_enc_grp_get_next_job(multi_thread_ctxt, cur_tile_id);
    if (NULL == proc_job) {
      // Query for the status of other tiles
      end_of_frame = vp9_get_tiles_proc_status(
          multi_thread_ctxt, thread_data->tile_completion_status, &cur_tile_id,
          tile_cols);
    } else {
      tile_col = proc_job->tile_col_id;
      tile_row = proc_job->tile_row_id;
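      // Each job covers one superblock row; convert its row index to mi units.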
      mi_row = proc_job->vert_unit_row_num * MI_BLOCK_SIZE;

      vp9_encode_sb_row(cpi, thread_data->td, tile_row, tile_col, mi_row);
    }
  }
  return 0;
}

void vp9_encode_tiles_row_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  int num_workers = VPXMAX(cpi->oxcf.max_threads, 1);
  int i;

  if (multi_thread_ctxt->allocated_tile_cols < tile_cols ||
      multi_thread_ctxt->allocated_tile_rows < tile_rows ||
      multi_thread_ctxt->allocated_vert_unit_rows < cm->mb_rows) {
    vp9_row_mt_mem_dealloc(cpi);
    vp9_init_tile_data(cpi);
    vp9_row_mt_mem_alloc(cpi);
  } else {
    vp9_init_tile_data(cpi);
  }

  create_enc_workers(cpi, num_workers);

  vp9_assign_tile_to_thread(multi_thread_ctxt, tile_cols, cpi->num_workers);

  vp9_prepare_job_queue(cpi, ENCODE_JOB);

  vp9_multi_thread_tile_init(cpi);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];
    // Before encoding a frame, copy the thread data from cpi.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
      thread_data->td->rd_counts = cpi->td.rd_counts;
    }
    if (thread_data->td->counts != &cpi->common.counts) {
      memcpy(thread_data->td->counts, &cpi->common.counts,
             sizeof(cpi->common.counts));
    }

    // Handle use_nonrd_pick_mode case.
    if (cpi->sf.use_nonrd_pick_mode) {
      MACROBLOCK *const x = &thread_data->td->mb;
      MACROBLOCKD *const xd = &x->e_mbd;
      struct macroblock_plane *const p = x->plane;
      struct macroblockd_plane *const pd = xd->plane;
      PICK_MODE_CONTEXT *ctx = &thread_data->td->pc_root->none;
      int j;

      for (j = 0; j < MAX_MB_PLANE; ++j) {
        p[j].coeff = ctx->coeff_pbuf[j][0];
        p[j].qcoeff = ctx->qcoeff_pbuf[j][0];
        pd[j].dqcoeff = ctx->dqcoeff_pbuf[j][0];
        p[j].eobs = ctx->eobs_pbuf[j][0];
      }
    }
  }

  launch_enc_workers(cpi, enc_row_mt_worker_hook, multi_thread_ctxt,
                     num_workers);

  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;

    // Accumulate counters.
    if (i < cpi->num_workers - 1) {
      vp9_accumulate_frame_counts(&cm->counts, thread_data->td->counts, 0);
      accumulate_rd_opt(&cpi->td, thread_data->td);
    }
  }
}