/*
 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "vp9/common/vp9_thread_common.h"
#include "vp9/encoder/vp9_bitstream.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_firstpass.h"
#include "vp9/encoder/vp9_multi_thread.h"
#include "vp9/encoder/vp9_temporal_filter.h"
#include "vpx_dsp/vpx_dsp_common.h"

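// Accumulate the rate-distortion counters gathered by a worker thread (td_t)
// into the target thread data (td) after a frame has been encoded.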
static void accumulate_rd_opt(ThreadData *td, ThreadData *td_t) {
  int i, j, k, l, m, n;

  for (i = 0; i < REFERENCE_MODES; i++)
    td->rd_counts.comp_pred_diff[i] += td_t->rd_counts.comp_pred_diff[i];

  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
    td->rd_counts.filter_diff[i] += td_t->rd_counts.filter_diff[i];

  for (i = 0; i < TX_SIZES; i++)
    for (j = 0; j < PLANE_TYPES; j++)
      for (k = 0; k < REF_TYPES; k++)
        for (l = 0; l < COEF_BANDS; l++)
          for (m = 0; m < COEFF_CONTEXTS; m++)
            for (n = 0; n < ENTROPY_TOKENS; n++)
              td->rd_counts.coef_counts[i][j][k][l][m][n] +=
                  td_t->rd_counts.coef_counts[i][j][k][l][m][n];
}

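// Encode worker hook for tile-based multi-threading: worker i starts at tile
// index i and strides by the total worker count, so the tiles of a frame are
// distributed round-robin across the encode workers.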
static int enc_worker_hook(void *arg1, void *unused) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  VP9_COMP *const cpi = thread_data->cpi;
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  int t;

  (void)unused;

  for (t = thread_data->start; t < tile_rows * tile_cols;
       t += cpi->num_workers) {
    int tile_row = t / tile_cols;
    int tile_col = t % tile_cols;

    vp9_encode_tile(cpi, thread_data->td, tile_row, tile_col);
  }

  return 0;
}

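// Return the number of tile columns implied by the encoder config, clamped to
// the range the frame width allows and, when the target level is LEVEL_AUTO,
// to the limit imposed by the level matching the picture size.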
static int get_max_tile_cols(VP9_COMP *cpi) {
  const int aligned_width = ALIGN_POWER_OF_TWO(cpi->oxcf.width, MI_SIZE_LOG2);
  int mi_cols = aligned_width >> MI_SIZE_LOG2;
  int min_log2_tile_cols, max_log2_tile_cols;
  int log2_tile_cols;

  vp9_get_tile_n_bits(mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
  log2_tile_cols =
      clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
  if (cpi->oxcf.target_level == LEVEL_AUTO) {
    const int level_tile_cols =
        log_tile_cols_from_picsize_level(cpi->common.width, cpi->common.height);
    if (log2_tile_cols > level_tile_cols) {
      log2_tile_cols = VPXMAX(level_tile_cols, min_log2_tile_cols);
    }
  }
  return (1 << log2_tile_cols);
}

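// (Re)create the encode worker pool. Only num_workers - 1 OS threads are
// spawned: the last slot is the main thread itself, which reuses cpi->td,
// while each spawned worker gets its own ThreadData with a private pc_tree
// and frame counters.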
static void create_enc_workers(VP9_COMP *cpi, int num_workers) {
  VP9_COMMON *const cm = &cpi->common;
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  int i;
  // While using SVC, we need to allocate threads according to the highest
  // resolution. When row based multithreading is enabled, it is OK to
  // allocate more threads than the number of max tile columns.
  if (cpi->use_svc && !cpi->row_mt) {
    int max_tile_cols = get_max_tile_cols(cpi);
    num_workers = VPXMIN(cpi->oxcf.max_threads, max_tile_cols);
  }
  assert(num_workers > 0);
  if (num_workers == cpi->num_workers) return;
  vp9_loop_filter_dealloc(&cpi->lf_row_sync);
  vp9_bitstream_encode_tiles_buffer_dealloc(cpi);
  vp9_encode_free_mt_data(cpi);

  CHECK_MEM_ERROR(&cm->error, cpi->workers,
                  vpx_malloc(num_workers * sizeof(*cpi->workers)));

  CHECK_MEM_ERROR(&cm->error, cpi->tile_thr_data,
                  vpx_calloc(num_workers, sizeof(*cpi->tile_thr_data)));

  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *thread_data = &cpi->tile_thr_data[i];

    ++cpi->num_workers;
    winterface->init(worker);

    if (i < num_workers - 1) {
      thread_data->cpi = cpi;

      // Allocate thread data.
      CHECK_MEM_ERROR(&cm->error, thread_data->td,
                      vpx_memalign(32, sizeof(*thread_data->td)));
      vp9_zero(*thread_data->td);

      // Set up pc_tree.
      thread_data->td->leaf_tree = NULL;
      thread_data->td->pc_tree = NULL;
      vp9_setup_pc_tree(cm, thread_data->td);

      // Allocate frame counters in thread data.
      CHECK_MEM_ERROR(&cm->error, thread_data->td->counts,
                      vpx_calloc(1, sizeof(*thread_data->td->counts)));

      // Create threads
      if (!winterface->reset(worker))
        vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                           "Tile encoder thread creation failed");
    } else {
      // Main thread acts as a worker and uses the thread data in cpi.
      thread_data->cpi = cpi;
      thread_data->td = &cpi->td;
    }
    winterface->sync(worker);
  }
}

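// Point every worker at its EncWorkerData and starting tile index, then run
// the hook on all of them: the last worker (the main thread) runs
// synchronously via execute(), the rest are launched on their own threads,
// and sync() waits for all of them to finish.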
static void launch_enc_workers(VP9_COMP *cpi, VPxWorkerHook hook, void *data2,
                               int num_workers) {
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  int i;

  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    worker->hook = hook;
    worker->data1 = &cpi->tile_thr_data[i];
    worker->data2 = data2;
  }

  // Encode a frame.
  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;

    // Set the starting tile for each thread.
    thread_data->start = i;

    if (i == cpi->num_workers - 1)
      winterface->execute(worker);
    else
      winterface->launch(worker);
  }

  // Encoding ends.
  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    winterface->sync(worker);
  }
}

void vp9_encode_free_mt_data(struct VP9_COMP *cpi) {
  int t;
  for (t = 0; t < cpi->num_workers; ++t) {
    VPxWorker *const worker = &cpi->workers[t];
    EncWorkerData *const thread_data = &cpi->tile_thr_data[t];

    // End the allocated worker threads.
    vpx_get_worker_interface()->end(worker);

    // Deallocate allocated thread data.
    if (t < cpi->num_workers - 1) {
      vpx_free(thread_data->td->counts);
      vp9_free_pc_tree(thread_data->td);
      vpx_free(thread_data->td);
    }
  }
  vpx_free(cpi->tile_thr_data);
  vpx_free(cpi->workers);
  cpi->num_workers = 0;
}

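// Tile-based multi-threaded encode: one worker per tile column, capped by
// max_threads. Per-thread MB context and counters are seeded from cpi before
// launch and merged back into cm->counts and cpi->td afterwards.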
void vp9_encode_tiles_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int num_workers = VPXMIN(cpi->oxcf.max_threads, tile_cols);
  int i;

  vp9_init_tile_data(cpi);

  create_enc_workers(cpi, num_workers);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];

    // Before encoding a frame, copy the thread data from cpi.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
      thread_data->td->rd_counts = cpi->td.rd_counts;
    }
    if (thread_data->td->counts != &cpi->common.counts) {
      memcpy(thread_data->td->counts, &cpi->common.counts,
             sizeof(cpi->common.counts));
    }

    // Handle use_nonrd_pick_mode case.
    if (cpi->sf.use_nonrd_pick_mode) {
      MACROBLOCK *const x = &thread_data->td->mb;
      MACROBLOCKD *const xd = &x->e_mbd;
      struct macroblock_plane *const p = x->plane;
      struct macroblockd_plane *const pd = xd->plane;
      PICK_MODE_CONTEXT *ctx = &thread_data->td->pc_root->none;
      int j;

      for (j = 0; j < MAX_MB_PLANE; ++j) {
        p[j].coeff = ctx->coeff_pbuf[j][0];
        p[j].qcoeff = ctx->qcoeff_pbuf[j][0];
        pd[j].dqcoeff = ctx->dqcoeff_pbuf[j][0];
        p[j].eobs = ctx->eobs_pbuf[j][0];
      }
    }
  }

  launch_enc_workers(cpi, enc_worker_hook, NULL, num_workers);

  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;

    // Accumulate counters.
    if (i < cpi->num_workers - 1) {
      vp9_accumulate_frame_counts(&cm->counts, thread_data->td->counts, 0);
      accumulate_rd_opt(&cpi->td, thread_data->td);
    }
  }
}

#if !CONFIG_REALTIME_ONLY
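// Merge the first-pass statistics gathered for tile_data_t into tile_data.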
static void accumulate_fp_tile_stat(TileDataEnc *tile_data,
                                    TileDataEnc *tile_data_t) {
  tile_data->fp_data.intra_factor += tile_data_t->fp_data.intra_factor;
  tile_data->fp_data.brightness_factor +=
      tile_data_t->fp_data.brightness_factor;
  tile_data->fp_data.coded_error += tile_data_t->fp_data.coded_error;
  tile_data->fp_data.sr_coded_error += tile_data_t->fp_data.sr_coded_error;
  tile_data->fp_data.frame_noise_energy +=
      tile_data_t->fp_data.frame_noise_energy;
  tile_data->fp_data.intra_error += tile_data_t->fp_data.intra_error;
  tile_data->fp_data.intercount += tile_data_t->fp_data.intercount;
  tile_data->fp_data.second_ref_count += tile_data_t->fp_data.second_ref_count;
  tile_data->fp_data.neutral_count += tile_data_t->fp_data.neutral_count;
  tile_data->fp_data.intra_count_low += tile_data_t->fp_data.intra_count_low;
  tile_data->fp_data.intra_count_high += tile_data_t->fp_data.intra_count_high;
  tile_data->fp_data.intra_skip_count += tile_data_t->fp_data.intra_skip_count;
  tile_data->fp_data.mvcount += tile_data_t->fp_data.mvcount;
  tile_data->fp_data.new_mv_count += tile_data_t->fp_data.new_mv_count;
  tile_data->fp_data.sum_mvr += tile_data_t->fp_data.sum_mvr;
  tile_data->fp_data.sum_mvr_abs += tile_data_t->fp_data.sum_mvr_abs;
  tile_data->fp_data.sum_mvc += tile_data_t->fp_data.sum_mvc;
  tile_data->fp_data.sum_mvc_abs += tile_data_t->fp_data.sum_mvc_abs;
  tile_data->fp_data.sum_mvrs += tile_data_t->fp_data.sum_mvrs;
  tile_data->fp_data.sum_mvcs += tile_data_t->fp_data.sum_mvcs;
  tile_data->fp_data.sum_in_vectors += tile_data_t->fp_data.sum_in_vectors;
  tile_data->fp_data.intra_smooth_count +=
      tile_data_t->fp_data.intra_smooth_count;
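  // Merge image_data_start_row: if the smaller of the two values is
  // INVALID_ROW (i.e. at least one tile has not seen image data yet), take
  // the larger value; otherwise take the earlier (smaller) row.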
  tile_data->fp_data.image_data_start_row =
      VPXMIN(tile_data->fp_data.image_data_start_row,
             tile_data_t->fp_data.image_data_start_row) == INVALID_ROW
          ? VPXMAX(tile_data->fp_data.image_data_start_row,
                   tile_data_t->fp_data.image_data_start_row)
          : VPXMIN(tile_data->fp_data.image_data_start_row,
                   tile_data_t->fp_data.image_data_start_row);
}
#endif  // !CONFIG_REALTIME_ONLY

// Allocate memory for row synchronization.
void vp9_row_mt_sync_mem_alloc(VP9RowMTSync *row_mt_sync, VP9_COMMON *cm,
                               int rows) {
  row_mt_sync->rows = rows;
#if CONFIG_MULTITHREAD
  {
    int i;

    CHECK_MEM_ERROR(&cm->error, row_mt_sync->mutex,
                    vpx_malloc(sizeof(*row_mt_sync->mutex) * rows));
    if (row_mt_sync->mutex) {
      for (i = 0; i < rows; ++i) {
        pthread_mutex_init(&row_mt_sync->mutex[i], NULL);
      }
    }

    CHECK_MEM_ERROR(&cm->error, row_mt_sync->cond,
                    vpx_malloc(sizeof(*row_mt_sync->cond) * rows));
    if (row_mt_sync->cond) {
      for (i = 0; i < rows; ++i) {
        pthread_cond_init(&row_mt_sync->cond[i], NULL);
      }
    }
  }
#endif  // CONFIG_MULTITHREAD

  CHECK_MEM_ERROR(&cm->error, row_mt_sync->cur_col,
                  vpx_malloc(sizeof(*row_mt_sync->cur_col) * rows));

  // Set up nsync.
  row_mt_sync->sync_range = 1;
}

// Deallocate row-based multi-threading synchronization mutexes, condition
// variables and column-progress data.
void vp9_row_mt_sync_mem_dealloc(VP9RowMTSync *row_mt_sync) {
  if (row_mt_sync != NULL) {
#if CONFIG_MULTITHREAD
    int i;

    if (row_mt_sync->mutex != NULL) {
      for (i = 0; i < row_mt_sync->rows; ++i) {
        pthread_mutex_destroy(&row_mt_sync->mutex[i]);
      }
      vpx_free(row_mt_sync->mutex);
    }
    if (row_mt_sync->cond != NULL) {
      for (i = 0; i < row_mt_sync->rows; ++i) {
        pthread_cond_destroy(&row_mt_sync->cond[i]);
      }
      vpx_free(row_mt_sync->cond);
    }
#endif  // CONFIG_MULTITHREAD
    vpx_free(row_mt_sync->cur_col);
    // Clear the structure: this call may be triggered by a dynamic change in
    // the tile configuration, in which case it will be followed by an
    // _alloc() which may fail.
    vp9_zero(*row_mt_sync);
  }
}

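// Wait until superblock (r, c) may be processed: the thread encoding row r
// blocks until row r - 1 has advanced far enough, as recorded in
// cur_col[r - 1]. The check only runs every nsync-th column, using a cheap
// mask test since sync_range is a power of two (currently always 1).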
void vp9_row_mt_sync_read(VP9RowMTSync *const row_mt_sync, int r, int c) {
#if CONFIG_MULTITHREAD
  const int nsync = row_mt_sync->sync_range;

  if (r && !(c & (nsync - 1))) {
    pthread_mutex_t *const mutex = &row_mt_sync->mutex[r - 1];
    pthread_mutex_lock(mutex);

    while (c > row_mt_sync->cur_col[r - 1] - nsync + 1) {
      pthread_cond_wait(&row_mt_sync->cond[r - 1], mutex);
    }
    pthread_mutex_unlock(mutex);
  }
#else
  (void)row_mt_sync;
  (void)r;
  (void)c;
#endif  // CONFIG_MULTITHREAD
}

void vp9_row_mt_sync_read_dummy(VP9RowMTSync *const row_mt_sync, int r, int c) {
  (void)row_mt_sync;
  (void)r;
  (void)c;
  return;
}

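// Record that row r has completed column c. The waiter is signaled only
// every nsync-th column, or unconditionally at the end of the row, where
// cur_col is pushed past the last column so readers can never block again.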
void vp9_row_mt_sync_write(VP9RowMTSync *const row_mt_sync, int r, int c,
                           const int cols) {
#if CONFIG_MULTITHREAD
  const int nsync = row_mt_sync->sync_range;
  int cur;
  // Only signal when enough blocks have been encoded for the next row to run.
  int sig = 1;

  if (c < cols - 1) {
    cur = c;
    if (c % nsync != nsync - 1) sig = 0;
  } else {
    cur = cols + nsync;
  }

  if (sig) {
    pthread_mutex_lock(&row_mt_sync->mutex[r]);

    row_mt_sync->cur_col[r] = cur;

    pthread_cond_signal(&row_mt_sync->cond[r]);
    pthread_mutex_unlock(&row_mt_sync->mutex[r]);
  }
#else
  (void)row_mt_sync;
  (void)r;
  (void)c;
  (void)cols;
#endif  // CONFIG_MULTITHREAD
}

void vp9_row_mt_sync_write_dummy(VP9RowMTSync *const row_mt_sync, int r, int c,
                                 const int cols) {
  (void)row_mt_sync;
  (void)r;
  (void)c;
  (void)cols;
  return;
}

#if !CONFIG_REALTIME_ONLY
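// First-pass worker hook for row-based multi-threading: repeatedly pull the
// next (tile, macroblock-row) job from the job queue and run first-pass
// stats collection for that row; when the current tile runs dry, poll the
// other tiles until the whole frame is done.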
static int first_pass_worker_hook(void *arg1, void *arg2) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  MultiThreadHandle *multi_thread_ctxt = (MultiThreadHandle *)arg2;
  VP9_COMP *const cpi = thread_data->cpi;
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  int tile_row, tile_col;
  TileDataEnc *this_tile;
  int end_of_frame;
  int thread_id = thread_data->thread_id;
  int cur_tile_id = multi_thread_ctxt->thread_id_to_tile_id[thread_id];
  JobNode *proc_job = NULL;
  FIRSTPASS_DATA fp_acc_data;
  MV zero_mv = { 0, 0 };
  MV best_ref_mv;
  int mb_row;

  end_of_frame = 0;
  while (0 == end_of_frame) {
    // Get the next job in the queue.
    proc_job =
        (JobNode *)vp9_enc_grp_get_next_job(multi_thread_ctxt, cur_tile_id);
    if (NULL == proc_job) {
      // Query for the status of other tiles.
      end_of_frame = vp9_get_tiles_proc_status(
          multi_thread_ctxt, thread_data->tile_completion_status, &cur_tile_id,
          tile_cols);
    } else {
      tile_col = proc_job->tile_col_id;
      tile_row = proc_job->tile_row_id;

      this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
      mb_row = proc_job->vert_unit_row_num;

      best_ref_mv = zero_mv;
      vp9_zero(fp_acc_data);
      fp_acc_data.image_data_start_row = INVALID_ROW;
      vp9_first_pass_encode_tile_mb_row(cpi, thread_data->td, &fp_acc_data,
                                        this_tile, &best_ref_mv, mb_row);
    }
  }
  return 0;
}

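// Row-multithreaded first pass: (re)allocate the row-mt context if the tile
// configuration changed, seed per-thread data, fill the FIRST_PASS_JOB queue
// and launch the workers, then fold all tile statistics into tile column 0.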
void vp9_encode_fp_row_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  TileDataEnc *first_tile_col;
  int num_workers = VPXMAX(cpi->oxcf.max_threads, 1);
  int i;

  if (multi_thread_ctxt->allocated_tile_cols < tile_cols ||
      multi_thread_ctxt->allocated_tile_rows < tile_rows ||
      multi_thread_ctxt->allocated_vert_unit_rows < cm->mb_rows) {
    vp9_row_mt_mem_dealloc(cpi);
    vp9_init_tile_data(cpi);
    vp9_row_mt_mem_alloc(cpi);
  } else {
    vp9_init_tile_data(cpi);
  }

  create_enc_workers(cpi, num_workers);

  vp9_assign_tile_to_thread(multi_thread_ctxt, tile_cols, cpi->num_workers);

  vp9_prepare_job_queue(cpi, FIRST_PASS_JOB);

  vp9_multi_thread_tile_init(cpi);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];

    // Before encoding a frame, copy the thread data from cpi.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
    }
  }

  launch_enc_workers(cpi, first_pass_worker_hook, multi_thread_ctxt,
                     num_workers);

  first_tile_col = &cpi->tile_data[0];
  for (i = 1; i < tile_cols; i++) {
    TileDataEnc *this_tile = &cpi->tile_data[i];
    accumulate_fp_tile_stat(first_tile_col, this_tile);
  }
}

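// ARNR temporal-filter worker hook: consume (tile, macroblock-row) jobs and
// filter the corresponding row, converting the tile's mi_col range to
// macroblock units first.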
static int temporal_filter_worker_hook(void *arg1, void *arg2) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  MultiThreadHandle *multi_thread_ctxt = (MultiThreadHandle *)arg2;
  VP9_COMP *const cpi = thread_data->cpi;
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  int tile_row, tile_col;
  int mb_col_start, mb_col_end;
  TileDataEnc *this_tile;
  int end_of_frame;
  int thread_id = thread_data->thread_id;
  int cur_tile_id = multi_thread_ctxt->thread_id_to_tile_id[thread_id];
  JobNode *proc_job = NULL;
  int mb_row;

  end_of_frame = 0;
  while (0 == end_of_frame) {
    // Get the next job in the queue.
    proc_job =
        (JobNode *)vp9_enc_grp_get_next_job(multi_thread_ctxt, cur_tile_id);
    if (NULL == proc_job) {
      // Query for the status of other tiles.
      end_of_frame = vp9_get_tiles_proc_status(
          multi_thread_ctxt, thread_data->tile_completion_status, &cur_tile_id,
          tile_cols);
    } else {
      tile_col = proc_job->tile_col_id;
      tile_row = proc_job->tile_row_id;
      this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
      mb_col_start = (this_tile->tile_info.mi_col_start) >> TF_SHIFT;
      mb_col_end = (this_tile->tile_info.mi_col_end + TF_ROUND) >> TF_SHIFT;
      mb_row = proc_job->vert_unit_row_num;

      vp9_temporal_filter_iterate_row_c(cpi, thread_data->td, mb_row,
                                        mb_col_start, mb_col_end);
    }
  }
  return 0;
}

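// Row-multithreaded ARNR temporal filtering; reuses the already created
// worker pool, falling back to a single worker when none exists yet.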
void vp9_temporal_filter_row_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  int num_workers = cpi->num_workers ? cpi->num_workers : 1;
  int i;

  if (multi_thread_ctxt->allocated_tile_cols < tile_cols ||
      multi_thread_ctxt->allocated_tile_rows < tile_rows ||
      multi_thread_ctxt->allocated_vert_unit_rows < cm->mb_rows) {
    vp9_row_mt_mem_dealloc(cpi);
    vp9_init_tile_data(cpi);
    vp9_row_mt_mem_alloc(cpi);
  } else {
    vp9_init_tile_data(cpi);
  }

  create_enc_workers(cpi, num_workers);

  vp9_assign_tile_to_thread(multi_thread_ctxt, tile_cols, cpi->num_workers);

  vp9_prepare_job_queue(cpi, ARNR_JOB);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];

    // Before encoding a frame, copy the thread data from cpi.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
    }
  }

  launch_enc_workers(cpi, temporal_filter_worker_hook, multi_thread_ctxt,
                     num_workers);
}
#endif  // !CONFIG_REALTIME_ONLY

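// Encode worker hook for row-based multi-threading: pull (tile,
// superblock-row) ENCODE_JOB entries from the job queue and encode one
// superblock row at a time.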
static int enc_row_mt_worker_hook(void *arg1, void *arg2) {
  EncWorkerData *const thread_data = (EncWorkerData *)arg1;
  MultiThreadHandle *multi_thread_ctxt = (MultiThreadHandle *)arg2;
  VP9_COMP *const cpi = thread_data->cpi;
  const VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  int tile_row, tile_col;
  int end_of_frame;
  int thread_id = thread_data->thread_id;
  int cur_tile_id = multi_thread_ctxt->thread_id_to_tile_id[thread_id];
  JobNode *proc_job = NULL;
  int mi_row;

  end_of_frame = 0;
  while (0 == end_of_frame) {
    // Get the next job in the queue.
    proc_job =
        (JobNode *)vp9_enc_grp_get_next_job(multi_thread_ctxt, cur_tile_id);
    if (NULL == proc_job) {
      // Query for the status of other tiles.
      end_of_frame = vp9_get_tiles_proc_status(
          multi_thread_ctxt, thread_data->tile_completion_status, &cur_tile_id,
          tile_cols);
    } else {
      tile_col = proc_job->tile_col_id;
      tile_row = proc_job->tile_row_id;
      mi_row = proc_job->vert_unit_row_num * MI_BLOCK_SIZE;

      vp9_encode_sb_row(cpi, thread_data->td, tile_row, tile_col, mi_row);
    }
  }
  return 0;
}

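// Row-multithreaded frame encode: mirrors vp9_encode_tiles_mt but
// distributes work as per-superblock-row jobs, so the worker count is not
// limited by the number of tile columns.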
void vp9_encode_tiles_row_mt(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  int num_workers = VPXMAX(cpi->oxcf.max_threads, 1);
  int i;

  if (multi_thread_ctxt->allocated_tile_cols < tile_cols ||
      multi_thread_ctxt->allocated_tile_rows < tile_rows ||
      multi_thread_ctxt->allocated_vert_unit_rows < cm->mb_rows) {
    vp9_row_mt_mem_dealloc(cpi);
    vp9_init_tile_data(cpi);
    vp9_row_mt_mem_alloc(cpi);
  } else {
    vp9_init_tile_data(cpi);
  }

  create_enc_workers(cpi, num_workers);

  vp9_assign_tile_to_thread(multi_thread_ctxt, tile_cols, cpi->num_workers);

  vp9_prepare_job_queue(cpi, ENCODE_JOB);

  vp9_multi_thread_tile_init(cpi);

  for (i = 0; i < num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];
    // Before encoding a frame, copy the thread data from cpi.
    if (thread_data->td != &cpi->td) {
      thread_data->td->mb = cpi->td.mb;
      thread_data->td->rd_counts = cpi->td.rd_counts;
    }
    if (thread_data->td->counts != &cpi->common.counts) {
      memcpy(thread_data->td->counts, &cpi->common.counts,
             sizeof(cpi->common.counts));
    }

    // Handle use_nonrd_pick_mode case.
    if (cpi->sf.use_nonrd_pick_mode) {
      MACROBLOCK *const x = &thread_data->td->mb;
      MACROBLOCKD *const xd = &x->e_mbd;
      struct macroblock_plane *const p = x->plane;
      struct macroblockd_plane *const pd = xd->plane;
      PICK_MODE_CONTEXT *ctx = &thread_data->td->pc_root->none;
      int j;

      for (j = 0; j < MAX_MB_PLANE; ++j) {
        p[j].coeff = ctx->coeff_pbuf[j][0];
        p[j].qcoeff = ctx->qcoeff_pbuf[j][0];
        pd[j].dqcoeff = ctx->dqcoeff_pbuf[j][0];
        p[j].eobs = ctx->eobs_pbuf[j][0];
      }
    }
  }

  launch_enc_workers(cpi, enc_row_mt_worker_hook, multi_thread_ctxt,
                     num_workers);

  for (i = 0; i < num_workers; i++) {
    VPxWorker *const worker = &cpi->workers[i];
    EncWorkerData *const thread_data = (EncWorkerData *)worker->data1;

    // Accumulate counters.
    if (i < cpi->num_workers - 1) {
      vp9_accumulate_frame_counts(&cm->counts, thread_data->td->counts, 0);
      accumulate_rd_opt(&cpi->td, thread_data->td);
    }
  }
}