/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_multi_thread.h"
#include "vp9/encoder/vp9_temporal_filter.h"

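// Dequeues and returns the next job from the given tile's job queue, or
// NULL if the queue is empty. When CONFIG_MULTITHREAD is enabled, queue
// access is serialized through the tile's job mutex so multiple workers
// can safely pull jobs from the same tile.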
void *vp9_enc_grp_get_next_job(MultiThreadHandle *multi_thread_ctxt,
                               int tile_id) {
  RowMTInfo *row_mt_info;
  JobQueueHandle *job_queue_hdl = NULL;
  void *next = NULL;
  JobNode *job_info = NULL;
#if CONFIG_MULTITHREAD
  pthread_mutex_t *mutex_handle = NULL;
#endif

  row_mt_info = (RowMTInfo *)(&multi_thread_ctxt->row_mt_info[tile_id]);
  job_queue_hdl = (JobQueueHandle *)&row_mt_info->job_queue_hdl;
#if CONFIG_MULTITHREAD
  mutex_handle = &row_mt_info->job_mutex;
#endif

  // Lock the mutex for queue access
#if CONFIG_MULTITHREAD
  pthread_mutex_lock(mutex_handle);
#endif
  next = job_queue_hdl->next;
  if (next != NULL) {
    JobQueue *job_queue = (JobQueue *)next;
    job_info = &job_queue->job_info;
    // Update the next job in the queue
    job_queue_hdl->next = job_queue->next;
    job_queue_hdl->num_jobs_acquired++;
  }

#if CONFIG_MULTITHREAD
  pthread_mutex_unlock(mutex_handle);
#endif

  return job_info;
}

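// Allocates and initializes the per-row RD threshold factors for a tile.
// An existing allocation is reused when it is already large enough for the
// current number of superblock rows; otherwise it is freed and reallocated.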
void vp9_row_mt_alloc_rd_thresh(VP9_COMP *const cpi,
                                TileDataEnc *const this_tile) {
  VP9_COMMON *const cm = &cpi->common;
  const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
  int i;

  if (this_tile->row_base_thresh_freq_fact != NULL) {
    if (sb_rows <= this_tile->sb_rows) {
      return;
    }
    vpx_free(this_tile->row_base_thresh_freq_fact);
    this_tile->row_base_thresh_freq_fact = NULL;
  }
  CHECK_MEM_ERROR(
      &cm->error, this_tile->row_base_thresh_freq_fact,
      (int *)vpx_calloc(sb_rows * BLOCK_SIZES * MAX_MODES,
                        sizeof(*(this_tile->row_base_thresh_freq_fact))));
  for (i = 0; i < sb_rows * BLOCK_SIZES * MAX_MODES; i++)
    this_tile->row_base_thresh_freq_fact[i] = RD_THRESH_INIT_FACT;
  this_tile->sb_rows = sb_rows;
}

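// Allocates the job queue and the row-level synchronization memory used by
// row-based multi-threading. The allocation is sized for the worst case
// across all row_mt stages: the first pass works on 16x16 macroblock rows
// while encoding works on superblock rows.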
void vp9_row_mt_mem_alloc(VP9_COMP *cpi) {
  struct VP9Common *cm = &cpi->common;
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  int tile_row, tile_col;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
  int jobs_per_tile_col, total_jobs;

  // Allocate memory that is large enough for all row_mt stages. First pass
  // uses 16x16 block size.
  jobs_per_tile_col = VPXMAX(cm->mb_rows, sb_rows);
  // Calculate the total number of jobs
  total_jobs = jobs_per_tile_col * tile_cols;

  multi_thread_ctxt->allocated_tile_cols = tile_cols;
  multi_thread_ctxt->allocated_tile_rows = tile_rows;
  multi_thread_ctxt->allocated_vert_unit_rows = jobs_per_tile_col;

  CHECK_MEM_ERROR(&cm->error, multi_thread_ctxt->job_queue,
                  (JobQueue *)vpx_memalign(32, total_jobs * sizeof(JobQueue)));

#if CONFIG_MULTITHREAD
  // Create mutex for each tile
  for (tile_col = 0; tile_col < tile_cols; tile_col++) {
    RowMTInfo *row_mt_info = &multi_thread_ctxt->row_mt_info[tile_col];
    pthread_mutex_init(&row_mt_info->job_mutex, NULL);
  }
#endif

  // Allocate memory for row based multi-threading
  for (tile_col = 0; tile_col < tile_cols; tile_col++) {
    TileDataEnc *this_tile = &cpi->tile_data[tile_col];
    vp9_row_mt_sync_mem_alloc(&this_tile->row_mt_sync, cm, jobs_per_tile_col);
  }

  // Assign the sync pointer of tile row zero for every tile row > 0
  for (tile_row = 1; tile_row < tile_rows; tile_row++) {
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      TileDataEnc *this_tile =
          &cpi->tile_data[tile_row * tile_cols + tile_col];
      TileDataEnc *this_col_tile = &cpi->tile_data[tile_col];
      this_tile->row_mt_sync = this_col_tile->row_mt_sync;
    }
  }

  // Calculate the number of vertical units in the given tile row
  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols];
    TileInfo *tile_info = &this_tile->tile_info;
    multi_thread_ctxt->num_tile_vert_sbs[tile_row] =
        get_num_vert_units(*tile_info, MI_BLOCK_SIZE_LOG2);
  }
}

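// Releases everything allocated by vp9_row_mt_mem_alloc(): the job queue,
// the per-tile job mutexes, the row synchronization memory, and the per-row
// RD threshold factors.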
void vp9_row_mt_mem_dealloc(VP9_COMP *cpi) {
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  int tile_col;
#if CONFIG_MULTITHREAD
  int tile_row;
#endif

  // Deallocate memory for job queue
  if (multi_thread_ctxt->job_queue) {
    vpx_free(multi_thread_ctxt->job_queue);
    multi_thread_ctxt->job_queue = NULL;
  }

#if CONFIG_MULTITHREAD
  // Destroy mutex for each tile
  for (tile_col = 0; tile_col < multi_thread_ctxt->allocated_tile_cols;
       tile_col++) {
    RowMTInfo *row_mt_info = &multi_thread_ctxt->row_mt_info[tile_col];
    pthread_mutex_destroy(&row_mt_info->job_mutex);
  }
#endif

  // Free row based multi-threading sync memory
  for (tile_col = 0; tile_col < multi_thread_ctxt->allocated_tile_cols;
       tile_col++) {
    TileDataEnc *this_tile = &cpi->tile_data[tile_col];
    vp9_row_mt_sync_mem_dealloc(&this_tile->row_mt_sync);
  }

#if CONFIG_MULTITHREAD
  for (tile_row = 0; tile_row < multi_thread_ctxt->allocated_tile_rows;
       tile_row++) {
    for (tile_col = 0; tile_col < multi_thread_ctxt->allocated_tile_cols;
         tile_col++) {
      TileDataEnc *this_tile =
          &cpi->tile_data[tile_row * multi_thread_ctxt->allocated_tile_cols +
                          tile_col];
      if (this_tile->row_base_thresh_freq_fact != NULL) {
        vpx_free(this_tile->row_base_thresh_freq_fact);
        this_tile->row_base_thresh_freq_fact = NULL;
      }
    }
  }
#endif

  multi_thread_ctxt->allocated_tile_cols = 0;
  multi_thread_ctxt->allocated_tile_rows = 0;
  multi_thread_ctxt->allocated_vert_unit_rows = 0;
}

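// Resets the per-tile row synchronization state for a new frame: every
// row's cur_col is set to -1 (no columns processed yet) and the tile's
// first pass statistics are cleared.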
void vp9_multi_thread_tile_init(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
  int i;

  for (i = 0; i < tile_cols; i++) {
    TileDataEnc *this_tile = &cpi->tile_data[i];
    int jobs_per_tile_col = cpi->oxcf.pass == 1 ? cm->mb_rows : sb_rows;

    // Initialize cur_col to -1 for all rows.
    memset(this_tile->row_mt_sync.cur_col, -1,
           sizeof(*this_tile->row_mt_sync.cur_col) * jobs_per_tile_col);
    vp9_zero(this_tile->fp_data);
    this_tile->fp_data.image_data_start_row = INVALID_ROW;
  }
}

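// Maps worker threads to tile columns in round-robin order so that the
// workers are spread as evenly as possible across the tiles.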
void vp9_assign_tile_to_thread(MultiThreadHandle *multi_thread_ctxt,
                               int tile_cols, int num_workers) {
  int tile_id = 0;
  int i;

  // Assign the tiles to the threads in a round-robin fashion
  for (i = 0; i < num_workers; i++) {
    multi_thread_ctxt->thread_id_to_tile_id[i] = tile_id++;
    if (tile_id == tile_cols) tile_id = 0;
  }
}

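// Returns the number of jobs still pending in the given tile's job queue.
// The count is read under the tile's job mutex when CONFIG_MULTITHREAD is
// enabled, since workers update num_jobs_acquired concurrently.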
int vp9_get_job_queue_status(MultiThreadHandle *multi_thread_ctxt,
                             int cur_tile_id) {
  RowMTInfo *row_mt_info;
  JobQueueHandle *job_queue_hndl;
#if CONFIG_MULTITHREAD
  pthread_mutex_t *mutex;
#endif
  int num_jobs_remaining;

  row_mt_info = &multi_thread_ctxt->row_mt_info[cur_tile_id];
  job_queue_hndl = &row_mt_info->job_queue_hdl;
#if CONFIG_MULTITHREAD
  mutex = &row_mt_info->job_mutex;
#endif

#if CONFIG_MULTITHREAD
  pthread_mutex_lock(mutex);
#endif
  num_jobs_remaining =
      multi_thread_ctxt->jobs_per_tile_col - job_queue_hndl->num_jobs_acquired;
#if CONFIG_MULTITHREAD
  pthread_mutex_unlock(mutex);
#endif

  return (num_jobs_remaining);
}

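// Builds the per-tile job queues for the given job type. Each tile column
// gets a singly linked list of jobs, one per vertical unit row (superblock
// rows for ENCODE_JOB, macroblock rows for FIRST_PASS_JOB, and rows of
// temporal-filter blocks for ARNR_JOB), terminated by a NULL next pointer.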
void vp9_prepare_job_queue(VP9_COMP *cpi, JOB_TYPE job_type) {
  VP9_COMMON *const cm = &cpi->common;
  MultiThreadHandle *multi_thread_ctxt = &cpi->multi_thread_ctxt;
  JobQueue *job_queue = multi_thread_ctxt->job_queue;
  const int tile_cols = 1 << cm->log2_tile_cols;
  int job_row_num, jobs_per_tile, jobs_per_tile_col = 0, total_jobs;
  const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
  int tile_col, i;

  switch (job_type) {
    case ENCODE_JOB: jobs_per_tile_col = sb_rows; break;
    case FIRST_PASS_JOB: jobs_per_tile_col = cm->mb_rows; break;
    case ARNR_JOB:
      jobs_per_tile_col = ((cm->mi_rows + TF_ROUND) >> TF_SHIFT);
      break;
    default: assert(0);
  }

  total_jobs = jobs_per_tile_col * tile_cols;

  multi_thread_ctxt->jobs_per_tile_col = jobs_per_tile_col;
  // memset the entire job queue buffer to zero
  memset(job_queue, 0, total_jobs * sizeof(JobQueue));

  // Job queue preparation
  for (tile_col = 0; tile_col < tile_cols; tile_col++) {
    RowMTInfo *tile_ctxt = &multi_thread_ctxt->row_mt_info[tile_col];
    JobQueue *job_queue_curr, *job_queue_temp;
    int tile_row = 0;

    tile_ctxt->job_queue_hdl.next = (void *)job_queue;
    tile_ctxt->job_queue_hdl.num_jobs_acquired = 0;

    job_queue_curr = job_queue;
    job_queue_temp = job_queue;

    // Loop over all the vertical rows
    for (job_row_num = 0, jobs_per_tile = 0; job_row_num < jobs_per_tile_col;
         job_row_num++, jobs_per_tile++) {
      job_queue_curr->job_info.vert_unit_row_num = job_row_num;
      job_queue_curr->job_info.tile_col_id = tile_col;
      job_queue_curr->job_info.tile_row_id = tile_row;
      job_queue_curr->next = (void *)(job_queue_temp + 1);
      job_queue_curr = ++job_queue_temp;

      if (ENCODE_JOB == job_type) {
        if (jobs_per_tile >=
            multi_thread_ctxt->num_tile_vert_sbs[tile_row] - 1) {
          tile_row++;
          jobs_per_tile = -1;
        }
      }
    }

    // Set the last pointer to NULL
    job_queue_curr += -1;
    job_queue_curr->next = (void *)NULL;

    // Move to the next tile
    job_queue += jobs_per_tile_col;
  }

  for (i = 0; i < cpi->num_workers; i++) {
    EncWorkerData *thread_data;
    thread_data = &cpi->tile_thr_data[i];
    thread_data->thread_id = i;

    for (tile_col = 0; tile_col < tile_cols; tile_col++)
      thread_data->tile_completion_status[tile_col] = 0;
  }
}

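// Checks how far each tile has progressed and selects the tile with the
// most jobs remaining (i.e. the least processed tile) as the next one to
// work on. Returns 1 when every tile is complete; otherwise updates
// *cur_tile_id and returns 0.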
int vp9_get_tiles_proc_status(MultiThreadHandle *multi_thread_ctxt,
                              int *tile_completion_status, int *cur_tile_id,
                              int tile_cols) {
  int tile_col;
  int tile_id = -1;  // Stores the tile ID with minimum proc done
  int max_num_jobs_remaining = 0;
  int num_jobs_remaining;

  // Mark the completion to avoid check in the loop
  tile_completion_status[*cur_tile_id] = 1;
  // Check for the status of all the tiles
  for (tile_col = 0; tile_col < tile_cols; tile_col++) {
    if (tile_completion_status[tile_col] == 0) {
      num_jobs_remaining =
          vp9_get_job_queue_status(multi_thread_ctxt, tile_col);
      // Mark the completion to avoid checks during future switches across
      // tiles
      if (num_jobs_remaining == 0) tile_completion_status[tile_col] = 1;
      if (num_jobs_remaining > max_num_jobs_remaining) {
        max_num_jobs_remaining = num_jobs_remaining;
        tile_id = tile_col;
      }
    }
  }

  if (-1 == tile_id) {
    return 1;
  } else {
    // Update the cur ID to the next tile ID that will be processed,
    // which will be the least processed tile
    *cur_tile_id = tile_id;
    return 0;
  }
}