1 /******************************************************************************
2 *
3 * Copyright (C) 2018 The Android Open Source Project
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 *****************************************************************************
18 * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
19 */
20 /**
21 *******************************************************************************
22 * @file
23 * ihevce_multi_thread_funcs.c
24 *
25 * @brief
26 * Contains functions related to Job Queues and other utilities required for multi threading
27 *
28 * @author
29 * Ittiam
30 *
31 * @par List of Functions:
32 * <TODO: TO BE ADDED>
33 *
34 * @remarks
35 * None
36 *
37 *******************************************************************************
38 */
39 /*****************************************************************************/
40 /* File Includes */
41 /*****************************************************************************/
42 /* System include files */
43 #include <stdio.h>
44 #include <string.h>
45 #include <stdlib.h>
46 #include <assert.h>
47 #include <stdarg.h>
48 #include <math.h>
49
50 /* User include files */
51 #include "ihevc_typedefs.h"
52 #include "itt_video_api.h"
53 #include "ihevce_api.h"
54
55 #include "rc_cntrl_param.h"
56 #include "rc_frame_info_collector.h"
57 #include "rc_look_ahead_params.h"
58
59 #include "ihevc_defs.h"
60 #include "ihevc_structs.h"
61 #include "ihevc_platform_macros.h"
62 #include "ihevc_deblk.h"
63 #include "ihevc_itrans_recon.h"
64 #include "ihevc_chroma_itrans_recon.h"
65 #include "ihevc_chroma_intra_pred.h"
66 #include "ihevc_intra_pred.h"
67 #include "ihevc_inter_pred.h"
68 #include "ihevc_mem_fns.h"
69 #include "ihevc_padding.h"
70 #include "ihevc_weighted_pred.h"
71 #include "ihevc_sao.h"
72 #include "ihevc_resi_trans.h"
73 #include "ihevc_quant_iquant_ssd.h"
74 #include "ihevc_cabac_tables.h"
75
76 #include "ihevce_defs.h"
77 #include "ihevce_lap_enc_structs.h"
78 #include "ihevce_multi_thrd_structs.h"
79 #include "ihevce_multi_thrd_funcs.h"
80 #include "ihevce_me_common_defs.h"
81 #include "ihevce_had_satd.h"
82 #include "ihevce_error_codes.h"
83 #include "ihevce_bitstream.h"
84 #include "ihevce_cabac.h"
85 #include "ihevce_rdoq_macros.h"
86 #include "ihevce_function_selector.h"
87 #include "ihevce_enc_structs.h"
88 #include "ihevce_entropy_structs.h"
89 #include "ihevce_cmn_utils_instr_set_router.h"
90 #include "ihevce_enc_loop_structs.h"
91 #include "ihevce_bs_compute_ctb.h"
92 #include "ihevce_global_tables.h"
93 #include "ihevce_dep_mngr_interface.h"
94 #include "hme_datatype.h"
95 #include "hme_interface.h"
96 #include "hme_common_defs.h"
97 #include "hme_defs.h"
98 #include "ihevce_me_instr_set_router.h"
99 #include "ihevce_ipe_instr_set_router.h"
100 #include "ihevce_ipe_structs.h"
101 #include "ihevce_coarse_me_pass.h"
102
103 #include "cast_types.h"
104 #include "osal.h"
105 #include "osal_defaults.h"
106
107 /********************************************************************/
108 /*Macros */
109 /********************************************************************/
110 #define MULT_FACT 100
111
112 /*****************************************************************************/
113 /* Function Definitions */
114 /*****************************************************************************/
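/* Helper: returns 1 if any byte in buf[0..size-1] is non-zero, else 0.      */
/* buf is volatile so that the dependency spin-waits below re-read memory on */
/* every poll instead of caching the first load.                             */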
115 static inline WORD32 ihevce_is_nonzero(volatile UWORD8 *buf, WORD32 size)
116 {
117 WORD32 i;
118 for (i = 0; i < size; i++)
119 {
120 if (buf[i])
121 return 1;
122 }
123 return 0;
124 }
125 /**
126 *******************************************************************************
127 *
128 * @brief Pops the next job from the appropriate pre-encode Job Que
129 *
130 * @par Description: Acquires the Job Que mutex so that the pop is thread safe
131 *
132 * @param[inout] pv_multi_thrd_ctxt
133 * Pointer to Multi thread context
134 *
135 * @param[in] i4_job_type
136 * Job type from which a job needs to be popped out
137 *
138 * @param[in] i4_blocking_mode
139 * Mode of operation (1: block until input dependencies are resolved, 0: return NULL if unresolved)
140 *
141 * @returns
142 * Pointer to the popped job, or NULL if no job is available
143 *
144 * @remarks
145 *
146 *******************************************************************************
147 */
148 void *ihevce_pre_enc_grp_get_next_job(
149 void *pv_multi_thrd_ctxt, WORD32 i4_job_type, WORD32 i4_blocking_mode, WORD32 i4_ping_pong)
150 {
151 /* Local variables */
152 multi_thrd_ctxt_t *ps_multi_thrd;
153 job_queue_handle_t *ps_job_queue_hdl;
154 void *pv_next = NULL;
155 void *pv_job_q_mutex_hdl_pre_enc = NULL;
156
157 /* Derive local variables */
158 ps_multi_thrd = (multi_thrd_ctxt_t *)pv_multi_thrd_ctxt;
159 ps_job_queue_hdl =
160 (job_queue_handle_t *)&ps_multi_thrd->as_job_que_preenc_hdls[i4_ping_pong][i4_job_type];
161
162 /* lock the mutex for Q access */
163 /* As the design must facilitate parallelism in each stage, */
164 /* it is recommended to have a separate mutex for each stage */
165 if(i4_job_type < ME_JOB_LYR4)
166 {
167 pv_job_q_mutex_hdl_pre_enc = ps_multi_thrd->pv_job_q_mutex_hdl_pre_enc_decomp;
168 }
169 else if(i4_job_type < IPE_JOB_LYR0)
170 {
171 pv_job_q_mutex_hdl_pre_enc = ps_multi_thrd->pv_job_q_mutex_hdl_pre_enc_hme;
172 }
173 else
174 {
175 pv_job_q_mutex_hdl_pre_enc = ps_multi_thrd->pv_job_q_mutex_hdl_pre_enc_l0ipe;
176 }
177
178 osal_mutex_lock(pv_job_q_mutex_hdl_pre_enc);
179 /* Get the next */
180 pv_next = ps_job_queue_hdl->pv_next;
181
182 /* Update the next by checking input dependency */
183 if(NULL != pv_next)
184 {
185 job_queue_t *ps_job_queue = (job_queue_t *)pv_next;
186
187 /* check for input dependencies to be resolved */
188 /* this can be blocking or non blocking based on use case */
189 /* if non blocking then the function returns NULL */
190
191 if(1 == i4_blocking_mode)
192 {
193 while(ihevce_is_nonzero(ps_job_queue->au1_in_dep, MAX_IN_DEP));
194
195 /* update the next job in the queue */
196 ps_job_queue_hdl->pv_next = ps_job_queue->pv_next;
197 }
198 else
199 {
200 /* check for input dependency resolved */
201 if(ihevce_is_nonzero(ps_job_queue->au1_in_dep, MAX_IN_DEP))
202 {
203 /* return null */
204 pv_next = NULL;
205 }
206 else
207 {
208 /* update the next job in the queue */
209 ps_job_queue_hdl->pv_next = ps_job_queue->pv_next;
210 }
211 }
212 }
213
214 /* unlock the mutex */
215 osal_mutex_unlock(pv_job_q_mutex_hdl_pre_enc);
216
217 /* Return */
218 return (pv_next);
219
220 } /* End of get_next_job */
221
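/*
 * Illustrative usage sketch (not part of the build): a pre-encode worker thread
 * draining a job queue with the pop / set-out-dep pair defined in this file.
 * ihevce_process_pre_enc_job() is a hypothetical stand-in for the actual row
 * processing done by the pre-encode threads.
 *
 *     job_queue_t *ps_job;
 *
 *     // blocking pop: spins until all input-dependency bytes of the job clear
 *     ps_job = (job_queue_t *)ihevce_pre_enc_grp_get_next_job(
 *         pv_multi_thrd_ctxt, DECOMP_JOB_LYR0, 1, i4_ping_pong);
 *
 *     while(NULL != ps_job)
 *     {
 *         ihevce_process_pre_enc_job(ps_job); // hypothetical per-row processing
 *
 *         // clear one input-dependency byte in each job that waits on this one
 *         ihevce_pre_enc_grp_job_set_out_dep(pv_multi_thrd_ctxt, ps_job, i4_ping_pong);
 *
 *         ps_job = (job_queue_t *)ihevce_pre_enc_grp_get_next_job(
 *             pv_multi_thrd_ctxt, DECOMP_JOB_LYR0, 1, i4_ping_pong);
 *     }
 */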
222 /**
223 *******************************************************************************
224 *
225 * @brief Pops the next job from the appropriate encode-group Job Que
226 *
227 * @par Description: Acquires the Job Que mutex so that the pop is thread safe
228 *
229 * @param[inout] pv_multi_thrd_ctxt
230 * Pointer to Multi thread context
231 *
232 * @param[in] i4_job_type
233 * Job type from which a job needs to be popped out
234 *
235 * @param[in] i4_blocking_mode
236 * Mode of operation (1: block until input dependencies are resolved, 0: return NULL if unresolved)
237 *
238 * @returns
239 * Pointer to the popped job, or NULL if no job is available
240 *
241 * @remarks
242 *
243 *******************************************************************************
244 */
245 void *ihevce_enc_grp_get_next_job(
246 void *pv_multi_thrd_ctxt, WORD32 i4_job_type, WORD32 i4_blocking_mode, WORD32 i4_curr_frm_id)
247 {
248 /* Local variables */
249 multi_thrd_ctxt_t *ps_multi_thrd;
250 job_queue_handle_t *ps_job_queue_hdl;
251 void *pv_next = NULL;
252 void *pv_job_q_mutex_hdl_enc_grp;
253
254 /* Derive local variables */
255 ps_multi_thrd = (multi_thrd_ctxt_t *)pv_multi_thrd_ctxt;
256
257 if(ME_JOB_ENC_LYR == i4_job_type)
258 {
259 pv_job_q_mutex_hdl_enc_grp = ps_multi_thrd->pv_job_q_mutex_hdl_enc_grp_me;
260
261 ps_job_queue_hdl = (job_queue_handle_t *)&ps_multi_thrd->aps_cur_out_me_prms[i4_curr_frm_id]
262 ->as_job_que_enc_hdls[i4_job_type];
263 }
264 else
265 {
266 pv_job_q_mutex_hdl_enc_grp = ps_multi_thrd->pv_job_q_mutex_hdl_enc_grp_enc_loop;
267 ps_job_queue_hdl =
268 (job_queue_handle_t *)&ps_multi_thrd->aps_cur_inp_enc_prms[i4_curr_frm_id]
269 ->as_job_que_enc_hdls[i4_job_type];
270 }
271
272 /* lock the mutex for Q access */
273 osal_mutex_lock(pv_job_q_mutex_hdl_enc_grp);
274
275 /* Get the next */
276 pv_next = ps_job_queue_hdl->pv_next;
277
278 /* Update the next by checking input dependency */
279 if(NULL != pv_next)
280 {
281 job_queue_t *ps_job_queue = (job_queue_t *)pv_next;
282
283 /* check for input dependencies to be resolved */
284 /* this can be blocking or non blocking based on use case */
285 /* if non blocking then the function returns NULL */
286
287 if(1 == i4_blocking_mode)
288 {
289 while(ihevce_is_nonzero(ps_job_queue->au1_in_dep, MAX_IN_DEP));
290
291 /* update the next job in the queue */
292 ps_job_queue_hdl->pv_next = ps_job_queue->pv_next;
293 }
294 else
295 {
296 /* check for input dependency resolved */
297 if(ihevce_is_nonzero(ps_job_queue->au1_in_dep, MAX_IN_DEP))
298 {
299 /* return null */
300 pv_next = NULL;
301 }
302 else
303 {
304 /* update the next job in the queue */
305 ps_job_queue_hdl->pv_next = ps_job_queue->pv_next;
306 }
307 }
308 }
309
310 /* unlock the mutex */
311 osal_mutex_unlock(pv_job_q_mutex_hdl_enc_grp);
312
313 /* Return */
314 return (pv_next);
315
316 } /* End of get_next_job */
317
318 /**
319 *******************************************************************************
320 *
321 * @brief Sets the output dependencies of a finished job to the done state
322 *
323 * @par Description: Clears the input dependency byte of every job that depends on ps_curr_job
324 *
325 * @param[inout] pv_multi_thrd_ctxt
326 * Pointer to Multi thread context
327 *
328 * @param[in] ps_curr_job
329 * Current finished Job pointer
330 *
331 * @returns
332 * None
333 *
334 * @remarks
335 *
336 *******************************************************************************
337 */
338 void ihevce_pre_enc_grp_job_set_out_dep(
339 void *pv_multi_thrd_ctxt, job_queue_t *ps_curr_job, WORD32 i4_ping_pong)
340 {
341 /* local variables */
342 WORD32 ctr;
343 multi_thrd_ctxt_t *ps_multi_thrd;
344
345 ps_multi_thrd = (multi_thrd_ctxt_t *)pv_multi_thrd_ctxt;
346
347 /* loop over number output dependencies */
348 for(ctr = 0; ctr < ps_curr_job->i4_num_output_dep; ctr++)
349 {
350 UWORD8 *pu1_ptr;
351
352 pu1_ptr = (UWORD8 *)ps_multi_thrd->aps_job_q_pre_enc[i4_ping_pong];
353 pu1_ptr += ps_curr_job->au4_out_ofsts[ctr];
354 *pu1_ptr = 0;
355 }
356
357 return;
358 }
359
360 /**
361 *******************************************************************************
362 *
363 * @brief Sets the output dependencies of a finished job to the done state
364 *
365 * @par Description: Clears the input dependency byte of every job that depends on ps_curr_job
366 *
367 * @param[inout] pv_multi_thrd_ctxt
368 * Pointer to Multi thread context
369 *
370 * @param[in] ps_curr_job
371 * Current finished Job pointer
372 *
373 * @returns
374 * None
375 *
376 * @remarks
377 *
378 *******************************************************************************
379 */
380 void ihevce_enc_grp_job_set_out_dep(
381 void *pv_multi_thrd_ctxt, job_queue_t *ps_curr_job, WORD32 i4_curr_frm_id)
382 {
383 /* local variables */
384 WORD32 ctr;
385 UWORD8 *pu1_ptr;
386 multi_thrd_ctxt_t *ps_multi_thrd;
387
388 ps_multi_thrd = (multi_thrd_ctxt_t *)pv_multi_thrd_ctxt;
389
390 if(ME_JOB_ENC_LYR == ps_curr_job->i4_task_type)
391 {
392 pu1_ptr = (UWORD8 *)ps_multi_thrd->aps_cur_out_me_prms[i4_curr_frm_id]->ps_job_q_enc;
393 }
394 else
395 {
396 pu1_ptr = (UWORD8 *)ps_multi_thrd->aps_cur_inp_enc_prms[i4_curr_frm_id]->ps_job_q_enc;
397 }
398
399 /* loop over number output dependencies */
400 for(ctr = 0; ctr < ps_curr_job->i4_num_output_dep; ctr++)
401 {
402 WORD32 i4_off;
403 i4_off = ps_curr_job->au4_out_ofsts[ctr];
404 pu1_ptr[i4_off] = 0;
405 }
406
407 return;
408 }
409
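/*
 * Illustrative usage sketch (not part of the build): an encode-group worker
 * polling the EncLoop queue of one bitrate instance in non-blocking mode. A
 * NULL return means the same row has not yet been released by ME (or by the
 * previous bitrate's EncLoop) through ihevce_enc_grp_job_set_out_dep().
 * ihevce_process_enc_loop_row() and i4_br_idx are hypothetical names used
 * only for this sketch.
 *
 *     job_queue_t *ps_job;
 *
 *     ps_job = (job_queue_t *)ihevce_enc_grp_get_next_job(
 *         pv_multi_thrd_ctxt, ENC_LOOP_JOB + i4_br_idx, 0, i4_curr_frm_id);
 *
 *     if(NULL != ps_job)
 *     {
 *         ihevce_process_enc_loop_row(ps_job); // hypothetical per-row processing
 *         ihevce_enc_grp_job_set_out_dep(pv_multi_thrd_ctxt, ps_job, i4_curr_frm_id);
 *     }
 */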
410 /**
411 *******************************************************************************
412 *
413 * @brief Function prepares the Job Queues for all the passes of the encoder
414 *
415 * @par Description: Sets the input and output dependencies based on the picture type
416 *
417 * @param[inout] pv_enc_ctxt
418 * Pointer to encoder context
419 *
420 * @param[in] ps_curr_inp
421 * Current Input buffer pointer
422 *
423 * @returns
424 * None
425 *
426 * @remarks
427 *
428 *******************************************************************************
429 */
430 void ihevce_prepare_job_queue(
431 void *pv_enc_ctxt, ihevce_lap_enc_buf_t *ps_curr_inp, WORD32 i4_curr_frm_id)
432 {
433 /* local variables */
434 enc_ctxt_t *ps_ctxt;
435 job_queue_t *ps_me_job_queue_lyr0;
436 job_queue_t *ps_enc_loop_job_queue;
437 WORD32 pass;
438 WORD32 num_jobs, col_tile_ctr;
439 WORD32 num_ctb_vert_rows;
440 WORD32 i4_pic_type;
441 WORD32 i; //counter for bitrate
442 WORD32 i4_num_bitrate_instances;
443 WORD32 i4_num_tile_col;
444
445 /* derive local variables */
446 ps_ctxt = (enc_ctxt_t *)pv_enc_ctxt;
447 num_ctb_vert_rows = ps_ctxt->s_frm_ctb_prms.i4_num_ctbs_vert;
448 i4_num_bitrate_instances = ps_ctxt->i4_num_bitrates;
449
450 i4_num_tile_col = 1;
451 if(1 == ps_ctxt->ps_tile_params_base->i4_tiles_enabled_flag)
452 {
453 i4_num_tile_col = ps_ctxt->ps_tile_params_base->i4_num_tile_cols;
454 }
455 /* memset the entire job que buffer to zero */
456 memset(
457 ps_ctxt->s_multi_thrd.aps_cur_out_me_prms[i4_curr_frm_id]->ps_job_q_enc,
458 0,
459 MAX_NUM_VERT_UNITS_FRM * NUM_ENC_JOBS_QUES * i4_num_tile_col * sizeof(job_queue_t));
460
461 /* get the start address of Job queues */
462 ps_me_job_queue_lyr0 = ps_ctxt->s_multi_thrd.aps_cur_out_me_prms[i4_curr_frm_id]->ps_job_q_enc;
463 ps_enc_loop_job_queue = ps_me_job_queue_lyr0 + (i4_num_tile_col * MAX_NUM_VERT_UNITS_FRM);
464
465 /* store the JOB queue in the Job handle */
466 ps_ctxt->s_multi_thrd.aps_cur_out_me_prms[i4_curr_frm_id]
467 ->as_job_que_enc_hdls[ME_JOB_ENC_LYR]
468 .pv_next = (void *)ps_me_job_queue_lyr0;
469 /* store the JOB queue in the Job handle for reenc */
470 ps_ctxt->s_multi_thrd.aps_cur_out_me_prms[i4_curr_frm_id]
471 ->as_job_que_enc_hdls_reenc[ME_JOB_ENC_LYR]
472 .pv_next = (void *)ps_me_job_queue_lyr0;
473
474 for(i = 0; i < i4_num_bitrate_instances; i++)
475 {
476 ps_ctxt->s_multi_thrd.aps_cur_out_me_prms[i4_curr_frm_id]
477 ->as_job_que_enc_hdls[ENC_LOOP_JOB + i]
478 .pv_next = (void *)ps_enc_loop_job_queue;
479 ps_ctxt->s_multi_thrd.aps_cur_out_me_prms[i4_curr_frm_id]
480 ->as_job_que_enc_hdls_reenc[ENC_LOOP_JOB + i]
481 .pv_next = (void *)ps_enc_loop_job_queue;
482 ps_enc_loop_job_queue += (i4_num_tile_col * MAX_NUM_VERT_UNITS_FRM);
483 }
484
485 i4_pic_type = ps_curr_inp->s_lap_out.i4_pic_type;
486
487 //prepare ME JOB queue first
488 //for(pass = 0; pass < NUM_ENC_JOBS_QUES; pass++)
489 {
490 job_queue_t *ps_job_queue_curr;
491 job_queue_t *ps_job_queue_next;
492 WORD32 ctr;
493 WORD32 inp_dep;
494 WORD32 out_dep;
495 WORD32 num_vert_units;
496 HEVCE_ENC_JOB_TYPES_T task_type;
497
498 pass = 0; //= ME_JOB_ENC_LYR
499
500 {
501 /* num_vert_units of the finest layer is stored at index (num_hme_lyrs - 1) */
502 num_vert_units = num_ctb_vert_rows;
503 task_type = ME_JOB_ENC_LYR;
504 ps_job_queue_curr = ps_me_job_queue_lyr0;
505 ps_job_queue_next =
506 (job_queue_t *)ps_ctxt->s_multi_thrd.aps_cur_out_me_prms[i4_curr_frm_id]
507 ->as_job_que_enc_hdls[ENC_LOOP_JOB]
508 .pv_next;
509 inp_dep = 0;
510 out_dep = 1; //set reference bit-rate's input dependency
511 }
512
513 if((ME_JOB_ENC_LYR == pass) &&
514 ((IV_I_FRAME == i4_pic_type) || (IV_IDR_FRAME == i4_pic_type)) && !L0ME_IN_OPENLOOP_MODE)
515 {
516 //continue;
517 }
518 else
519 {
520 /* loop over all the vertical rows */
521 for(num_jobs = 0; num_jobs < num_vert_units; num_jobs++)
522 {
523 /* loop over all the column tiles */
524 for(col_tile_ctr = 0; col_tile_ctr < i4_num_tile_col; col_tile_ctr++)
525 {
526 ULWORD64 u8_temp;
527
528 {
529 ps_job_queue_curr->s_job_info.s_me_job_info.i4_vert_unit_row_no = num_jobs;
530 ps_job_queue_curr->s_job_info.s_me_job_info.i4_tile_col_idx = col_tile_ctr;
531 }
532
533 ps_job_queue_curr->pv_next = (void *)(ps_job_queue_curr + 1);
534
535 ps_job_queue_curr->i4_task_type = task_type;
536
537 ps_job_queue_curr->i4_num_input_dep = inp_dep;
538
539 /* set the entire input dep buffer to default value 0 */
540 memset(&ps_job_queue_curr->au1_in_dep[0], 0, sizeof(UWORD8) * MAX_IN_DEP);
541
542 /* set the input dep buffer to 1 for num inp dep */
543 if(0 != inp_dep)
544 {
545 memset(&ps_job_queue_curr->au1_in_dep[0], 1, sizeof(UWORD8) * inp_dep);
546 }
547
548 ps_job_queue_curr->i4_num_output_dep = out_dep;
549
550 /* set the entire offset buffer to default value */
551 memset(
552 &ps_job_queue_curr->au4_out_ofsts[0], 0xFF, sizeof(UWORD32) * MAX_OUT_DEP);
553
554 for(ctr = 0; ctr < out_dep; ctr++)
555 {
556 /* col tile level dependency b/w ME & EncLoop */
557 u8_temp = (ULWORD64)(
558 &ps_job_queue_next[num_jobs * i4_num_tile_col + col_tile_ctr] -
559 ps_ctxt->s_multi_thrd.aps_cur_out_me_prms[i4_curr_frm_id]->ps_job_q_enc);
560
561 u8_temp *= sizeof(job_queue_t);
562
563 /* store the offset to the array */
564 ps_job_queue_curr->au4_out_ofsts[ctr] = (UWORD32)u8_temp;
565 }
566
567 ps_job_queue_curr++;
568 }
569 } //for ends
570
571 /* set the last pointer to NULL */
572 ps_job_queue_curr--;
573 ps_job_queue_curr->pv_next = (void *)NULL;
574 } //else ends
575 }
576
577 //prepare Enc_loop JOB queue for all bitrate instances
578 //for(pass = 0; pass < NUM_ENC_JOBS_QUES; pass++)
579 for(i = 0; i < i4_num_bitrate_instances; i++)
580 {
581 job_queue_t *ps_job_queue_curr;
582 job_queue_t *ps_job_queue_next;
583 WORD32 ctr;
584 WORD32 inp_dep;
585 WORD32 out_dep;
586 WORD32 num_vert_units;
587 HEVCE_ENC_JOB_TYPES_T task_type;
588
589 /* In case of I or IDR pictures ME will not perform any processing */
590 //if(ENC_LOOP_JOB == pass)
591 {
592 if(((IV_I_FRAME == i4_pic_type) || (IV_IDR_FRAME == i4_pic_type)) &&
593 !L0ME_IN_OPENLOOP_MODE)
594 {
595 inp_dep = 0;
596 }
597 else
598 {
599 inp_dep = 1;
600 }
601
602 task_type = (HEVCE_ENC_JOB_TYPES_T)(ENC_LOOP_JOB + i);
603 ps_job_queue_curr =
604 (job_queue_t *)ps_ctxt->s_multi_thrd.aps_cur_out_me_prms[i4_curr_frm_id]
605 ->as_job_que_enc_hdls[ENC_LOOP_JOB + i]
606 .pv_next;
607 ps_job_queue_next =
608 (job_queue_t *)ps_ctxt->s_multi_thrd.aps_cur_out_me_prms[i4_curr_frm_id]
609 ->as_job_que_enc_hdls[ENC_LOOP_JOB + i + 1]
610 .pv_next;
611 out_dep = 1; //output dependency is the next bit-rate instance's input dependency
612 num_vert_units = num_ctb_vert_rows;
613
614 if(i == i4_num_bitrate_instances - 1) //for last bit-rate instance
615 {
616 //clear output dependency
617 ps_job_queue_next = NULL;
618 out_dep = 0;
619 }
620 }
621
622 /* loop over all the vertical rows */
623 for(num_jobs = 0; num_jobs < num_vert_units; num_jobs++)
624 {
625 /* loop over all the column tiles */
626 for(col_tile_ctr = 0; col_tile_ctr < i4_num_tile_col; col_tile_ctr++)
627 {
628 ULWORD64 u8_temp;
629
630 {
631 ps_job_queue_curr->s_job_info.s_enc_loop_job_info.i4_ctb_row_no = num_jobs;
632 ps_job_queue_curr->s_job_info.s_enc_loop_job_info.i4_tile_col_idx =
633 col_tile_ctr;
634 ps_job_queue_curr->s_job_info.s_enc_loop_job_info.i4_bitrate_instance_no = i;
635 }
636
637 ps_job_queue_curr->pv_next = (void *)(ps_job_queue_curr + 1);
638
639 ps_job_queue_curr->i4_task_type = task_type;
640
641 ps_job_queue_curr->i4_num_input_dep = inp_dep;
642
643 /* set the entire input dep buffer to default value 0 */
644 memset(&ps_job_queue_curr->au1_in_dep[0], 0, sizeof(UWORD8) * MAX_IN_DEP);
645
646 /* set the input dep buffer to 1 for num inp dep */
647 if(0 != inp_dep)
648 {
649 memset(&ps_job_queue_curr->au1_in_dep[0], 1, sizeof(UWORD8) * inp_dep);
650 }
651
652 ps_job_queue_curr->i4_num_output_dep = out_dep;
653
654 /* set the entire offset buffer to default value */
655 memset(&ps_job_queue_curr->au4_out_ofsts[0], 0xFF, sizeof(UWORD32) * MAX_OUT_DEP);
656
657 for(ctr = 0; ctr < out_dep; ctr++)
658 {
659 /* col tile level dependency b/w EncLoops of MBR */
660 u8_temp = (ULWORD64)(
661 &ps_job_queue_next[num_jobs * i4_num_tile_col + col_tile_ctr] -
662 ps_ctxt->s_multi_thrd.aps_cur_out_me_prms[i4_curr_frm_id]->ps_job_q_enc);
663
664 u8_temp *= sizeof(job_queue_t);
665
666 /* store the offset to the array */
667 ps_job_queue_curr->au4_out_ofsts[ctr] = (UWORD32)u8_temp;
668 }
669
670 ps_job_queue_curr++;
671 }
672 }
673
674 /* set the last pointer to NULL */
675 ps_job_queue_curr--;
676 ps_job_queue_curr->pv_next = (void *)NULL;
677 }
678
679 return;
680
681 } /* End of ihevce_prepare_job_queue */
682
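/*
 * Worked example (illustrative only) of the layout built by
 * ihevce_prepare_job_queue(). ps_job_q_enc holds consecutive blocks of
 * (i4_num_tile_col * MAX_NUM_VERT_UNITS_FRM) job_queue_t entries, one block
 * per job type:
 *
 *     [ ME_JOB_ENC_LYR ][ ENC_LOOP_JOB (bitrate 0) ][ ENC_LOOP_JOB + 1 ] ...
 *
 * The ME job of CTB row r, column tile c stores one output offset that points
 * at the EncLoop job of the same row/tile in the reference bitrate block
 * (enc_loop_q_br0 is a hypothetical name for that block's base pointer):
 *
 *     consumer = &enc_loop_q_br0[r * i4_num_tile_col + c];
 *     offset   = (UWORD32)((consumer - ps_job_q_enc) * sizeof(job_queue_t));
 *
 * Each EncLoop job in turn points at the same row/tile of the next bitrate
 * instance, and the last bitrate instance has out_dep = 0. When the producer
 * finishes, ihevce_enc_grp_job_set_out_dep() adds this byte offset to
 * ps_job_q_enc and clears the byte it lands on, which (assuming au1_in_dep is
 * the first member of job_queue_t) is the consumer's single input-dependency
 * flag.
 */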
683 /**
684 *******************************************************************************
685 *
686 * @brief Function prepares the Job Queues for all the passes of pre-enc
687 *
688 * @par Description: Sets the input and output dependencies based on the picture type
689 *
690 * @param[inout] pv_enc_ctxt
691 * Pointer to encoder context
692 *
693 * @param[in] ps_curr_inp
694 * Current Input buffer pointer
695 *
696 * @returns
697 * None
698 *
699 * @remarks
700 *
701 *******************************************************************************
702 */
703 void ihevce_prepare_pre_enc_job_queue(
704 void *pv_enc_ctxt, ihevce_lap_enc_buf_t *ps_curr_inp, WORD32 i4_ping_pong)
705 {
706 /* local variables */
707 enc_ctxt_t *ps_ctxt;
708 job_queue_t *ps_decomp_job_queue_lyr0;
709 job_queue_t *ps_decomp_job_queue_lyr1;
710 job_queue_t *ps_decomp_job_queue_lyr2;
711 job_queue_t *ps_decomp_job_queue_lyr3;
712 job_queue_t *ps_me_job_queue_lyr1;
713 job_queue_t *ps_me_job_queue_lyr2;
714 job_queue_t *ps_me_job_queue_lyr3;
715 job_queue_t *ps_me_job_queue_lyr4;
716 job_queue_t *ps_ipe_job_queue;
717 job_queue_t *aps_me_job_queues[MAX_NUM_HME_LAYERS];
718 multi_thrd_me_job_q_prms_t *ps_me_job_q_prms;
719 WORD32 ai4_decomp_num_vert_units_lyr[MAX_NUM_HME_LAYERS];
720 WORD32 a14_decomp_lyr_unit_size[MAX_NUM_HME_LAYERS];
721 WORD32 layer_no;
722 WORD32 decomp_lyr_cnt;
723 WORD32 num_jobs;
724 WORD32 n_tot_layers;
725 WORD32 a_wd[MAX_NUM_HME_LAYERS];
726 WORD32 a_ht[MAX_NUM_HME_LAYERS];
727 WORD32 a_disp_wd[MAX_NUM_HME_LAYERS];
728 WORD32 a_disp_ht[MAX_NUM_HME_LAYERS];
729 WORD32 u4_log_ctb_size;
730 WORD32 num_ctb_vert_rows;
731 WORD32 pass;
732 WORD32 me_lyr_cnt;
733 WORD32 num_hme_lyrs;
734 WORD32 ai4_me_num_vert_units_lyr[MAX_NUM_HME_LAYERS];
735 WORD32 me_start_lyr_pass;
736 WORD32 ctb_size;
737 WORD32 me_coarsest_lyr_inp_dep = -1;
738
739 (void)ps_curr_inp;
740 /* derive local variables */
741 ps_ctxt = (enc_ctxt_t *)pv_enc_ctxt;
742 num_ctb_vert_rows = ps_ctxt->s_frm_ctb_prms.i4_num_ctbs_vert;
743
744 /* CHANGE REQUIRED: change the pointer to the job queue buffer */
745 /* memset the entire job que buffer to zero */
746 memset(
747 ps_ctxt->s_multi_thrd.aps_job_q_pre_enc[i4_ping_pong],
748 0,
749 MAX_NUM_VERT_UNITS_FRM * NUM_PRE_ENC_JOBS_QUES * sizeof(job_queue_t));
750
751 /* Get the number of vertical units in a layer from the resolution of the layer */
752 a_wd[0] = ps_ctxt->s_frm_ctb_prms.i4_cu_aligned_pic_wd;
753 a_ht[0] = ps_ctxt->s_frm_ctb_prms.i4_cu_aligned_pic_ht;
754 n_tot_layers = hme_derive_num_layers(1, a_wd, a_ht, a_disp_wd, a_disp_ht);
755 GETRANGE(u4_log_ctb_size, ps_ctxt->s_frm_ctb_prms.i4_ctb_size);
756
757 ASSERT(n_tot_layers >= 3);
758
759 /*
760 * Always force minimum layers as 4 so that we would have both l1 and l2
761 * pre intra analysis
762 */
763 if(n_tot_layers == 3)
764 {
765 n_tot_layers = 4;
766 a_wd[3] = CEIL16(a_wd[2] >> 1);
767 a_ht[3] = CEIL16(a_ht[2] >> 1);
768 }
769
770 for(layer_no = 0; layer_no < n_tot_layers; layer_no++)
771 {
772 ctb_size = 1 << (u4_log_ctb_size - 1 - layer_no);
773 ai4_decomp_num_vert_units_lyr[layer_no] = ((a_ht[layer_no] + ctb_size) & ~(ctb_size - 1)) >>
774 (u4_log_ctb_size - 1 - layer_no);
775 a14_decomp_lyr_unit_size[layer_no] = 1 << (u4_log_ctb_size - 1 - layer_no);
776 }
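/*
 * Worked example (illustrative): for a layer whose unit size works out to 64
 * rows and whose height is 1080, the expression above gives
 *     ((1080 + 64) & ~63) >> 6  =  1088 >> 6  =  17
 * vertical units, i.e. 17 rows of jobs for that layer.
 */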
777
778 /* get the start address of Job queues */
779 ps_decomp_job_queue_lyr0 = ps_ctxt->s_multi_thrd.aps_job_q_pre_enc[i4_ping_pong];
780 ps_decomp_job_queue_lyr1 = ps_decomp_job_queue_lyr0 + MAX_NUM_VERT_UNITS_FRM;
781 ps_decomp_job_queue_lyr2 = ps_decomp_job_queue_lyr1 + MAX_NUM_VERT_UNITS_FRM;
782 ps_decomp_job_queue_lyr3 = ps_decomp_job_queue_lyr2 + MAX_NUM_VERT_UNITS_FRM;
783 ps_me_job_queue_lyr4 = ps_decomp_job_queue_lyr3 + MAX_NUM_VERT_UNITS_FRM;
784 ps_me_job_queue_lyr3 = ps_me_job_queue_lyr4 + MAX_NUM_VERT_UNITS_FRM;
785 ps_me_job_queue_lyr2 = ps_me_job_queue_lyr3 + MAX_NUM_VERT_UNITS_FRM;
786 ps_me_job_queue_lyr1 = ps_me_job_queue_lyr2 + MAX_NUM_VERT_UNITS_FRM;
787
788 ps_ipe_job_queue = ps_me_job_queue_lyr1 + MAX_NUM_VERT_UNITS_FRM;
789
790 /* store the JOB queue in the Job handle */
791 ps_ctxt->s_multi_thrd.as_job_que_preenc_hdls[i4_ping_pong][DECOMP_JOB_LYR0].pv_next =
792 (void *)ps_decomp_job_queue_lyr0;
793 ps_ctxt->s_multi_thrd.as_job_que_preenc_hdls[i4_ping_pong][DECOMP_JOB_LYR1].pv_next =
794 (void *)ps_decomp_job_queue_lyr1;
795 ps_ctxt->s_multi_thrd.as_job_que_preenc_hdls[i4_ping_pong][DECOMP_JOB_LYR2].pv_next =
796 (void *)ps_decomp_job_queue_lyr2;
797 ps_ctxt->s_multi_thrd.as_job_que_preenc_hdls[i4_ping_pong][DECOMP_JOB_LYR3].pv_next =
798 (void *)ps_decomp_job_queue_lyr3;
799 ps_ctxt->s_multi_thrd.as_job_que_preenc_hdls[i4_ping_pong][ME_JOB_LYR4].pv_next =
800 (void *)ps_me_job_queue_lyr4;
801 ps_ctxt->s_multi_thrd.as_job_que_preenc_hdls[i4_ping_pong][ME_JOB_LYR3].pv_next =
802 (void *)ps_me_job_queue_lyr3;
803 ps_ctxt->s_multi_thrd.as_job_que_preenc_hdls[i4_ping_pong][ME_JOB_LYR2].pv_next =
804 (void *)ps_me_job_queue_lyr2;
805 ps_ctxt->s_multi_thrd.as_job_que_preenc_hdls[i4_ping_pong][ME_JOB_LYR1].pv_next =
806 (void *)ps_me_job_queue_lyr1;
807 ps_ctxt->s_multi_thrd.as_job_que_preenc_hdls[i4_ping_pong][IPE_JOB_LYR0].pv_next =
808 (void *)ps_ipe_job_queue;
809
810 /* store the ME Jobs que into array */
811 aps_me_job_queues[0] = NULL;
812 aps_me_job_queues[1] = ps_me_job_queue_lyr1;
813 aps_me_job_queues[2] = ps_me_job_queue_lyr2;
814 aps_me_job_queues[3] = ps_me_job_queue_lyr3;
815 aps_me_job_queues[4] = ps_me_job_queue_lyr4;
816 decomp_lyr_cnt = 0;
817 /* Set the me_lyr_cnt to 0 */
818 me_lyr_cnt = 0;
819
820 /* call the ME function which returns the layer properties */
821 ihevce_coarse_me_get_lyr_prms_job_que(
822 ps_ctxt->s_module_ctxt.pv_coarse_me_ctxt,
823 ps_curr_inp,
824 &num_hme_lyrs,
825 &ai4_me_num_vert_units_lyr[0],
826 &ps_ctxt->s_multi_thrd.as_me_job_q_prms[0][0]);
827
828 ps_me_job_q_prms = &ps_ctxt->s_multi_thrd.as_me_job_q_prms[0][0];
829
830 /* derive ME coarsest layer task type */
831 me_start_lyr_pass = ME_JOB_LYR4 + (MAX_NUM_HME_LAYERS - num_hme_lyrs);
832
833 ps_ctxt->s_multi_thrd.i4_me_coarsest_lyr_type = me_start_lyr_pass;
834
835 /* the number of vertical units in the coarsest HME layer should be less than or equal to MAX_IN_DEP of the Job queue */
836 /* this constraint is to take care of the coarsest layer requiring the entire layer to do a FULL search */
837 ASSERT(ai4_me_num_vert_units_lyr[0] <= MAX_IN_DEP);
838 /* loop over all the passes in the encoder */
839 for(pass = 0; pass < NUM_PRE_ENC_JOBS_QUES; pass++)
840 {
841 job_queue_t *ps_pre_enc_job_queue_curr;
842 job_queue_t *ps_pre_enc_job_queue_next;
843 WORD32 inp_dep_pass;
844 WORD32 out_dep_pass;
845 WORD32 num_vert_units;
846 HEVCE_PRE_ENC_JOB_TYPES_T pre_enc_task_type;
847 HEVCE_ENC_JOB_TYPES_T enc_task_type;
848 WORD32 proc_valid_flag = 0;
849
850 // num_vert_units = ai4_decomp_num_vert_units_lyr[decomp_lyr_cnt];
851 /* Initialize the job queues for the max number of rows among all the layers; the max is for the last layer */
852 num_vert_units = ai4_decomp_num_vert_units_lyr[n_tot_layers - 1];
853
854 if(DECOMP_JOB_LYR0 == pass)
855 {
856 proc_valid_flag = 1;
857 pre_enc_task_type = DECOMP_JOB_LYR0;
858 enc_task_type = (HEVCE_ENC_JOB_TYPES_T)-1;
859 ps_pre_enc_job_queue_curr = ps_decomp_job_queue_lyr0;
860
861 inp_dep_pass = 0;
862 decomp_lyr_cnt++;
863
864 /* If all the decomp layers are done next job queue will be ME job queue */
865 if(decomp_lyr_cnt == (n_tot_layers - 1))
866 {
867 /* Assumption : num_hme_lyrs > 1*/
868 ps_pre_enc_job_queue_next = aps_me_job_queues[num_hme_lyrs - 1];
869
870 /* ME coarsest layer is currently made dependent on entire decomp layer */
871 out_dep_pass = ai4_me_num_vert_units_lyr[0];
872 me_coarsest_lyr_inp_dep = num_vert_units;
873 }
874 else
875 {
876 ps_pre_enc_job_queue_next = ps_decomp_job_queue_lyr1;
877 out_dep_pass = 3;
878 }
879 }
880 else if((DECOMP_JOB_LYR1 == pass) && (decomp_lyr_cnt != (n_tot_layers - 1)))
881 {
882 proc_valid_flag = 1;
883 pre_enc_task_type = DECOMP_JOB_LYR1;
884 enc_task_type = (HEVCE_ENC_JOB_TYPES_T)-1;
885 ps_pre_enc_job_queue_curr = ps_decomp_job_queue_lyr1;
886
887 inp_dep_pass = 3;
888 decomp_lyr_cnt++;
889
890 /* If all the decomp layers are done next job queue will be ME job queue */
891 if(decomp_lyr_cnt == (n_tot_layers - 1))
892 {
893 /* Assumption : num_hme_lyrs > 1*/
894 ps_pre_enc_job_queue_next = aps_me_job_queues[num_hme_lyrs - 1];
895
896 /* ME coarsest layer is currently made dependent on entire decomp layer */
897 out_dep_pass = ai4_me_num_vert_units_lyr[0];
898 me_coarsest_lyr_inp_dep = num_vert_units;
899 }
900 else
901 {
902 ps_pre_enc_job_queue_next = ps_decomp_job_queue_lyr2;
903 out_dep_pass = 3;
904 }
905 }
906 else if((DECOMP_JOB_LYR2 == pass) && (decomp_lyr_cnt != (n_tot_layers - 1)))
907 {
908 proc_valid_flag = 1;
909 pre_enc_task_type = DECOMP_JOB_LYR2;
910 enc_task_type = (HEVCE_ENC_JOB_TYPES_T)-1;
911 ps_pre_enc_job_queue_curr = ps_decomp_job_queue_lyr2;
912
913 inp_dep_pass = 3;
914 decomp_lyr_cnt++;
915
916 /* If all the decomp layers are done next job queue will be ME job queue */
917 if(decomp_lyr_cnt == (n_tot_layers - 1))
918 {
919 /* Assumption : num_hme_lyrs > 1*/
920 ps_pre_enc_job_queue_next = aps_me_job_queues[num_hme_lyrs - 1];
921
922 /* ME coarsest layer is currently made dependent on entire decomp layer */
923 out_dep_pass = ai4_me_num_vert_units_lyr[0];
924 me_coarsest_lyr_inp_dep = num_vert_units;
925 }
926 else
927 {
928 /* right now MAX 4 layers worth of JOB queues are prepared */
929 ASSERT(0);
930 }
931 }
932
933 else if(IPE_JOB_LYR0 == pass)
934 {
935 proc_valid_flag = 1;
936 pre_enc_task_type = IPE_JOB_LYR0;
937 enc_task_type = (HEVCE_ENC_JOB_TYPES_T)-1;
938 ps_pre_enc_job_queue_curr = ps_ipe_job_queue;
939 ps_pre_enc_job_queue_next = NULL;
940 num_vert_units = num_ctb_vert_rows;
941 }
942 else if(((pass >= ME_JOB_LYR4) && (pass <= ME_JOB_LYR1)) && (pass >= me_start_lyr_pass))
943 {
944 /* num_vert_units of the coarsest layer is stored at index 0 */
945 num_vert_units = ai4_me_num_vert_units_lyr[me_lyr_cnt];
946 proc_valid_flag = 1;
947
948 pre_enc_task_type =
949 (HEVCE_PRE_ENC_JOB_TYPES_T)((WORD32)ME_JOB_LYR1 - (num_hme_lyrs - me_lyr_cnt - 2));
950
951 enc_task_type = (HEVCE_ENC_JOB_TYPES_T)-1;
952
953 /* Assumption : num_hme_lyrs > 1*/
954 ps_pre_enc_job_queue_curr = aps_me_job_queues[num_hme_lyrs - me_lyr_cnt - 1];
955
956 if(me_lyr_cnt == (num_hme_lyrs - 2))
957 {
958 ps_pre_enc_job_queue_next = ps_ipe_job_queue;
959 }
960 else
961 {
962 ps_pre_enc_job_queue_next = aps_me_job_queues[num_hme_lyrs - me_lyr_cnt - 2];
963 }
964 me_lyr_cnt++;
965 }
966
967 /* check for valid processing flag */
968 if(0 == proc_valid_flag)
969 {
970 continue;
971 }
972
973 /* in the loop, ps_me_job_q_prms gets incremented for every row, */
974 /* so at the end of one layer the pointer will be correctly */
975 /* pointing to the start of the next layer */
976
977 /* loop over all the vertical rows */
978 for(num_jobs = 0; num_jobs < num_vert_units; num_jobs++)
979 {
980 ULWORD64 u8_temp;
981 WORD32 inp_dep = 0;
982 WORD32 out_dep = 0;
983 WORD32 ctr;
984 WORD32 job_off_ipe;
985
986 if(IPE_JOB_LYR0 == pass)
987 {
988 ps_pre_enc_job_queue_curr->s_job_info.s_ipe_job_info.i4_ctb_row_no = num_jobs;
989 inp_dep = ps_me_job_q_prms->i4_num_inp_dep;
990 out_dep = 0;
991 }
992 else if((pass >= DECOMP_JOB_LYR0) && (pass <= DECOMP_JOB_LYR3))
993 {
994 ps_pre_enc_job_queue_curr->s_job_info.s_decomp_job_info.i4_vert_unit_row_no =
995 num_jobs;
996
997 /* Input and output dependencies of the 1st and last rows are 1 less than those of other rows */
998 inp_dep = inp_dep_pass;
999 out_dep = out_dep_pass;
1000
1001 if(pass != DECOMP_JOB_LYR0)
1002 {
1003 if(((num_jobs == 0) || (num_jobs == num_vert_units - 1)))
1004 {
1005 inp_dep = inp_dep_pass - 1;
1006 }
1007 }
1008
1009 if(pass != (DECOMP_JOB_LYR0 + n_tot_layers - 2))
1010 {
1011 if(((num_jobs == 0) || (num_jobs == num_vert_units - 1)))
1012 {
1013 out_dep = out_dep_pass - 1;
1014 }
1015 }
1016 }
1017 else /* remaining all are ME JOBS */
1018 {
1019 ps_pre_enc_job_queue_curr->s_job_info.s_me_job_info.i4_vert_unit_row_no = num_jobs;
1020
1021 if(pass == me_start_lyr_pass)
1022 {
1023 ASSERT(me_coarsest_lyr_inp_dep != -1);
1024 inp_dep = me_coarsest_lyr_inp_dep;
1025 }
1026 else
1027 {
1028 inp_dep = ps_me_job_q_prms->i4_num_inp_dep;
1029 }
1030 out_dep = ps_me_job_q_prms->i4_num_output_dep;
1031 }
1032 ps_pre_enc_job_queue_curr->pv_next = (void *)(ps_pre_enc_job_queue_curr + 1);
1033
1034 ps_pre_enc_job_queue_curr->i4_pre_enc_task_type = pre_enc_task_type;
1035 ps_pre_enc_job_queue_curr->i4_task_type = enc_task_type;
1036
1037 /* Set the input dependencies */
1038 ps_pre_enc_job_queue_curr->i4_num_input_dep = inp_dep;
1039
1040 /* set the entire input dep buffer to default value 0 */
1041 memset(&ps_pre_enc_job_queue_curr->au1_in_dep[0], 0, sizeof(UWORD8) * MAX_IN_DEP);
1042
1043 /* set the input dep buffer to 1 for num inp dep */
1044 if(0 != inp_dep)
1045 {
1046 memset(&ps_pre_enc_job_queue_curr->au1_in_dep[0], 1, sizeof(UWORD8) * inp_dep);
1047 }
1048
1049 /* If the decomposition layers end at this pass, the number of out dependencies
1050 * is based on the number of vertical units in the coarsest layer of HME.
1051 * This is because the search range in the coarsest layer covers almost the
1052 * entire frame (a search range of +-128 in the vertical direction is the max supported)
1053 */
1054 if(pass == (DECOMP_JOB_LYR0 + n_tot_layers - 2))
1055 {
1056 job_off_ipe = 0;
1057 }
1058 else
1059 {
1060 if(num_jobs == 0)
1061 job_off_ipe = num_jobs;
1062
1063 else
1064 job_off_ipe = num_jobs - 1;
1065 }
1066
1067 /* Set the offsets of output dependencies */
1068 ps_pre_enc_job_queue_curr->i4_num_output_dep = out_dep;
1069
1070 /* set the entire offset buffer to default value */
1071 memset(
1072 &ps_pre_enc_job_queue_curr->au4_out_ofsts[0], 0xFF, sizeof(UWORD32) * MAX_OUT_DEP);
1073
1074 for(ctr = 0; ctr < out_dep; ctr++)
1075 {
1076 /* if IPE or DECOMP loop the dep is 1 to 1*/
1077 if(((pass >= DECOMP_JOB_LYR0) && (pass <= DECOMP_JOB_LYR3)) ||
1078 (IPE_JOB_LYR0 == pass))
1079 {
1080 u8_temp = (ULWORD64)(
1081 &ps_pre_enc_job_queue_next[job_off_ipe] -
1082 ps_ctxt->s_multi_thrd.aps_job_q_pre_enc[i4_ping_pong]);
1083
1084 u8_temp *= sizeof(job_queue_t);
1085
1086 /* add the exact inp dep byte for the next layer JOB */
1087 u8_temp += ps_pre_enc_job_queue_next[job_off_ipe].i4_num_input_dep;
1088
1089 /* increment the inp dep number for a given job */
1090 ps_pre_enc_job_queue_next[job_off_ipe].i4_num_input_dep++;
1091
1092 job_off_ipe++;
1093 }
1094 else if((pass >= ME_JOB_LYR4) && (pass <= ME_JOB_LYR1))
1095 {
1096 /* ME layer Jobs */
1097 WORD32 job_off;
1098
1099 job_off = ps_me_job_q_prms->ai4_out_dep_unit_off[ctr];
1100
1101 u8_temp = (ULWORD64)(
1102 &ps_pre_enc_job_queue_next[job_off] -
1103 ps_ctxt->s_multi_thrd.aps_job_q_pre_enc[i4_ping_pong]);
1104
1105 u8_temp *= sizeof(job_queue_t);
1106
1107 /* add the exact inp dep byte for the next layer JOB */
1108 u8_temp += ps_pre_enc_job_queue_next[job_off].i4_num_input_dep;
1109
1110 /* increment the inp dep number for a given job */
1111 ps_pre_enc_job_queue_next[job_off].i4_num_input_dep++;
1112 }
1113 /* store the offset to the array */
1114 ps_pre_enc_job_queue_curr->au4_out_ofsts[ctr] = (UWORD32)u8_temp;
1115 }
1116 /* ME job q params are incremented only for ME and IPE jobs */
1117 if(((pass >= ME_JOB_LYR4) && (pass <= ME_JOB_LYR1)) || (IPE_JOB_LYR0 == pass))
1118 {
1119 ps_me_job_q_prms++;
1120 }
1121 ps_pre_enc_job_queue_curr++;
1122 }
1123
1124 /* set the last pointer to NULL */
1125 ps_pre_enc_job_queue_curr--;
1126 ps_pre_enc_job_queue_curr->pv_next = (void *)NULL;
1127 }
1128
1129 /* reset the num ctb processed in every row for IPE sync */
1130 memset(
1131 &ps_ctxt->s_multi_thrd.ai4_ctbs_in_row_proc_ipe_pass[0],
1132 0,
1133 (MAX_NUM_CTB_ROWS_FRM * sizeof(WORD32)));
1134
1135 } /* End of ihevce_prepare_pre_enc_job_queue */
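/*
 * Illustrative summary (not part of the build) of the dependency chain that
 * ihevce_prepare_pre_enc_job_queue() sets up for one ping-pong instance
 * (shown for a 4-layer configuration):
 *
 *     DECOMP_JOB_LYR0 row r  -->  DECOMP_JOB_LYR1 rows {r-1, r, r+1} (2 at frame edges)
 *     DECOMP_JOB_LYR1 row r  -->  DECOMP_JOB_LYR2 rows {r-1, r, r+1}
 *     last decomp layer      -->  every row of the coarsest HME layer
 *                                 (full-frame search range in the coarsest layer)
 *     coarser HME layer rows -->  finer HME layer rows, as given by as_me_job_q_prms
 *     ME_JOB_LYR1 rows       -->  IPE_JOB_LYR0 CTB rows
 *
 * Every arrow is realised as a byte offset stored in au4_out_ofsts[] of the
 * producer job; when the producer finishes, ihevce_pre_enc_grp_job_set_out_dep()
 * clears the corresponding au1_in_dep[] byte of the consumer job.
 */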
1136