
Searched refs: S32 (Results 1 – 25 of 553) sorted by relevance
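The hits below fall into three groups: C sources in libhevc/libxaac where S32 is a project-local typedef for a signed 32-bit integer, ARM NEON assembly where ".S32" is the element-type suffix selecting signed 32-bit lanes, and an LLVM GlobalISel file where S32 is a local LLT value naming the generic 32-bit scalar type. A minimal sketch of the typedef convention behind the C hits (the exact header and base types are assumed here, not quoted from the tree):

    // Sketch only: fixed-width aliases in the style the libhevc/libxaac hits
    // below rely on; the real headers may spell these slightly differently.
    #include <cstdint>
    typedef int32_t   S32;       // signed 32-bit
    typedef int8_t    S08;       // signed 8-bit
    typedef uint8_t   U08;       // unsigned 8-bit, typically pixel samples
    typedef uint16_t  U16;       // unsigned 16-bit
    typedef uint32_t  U32;       // unsigned 32-bit
    typedef uint64_t  ULWORD64;  // unsigned 64-bit accumulator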


/external/libhevc/encoder/
hme_interface.h
185 S32 luma_stride;
188 S32 luma_offset;
212 S32 chroma_stride;
214 S32 chroma_offset;
245 S32 i4_ref_stride;
248 S32 i4_blk_wd;
251 S32 i4_blk_ht;
257 S32 i4_out_stride;
263 S32 i4_final_out_stride;
270 typedef void (*PF_EXT_UPDATE_FXN_T)(void *, void *, S32, S32);
[all …]
hme_utils.h
59 void hme_init_histogram(mv_hist_t *ps_hist, S32 i4_max_mv_x, S32 i4_max_mv_y);
76 void hme_update_histogram(mv_hist_t *ps_hist, S32 i4_mv_x, S32 i4_mv_y);
94 void hme_get_global_mv(layer_ctxt_t *ps_prev_layer, hme_mv_t *ps_mv, S32 i4_delta_poc);
158 S32 i4_num_pred_dir);
174 S32 hme_create_valid_part_ids(S32 i4_part_mask, S32 *pi4_valid_part_ids);
199 S32 get_num_blks_in_ctb(S32 i4_ctb_x, S32 i4_ctb_y, S32 i4_pic_wd, S32 i4_pic_ht, S32 i4_blk_size);
237 S32 i4_src1_stride,
238 S32 i4_src2_stride,
239 S32 i4_blk_wd,
240 S32 i4_blk_ht,
[all …]
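The histogram helpers declared above bin motion vectors so that a dominant (global) MV can later be read off per layer via hme_get_global_mv(). A minimal sketch of that idea, with hypothetical field names and bin sizes (not libhevc's actual mv_hist_t layout):

    /* Hypothetical MV histogram for global-MV estimation; bin width and
     * table size are assumptions, not the library's real values. */
    #define BIN_SHIFT 3            /* one bin per 8-pel step of MV range */
    #define NUM_BINS  64
    typedef struct {
        int count[NUM_BINS][NUM_BINS];
        int off_x, off_y;          /* shift negative MVs into bin range */
    } mv_hist_sketch_t;

    static void hist_update(mv_hist_sketch_t *h, int mv_x, int mv_y)
    {
        int col = (mv_x + h->off_x) >> BIN_SHIFT;
        int row = (mv_y + h->off_y) >> BIN_SHIFT;
        if (col >= 0 && col < NUM_BINS && row >= 0 && row < NUM_BINS)
            h->count[row][col]++;  /* a global-MV pass would then pick the
                                      most populated bin */
    }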
hme_defs.h
221 S32 y, rnd; \
257 S32 mvx_q8 = (ps_mv)->mvx << 8; \
258 S32 mvy_q8 = (ps_mv)->mvy << 8; \
259 S32 mvcx_q8 = (ps_data)->s_centroid.i4_pos_x_q8; \
260 S32 mvcy_q8 = (ps_data)->s_centroid.i4_pos_y_q8; \
262 S32 mvdx_q8 = mvx_q8 - mvcx_q8; \
263 S32 mvdy_q8 = mvy_q8 - mvcy_q8; \
265 S32 mvdx = (mvdx_q8 + (1 << 7)) >> 8; \
266 S32 mvdy = (mvdy_q8 + (1 << 7)) >> 8; \
268 S32 mvd = ABS(mvdx) + ABS(mvdy); \
[all …]
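The macro fragments above compute, in Q8 fixed point, how far a candidate MV lies from a running centroid: the MV is promoted to Q8, the centroid position is subtracted, the result is rounded back to integer pel, and the L1 norm is taken. A plain-C restatement of that arithmetic (function and parameter names are illustrative):

    /* Restates the Q8 distance-from-centroid arithmetic shown above. */
    static int mv_dist_from_centroid(int mvx, int mvy,
                                     int centroid_x_q8, int centroid_y_q8)
    {
        int mvx_q8  = mvx << 8, mvy_q8 = mvy << 8;   /* promote to Q8 */
        int mvdx_q8 = mvx_q8 - centroid_x_q8;
        int mvdy_q8 = mvy_q8 - centroid_y_q8;
        int mvdx    = (mvdx_q8 + (1 << 7)) >> 8;     /* round to pel  */
        int mvdy    = (mvdy_q8 + (1 << 7)) >> 8;
        int abs_dx  = mvdx < 0 ? -mvdx : mvdx;
        int abs_dy  = mvdy < 0 ? -mvdy : mvdy;
        return abs_dx + abs_dy;                      /* L1 distance   */
    }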
ihevce_me_instr_set_router.h
54 hme_search_prms_t *, wgt_pred_ctxt_t *, err_prms_t *, result_upd_prms_t *, U08 **, S32);
58 typedef void FT_QPEL_INTERP_AVG(interp_prms_t *, S32, S32, S32);
60 typedef void FT_QPEL_INTERP_AVG_1PT(interp_prms_t *, S32, S32, S32, U08 **, S32 *);
62 typedef void FT_QPEL_INTERP_AVG_2PT(interp_prms_t *, S32, S32, U08 **, S32 *);
64 typedef void FT_GET_WT_INP(layer_ctxt_t *, wgt_pred_ctxt_t *, S32, S32, S32, S32, S32, U08);
81 typedef void FT_MV_CLIPPER(hme_search_prms_t *, S32, S08, U08, U08, U08);
83 typedef void FT_COMPUTE_VARIANCE(U08 *, S32, S32 *, U32 *, S32, U08);
86 U08 *, S32, S32 *, ULWORD64 *, ULWORD64 *, S32 *, S32, S32, S32, S32, S32, U08);
171 BLK_SIZE_T e_blk_size, S32 i4_grid_mask, S32 i4_part_mask);
179 U08 u1_is_cu_noisy, S32 i4_part_mask, S32 num_parts, S32 num_results);
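The FT_* typedefs above define function-pointer types for the SAD, interpolation, weighting and variance kernels, presumably so that plain-C or SIMD-specific implementations can be plugged in when the router is initialised. A minimal sketch of that dispatch pattern, with hypothetical names:

    /* Hypothetical sketch of function-pointer dispatch in the style the
     * FT_* typedefs enable; kernel names and selector are illustrative. */
    typedef void FT_EXAMPLE_KERNEL(const unsigned char *src, int stride);

    static void kernel_c(const unsigned char *src, int stride)    { (void)src; (void)stride; /* scalar path */ }
    static void kernel_simd(const unsigned char *src, int stride) { (void)src; (void)stride; /* SIMD path   */ }

    typedef struct { FT_EXAMPLE_KERNEL *pf_kernel; } router_sketch_t;

    static void init_router(router_sketch_t *s, int have_simd)
    {
        s->pf_kernel = have_simd ? kernel_simd : kernel_c;  /* chosen once at init */
    }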
hme_err_compute.h
119 PF_RESULT_FXN_T hme_get_result_fxn(S32 i4_grid_mask, S32 i4_part_mask, S32 i4_num_results);
127 S32 compute_mv_cost(search_node_t *ps_search_node, pred_ctxt_t *ps_pred_ctxt, BLK_SIZE_T e_blk_size…
137 S32 pred_lx,
138 S32 lambda,
139 S32 lambda_q_shift,
149 S32 pred_lx,
150 S32 lambda,
151 S32 lambda_q_shift,
173 S32 compute_mv_cost_coarse(
174 search_node_t *ps_node, pred_ctxt_t *ps_pred_ctxt, PART_ID_T e_part_id, S32 inp_mv_pel);
[all …]
hme_common_utils.c
76 U32 hme_compute_2d_sum_u08(U08 *pu1_inp, S32 i4_wd, S32 i4_ht, S32 i4_stride) in hme_compute_2d_sum_u08()
78 S32 i, j; in hme_compute_2d_sum_u08()
91 U32 hme_compute_2d_sum_u16(U16 *pu2_inp, S32 i4_wd, S32 i4_ht, S32 i4_stride) in hme_compute_2d_sum_u16()
93 S32 i, j; in hme_compute_2d_sum_u16()
106 U32 hme_compute_2d_sum_u32(U32 *pu4_inp, S32 i4_wd, S32 i4_ht, S32 i4_stride) in hme_compute_2d_sum_u32()
108 S32 i, j; in hme_compute_2d_sum_u32()
147 void *pv_inp, S32 i4_blk_wd, S32 i4_blk_ht, S32 i4_stride, S32 i4_datatype) in hme_compute_2d_sum_unsigned()
174 S32 get_rand_num(S32 low, S32 high) in get_rand_num()
177 S32 result; in get_rand_num()
181 result = (S32)floor((num + 0.5)); in get_rand_num()
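hme_common_utils.c provides 2-D sums over strided 8-, 16- and 32-bit blocks (plus a wrapper that switches on the data type), presumably for block statistics. A minimal sketch of the 8-bit variant under the usual stride-in-elements convention:

    /* Sketch of a strided 2-D sum like hme_compute_2d_sum_u08();
     * the signature is simplified and the stride is in elements. */
    static unsigned int sum_2d_u08(const unsigned char *p, int wd, int ht, int stride)
    {
        unsigned int sum = 0;
        int i, j;
        for (i = 0; i < ht; i++, p += stride)
            for (j = 0; j < wd; j++)
                sum += p[j];
        return sum;
    }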
hme_err_compute.c
143 S32 hme_cmp_nodes(search_node_t *ps_best_node1, search_node_t *ps_best_node2) in hme_cmp_nodes()
222 (((S32)cand->pu1_ref_ptr[(z_ref + d)]) - ((S32)pu1_cur_ptr[(z_cur + d)]))); in compute_4x4_sads_for_16x16_blk()
413 S32 i4_ref_idx = 0, i; in hme_evalsad_grid_pu_16x16()
414 S32 num_candts = 0; in hme_evalsad_grid_pu_16x16()
442 S32 *pi4_sad = ps_prms->pi4_sad_grid; in hme_evalsad_grid_npu_MxN()
443 S32 i, grid_count = 0; in hme_evalsad_grid_npu_MxN()
444 S32 step = ps_prms->i4_step; in hme_evalsad_grid_npu_MxN()
445 S32 x_off = step, y_off = step * ps_prms->i4_ref_stride; in hme_evalsad_grid_npu_MxN()
462 S32 sad = 0, j, k; in hme_evalsad_grid_npu_MxN()
498 sad += (ABS(((S32)pu1_inp[j] - (S32)pu1_ref[j]))); in hme_evalsad_pt_npu_MxN_8bit_compute()
[all …]
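The hits in hme_err_compute.c revolve around SAD (sum of absolute differences) between an input block and a reference block, evaluated either for one candidate or for a grid of candidates around it. A minimal sketch of the per-point core mirrored by the ABS(...) line above (widths and strides simplified):

    /* Sketch of a single-candidate MxN SAD over 8-bit samples. */
    static int sad_mxn_u08(const unsigned char *inp, int inp_stride,
                           const unsigned char *ref, int ref_stride,
                           int wd, int ht)
    {
        int sad = 0, i, j;
        for (i = 0; i < ht; i++, inp += inp_stride, ref += ref_stride)
            for (j = 0; j < wd; j++)
                sad += inp[j] > ref[j] ? inp[j] - ref[j] : ref[j] - inp[j];
        return sad;
    }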
hme_fullpel.c
118 S32 i4_alpha_stim_multiplier, in hme_fullpel_cand_sifter()
122 S32 i4_i; in hme_fullpel_cand_sifter()
127 S32 i4_temp_part_mask; in hme_fullpel_cand_sifter()
198 S32 *pi4_num_unique_nodes, in hme_add_fpel_refine_candidates_to_search_cand_array()
200 S32 i4_fpel_search_result_id, in hme_add_fpel_refine_candidates_to_search_cand_array()
201 S32 i4_fpel_search_result_array_index, in hme_add_fpel_refine_candidates_to_search_cand_array()
202 S32 i4_unique_node_map_center_x, in hme_add_fpel_refine_candidates_to_search_cand_array()
203 S32 i4_unique_node_map_center_y, in hme_add_fpel_refine_candidates_to_search_cand_array()
212 S32 i2_mvx = in hme_add_fpel_refine_candidates_to_search_cand_array()
214 S32 i2_mvy = in hme_add_fpel_refine_candidates_to_search_cand_array()
[all …]
hme_search_algo.c
138 S32 i4_num_results; in hme_compute_grid_results()
139 S32 part_id; in hme_compute_grid_results()
143 i4_num_results = (S32)ps_result_prms->ps_search_results->u1_num_results_per_part; in hme_compute_grid_results()
183 S32 ai4_sad_grid[9][TOT_NUM_PARTS]; in hme_pred_search_square_stepn()
185 S32 ai4_valid_part_ids[TOT_NUM_PARTS + 1]; in hme_pred_search_square_stepn()
192 S32 i4_num_candts, max_num_iters, i4_num_results; in hme_pred_search_square_stepn()
195 S32 i4_inp_stride, i4_ref_stride, i4_ref_offset; in hme_pred_search_square_stepn()
205 S32 i4_part_mask, i4_grid_mask; in hme_pred_search_square_stepn()
210 S32 i4_blk_wd, i4_blk_ht, i4_step, i4_candt, i4_iter; in hme_pred_search_square_stepn()
211 S32 i4_inp_off; in hme_pred_search_square_stepn()
[all …]
hme_refine.c
124 get_ctb_attrs(S32 ctb_start_x, S32 ctb_start_y, S32 pic_wd, S32 pic_ht, me_frm_ctxt_t *ps_ctxt);
130 S32 i4_pos_x,
131 S32 i4_pos_y,
133 S32 i4_result_id);
139 S32 i4_pos_x,
140 S32 i4_pos_y,
141 S32 i4_num_act_ref_l0,
144 S32 i4_result_id);
284 S32 i4_num_cands_added, in hme_add_me_best_as_merge_cands()
584 S32 i4_search_idx, in hme_pick_eval_merge_candts()
[all …]
hme_subpel.c
121 void hme_qpel_interp_avg(interp_prms_t *ps_prms, S32 i4_mv_x, S32 i4_mv_y, S32 i4_buf_id) in hme_qpel_interp_avg()
125 S32 i4_mv_x_frac, i4_mv_y_frac, i4_offset; in hme_qpel_interp_avg()
196 S32 i4_mv_x, in hme_qpel_interp_avg_2pt_vert_no_reuse()
197 S32 i4_mv_y, in hme_qpel_interp_avg_2pt_vert_no_reuse()
199 S32 *pi4_final_stride, in hme_qpel_interp_avg_2pt_vert_no_reuse()
209 S32 i4_mv_x, in hme_qpel_interp_avg_2pt_horz_no_reuse()
210 S32 i4_mv_y, in hme_qpel_interp_avg_2pt_horz_no_reuse()
212 S32 *pi4_final_stride, in hme_qpel_interp_avg_2pt_horz_no_reuse()
243 S32 *pi4_final_stride, in hme_qpel_interp_comprehensive()
244 S32 i4_mv_x, in hme_qpel_interp_comprehensive()
[all …]
hme_interface.c
98 S32 i, j; in hme_init_globals()
649 S32 hme_enc_num_alloc(WORD32 i4_num_me_frm_pllel) in hme_enc_num_alloc()
653 return ((S32)MAX_HME_ENC_TOT_MEMTABS); in hme_enc_num_alloc()
657 return ((S32)MIN_HME_ENC_TOT_MEMTABS); in hme_enc_num_alloc()
670 S32 hme_coarse_num_alloc() in hme_coarse_num_alloc()
672 return ((S32)HME_COARSE_TOT_MEMTABS); in hme_coarse_num_alloc()
689 S32 hme_validate_init_prms(hme_init_prms_t *ps_prms) in hme_validate_init_prms()
691 S32 n_layers = ps_prms->num_simulcast_layers; in hme_validate_init_prms()
714 layer_ctxt_t *ps_layer, S32 wd, S32 ht, S32 disp_wd, S32 disp_ht, U08 u1_enc) in hme_set_layer_res_attrs()
740 S32 n_tot_layers, in hme_coarse_get_layer1_mv_bank_ref_idx_size()
[all …]
hme_utils.c
107 S32 stride, in ihevce_open_loop_pred_data()
108 S32 src_strd, in ihevce_open_loop_pred_data()
111 S32 best_sad_l0 = -1, best_sad_l1 = -1; in ihevce_open_loop_pred_data()
112 S32 sad_diff, status; in ihevce_open_loop_pred_data()
193 void *hme_get_wkg_mem(buf_mgr_t *ps_buf_mgr, S32 i4_size) in hme_get_wkg_mem()
223 void hme_init_histogram(mv_hist_t *ps_hist, S32 i4_max_mv_x, S32 i4_max_mv_y) in hme_init_histogram()
225 S32 i4_num_bins, i4_num_cols, i4_num_rows; in hme_init_histogram()
226 S32 i4_shift_x, i4_shift_y, i, i4_range, i4_val; in hme_init_histogram()
291 void hme_update_histogram(mv_hist_t *ps_hist, S32 i4_mv_x, S32 i4_mv_y) in hme_update_histogram()
293 S32 i4_bin_index, i4_col, i4_row; in hme_update_histogram()
[all …]
hme_fullpel.h
47 S32 i4_alpha_stim_multiplier,
59 S32 i4_unique_node_map_center_x,
60 S32 i4_unique_node_map_center_y,
65 S32 hme_remove_duplicate_fpel_search_candidates(
70 S32 i4_num_srch_cands,
71 S32 i4_num_init_candts,
72 S32 i4_refine_iter_ctr,
73 S32 i4_num_refinement_iterations,
74 S32 i4_num_act_ref_l0,
76 S32 i4_unique_node_map_center_x,
[all …]
/external/libxaac/decoder/armv7/
ixheaacd_post_twiddle_overlap.s
47 VMOV.S32 Q10, #0x00008000
141 VMULL.S32 Q0, D2, D0
149 VQSUB.S32 D8, D0, D8
152 VQSHL.S32 D8, D8, #2
154 VQADD.S32 D8, D8, D0
155 VSHR.S32 D8, D8, #16
235 VNEG.S32 Q15, Q15
264 VNEG.S32 Q1, Q1
269 VSHR.S32 Q12, Q12, #16
271 VSHR.S32 Q13, Q13, #16
[all …]
ixheaacd_esbr_qmfsyn64_winadd.s
51 VMLAL.S32 Q13, D0, D2
52 VMLAL.S32 Q14, D1, D3
57 VMLAL.S32 Q13, D6, D4
58 VMLAL.S32 Q14, D7, D5
63 VMLAL.S32 Q13, D10, D8
64 VMLAL.S32 Q14, D11, D9
69 VMLAL.S32 Q13, D12, D14
70 VMLAL.S32 Q14, D13, D15
75 VMLAL.S32 Q13, D16, D18
76 VMLAL.S32 Q14, D17, D19
[all …]
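The QMF synthesis window-add loop above is built from VMLAL.S32, a widening multiply-accumulate that multiplies pairs of signed 32-bit lanes into 64-bit accumulators. Its C-intrinsics equivalent, as a sketch (buffer names are illustrative):

    /* What one VMLAL.S32 Qd, Dn, Dm step does, via NEON intrinsics. */
    #include <arm_neon.h>

    static int64x2_t winadd_step(int64x2_t acc, const int32_t *samples,
                                 const int32_t *window)
    {
        int32x2_t s = vld1_s32(samples);  /* two signed 32-bit lanes       */
        int32x2_t w = vld1_s32(window);
        return vmlal_s32(acc, s, w);      /* acc += (int64)s * w, per lane */
    }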
ixheaacd_sbr_imdct_using_fft.s
193 VADD.S32 q3, q9, q6
196 VSUB.S32 q7, q9, q6
199 VSUB.S32 q6, q4, q5
202 VADD.S32 q9, q4, q5
205 VADD.S32 q4, q8, q1
208 VSUB.S32 q5, q8, q1
211 VADD.S32 q8, q0, q2
214 VSUB.S32 q0, q0, q2
259 VADD.S32 q11, q7, q15
260 VSUB.S32 q2, q7, q15
[all …]
ixheaacd_overlap_add1.s
51 VQNEG.S32 Q0, Q3
75 VQSHL.S32 Q15, Q15, Q11
80 VMULL.S32 Q13, D4, D14
82 VMULL.S32 Q13, D5, D15
85 VQADD.S32 Q14, Q14, Q10
86 VQSUB.S32 Q13, Q15, Q14
87 VQSHL.S32 Q13, Q13, #2
88 VSHR.S32 Q13, Q13, #16
95 VQSHL.S32 Q12, Q12, Q11
101 VMULL.S32 Q0, D14, D4
[all …]
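The overlap-add routines lean on the saturating forms (VQNEG/VQADD/VQSUB/VQSHL) so that 32-bit fixed-point accumulation clamps instead of wrapping before the final narrowing shift. The same operations via intrinsics, as a sketch (the shift amounts are illustrative, not the decoder's actual values):

    /* Saturating fixed-point pattern in the style of the code above. */
    #include <arm_neon.h>

    static int32x4_t overlap_add_step(int32x4_t a, int32x4_t b, int32x4_t shift)
    {
        int32x4_t sum = vqaddq_s32(a, b);        /* VQADD.S32: clamps on overflow */
        int32x4_t scl = vqshlq_s32(sum, shift);  /* VQSHL.S32: saturating shift   */
        return vshrq_n_s32(scl, 16);             /* VSHR.S32 #16: drop low bits   */
    }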
ixheaacd_imdct_using_fft.s
189 VADD.S32 q3, q9, q6
192 VSUB.S32 q7, q9, q6
195 VSUB.S32 q6, q4, q5
198 VADD.S32 q9, q4, q5
201 VADD.S32 q4, q8, q1
204 VSUB.S32 q5, q8, q1
207 VADD.S32 q8, q0, q2
210 VSUB.S32 q0, q0, q2
255 VADD.S32 q11, q7, q15
256 VSUB.S32 q2, q7, q15
[all …]
ixheaacd_fft32x32_ld.s
152 … @VHADD.S32 q8, q0, q4 @b_data0_r=vhaddq_s32(a_data0_r_i.val[0],a_data4_r_i.val[0])@
156 … @VHSUB.S32 q9, q0, q4 @b_data4_r=vhsubq_s32(a_data0_r_i.val[0],a_data4_r_i.val[0])@
163 … @VHADD.S32 q0, q1, q5 @b_data0_i=vhaddq_s32(a_data0_r_i.val[1],a_data4_r_i.val[1])@
167 … @VHSUB.S32 q4, q1, q5 @b_data4_i=vhsubq_s32(a_data0_r_i.val[1],a_data4_r_i.val[1])@
181 … @VHADD.S32 q1, q2, q6 @b_data2_r=vhaddq_s32(a_data2_r_i.val[0],a_data6_r_i.val[0])@
185 … @VHSUB.S32 q5, q2, q6 @b_data6_r=vhsubq_s32(a_data2_r_i.val[0],a_data6_r_i.val[0])@
189 … @VHADD.S32 q2, q3, q7 @b_data2_i=vhaddq_s32(a_data2_r_i.val[1],a_data6_r_i.val[1])@
193 … @VHSUB.S32 q6, q3, q7 @b_data6_i=vhsubq_s32(a_data2_r_i.val[1],a_data6_r_i.val[1])@
197 VADD.S32 q3, q9, q6 @c_data4_r=vaddq_s32(b_data4_r,b_data6_i)@
200 VSUB.S32 q7, q9, q6 @c_data6_r=vsubq_s32(b_data4_r,b_data6_i)@
[all …]
ixheaacd_overlap_add2.s
66 VSHR.S32 Q13, Q13, #16
72 VQADD.S32 Q12, Q13, Q10
73 VQSHL.S32 Q12, Q12, Q11
88 VSHR.S32 Q9, Q9, #16
92 VSHR.S32 Q13, Q13, #16
105 VQADD.S32 Q8, Q9, Q10
107 VQADD.S32 Q12, Q13, Q10
108 VQSHL.S32 Q8, Q8, Q11
110 VQSHL.S32 Q12, Q12, Q11
128 VSHR.S32 Q9, Q9, #16
[all …]
ia_xheaacd_mps_reoder_mulshift_acc.s
64 VMULL.S32 Q12, D0, D8
65 VMULL.S32 Q10, D1, D9
66 VMULL.S32 Q11, D3, D11
67 VMULL.S32 Q13, D2, D10
69 VMULL.S32 Q0, D4, D12
70 VMULL.S32 Q4, D5, D13
71 VMULL.S32 Q5, D7, D15
72 VMULL.S32 Q1, D6, D14
108 VMULL.S32 Q12, D0, D8
109 VMULL.S32 Q10, D1, D9
[all …]
/external/libhevc/common/arm/
ihevc_resi_trans_32x32_a9q.s
124 VLD1.S32 D30[0],[R9],R12
125 VLD1.S32 D30[1],[R9],R12 @ D30 - [0 0] [0 1] [8 0] [8 1]
126 VLD1.S32 D31[0],[R9],R12
127 VLD1.S32 D31[1],[R9],R12 @ D31 - [16 0] [16 1] [24 0] [24 1]
129 VTRN.S32 D30,D31 @ D30 - [0 0] [0 1] [16 0] [16 1]
140 VMOV.S32 Q14,#0
249 VTRN.S32 D26,D27
271 VDUP.S32 D8,D4[0] @ R1eeee[0] R1eeeo[0] R1eeee[0] R1eeeo[0]
272 VDUP.S32 D9,D4[1] @ R2eeee[0] R2eeeo[0] R2eeee[0] R2eeeo[0]
273 VDUP.S32 D10,D5[0] @ R1eeee[1] R1eeeo[1] R1eeee[1] R1eeeo[1]
[all …]
ihevc_resi_trans.s
291 VSUB.S32 q10,q9,q7 @ q10 = q9 - q7 = 55*C1 - 84*C2 - 29*C4
293 VRSHR.S32 q8,q8,#1 @ Truncating the 1 bit in q8
299 VRSHR.S32 q7,q7,#1 @ Truncating the 1 bit in q7
300 VRSHR.S32 q9,q9,#1 @ Truncating the 1 bit in q9
301 VRSHR.S32 q10,q10,#1 @ Truncating the 1 bit in q10
308 VADD.S32 q13,q7,q8 @ q13 = S1 + S2
309 VADD.S32 q1,q7,q10 @ q1 = S1 + S4
310 VADD.S32 q4,q8,q10 @ q4 = S2 + S4
311 VSUB.S32 q13,q13,q10 @ q13 = S1 + S2 - S4
312 VMUL.S32 q12,q1,d5[0] @ q12 = 29*S1 + 29*S4
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/
AMDGPULegalizerInfo.cpp
40 const LLT S32 = LLT::scalar(32); in AMDGPULegalizerInfo() local
58 setAction({G_ADD, S32}, Legal); in AMDGPULegalizerInfo()
59 setAction({G_ASHR, S32}, Legal); in AMDGPULegalizerInfo()
60 setAction({G_SUB, S32}, Legal); in AMDGPULegalizerInfo()
61 setAction({G_MUL, S32}, Legal); in AMDGPULegalizerInfo()
62 setAction({G_AND, S32}, Legal); in AMDGPULegalizerInfo()
63 setAction({G_OR, S32}, Legal); in AMDGPULegalizerInfo()
64 setAction({G_XOR, S32}, Legal); in AMDGPULegalizerInfo()
67 setAction({G_BITCAST, 1, S32}, Legal); in AMDGPULegalizerInfo()
69 setAction({G_BITCAST, S32}, Legal); in AMDGPULegalizerInfo()
[all …]
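Here S32 is not a typedef but a local LLT value naming GlobalISel's generic 32-bit scalar type ("s32"), and each setAction() call declares an opcode/type combination as directly selectable on the target. A heavily hedged sketch of that LLVM-7-era pattern; the header path, surrounding class and computeTables() call are assumptions, only the setAction()/LLT::scalar() usage is taken from the lines above:

    // Sketch only: the GlobalISel legality pattern followed above.
    #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
    using namespace llvm;

    struct SketchLegalizerInfo : public LegalizerInfo {
      SketchLegalizerInfo() {
        const LLT S32 = LLT::scalar(32);              // the generic "s32" type
        // 32-bit integer arithmetic/logic maps straight onto the target.
        setAction({TargetOpcode::G_ADD, S32}, LegalizeActions::Legal);
        setAction({TargetOpcode::G_AND, S32}, LegalizeActions::Legal);
        // Type index 1 of G_BITCAST (the source) may also be a 32-bit scalar.
        setAction({TargetOpcode::G_BITCAST, 1, S32}, LegalizeActions::Legal);
        computeTables();                              // freeze the rule tables
      }
    };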
