/external/llvm/unittests/ADT/

  MapVectorTest.cpp
      43  MapVector<int, int> MV;   in TEST() local
      46  R = MV.insert(std::make_pair(1, 2));   in TEST()
      47  ASSERT_EQ(R.first, MV.begin());   in TEST()
      52  R = MV.insert(std::make_pair(1, 3));   in TEST()
      53  ASSERT_EQ(R.first, MV.begin());   in TEST()
      58  R = MV.insert(std::make_pair(4, 5));   in TEST()
      59  ASSERT_NE(R.first, MV.end());   in TEST()
      64  EXPECT_EQ(MV.size(), 2u);   in TEST()
      65  EXPECT_EQ(MV[1], 2);   in TEST()
      66  EXPECT_EQ(MV[4], 5);   in TEST()
      [all …]

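A minimal sketch of the MapVector behaviour these assertions exercise (assuming the usual llvm/ADT/MapVector.h header; this is illustrative, not the unit test itself): insert() returns an (iterator, bool) pair, keeps the original value when the key already exists, and preserves insertion order.

    #include "llvm/ADT/MapVector.h"
    #include <cassert>
    #include <utility>

    int main() {
      llvm::MapVector<int, int> MV;
      auto R = MV.insert(std::make_pair(1, 2));
      assert(R.second && R.first == MV.begin());  // new key: inserted, iterator at the (only) element
      R = MV.insert(std::make_pair(1, 3));
      assert(!R.second && MV[1] == 2);            // duplicate key: old value kept
      R = MV.insert(std::make_pair(4, 5));
      assert(R.second && MV.size() == 2u);        // two distinct keys in total
      assert(MV[4] == 5);
      return 0;
    }
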
/external/swiftshader/third_party/llvm-7.0/llvm/unittests/ADT/

  MapVectorTest.cpp
      43  MapVector<int, int> MV;   in TEST() local
      46  R = MV.insert(std::make_pair(1, 2));   in TEST()
      47  ASSERT_EQ(R.first, MV.begin());   in TEST()
      52  R = MV.insert(std::make_pair(1, 3));   in TEST()
      53  ASSERT_EQ(R.first, MV.begin());   in TEST()
      58  R = MV.insert(std::make_pair(4, 5));   in TEST()
      59  ASSERT_NE(R.first, MV.end());   in TEST()
      64  EXPECT_EQ(MV.size(), 2u);   in TEST()
      65  EXPECT_EQ(MV[1], 2);   in TEST()
      66  EXPECT_EQ(MV[4], 5);   in TEST()
      [all …]

/external/libvpx/libvpx/vp9/encoder/

  vp9_mcomp.h
      38  MV ss_mv[8 * MAX_MVSEARCH_STEPS];  // Motion vector
      45  const MV *mv) {   in get_buf_from_mv()
      52  void vp9_set_mv_search_range(MvLimits *mv_limits, const MV *mv);
      53  int vp9_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
      57  int vp9_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
      58  const MV *center_mv, const vp9_variance_fn_ptr_t *vfp,
      60  int vp9_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
      61  const MV *center_mv, const uint8_t *second_pred,
      78  const MV *ref_mv);
      81  const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
      [all …]

  vp9_mcomp.c
      32  void vp9_set_mv_search_range(MvLimits *mv_limits, const MV *mv) {   in vp9_set_mv_search_range()
      53  const MV *ref_mv) {   in vp9_set_subpel_mv_search_range()
      80  static INLINE int mv_cost(const MV *mv, const int *joint_cost,   in mv_cost()
      88  int vp9_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,   in vp9_mv_bit_cost()
      90  const MV diff = { mv->row - ref->row, mv->col - ref->col };   in vp9_mv_bit_cost()
      95  static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost,   in mv_err_cost()
      98  const MV diff = { mv->row - ref->row, mv->col - ref->col };   in mv_err_cost()
      107  static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,   in mvsad_err_cost()
      109  const MV diff = { mv->row - ref->row, mv->col - ref->col };   in mvsad_err_cost()
      121  const MV ss_mvs[] = { { -len, 0 }, { len, 0 }, { 0, -len }, { 0, len } };   in vp9_init_dsmotion_compensation()
      [all …]

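The ss_mv table in vp9_mcomp.h and the four-offset ss_mvs initializer above belong to the diamond-search setup. Below is a rough sketch of that idea only: the helper name, the radius schedule (halving each step), and the use of std::vector are assumptions for illustration, not the body of vp9_init_dsmotion_compensation().

    #include <cstdint>
    #include <vector>

    // Illustrative MV layout matching libvpx's { row, col } pair.
    struct MV { int16_t row, col; };

    // Hypothetical helper: build a table of candidate offsets for a diamond
    // search, four sites per step, with the search radius halving each step.
    static std::vector<MV> build_diamond_sites(int max_first_step) {
      std::vector<MV> sites;
      for (int len = max_first_step; len > 0; len /= 2) {
        const MV step[4] = { { (int16_t)-len, 0 },
                             { (int16_t)len, 0 },
                             { 0, (int16_t)-len },
                             { 0, (int16_t)len } };
        sites.insert(sites.end(), step, step + 4);
      }
      return sites;
    }
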
  vp9_non_greedy_mv.h
      95  int64_t vp9_nb_mvs_inconsistency(const MV *mv, const int_mv *nb_full_mvs,
      98  void vp9_get_smooth_motion_field(const MV *search_mf,
      101  float alpha, int num_iters, MV *smooth_mf);
      105  const MV *search_mf,

  vp9_mbgraph.c
      25  static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, const MV *ref_mv,   in do_16x16_motion_iteration()
      26  MV *dst_mv, int mb_row,   in do_16x16_motion_iteration()
      34  MV ref_full;   in do_16x16_motion_iteration()
      77  static int do_16x16_motion_search(VP9_COMP *cpi, const MV *ref_mv,   in do_16x16_motion_search()
      82  MV tmp_mv;   in do_16x16_motion_search()
      102  MV zero_ref_mv = { 0, 0 }, tmp_mv;   in do_16x16_motion_search()
      162  const MV *prev_golden_ref_mv,   in update_mbgraph_mb_stats()
      223  MV gld_top_mv = { 0, 0 };   in update_mbgraph_frame_stats()
      244  MV gld_left_mv = gld_top_mv;   in update_mbgraph_frame_stats()

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/MemCpyOpt/

  form-memset.ll
      60  %struct.MV = type { i16, i16 }
      66  %left_mvd = alloca [8 x %struct.MV] ; <[8 x %struct.MV]*> [#uses=17]
      67  %up_mvd = alloca [8 x %struct.MV] ; <[8 x %struct.MV]*> [#uses=17]
      84  …%tmp43 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 0 ; <i16*> …
      86  …%tmp46 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 1 ; <i16*> …
      88  …%tmp57 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 0 ; <i16*> …
      90  …%tmp60 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 1 ; <i16*> …
      92  …%tmp71 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 0 ; <i16*> …
      94  …%tmp74 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 1 ; <i16*> …
      96  …%tmp85 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 4, i32 0 ; <i16*> …
      [all …]

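A rough C-level picture of the pattern this test feeds to MemCpyOpt (a sketch, not the test's actual source; the listing shows only the getelementptrs, so the zero stores and the function name below are assumptions): a run of adjacent 16-bit stores into an array of MV structs, which the pass can fold into a single wider memset.

    // Two 16-bit fields, mirroring %struct.MV = type { i16, i16 }.
    struct MV { short row, col; };

    void zero_tail_of_up_mvd(MV up_mvd[8]) {
      // One scalar store per field, fully unrolled as in the IR (the real
      // test continues through the remaining elements); MemCpyOpt can merge
      // the adjacent stores into one memset over the covered range.
      up_mvd[7].row = 0; up_mvd[7].col = 0;
      up_mvd[6].row = 0; up_mvd[6].col = 0;
      up_mvd[5].row = 0; up_mvd[5].col = 0;
      up_mvd[4].row = 0; up_mvd[4].col = 0;
    }
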
/external/llvm/test/Transforms/MemCpyOpt/

  form-memset.ll
      60  %struct.MV = type { i16, i16 }
      66  %left_mvd = alloca [8 x %struct.MV] ; <[8 x %struct.MV]*> [#uses=17]
      67  %up_mvd = alloca [8 x %struct.MV] ; <[8 x %struct.MV]*> [#uses=17]
      84  …%tmp43 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 0 ; <i16*> …
      86  …%tmp46 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 1 ; <i16*> …
      88  …%tmp57 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 0 ; <i16*> …
      90  …%tmp60 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 1 ; <i16*> …
      92  …%tmp71 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 0 ; <i16*> …
      94  …%tmp74 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 1 ; <i16*> …
      96  …%tmp85 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 4, i32 0 ; <i16*> …
      [all …]

/external/libvpx/libvpx/test/

  non_greedy_mv_test.cc
      19  MV **buffer_ptr) {   in read_in_mf()
      28  *buffer_ptr = (MV *)malloc((*rows_ptr) * (*cols_ptr) * sizeof(MV));   in read_in_mf()
      63  static void compare_mf(const MV *mf1, const MV *mf2, int rows, int cols,   in compare_mf()
      70  MV mv1 = mf1[idx];   in compare_mf()
      71  MV mv2 = mf2[idx];   in compare_mf()
      132  MV *search_mf = NULL;   in TEST()
      133  MV *smooth_mf = NULL;   in TEST()
      134  MV *estimation = NULL;   in TEST()
      135  MV *ground_truth = NULL;   in TEST()
      149  smooth_mf = (MV *)malloc(rows * cols * sizeof(MV));   in TEST()
      [all …]

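A sketch of one way to compare two rows-by-cols motion fields in the spirit of compare_mf() above; the listing does not show which metric the test actually reports, so the mean/max absolute difference used here is an assumption.

    #include <cstdint>
    #include <cstdlib>

    struct MV { int16_t row, col; };

    // Walk the rows*cols motion vectors and accumulate the largest and the
    // mean absolute per-component difference between the two fields.
    static void compare_mf(const MV *mf1, const MV *mf2, int rows, int cols,
                           double *mean_ptr, double *max_ptr) {
      double sum = 0.0, max_diff = 0.0;
      for (int idx = 0; idx < rows * cols; ++idx) {
        const double d = std::abs(mf1[idx].row - mf2[idx].row) +
                         std::abs(mf1[idx].col - mf2[idx].col);
        sum += d;
        if (d > max_diff) max_diff = d;
      }
      *mean_ptr = sum / (rows * cols);
      *max_ptr = max_diff;
    }
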
/external/libaom/libaom/av1/encoder/

  encodemv.h
      21  void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
      24  void av1_update_mv_stats(const MV *mv, const MV *ref, nmv_context *mvctx,
      33  void av1_encode_dv(aom_writer *w, const MV *mv, const MV *ref,
      46  static INLINE MV_JOINT_TYPE av1_get_mv_joint(const MV *mv) {   in av1_get_mv_joint()

  mcomp.c
      36  const MACROBLOCK *x, const MV *ref_mv) {   in init_mv_cost_params()
      59  const MACROBLOCK *x, BLOCK_SIZE bsize, const MV *ref_mv,   in av1_make_default_fullpel_ms_params()
      90  const MV *ref_mv, const int *cost_list) {   in av1_make_default_subpel_ms_params()
      124  void av1_set_mv_search_range(FullMvLimits *mv_limits, const MV *mv) {   in av1_set_mv_search_range()
      178  static INLINE int mv_cost(const MV *mv, const int *joint_cost,   in mv_cost()
      189  int av1_mv_bit_cost(const MV *mv, const MV *ref_mv, const int *mvjcost,   in av1_mv_bit_cost()
      191  const MV diff = { mv->row - ref_mv->row, mv->col - ref_mv->col };   in av1_mv_bit_cost()
      199  static INLINE int mv_err_cost(const MV *mv, const MV *ref_mv,   in mv_err_cost()
      202  const MV diff = { mv->row - ref_mv->row, mv->col - ref_mv->col };   in mv_err_cost()
      203  const MV abs_diff = { abs(diff.row), abs(diff.col) };   in mv_err_cost()
      [all …]

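A sketch of the MV rate-cost pattern that mv_cost(), av1_mv_bit_cost() and mv_err_cost() revolve around, reconstructed from the diff computation visible in the hits: the vector is costed relative to its reference, as a "joint" cost keyed on which components are non-zero plus a per-component table cost. The table layout (comp_cost[i] pointing at the centre of a signed-indexable range) is an assumption stated in the comments; nothing here is copied from mcomp.c.

    #include <cstdint>

    struct MV { int16_t row, col; };

    enum MV_JOINT_TYPE {
      MV_JOINT_ZERO = 0,    // both components zero
      MV_JOINT_HNZVZ = 1,   // horizontal (col) non-zero, vertical zero
      MV_JOINT_HZVNZ = 2,   // horizontal zero, vertical (row) non-zero
      MV_JOINT_HNZVNZ = 3,  // both components non-zero
    };

    static MV_JOINT_TYPE get_mv_joint(const MV &mv) {
      if (mv.row == 0) return mv.col == 0 ? MV_JOINT_ZERO : MV_JOINT_HNZVZ;
      return mv.col == 0 ? MV_JOINT_HZVNZ : MV_JOINT_HNZVNZ;
    }

    // joint_cost has one entry per MV_JOINT_TYPE; comp_cost[0]/comp_cost[1]
    // are assumed to point at the centre of tables that accept negative
    // component values as indices.
    static int mv_cost(const MV &mv, const MV &ref, const int *joint_cost,
                       const int *const comp_cost[2]) {
      const MV diff = { (int16_t)(mv.row - ref.row), (int16_t)(mv.col - ref.col) };
      return joint_cost[get_mv_joint(diff)] + comp_cost[0][diff.row] +
             comp_cost[1][diff.col];
    }
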
  mcomp.h
      64  const MV *ref_mv;
      73  int av1_mv_bit_cost(const MV *mv, const MV *ref_mv, const int *mvjcost,
      77  const MV *ref_mv, const aom_variance_fn_ptr_t *vfp);
      159  const MV *ref_mv,
      204  void av1_set_mv_search_range(FullMvLimits *mv_limits, const MV *mv);
      211  const MV *ref_mv);
      273  const MV *ref_mv, const int *cost_list);
      277  MV start_mv, MV *bestmv, int *distortion,
      302  const MV *ref_mv) {   in av1_set_subpel_mv_search_range()
      320  MV mv) {   in av1_is_subpelmv_in_range()

  encodemv.c
      63  void av1_update_mv_stats(const MV *mv, const MV *ref, nmv_context *mvctx,   in av1_update_mv_stats()
      65  const MV diff = { mv->row - ref->row, mv->col - ref->col };   in av1_update_mv_stats()
      176  void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,   in av1_encode_mv()
      178  const MV diff = { mv->row - ref->row, mv->col - ref->col };   in av1_encode_mv()
      201  void av1_encode_dv(aom_writer *w, const MV *mv, const MV *ref,   in av1_encode_dv()
      208  const MV diff = { mv->row - ref->row, mv->col - ref->col };   in av1_encode_dv()

  mv_prec.c
      124  static AOM_INLINE void keep_one_mv_stat(MV_STATS *mv_stats, const MV *ref_mv,   in keep_one_mv_stat()
      125  const MV *cur_mv, const AV1_COMP *cpi) {   in keep_one_mv_stat()
      133  const MV diff = { cur_mv->row - ref_mv->row, cur_mv->col - ref_mv->col };   in keep_one_mv_stat()
      136  const MV hp_diff = diff;   in keep_one_mv_stat()
      138  const MV truncated_diff = { (diff.row / 2) * 2, (diff.col / 2) * 2 };   in keep_one_mv_stat()
      139  const MV lp_diff = use_hp ? truncated_diff : diff;   in keep_one_mv_stat()
      205  const MV ref_mv =   in collect_mv_stats_b()
      207  const MV cur_mv = mbmi->mv[ref_idx].as_mv;   in collect_mv_stats_b()
      216  const MV ref_mv =   in collect_mv_stats_b()
      218  const MV cur_mv = mbmi->mv[ref_idx].as_mv;   in collect_mv_stats_b()

/external/libvpx/libvpx/vp9/common/

  vp9_mv.h
      27  } MV;   typedef
      31  MV as_mv;
      39  static INLINE int is_zero_mv(const MV *mv) {   in is_zero_mv()
      43  static INLINE int is_equal_mv(const MV *a, const MV *b) {   in is_equal_mv()
      47  static INLINE void clamp_mv(MV *mv, int min_col, int max_col, int min_row,   in clamp_mv()

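Minimal re-implementations of the small helpers named in these hits, for orientation only; the actual libvpx bodies may differ (is_zero_mv, for instance, can be written as a single 32-bit compare), and the clamp16 helper is made up for this sketch.

    #include <cstdint>

    // Two 16-bit motion-vector components, matching the layout closed by
    // the "} MV;" hit above.
    struct MV { int16_t row, col; };

    static inline int is_zero_mv(const MV *mv) {
      return mv->row == 0 && mv->col == 0;
    }

    static inline int is_equal_mv(const MV *a, const MV *b) {
      return a->row == b->row && a->col == b->col;
    }

    static inline int16_t clamp16(int value, int low, int high) {
      return (int16_t)(value < low ? low : (value > high ? high : value));
    }

    static inline void clamp_mv(MV *mv, int min_col, int max_col, int min_row,
                                int max_row) {
      mv->col = clamp16(mv->col, min_col, max_col);
      mv->row = clamp16(mv->row, min_row, max_row);
    }
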
  vp9_reconinter.c
      25  const MV *src_mv, const struct scale_factors *sf, int w, int h, int ref,   in vp9_highbd_build_inter_predictor()
      29  const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,   in vp9_highbd_build_inter_predictor()
      44  int dst_stride, const MV *src_mv,   in vp9_build_inter_predictor()
      49  const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,   in vp9_build_inter_predictor()
      65  static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) {   in mi_mv_pred_q4()
      66  MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row +   in mi_mv_pred_q4()
      81  static MV mi_mv_pred_q2(const MODE_INFO *mi, int idx, int block0, int block1) {   in mi_mv_pred_q2()
      82  MV res = { round_mv_comp_q2(mi->bmi[block0].as_mv[idx].as_mv.row +   in mi_mv_pred_q2()
      90  MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, int bw,   in clamp_mv_to_umv_border_sb()
      99  MV clamped_mv = { (short)(src_mv->row * (1 << (1 - ss_y))),   in clamp_mv_to_umv_border_sb()
      [all …]

  vp9_reconinter.h
      45  MV average_split_mvs(const struct macroblockd_plane *pd, const MODE_INFO *mi,
      48  MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, int bw,
      64  int dst_stride, const MV *src_mv,
      72  const MV *src_mv, const struct scale_factors *sf, int w, int h, int ref,

/external/libaom/libaom/av1/common/

  mv.h
      43  } MV;   typedef
      45  static const MV kZeroMv = { 0, 0 };
      50  MV as_mv;
      75  static AOM_INLINE FULLPEL_MV get_fullmv_from_mv(const MV *subpel_mv) {   in get_fullmv_from_mv()
      81  static AOM_INLINE MV get_mv_from_fullmv(const FULLPEL_MV *full_mv) {   in get_mv_from_fullmv()
      82  const MV subpel_mv = { (int16_t)GET_MV_SUBPEL(full_mv->row),   in get_mv_from_fullmv()
      227  static INLINE void integer_mv_precision(MV *mv) {   in integer_mv_precision()
      332  static INLINE int is_zero_mv(const MV *mv) {   in is_zero_mv()
      336  static INLINE int is_equal_mv(const MV *a, const MV *b) {   in is_equal_mv()
      340  static INLINE void clamp_mv(MV *mv, const SubpelMvLimits *mv_limits) {   in clamp_mv()

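A sketch of the full-pel/sub-pel conversions around get_fullmv_from_mv() and get_mv_from_fullmv(), assuming the usual 1/8-pel MV units; the GET_MV_SUBPEL/GET_MV_RAWPEL macro bodies below are reconstructed from that convention, not copied from mv.h.

    #include <cstdint>

    struct MV { int16_t row, col; };
    struct FULLPEL_MV { int16_t row, col; };

    #define GET_MV_SUBPEL(x) ((x) * 8)                      // full pel -> 1/8 pel
    #define GET_MV_RAWPEL(x) (((x) + 3 + ((x) >= 0)) >> 3)  // 1/8 pel -> nearest full pel

    static inline FULLPEL_MV get_fullmv_from_mv(const MV *subpel_mv) {
      const FULLPEL_MV full_mv = { (int16_t)GET_MV_RAWPEL(subpel_mv->row),
                                   (int16_t)GET_MV_RAWPEL(subpel_mv->col) };
      return full_mv;
    }

    static inline MV get_mv_from_fullmv(const FULLPEL_MV *full_mv) {
      const MV subpel_mv = { (int16_t)GET_MV_SUBPEL(full_mv->row),
                             (int16_t)GET_MV_SUBPEL(full_mv->col) };
      return subpel_mv;
    }
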
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/GlobalISel/

  legalize-merge-values.mir
      11  ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
      12  ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64)
      26  ; CHECK: [[MV:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
      27  ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](<2 x s32>)
      42  ; CHECK: [[MV:%[0-9]+]]:_(<3 x s32>) = G_MERGE_VALUES [[C]](s32), [[C1]](s32), [[C2]](s32)
      43  ; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[MV]](<3 x s32>)
      58  ; CHECK: [[MV:%[0-9]+]]:_(<2 x s64>) = G_MERGE_VALUES [[C]](s64), [[C1]](s64)
      59  ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](<2 x s64>)
      75  …; CHECK: [[MV:%[0-9]+]]:_(<4 x s64>) = G_MERGE_VALUES [[C]](s64), [[C1]](s64), [[C2]](s64), [[C3]]…
      76  ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV]](<4 x s64>)

  legalize-extract-vector-elt.mir
      61  …; CHECK: [[MV:%[0-9]+]]:_(<5 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32),…
      63  ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<5 x s32>), [[C]](s32)
      80  …; CHECK: [[MV:%[0-9]+]]:_(<6 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32),…
      82  ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<6 x s32>), [[C]](s32)
      99  …; CHECK: [[MV:%[0-9]+]]:_(<7 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32),…
      101  ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<7 x s32>), [[C]](s32)
      118  …; CHECK: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32),…
      120  ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<8 x s32>), [[C]](s32)
      137  …; CHECK: [[MV:%[0-9]+]]:_(<16 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32)…
      139  ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<16 x s32>), [[C]](s32)

  regbankselect-merge-values.mir
      16  ; CHECK: [[MV:%[0-9]+]]:sgpr(s64) = G_MERGE_VALUES [[EXTRACT]](s32), [[EXTRACT1]](s32)
      17  ; CHECK: S_ENDPGM implicit [[MV]](s64)
      36  ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[EXTRACT]](s32), [[EXTRACT1]](s32)
      37  ; CHECK: S_ENDPGM implicit [[MV]](s64)

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/GlobalISel/

  legalize-add-v256.mir
      52  ; SSE2: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
      53  ; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
      54  ; SSE2: $ymm0 = COPY [[MV]](<32 x s8>)
      55  ; AVX1: $ymm0 = COPY [[MV]](<32 x s8>)
      86  ; SSE2: [[MV:%[0-9]+]]:_(<16 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
      91  ; AVX1: [[MV:%[0-9]+]]:_(<16 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
      92  ; SSE2: $ymm0 = COPY [[MV]](<16 x s16>)
      93  ; AVX1: $ymm0 = COPY [[MV]](<16 x s16>)
      124  ; SSE2: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
      125  ; SSE2: $ymm0 = COPY [[MV]](<8 x s32>)
      [all …]

  legalize-add-v512.mir
      54  …; AVX1: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>), [[AD…
      55  ; AVX1: $zmm0 = COPY [[MV]](<64 x s8>)
      60  ; AVX512F: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<32 x s8>), [[ADD1]](<32 x s8>)
      61  ; AVX512F: $zmm0 = COPY [[MV]](<64 x s8>)
      94  …; AVX1: [[MV:%[0-9]+]]:_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>), [[A…
      95  ; AVX1: $zmm0 = COPY [[MV]](<32 x s16>)
      100  …; AVX512F: [[MV:%[0-9]+]]:_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<16 x s16>), [[ADD1]](<16 x s16>)
      101  ; AVX512F: $zmm0 = COPY [[MV]](<32 x s16>)
      134  …; AVX1: [[MV:%[0-9]+]]:_(<16 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>), [[A…
      135  ; AVX1: $zmm0 = COPY [[MV]](<16 x s32>)
      [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/GlobalISel/

  legalize-combines.mir
      24  ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[ADD]](s32)
      25  ; CHECK: [[EXTRACT:%[0-9]+]]:_(s1) = G_EXTRACT [[MV]](s64), 0
      26  ; CHECK: [[EXTRACT1:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
      106  ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[ADD]](s32)
      107  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[MV]](s64)

/external/libvpx/libvpx/vp8/common/

  mv.h
      22  } MV;   typedef
      26  MV as_mv;