/external/llvm/unittests/ADT/ |
D | MapVectorTest.cpp |
      43  MapVector<int, int> MV;  in TEST() local
      46  R = MV.insert(std::make_pair(1, 2));  in TEST()
      47  ASSERT_EQ(R.first, MV.begin());  in TEST()
      52  R = MV.insert(std::make_pair(1, 3));  in TEST()
      53  ASSERT_EQ(R.first, MV.begin());  in TEST()
      58  R = MV.insert(std::make_pair(4, 5));  in TEST()
      59  ASSERT_NE(R.first, MV.end());  in TEST()
      64  EXPECT_EQ(MV.size(), 2u);  in TEST()
      65  EXPECT_EQ(MV[1], 2);  in TEST()
      66  EXPECT_EQ(MV[4], 5);  in TEST()
      [all …]
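These hits exercise llvm::MapVector's insertion contract: insert() returns an (iterator, inserted) pair, inserting an existing key leaves the stored value untouched, and iteration order follows insertion order. A minimal standalone sketch of that same behavior, with plain asserts standing in for the gtest macros (it assumes LLVM's ADT headers are on the include path):

```cpp
// Minimal sketch of the MapVector insertion behavior exercised by the test
// above. Requires LLVM's ADT headers; asserts stand in for gtest macros.
#include "llvm/ADT/MapVector.h"
#include <cassert>
#include <utility>

int main() {
  llvm::MapVector<int, int> MV;

  // First insert of key 1 succeeds and becomes the first element.
  auto R = MV.insert(std::make_pair(1, 2));
  assert(R.first == MV.begin() && R.second);

  // A second insert with the same key is a no-op: the old value survives.
  R = MV.insert(std::make_pair(1, 3));
  assert(R.first == MV.begin() && !R.second);

  // A new key is appended; insertion order is preserved.
  R = MV.insert(std::make_pair(4, 5));
  assert(R.first != MV.end() && R.second);

  assert(MV.size() == 2u && MV[1] == 2 && MV[4] == 5);
  return 0;
}
```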
|
/external/swiftshader/third_party/llvm-7.0/llvm/unittests/ADT/ |
D | MapVectorTest.cpp |
      43  MapVector<int, int> MV;  in TEST() local
      46  R = MV.insert(std::make_pair(1, 2));  in TEST()
      47  ASSERT_EQ(R.first, MV.begin());  in TEST()
      52  R = MV.insert(std::make_pair(1, 3));  in TEST()
      53  ASSERT_EQ(R.first, MV.begin());  in TEST()
      58  R = MV.insert(std::make_pair(4, 5));  in TEST()
      59  ASSERT_NE(R.first, MV.end());  in TEST()
      64  EXPECT_EQ(MV.size(), 2u);  in TEST()
      65  EXPECT_EQ(MV[1], 2);  in TEST()
      66  EXPECT_EQ(MV[4], 5);  in TEST()
      [all …]
|
/external/libaom/libaom/av1/encoder/ |
D | mcomp.h |
      42  MV mv;
      53  MV coord;
      60  void av1_set_mv_search_range(MvLimits *mv_limits, const MV *mv);
      62  int av1_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
      66  int av1_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
      67  const MV *center_mv, const aom_variance_fn_ptr_t *vfp,
      69  int av1_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
      70  const MV *center_mv, const uint8_t *second_pred,
      72  int av1_get_mvpred_mask_var(const MACROBLOCK *x, const MV *best_mv,
      73  const MV *center_mv, const uint8_t *second_pred,
      [all …]
|
D | mcomp.c |
      39  const MV *mv) {  in get_buf_from_mv()
      43  void av1_set_mv_search_range(MvLimits *mv_limits, const MV *mv) {  in av1_set_mv_search_range()
      64  const MV *ref_mv) {  in set_subpel_mv_search_range()
      88  static INLINE int mv_cost(const MV *mv, const int *joint_cost,  in mv_cost()
      94  int av1_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,  in av1_mv_bit_cost()
      96  const MV diff = { mv->row - ref->row, mv->col - ref->col };  in av1_mv_bit_cost()
      101  static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost,  in mv_err_cost()
      104  const MV diff = { mv->row - ref->row, mv->col - ref->col };  in mv_err_cost()
      113  static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,  in mvsad_err_cost()
      115  const MV diff = { (mv->row - ref->row) * 8, (mv->col - ref->col) * 8 };  in mvsad_err_cost()
      [all …]
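The mcomp.c hits all revolve around one pattern: the rate cost of a candidate MV is computed on its difference from the predicted/reference MV, and the SAD-stage variant scales that difference by 8 because it operates on full-pel vectors. The sketch below reproduces only that difference-and-scale pattern; the cost function itself is a placeholder (the real av1_mv_bit_cost() weights the difference with joint and per-component rate tables, omitted here), and the function names are mine.

```cpp
// Illustration of the "cost the MV difference" pattern from mv_cost(),
// av1_mv_bit_cost() and mvsad_err_cost() above. The L1 cost model is a
// stand-in; the real code uses trained joint/component rate tables.
#include <cstdint>
#include <cstdlib>

struct MV {        // mirrors the codec's { int16_t row, col } layout
  int16_t row;
  int16_t col;
};

// Placeholder rate model applied to the row/col difference (1/8-pel units).
int mv_diff_cost(const MV *mv, const MV *ref) {
  const MV diff = { static_cast<int16_t>(mv->row - ref->row),
                    static_cast<int16_t>(mv->col - ref->col) };
  return std::abs(diff.row) + std::abs(diff.col);
}

// SAD-stage variant: the difference is scaled by 8 back to 1/8-pel units,
// matching the "* 8" visible in mvsad_err_cost() above.
int mvsad_diff_cost(const MV *full_pel_mv, const MV *full_pel_ref) {
  const MV diff = {
    static_cast<int16_t>((full_pel_mv->row - full_pel_ref->row) * 8),
    static_cast<int16_t>((full_pel_mv->col - full_pel_ref->col) * 8)
  };
  return std::abs(diff.row) + std::abs(diff.col);
}

int main() {
  const MV best = { -3, 18 };
  const MV pred = { 0, 16 };
  return mv_diff_cost(&best, &pred);  // |(-3) - 0| + |18 - 16| = 5
}
```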
|
D | encodemv.h |
      21  void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
      30  void av1_encode_dv(aom_writer *w, const MV *mv, const MV *ref,
      43  static INLINE MV_JOINT_TYPE av1_get_mv_joint(const MV *mv) {  in av1_get_mv_joint()
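av1_get_mv_joint() (line 43 above) classifies an MV by which of its two components are non-zero; the joint type then selects the entropy-coding context used when the vector is written. Below is a hedged reimplementation of that classification: the MV_JOINT_* names follow the libaom/libvpx convention, but treat the exact enumerator values as an assumption rather than a quote from the header.

```cpp
// Sketch of the joint-type classification behind av1_get_mv_joint() /
// vp9_get_mv_joint(). Enumerator names follow the codec convention; the
// numeric values are assumed here, not copied from the header.
#include <cstdint>

struct MV { int16_t row, col; };

enum MV_JOINT_TYPE {
  MV_JOINT_ZERO = 0,     // both components zero
  MV_JOINT_HNZVZ = 1,    // horizontal non-zero, vertical zero
  MV_JOINT_HZVNZ = 2,    // horizontal zero, vertical non-zero
  MV_JOINT_HVNZVZ = 3    // both components non-zero
};

MV_JOINT_TYPE get_mv_joint(const MV *mv) {
  if (mv->row == 0) return mv->col == 0 ? MV_JOINT_ZERO : MV_JOINT_HNZVZ;
  return mv->col == 0 ? MV_JOINT_HZVNZ : MV_JOINT_HVNZVZ;
}

int main() {
  const MV mv = { 0, -4 };
  return get_mv_joint(&mv);  // MV_JOINT_HNZVZ: only the column is non-zero
}
```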
|
D | encodemv.c |
      140  void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,  in av1_encode_mv()
      142  const MV diff = { mv->row - ref->row, mv->col - ref->col };  in av1_encode_mv()
      162  void av1_encode_dv(aom_writer *w, const MV *mv, const MV *ref,  in av1_encode_dv()
      169  const MV diff = { mv->row - ref->row, mv->col - ref->col };  in av1_encode_dv()
|
D | mbgraph.c |
      27  static unsigned int do_16x16_motion_iteration(AV1_COMP *cpi, const MV *ref_mv,  in do_16x16_motion_iteration()
      35  MV ref_full;  in do_16x16_motion_iteration()
      84  static int do_16x16_motion_search(AV1_COMP *cpi, const MV *ref_mv, int mb_row,  in do_16x16_motion_search()
      89  MV best_mv;  in do_16x16_motion_search()
      108  MV zero_ref_mv = kZeroMv;  in do_16x16_motion_search()
      170  const MV *prev_golden_ref_mv,  in update_mbgraph_mb_stats()
      231  MV gld_top_mv = kZeroMv;  in update_mbgraph_frame_stats()
      249  MV gld_left_mv = gld_top_mv;  in update_mbgraph_frame_stats()
|
/external/libvpx/libvpx/vp9/encoder/ |
D | vp9_mcomp.h |
      35  MV ss_mv[8 * MAX_MVSEARCH_STEPS];  // Motion vector
      44  void vp9_set_mv_search_range(MvLimits *mv_limits, const MV *mv);
      45  int vp9_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
      49  int vp9_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
      50  const MV *center_mv, const vp9_variance_fn_ptr_t *vfp,
      52  int vp9_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
      53  const MV *center_mv, const uint8_t *second_pred,
      70  const MV *ref_mv);
      73  const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
      88  typedef int (*vp9_full_search_fn_t)(const MACROBLOCK *x, const MV *ref_mv,
      [all …]
|
D | vp9_mcomp.c |
      33  const MV *mv) {  in get_buf_from_mv()
      37  void vp9_set_mv_search_range(MvLimits *mv_limits, const MV *mv) {  in vp9_set_mv_search_range()
      58  const MV *ref_mv) {  in vp9_set_subpel_mv_search_range()
      85  static INLINE int mv_cost(const MV *mv, const int *joint_cost,  in mv_cost()
      93  int vp9_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,  in vp9_mv_bit_cost()
      95  const MV diff = { mv->row - ref->row, mv->col - ref->col };  in vp9_mv_bit_cost()
      100  static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost,  in mv_err_cost()
      103  const MV diff = { mv->row - ref->row, mv->col - ref->col };  in mv_err_cost()
      112  static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,  in mvsad_err_cost()
      114  const MV diff = { mv->row - ref->row, mv->col - ref->col };  in mvsad_err_cost()
      [all …]
|
D | vp9_mbgraph.c |
      25  static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, const MV *ref_mv,  in do_16x16_motion_iteration()
      26  MV *dst_mv, int mb_row,  in do_16x16_motion_iteration()
      34  MV ref_full;  in do_16x16_motion_iteration()
      77  static int do_16x16_motion_search(VP9_COMP *cpi, const MV *ref_mv,  in do_16x16_motion_search()
      82  MV tmp_mv;  in do_16x16_motion_search()
      102  MV zero_ref_mv = { 0, 0 }, tmp_mv;  in do_16x16_motion_search()
      162  const MV *prev_golden_ref_mv,  in update_mbgraph_mb_stats()
      223  MV gld_top_mv = { 0, 0 };  in update_mbgraph_frame_stats()
      244  MV gld_left_mv = gld_top_mv;  in update_mbgraph_frame_stats()
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/MemCpyOpt/ |
D | form-memset.ll |
      60  %struct.MV = type { i16, i16 }
      66  %left_mvd = alloca [8 x %struct.MV] ; <[8 x %struct.MV]*> [#uses=17]
      67  %up_mvd = alloca [8 x %struct.MV] ; <[8 x %struct.MV]*> [#uses=17]
      84  …%tmp43 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 0 ; <i16*> …
      86  …%tmp46 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 1 ; <i16*> …
      88  …%tmp57 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 0 ; <i16*> …
      90  …%tmp60 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 1 ; <i16*> …
      92  …%tmp71 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 0 ; <i16*> …
      94  …%tmp74 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 1 ; <i16*> …
      96  …%tmp85 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 4, i32 0 ; <i16*> …
      [all …]
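For context: form-memset.ll is MemCpyOpt's regression test for merging runs of adjacent scalar stores into memset calls, and the hits above are its two stack arrays of eight { i16, i16 } motion vectors plus the per-field getelementptrs through which they are initialized. The C++ below is only a rough reconstruction of that source-level shape; in the actual test the stores are already fully unrolled in the IR, and the loop here merely stands in for them.

```cpp
// Rough, illustrative reconstruction of the pattern form-memset.ll checks:
// field-by-field zeroing of small MV arrays that the optimizer is expected
// to collapse into one memset per array. Not the original test source.
#include <cstdint>

struct MV { int16_t row, col; };  // matches %struct.MV = type { i16, i16 }

// In the .ll test the equivalent stores are fully unrolled; this loop is a
// stand-in for that sequence of adjacent i16 stores.
void default_mvd(MV left_mvd[8], MV up_mvd[8]) {
  for (int i = 0; i < 8; ++i) {
    left_mvd[i].row = 0;
    left_mvd[i].col = 0;
    up_mvd[i].row = 0;
    up_mvd[i].col = 0;
  }
}

int main() {
  MV left[8], up[8];
  default_mvd(left, up);
  return left[7].row + up[7].col;  // 0; keeps the arrays observable
}
```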
|
/external/llvm/test/Transforms/MemCpyOpt/ |
D | form-memset.ll |
      60  %struct.MV = type { i16, i16 }
      66  %left_mvd = alloca [8 x %struct.MV] ; <[8 x %struct.MV]*> [#uses=17]
      67  %up_mvd = alloca [8 x %struct.MV] ; <[8 x %struct.MV]*> [#uses=17]
      84  …%tmp43 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 0 ; <i16*> …
      86  …%tmp46 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 1 ; <i16*> …
      88  …%tmp57 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 0 ; <i16*> …
      90  …%tmp60 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 1 ; <i16*> …
      92  …%tmp71 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 0 ; <i16*> …
      94  …%tmp74 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 1 ; <i16*> …
      96  …%tmp85 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 4, i32 0 ; <i16*> …
      [all …]
|
/external/swiftshader/third_party/LLVM/test/Transforms/MemCpyOpt/ |
D | form-memset.ll |
      61  %struct.MV = type { i16, i16 }
      66  %left_mvd = alloca [8 x %struct.MV] ; <[8 x %struct.MV]*> [#uses=17]
      67  %up_mvd = alloca [8 x %struct.MV] ; <[8 x %struct.MV]*> [#uses=17]
      84  %tmp43 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 0 ; <i16*> [#uses=1]
      86  %tmp46 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 1 ; <i16*> [#uses=1]
      88  %tmp57 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 0 ; <i16*> [#uses=1]
      90  %tmp60 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 1 ; <i16*> [#uses=1]
      92  %tmp71 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 0 ; <i16*> [#uses=1]
      94  %tmp74 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 1 ; <i16*> [#uses=1]
      96  %tmp85 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 4, i32 0 ; <i16*> [#uses=1]
      [all …]
|
/external/libvpx/libvpx/vp9/common/ |
D | vp9_mv.h |
      25  } MV;  typedef
      29  MV as_mv;
      37  static INLINE int is_zero_mv(const MV *mv) {  in is_zero_mv()
      41  static INLINE int is_equal_mv(const MV *a, const MV *b) {  in is_equal_mv()
      45  static INLINE void clamp_mv(MV *mv, int min_col, int max_col, int min_row,  in clamp_mv()
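The vp9_mv.h helpers are tiny: two predicates on the row/col pair and a component-wise range clamp. The sketch below restates them with the obvious implementations implied by their signatures; the bodies are a reconstruction (the real is_zero_mv(), for instance, may compare the packed 32-bit representation instead), so treat them as logically equivalent rather than verbatim.

```cpp
// Reconstructed bodies for the helpers declared in vp9_mv.h above.
// Logically equivalent sketches, not the exact upstream source.
#include <cstdint>

struct MV { int16_t row, col; };

int is_zero_mv(const MV *mv) { return mv->row == 0 && mv->col == 0; }

int is_equal_mv(const MV *a, const MV *b) {
  return a->row == b->row && a->col == b->col;
}

static int16_t clamp16(int value, int low, int high) {
  return static_cast<int16_t>(value < low ? low
                                          : (value > high ? high : value));
}

// Clamp each component into its allowed search range, as used to keep
// candidate vectors inside the frame / UMV borders.
void clamp_mv(MV *mv, int min_col, int max_col, int min_row, int max_row) {
  mv->col = clamp16(mv->col, min_col, max_col);
  mv->row = clamp16(mv->row, min_row, max_row);
}

int main() {
  MV mv = { 700, -900 };
  clamp_mv(&mv, -512, 511, -512, 511);  // mv becomes { 511, -512 }
  return is_zero_mv(&mv);               // 0
}
```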
|
D | vp9_reconinter.c |
      25  const MV *src_mv, const struct scale_factors *sf, int w, int h, int ref,  in vp9_highbd_build_inter_predictor()
      29  const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,  in vp9_highbd_build_inter_predictor()
      44  int dst_stride, const MV *src_mv,  in vp9_build_inter_predictor()
      49  const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,  in vp9_build_inter_predictor()
      65  static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) {  in mi_mv_pred_q4()
      66  MV res = {  in mi_mv_pred_q4()
      81  static MV mi_mv_pred_q2(const MODE_INFO *mi, int idx, int block0, int block1) {  in mi_mv_pred_q2()
      82  MV res = { round_mv_comp_q2(mi->bmi[block0].as_mv[idx].as_mv.row +  in mi_mv_pred_q2()
      90  MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, int bw,  in clamp_mv_to_umv_border_sb()
      99  MV clamped_mv = { src_mv->row * (1 << (1 - ss_y)),  in clamp_mv_to_umv_border_sb()
      [all …]
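Two unit conversions recur in the vp9_reconinter.c hits: an MV is doubled when it is not already in q4 (1/16-pel) units before interpolation, and a luma MV is rescaled by (1 << (1 - ss)) for the plane it is applied to. The sketch below reproduces just that arithmetic; the function names and the q3/q4 reading of the "* 2" are my own interpretation of the snippets, not taken from the header.

```cpp
// The two MV unit conversions visible in vp9_build_inter_predictor() and
// clamp_mv_to_umv_border_sb() above. Function names and comments are
// interpretive; only the arithmetic is taken from the snippets.
#include <cstdint>

struct MV { int16_t row, col; };

// Double the vector unless it is already in q4 units, as in
// "is_q4 ? src_mv->row : src_mv->row * 2".
MV to_q4(const MV *src_mv, int is_q4) {
  const MV mv_q4 = {
    static_cast<int16_t>(is_q4 ? src_mv->row : src_mv->row * 2),
    static_cast<int16_t>(is_q4 ? src_mv->col : src_mv->col * 2)
  };
  return mv_q4;
}

// Per-plane rescale: the shift yields x2 for a non-subsampled plane
// (ss == 0) and x1 for a subsampled one (ss == 1), matching
// "src_mv->row * (1 << (1 - ss_y))".
MV scale_for_plane(const MV *src_mv, int ss_x, int ss_y) {
  const MV scaled = {
    static_cast<int16_t>(src_mv->row * (1 << (1 - ss_y))),
    static_cast<int16_t>(src_mv->col * (1 << (1 - ss_x)))
  };
  return scaled;
}

int main() {
  const MV mv = { 5, -7 };                      // 1/8-pel components
  const MV q4 = to_q4(&mv, /*is_q4=*/0);        // { 10, -14 }
  const MV luma = scale_for_plane(&mv, 0, 0);   // { 10, -14 }
  return q4.row + luma.col;                     // 10 + (-14) = -4
}
```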
|
D | vp9_reconinter.h |
      45  MV average_split_mvs(const struct macroblockd_plane *pd, const MODE_INFO *mi,
      48  MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, int bw,
      64  int dst_stride, const MV *src_mv,
      72  const MV *src_mv, const struct scale_factors *sf, int w, int h, int ref,
|
D | vp9_entropymv.h |
      30  static INLINE int use_mv_hp(const MV *ref) {  in use_mv_hp()
      104  static INLINE MV_JOINT_TYPE vp9_get_mv_joint(const MV *mv) {  in vp9_get_mv_joint()
      130  void vp9_inc_mv(const MV *mv, nmv_context_counts *counts);
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/GlobalISel/ |
D | legalize-add-v256.mir |
      52  ; SSE2: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
      53  ; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
      54  ; SSE2: $ymm0 = COPY [[MV]](<32 x s8>)
      55  ; AVX1: $ymm0 = COPY [[MV]](<32 x s8>)
      86  ; SSE2: [[MV:%[0-9]+]]:_(<16 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
      91  ; AVX1: [[MV:%[0-9]+]]:_(<16 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
      92  ; SSE2: $ymm0 = COPY [[MV]](<16 x s16>)
      93  ; AVX1: $ymm0 = COPY [[MV]](<16 x s16>)
      124  ; SSE2: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
      125  ; SSE2: $ymm0 = COPY [[MV]](<8 x s32>)
      [all …]
|
D | legalize-add-v512.mir |
      54  …; AVX1: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>), [[AD…
      55  ; AVX1: $zmm0 = COPY [[MV]](<64 x s8>)
      60  ; AVX512F: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<32 x s8>), [[ADD1]](<32 x s8>)
      61  ; AVX512F: $zmm0 = COPY [[MV]](<64 x s8>)
      94  …; AVX1: [[MV:%[0-9]+]]:_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>), [[A…
      95  ; AVX1: $zmm0 = COPY [[MV]](<32 x s16>)
      100  … ; AVX512F: [[MV:%[0-9]+]]:_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<16 x s16>), [[ADD1]](<16 x s16>)
      101  ; AVX512F: $zmm0 = COPY [[MV]](<32 x s16>)
      134  …; AVX1: [[MV:%[0-9]+]]:_(<16 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>), [[A…
      135  ; AVX1: $zmm0 = COPY [[MV]](<16 x s32>)
      [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/GlobalISel/ |
D | legalize-merge-values.mir |
      11  ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
      12  ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64)
      26  ; CHECK: [[MV:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
      27  ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](<2 x s32>)
      42  ; CHECK: [[MV:%[0-9]+]]:_(<3 x s32>) = G_MERGE_VALUES [[C]](s32), [[C1]](s32), [[C2]](s32)
      43  ; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[MV]](<3 x s32>)
      58  ; CHECK: [[MV:%[0-9]+]]:_(<2 x s64>) = G_MERGE_VALUES [[C]](s64), [[C1]](s64)
      59  ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](<2 x s64>)
      75  …; CHECK: [[MV:%[0-9]+]]:_(<4 x s64>) = G_MERGE_VALUES [[C]](s64), [[C1]](s64), [[C2]](s64), [[C3]]…
      76  ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV]](<4 x s64>)
|
D | legalize-extract-vector-elt.mir |
      61  …; CHECK: [[MV:%[0-9]+]]:_(<5 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32),…
      63  ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<5 x s32>), [[C]](s32)
      80  …; CHECK: [[MV:%[0-9]+]]:_(<6 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32),…
      82  ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<6 x s32>), [[C]](s32)
      99  …; CHECK: [[MV:%[0-9]+]]:_(<7 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32),…
      101  ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<7 x s32>), [[C]](s32)
      118  …; CHECK: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32),…
      120  ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<8 x s32>), [[C]](s32)
      137  …; CHECK: [[MV:%[0-9]+]]:_(<16 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32)…
      139  ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<16 x s32>), [[C]](s32)
|
D | regbankselect-merge-values.mir |
      16  ; CHECK: [[MV:%[0-9]+]]:sgpr(s64) = G_MERGE_VALUES [[EXTRACT]](s32), [[EXTRACT1]](s32)
      17  ; CHECK: S_ENDPGM implicit [[MV]](s64)
      36  ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[EXTRACT]](s32), [[EXTRACT1]](s32)
      37  ; CHECK: S_ENDPGM implicit [[MV]](s64)
|
/external/libaom/libaom/av1/common/ |
D | mv.h |
      28  } MV;  typedef
      30  static const MV kZeroMv = { 0, 0 };
      34  MV as_mv;
      178  static INLINE void integer_mv_precision(MV *mv) {  in integer_mv_precision()
      283  static INLINE int is_zero_mv(const MV *mv) {  in is_zero_mv()
      287  static INLINE int is_equal_mv(const MV *a, const MV *b) {  in is_equal_mv()
      291  static INLINE void clamp_mv(MV *mv, int min_col, int max_col, int min_row,  in clamp_mv()
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/GlobalISel/ |
D | legalize-combines.mir |
      24  ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[ADD]](s32)
      25  ; CHECK: [[EXTRACT:%[0-9]+]]:_(s1) = G_EXTRACT [[MV]](s64), 0
      26  ; CHECK: [[EXTRACT1:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
      106  ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[ADD]](s32)
      107  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
|
/external/libvpx/libvpx/vp8/common/ |
D | mv.h |
      22  } MV;  typedef
      26  MV as_mv;
|