Searched refs:yptr (Results 1 – 14 of 14) sorted by relevance
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
D | memcmp-1.ll
      92  %yptr = bitcast i64* %y.addr to i8*
      93  %call = call i32 @memcmp(i8* %xptr, i8* %yptr, i32 8)
     110  %yptr = bitcast i32* %y.addr to i8*
     111  %call = call i32 @memcmp(i8* %xptr, i8* %yptr, i32 4)
     128  %yptr = bitcast i16* %y.addr to i8*
     129  %call = call i32 @memcmp(i8* %xptr, i8* %yptr, i32 2)
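These InstCombine tests call memcmp with a constant length equal to the width of the compared value (8, 4, and 2 bytes). A minimal C sketch of the source pattern being exercised (function and variable names here are illustrative, not taken from the test):

    #include <string.h>

    /* memcmp on two same-width integers with a constant, matching length;
       InstCombine can fold such calls into direct loads and compares
       instead of a library call. */
    int cmp_i64(long long x, long long y) {
        return memcmp(&x, &y, sizeof x);  /* constant length 8, as in the i64 case */
    }
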
/external/llvm/test/Transforms/InstCombine/
D | memcmp-1.ll
      83  %yptr = bitcast i64* %y.addr to i8*
      84  %call = call i32 @memcmp(i8* %xptr, i8* %yptr, i32 8)
     100  %yptr = bitcast i32* %y.addr to i8*
     101  %call = call i32 @memcmp(i8* %xptr, i8* %yptr, i32 4)
     117  %yptr = bitcast i16* %y.addr to i8*
     118  %call = call i32 @memcmp(i8* %xptr, i8* %yptr, i32 2)
/external/llvm/test/CodeGen/AMDGPU/
D | rotr.i64.ll
      27  define void @v_rotr_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
      30  %y = load i64, i64 addrspace(1)* %yptr, align 8
      51  …_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> addrspace(1)* %xptr, <2 x i64> addrspace(1)* %yptr) {
      54  %y = load <2 x i64>, <2 x i64> addrspace(1)* %yptr, align 8
D | rotl.i64.ll
      29  define void @v_rotl_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
      32  %y = load i64, i64 addrspace(1)* %yptr, align 8
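rotl.i64.ll is the mirror of rotr.i64.ll above; both load the rotate amount through %yptr and build the rotate from shifts and an or. A minimal C sketch of the two rotations these tests lower (names are illustrative):

    #include <stdint.h>

    /* Rotate by y modulo 64; masking the shift counts keeps them in
       range and avoids undefined behavior when y is a multiple of 64. */
    uint64_t rotl64(uint64_t x, uint64_t y) {
        y &= 63;
        return (x << y) | (x >> ((64 - y) & 63));
    }

    uint64_t rotr64(uint64_t x, uint64_t y) {
        y &= 63;
        return (x >> y) | (x << ((64 - y) & 63));
    }
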
D | fneg-fabs.f64.ll
      17  …_fabs_fadd_f64(double addrspace(1)* %out, double addrspace(1)* %xptr, double addrspace(1)* %yptr) {
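Judging by the (truncated) function name, this test covers an fadd fed by fneg(fabs(x)). A minimal C sketch of that shape, assuming the usual -|x| + y pattern:

    #include <math.h>

    /* -|x| + y: an fneg/fabs pair on one fadd operand, which the AMDGPU
       backend can fold into instruction source modifiers. */
    double fneg_fabs_fadd(double x, double y) {
        return -fabs(x) + y;
    }
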
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
D | rotr.i64.ll
      27  …_kernel void @v_rotr_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
      30  %y = load i64, i64 addrspace(1)* %yptr, align 8
      51  …_v2i64(<2 x i64> addrspace(1)* %in, <2 x i64> addrspace(1)* %xptr, <2 x i64> addrspace(1)* %yptr) {
      54  %y = load <2 x i64>, <2 x i64> addrspace(1)* %yptr, align 8
D | rotl.i64.ll
      29  …_kernel void @v_rotl_i64(i64 addrspace(1)* %in, i64 addrspace(1)* %xptr, i64 addrspace(1)* %yptr) {
      32  %y = load i64, i64 addrspace(1)* %yptr, align 8
D | fneg-fabs.f64.ll
      17  …_fabs_fadd_f64(double addrspace(1)* %out, double addrspace(1)* %xptr, double addrspace(1)* %yptr) {
/external/libvpx/libvpx/vp8/encoder/
D | temporal_filter.c
      42  unsigned char *yptr, *uptr, *vptr;    in vp8_temporal_filter_predictors_mb_c() local
      45  yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3);    in vp8_temporal_filter_predictors_mb_c()
      48  x->subpixel_predict16x16(yptr, stride, mv_col & 7, mv_row & 7, &pred[0],    in vp8_temporal_filter_predictors_mb_c()
      51  vp8_copy_mem16x16(yptr, stride, &pred[0], 16);    in vp8_temporal_filter_predictors_mb_c()
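The vp8 excerpt splits an eighth-pel motion vector into an integer pixel offset (mv >> 3) and a subpel phase (mv & 7): the integer part positions yptr in the reference frame, and a nonzero phase selects the subpixel filter. A standalone sketch of that address computation (function name is illustrative):

    /* Point at the integer-pel source pixel for an eighth-pel motion
       vector; the remainders mv_col & 7 and mv_row & 7 pick the
       subpixel interpolation phase. */
    unsigned char *integer_pel_base(unsigned char *y_mb_ptr, int stride,
                                    int mv_row, int mv_col) {
        return y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3);
    }
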
/external/libxaac/decoder/
D | ixheaacd_imdct.c
     125  WORD32 *yptr;    in ixheaacd_calc_post_twid_dec() local
     127  yptr = &xptr[2 * nlength - 1];    in ixheaacd_calc_post_twid_dec()
     132  *yptr = (-(ixheaacd_mult32((i_ptr[i]), (*cos_ptr++)) +    in ixheaacd_calc_post_twid_dec()
     135  yptr -= 2;    in ixheaacd_calc_post_twid_dec()
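Here yptr walks backwards through interleaved output (starting at index 2*nlength - 1 and stepping by -2) while a cosine/sine post-twiddle is applied; the exact expression is cut off in the match. A generic fixed-point sketch of such a post-twiddle loop, with an assumed formula and a mul32 stand-in for ixheaacd_mult32, not the library's exact code:

    #include <stdint.h>

    /* Q31-style fractional multiply, standing in for ixheaacd_mult32. */
    static int32_t mul32(int32_t a, int32_t b) {
        return (int32_t)(((int64_t)a * b) >> 32);
    }

    /* Assumed post-twiddle shape: rotate each spectral pair by cos/sin,
       negate, and store into every other output slot from the end. */
    void post_twiddle(int32_t *y, const int32_t *re, const int32_t *im,
                      const int32_t *cs, const int32_t *sn, int nlength) {
        int32_t *yptr = &y[2 * nlength - 1];
        for (int i = 0; i < nlength; i++) {
            *yptr = -(mul32(im[i], cs[i]) + mul32(re[i], sn[i]));
            yptr -= 2;
        }
    }
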
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopStrengthReduce/X86/
D | macro-fuse-cmp.ll
     104  %yptr = bitcast double* %gepy to <2 x double>*
     106  %yval = load <2 x double>, <2 x double>* %yptr, align 8
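The matched lines reinterpret a double* as a <2 x double>* and perform one 16-byte load. A C sketch of the same reinterpreted load, using memcpy for a well-defined bitcast (the struct type and function name are illustrative):

    #include <string.h>

    typedef struct { double v[2]; } d2;  /* stand-in for <2 x double> */

    /* Load two adjacent doubles as a single 16-byte chunk, mirroring the
       bitcast-to-vector-pointer load in the test. */
    d2 load_pair(const double *p) {
        d2 out;
        memcpy(&out, p, sizeof out);
        return out;
    }
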
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
D | required-vector-width.ll
     126  define void @psubus_64i8_max_256(<64 x i8>* %xptr, <64 x i8>* %yptr, <64 x i8>* %zptr) "required-ve…
     138  %y = load <64 x i8>, <64 x i8>* %yptr
     146  define void @psubus_64i8_max_512(<64 x i8>* %xptr, <64 x i8>* %yptr, <64 x i8>* %zptr) "required-ve…
     155  %y = load <64 x i8>, <64 x i8>* %yptr
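The psubus_64i8_max_* functions check that a vector max followed by a subtract is recognized as unsigned saturating subtraction under different required-vector-width attributes. The per-element identity, as a scalar C sketch (name is illustrative):

    /* max(x, y) - y equals x - y when x > y and 0 otherwise, i.e. an
       unsigned saturating subtract, which x86 can emit as psubusb
       per element. */
    unsigned char psubus_u8(unsigned char x, unsigned char y) {
        unsigned char m = x > y ? x : y;   /* max(x, y) */
        return (unsigned char)(m - y);     /* saturates at 0 */
    }
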
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SROA/
D | vector-promotion.ll
     381  ; CHECK-NEXT: %[[yptr:.*]] = bitcast i8* %y to <2 x float>*
     382  ; CHECK-NEXT: %[[y:.*]] = load <2 x float>, <2 x float>* %[[yptr]]
/external/llvm/test/Transforms/SROA/
D | vector-promotion.ll
     381  ; CHECK-NEXT: %[[yptr:.*]] = bitcast i8* %y to <2 x float>*
     382  ; CHECK-NEXT: %[[y:.*]] = load <2 x float>, <2 x float>* %[[yptr]]