/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | fround.ll |
    10   @src64 = common global [8 x double] zeroinitializer, align 64
    29   …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 0), al…
    30   …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 1), al…
    38   ; SSE41-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @src64 to <…
    44   ; AVX-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* bitcast ([8 x double]* @src64 to <2 …
    49   …%ld0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
    50   …%ld1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
    60   …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 0), al…
    61   …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 1), al…
    62   …load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64 2), al…
    [all …]
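Note: these SLP-vectorizer tests feed straight-line IR that loads adjacent elements of @src64, applies an intrinsic, and stores the results; the SSE41/AVX check lines expect the scalars to be merged into <2 x double> (and wider) operations. The same pattern recurs in the bswap, bitreverse, ctpop, cttz and ctlz entries below. A rough C analogue of the 2-lane case, assuming a dst64 destination array (not visible in the hits above) and ceil() as one of the rounding ops the file covers:

#include <math.h>

double src64[8];   /* mirrors the @src64 global in the test */
double dst64[8];   /* assumed destination array for illustration */

/* Two adjacent ceil() calls on consecutive elements: the kind of
   straight-line code the SLP vectorizer is expected to turn into a single
   <2 x double> load, a vector rounding call, and a <2 x double> store. */
void ceil_2f64(void)
{
    dst64[0] = ceil(src64[0]);
    dst64[1] = ceil(src64[1]);
}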
|
D | bswap.ll |
    8    @src64 = common global [4 x i64] zeroinitializer, align 32
    21   ; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
    22   ; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
    30   ; AVX-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*),…
    35   %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
    36   %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
    46   ; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
    47   ; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
    48   ; SSE-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
    49   ; SSE-NEXT: [[LD3:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
    [all …]
|
D | bitreverse.ll |
    10   @src64 = common global [4 x i64] zeroinitializer, align 32
    26   ; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
    27   ; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
    35   ; AVX-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
    36   ; AVX-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
    44   ; XOP-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*),…
    49   %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
    50   %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
    60   ; SSE-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
    61   ; SSE-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i…
    [all …]
|
D | ctpop.ll |
    8    @src64 = common global [4 x i64] zeroinitializer, align 32
    24   ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*…
    29   %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
    30   %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
    40   ; SSE-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* bitcast ([4 x i64]* @src64 to <2 x i64>*),…
    41   …, <2 x i64>* bitcast (i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 2) to …
    49   ; AVX-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([4 x i64]* @src64 to <4 x i64>*),…
    54   %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
    55   %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 1), align 4
    56   %ld2 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 2), align 4
    [all …]
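Note: the ctpop hits show the same scalar pattern at two widths — the SSE check lines expect two <2 x i64> loads, the AVX line expects one <4 x i64> load. A C analogue of the four-lane case, again with dst64 assumed as the destination:

#include <stdint.h>

uint64_t src64[4];   /* mirrors the @src64 global */
uint64_t dst64[4];   /* assumed destination array */

/* Four adjacent population counts; the SLP vectorizer can cover these with
   one <4 x i64> load on AVX targets or two <2 x i64> loads on SSE. */
void ctpop_4i64(void)
{
    dst64[0] = (uint64_t)__builtin_popcountll(src64[0]);
    dst64[1] = (uint64_t)__builtin_popcountll(src64[1]);
    dst64[2] = (uint64_t)__builtin_popcountll(src64[2]);
    dst64[3] = (uint64_t)__builtin_popcountll(src64[3]);
}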
|
D | cttz.ll |
    8    @src64 = common global [4 x i64] zeroinitializer, align 32
    28   ; CHECK-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    29   ; CHECK-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    36   %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
    37   %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
    47   ; CHECK-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    48   ; CHECK-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    49   ; CHECK-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    50   ; CHECK-NEXT: [[LD3:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    61   %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
    [all …]
|
D | ctlz.ll |
    8    @src64 = common global [4 x i64] zeroinitializer, align 32
    28   ; CHECK-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    29   ; CHECK-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    36   %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
    37   %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
    47   ; CHECK-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    48   ; CHECK-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    49   ; CHECK-NEXT: [[LD2:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    50   ; CHECK-NEXT: [[LD3:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    61   %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
    [all …]
|
/external/elfutils/libelf/ |
D | gnuhash_xlate.h |
    57   const Elf64_Xword *src64 = (const Elf64_Xword *) &src32[4]; in elf_cvt_gnuhash() local
    62   dest64[cnt] = bswap_64 (src64[cnt]); in elf_cvt_gnuhash()
    67   src32 = (const Elf32_Word *) &src64[bitmask_words]; in elf_cvt_gnuhash()
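Note: these hits are from elf_cvt_gnuhash(), which byte-swaps a GNU hash section: four 32-bit header words (bucket count, symbol index, bitmask word count, shift), then bitmask_words 64-bit bloom-filter words (hence the Elf64_Xword pointer), then 32-bit buckets and chains. A minimal sketch of the header-plus-bitmask part, with simplified types and the trailing bucket/chain loop omitted:

#include <byteswap.h>
#include <stddef.h>
#include <stdint.h>

/* Swap the 32-bit header words and the 64-bit bloom-filter words of a GNU
   hash section from one byte order to the other. */
static void cvt_gnuhash_prefix(void *dest, const void *src, size_t bitmask_words)
{
    const uint32_t *src32 = src;
    uint32_t *dest32 = dest;

    /* Four 32-bit header words. */
    for (size_t i = 0; i < 4; ++i)
        dest32[i] = bswap_32(src32[i]);

    /* The bloom filter starts right after the header and is 64-bit wide. */
    const uint64_t *src64 = (const uint64_t *) &src32[4];
    uint64_t *dest64 = (uint64_t *) &dest32[4];
    for (size_t cnt = 0; cnt < bitmask_words; ++cnt)
        dest64[cnt] = bswap_64(src64[cnt]);
}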
|
/external/skqp/src/core/ |
D | SkConvertPixels.cpp |
    199  auto src64 = (const uint64_t*) src; in convert_to_alpha8() local
    202  dst[x] = (uint8_t) (255.0f * SkHalfToFloat(src64[x] >> 48)); in convert_to_alpha8()
    205  src64 = SkTAddOffset<const uint64_t>(src64, srcRB); in convert_to_alpha8()
|
/external/skia/src/core/ |
D | SkConvertPixels.cpp |
    199  auto src64 = (const uint64_t*) src; in convert_to_alpha8() local
    202  dst[x] = (uint8_t) (255.0f * SkHalfToFloat(src64[x] >> 48)); in convert_to_alpha8()
    205  src64 = SkTAddOffset<const uint64_t>(src64, srcRB); in convert_to_alpha8()
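Note: both Skia copies show the same helper: convert_to_alpha8() handling RGBA_F16 sources, where each pixel is a 64-bit value holding four half floats and the alpha half sits in the top 16 bits (src64[x] >> 48); the real code then steps to the next row with SkTAddOffset and srcRB. A self-contained sketch of one row, with a local half_to_float() standing in for SkHalfToFloat() and half subnormals simplified to zero:

#include <stdint.h>
#include <string.h>

/* Convert one IEEE half to float; subnormal halves are treated as zero,
   which is enough for an alpha channel sketch. */
static float half_to_float(uint16_t h)
{
    uint32_t sign = (uint32_t)(h & 0x8000u) << 16;
    uint32_t exp  = (h >> 10) & 0x1f;
    uint32_t man  = h & 0x3ffu;
    uint32_t bits;
    if (exp == 0)
        bits = sign;                                   /* zero / subnormal */
    else if (exp == 0x1f)
        bits = sign | 0x7f800000u | (man << 13);       /* inf / NaN */
    else
        bits = sign | ((exp + 112u) << 23) | (man << 13);  /* rebias 15 -> 127 */
    float f;
    memcpy(&f, &bits, sizeof f);
    return f;
}

/* Extract the alpha half from each 64-bit F16 pixel and scale to 0..255. */
static void f16_row_to_alpha8(uint8_t *dst, const uint64_t *src64, int width)
{
    for (int x = 0; x < width; x++)
        dst[x] = (uint8_t)(255.0f * half_to_float((uint16_t)(src64[x] >> 48)));
}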
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-fp128.ll |
    99   %src64 = load i64, i64* @var64
    100  %val64 = sitofp i64 %src64 to fp128
    115  %src64 = load i64, i64* @var64
    116  %val64 = uitofp i64 %src64 to fp128
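Note: these lines are from the i64-to-fp128 conversion tests: %src64 is loaded from @var64 and converted with sitofp/uitofp. On AArch64, long double is IEEE binary128, so a C analogue is just a cast, which the backend lowers to a runtime library call rather than inline instructions:

#include <stdint.h>

/* C analogue of the test's sitofp/uitofp i64 -> fp128 conversions. */
long double i64_to_fp128(int64_t src64)  { return (long double)src64; }
long double u64_to_fp128(uint64_t src64) { return (long double)src64; }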
|
/external/mesa3d/src/amd/vulkan/ |
D | radv_query.c |
    169  volatile uint64_t const *src64 = (volatile uint64_t const *)src; in radv_GetQueryPoolResults() local
    177  start = src64[2 * i]; in radv_GetQueryPoolResults()
    178  end = src64[2 * i + 1]; in radv_GetQueryPoolResults()
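Note: in radv_GetQueryPoolResults() the mapped occlusion-query memory is read through a volatile uint64_t pointer; each render backend writes a (start, end) pair of Z-pass counters, and the result is the sum of end - start over the pairs. A simplified sketch of that readback loop, assuming the driver's convention that the top bit of each counter is set once the value has landed (WAIT/PARTIAL flag handling omitted):

#include <stdbool.h>
#include <stdint.h>

/* Sum occlusion samples from num_pairs (start, end) counter pairs in the
   mapped query buffer.  Returns false if any pair is not yet available. */
static bool read_occlusion_query(const volatile uint64_t *src64,
                                 unsigned num_pairs, uint64_t *result)
{
    uint64_t total = 0;
    for (unsigned i = 0; i < num_pairs; ++i) {
        uint64_t start = src64[2 * i];
        uint64_t end   = src64[2 * i + 1];
        if (!(start & (1ull << 63)) || !(end & (1ull << 63)))
            return false;          /* GPU has not written this pair yet */
        total += end - start;      /* availability bits cancel out */
    }
    *result = total;
    return true;
}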
|
/external/mesa3d/src/mesa/main/ |
D | pack.c |
    1414  convert_integer_luminance64(int64_t src64, int bits, in convert_integer_luminance64() argument
    1423  src32 = clamp_sint64_to_sint32(src64); in convert_integer_luminance64()
    1425  src32 = clamp_sint64_to_uint32(src64); in convert_integer_luminance64()
    1427  src32 = clamp_uint64_to_sint32(src64); in convert_integer_luminance64()
    1429  src32 = clamp_uint64_to_uint32(src64); in convert_integer_luminance64()
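Note: convert_integer_luminance64() narrows a 64-bit luminance value to 32 bits, choosing one of four clamp helpers based on the signedness of the source and destination formats. The helper names below match the calls in the hits; the bodies are the obvious saturating narrows, sketched here rather than copied from Mesa:

#include <stdint.h>

static int32_t clamp_sint64_to_sint32(int64_t v)
{ return v > INT32_MAX ? INT32_MAX : v < INT32_MIN ? INT32_MIN : (int32_t)v; }

static uint32_t clamp_sint64_to_uint32(int64_t v)
{ return v < 0 ? 0u : v > (int64_t)UINT32_MAX ? UINT32_MAX : (uint32_t)v; }

static int32_t clamp_uint64_to_sint32(uint64_t v)
{ return v > (uint64_t)INT32_MAX ? INT32_MAX : (int32_t)v; }

static uint32_t clamp_uint64_to_uint32(uint64_t v)
{ return v > UINT32_MAX ? UINT32_MAX : (uint32_t)v; }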
|
/external/valgrind/none/tests/amd64/ |
D | sse4-64.c |
    92   ULong src64, V128* dst, V128* res ) in showIGVV() argument
    95   printf("%016llx", src64); in showIGVV()
    350  ULong src64 = (ULong)(_src); \
    362  : /*in*/ "r"(&dstv), "r"(&src64), "r"(&res) \
    365  showIGVV("r", (_opname), (_imm), src64, &dstv, &res); \
    371  ULong src64 = (ULong)(_src); \
    379  : /*in*/ "r"(&dstv), "r"(&src64), "r"(&res) \
    382  showIGVV("m", (_opname), (_imm), src64, &dstv, &res); \
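Note: sse4-64.c drives each instruction through test macros of this shape (the macro names themselves are truncated in the hits): stash a 64-bit value in src64, run the instruction under test via inline asm against a V128 destination, then print the operands and result with showIGVV(). A stripped-down sketch of the register-form pattern, using PINSRQ $1 as a stand-in instruction (needs an SSE4.1-capable x86-64 CPU; types are simplified):

#include <stdint.h>
#include <stdio.h>

typedef union { uint64_t w64[2]; uint8_t b[16]; } V128;   /* simplified */

/* Run `pinsrq $1, src64, xmm` against dstv and print in "%016llx" style. */
static void test_pinsrq_lane1(uint64_t src64, V128 dstv)
{
    V128 res;
    __asm__ __volatile__(
        "movdqu %2, %%xmm0      \n\t"   /* load the destination vector  */
        "pinsrq $1, %1, %%xmm0  \n\t"   /* insert the 64-bit source     */
        "movdqu %%xmm0, %0      \n\t"   /* capture the result           */
        : "=m"(res)
        : "r"(src64), "m"(dstv)
        : "xmm0");
    printf("pinsrq $1 src=%016llx res=%016llx%016llx\n",
           (unsigned long long)src64,
           (unsigned long long)res.w64[1],
           (unsigned long long)res.w64[0]);
}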
|
/external/valgrind/VEX/priv/ |
D | guest_amd64_toIR.c |
    3465  IRTemp src64 = newTemp(Ity_I64); in codegen_div() local
    3469  assign( src64, in codegen_div()
    3472  binop(op, mkexpr(src64), mkexpr(t)) ); in codegen_div()
    3479  assign( src64, unop(widen3264, in codegen_div()
    3483  assign( dst64, binop(op, mkexpr(src64), unop(widen1632,mkexpr(t))) ); in codegen_div()
    3492  assign( src64, unop(widen3264, in codegen_div()
    3495  binop(op, mkexpr(src64), in codegen_div()
    4977  IRTemp src64 = newTemp(Ity_I64); in gen_LZCNT() local
    4978  assign(src64, widenUto64( mkexpr(src) )); in gen_LZCNT()
    4982  binop(Iop_Shl64, mkexpr(src64), in gen_LZCNT()
    [all …]
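Note: the gen_LZCNT() hits show how the amd64 front end models LZCNT on operands narrower than 64 bits: the source is widened to 64 bits, shifted left so its most-significant bit lands at bit 63, and a 64-bit count-leading-zeros is taken, with the all-zero case handled separately. A plain-C sketch of that idea (not the VEX IR API):

#include <stdint.h>

/* LZCNT of a width_bits-wide value (8/16/32/64) via a single 64-bit clz,
   mirroring the widen-then-shift trick; LZCNT of zero is the operand width. */
static unsigned lzcnt_narrow(uint64_t src, unsigned width_bits)
{
    if (src == 0)
        return width_bits;
    uint64_t src64 = src << (64 - width_bits);   /* MSB now at bit 63 */
    return (unsigned)__builtin_clzll(src64);
}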
|
D | guest_x86_toIR.c |
    2243  IRTemp src64 = newTemp(Ity_I64); in codegen_div() local
    2247  assign( src64, binop(Iop_32HLto64, in codegen_div()
    2249  assign( dst64, binop(op, mkexpr(src64), mkexpr(t)) ); in codegen_div()
    2256  assign( src64, unop(widen3264, in codegen_div()
    2259  assign( dst64, binop(op, mkexpr(src64), unop(widen1632,mkexpr(t))) ); in codegen_div()
    2268  assign( src64, unop(widen3264, unop(widen1632, getIReg(2,R_EAX))) ); in codegen_div()
    2270  binop(op, mkexpr(src64), in codegen_div()
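Note: codegen_div() here builds the 32-bit DIV/IDIV dividend with Iop_32HLto64, i.e. EDX:EAX glued into one 64-bit value, divides it by the 32-bit operand, and splits the result back into quotient and remainder. A plain-C model of the unsigned 32-bit case (the real instruction additionally faults on divide-by-zero and quotient overflow, which this sketch ignores):

#include <stdint.h>

/* 32-bit unsigned DIV: dividend is edx:eax, divisor is a 32-bit value;
   the quotient goes back to eax and the remainder to edx. */
static void div32u(uint32_t *eax, uint32_t *edx, uint32_t divisor)
{
    uint64_t src64 = ((uint64_t)*edx << 32) | *eax;   /* Iop_32HLto64 */
    *eax = (uint32_t)(src64 / divisor);               /* quotient  */
    *edx = (uint32_t)(src64 % divisor);               /* remainder */
}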
|
D | guest_ppc_toIR.c |
    3497  IRExpr * src64 = unop( Iop_32Uto64, src ); in putGST_field() local
    3500  putGST_masked( reg, src64, mask ); in putGST_field()
    3503  binop( Iop_Shl64, src64, mkU8( toUChar( shft ) ) ), in putGST_field()
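Note: putGST_field() zero-extends a 32-bit source to 64 bits (Iop_32Uto64), shifts it to the field's bit position, and hands it to putGST_masked() so that only the bits under the field's mask are updated in the guest state register. A plain-C sketch of that masked insert (names are illustrative, not the VEX helpers):

#include <stdint.h>

/* Replace the bits of reg selected by mask with src shifted into position;
   bits outside the mask are preserved. */
static uint64_t put_field(uint64_t reg, uint32_t src, unsigned shift, uint64_t mask)
{
    uint64_t src64 = (uint64_t)src;                    /* Iop_32Uto64 */
    return (reg & ~mask) | ((src64 << shift) & mask);  /* masked merge */
}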
|