/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/
builtins-ppc-elf2-abi.ll
   22  %2 = call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %1)
   34  %2 = call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %1)
   70  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %0, <2 x double> %1)
   82  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %0, <4 x float> %1)
   94  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %0, <2 x double> %1)
  106  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %0, <4 x float> %1)
  118  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %0, <2 x double> %1)
  130  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %0, <4 x float> %1)
  143  %1 = call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float> %0)
  155  %1 = call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double> %0)
  [all …]

vsx-minmax.ll
    1  ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=+vsx -O0 -fast-isel=0 -mtriple=powerpc64-unknown…
   23  %2 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %0, <4 x float> %1)
   27  %4 = tail call <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double> %3, <2 x double> %3)
   32  %7 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %5, <4 x float> %6)
   37  %10 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %8, <4 x float> %9)
   41  %12 = tail call <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double> %11, <2 x double> %11)
   46  %15 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %13, <4 x float> %14)
   50  %17 = tail call double @llvm.ppc.vsx.xsmaxdp(double %16, double %16)
   53  %18 = tail call double @llvm.ppc.vsx.xsmindp(double %16, double %16)
   60  declare double @llvm.ppc.vsx.xsmaxdp(double, double)
  [all …]

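These min/max tests drive the llvm.ppc.vsx.xvmax*/xvmin* intrinsics matched above. A minimal sketch of how the same intrinsics are usually reached from C, assuming altivec.h and a VSX-enabled compile (e.g. clang -mcpu=pwr7 -mvsx); the function names are illustrative, not the test's own:

```c
/* Minimal sketch, assuming altivec.h and a VSX target. */
#include <altivec.h>

vector float max_sp(vector float a, vector float b) {
  return vec_max(a, b);   /* expected to select xvmaxsp under VSX */
}

vector double min_dp(vector double a, vector double b) {
  return vec_min(a, b);   /* expected to select xvmindp under VSX */
}
```
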
vsx-ldst-builtin-le.ll
    1  ; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mattr=+vsx -O2 \
   30  %0 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vsi to i8*))
   36  %1 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vui to i8*))
   42  %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x float>* @vf to i8*))
   49  %4 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vsll to i8*))
   56  %6 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vull to i8*))
   63  %8 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x double>* @vd to i8*))
   72  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %9, i8* bitcast (<4 x i32>* @res_vsi to i8*))
   78  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %10, i8* bitcast (<4 x i32>* @res_vui to i8*))
   85  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %12, i8* bitcast (<4 x float>* @res_vf to i8*))
  [all …]

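The load/store test above exercises the unaligned VSX load/store intrinsics (lxvw4x/lxvd2x and the matching stores). A hedged sketch of the usual C-level entry point, vec_vsx_ld/vec_vsx_st from altivec.h; the pointer names are illustrative, not the test's globals:

```c
/* Hedged sketch: vec_vsx_ld/vec_vsx_st tolerate unaligned pointers and
 * are typically lowered through llvm.ppc.vsx.lxvw4x/stxvw4x. */
#include <altivec.h>

void copy_v4si(const vector signed int *src, vector signed int *dst) {
  vector signed int v = vec_vsx_ld(0, src);  /* lxvw4x */
  vec_vsx_st(v, 0, dst);                     /* stxvw4x */
}
```
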
i64_fp.ll
    4  ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mattr=+64bit | \
    6  ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mattr=+64bit | \
    8  ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mcpu=g5 | \
   10  ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mcpu=g5 | \
   12  ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mattr=-64bit | \
   14  ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mattr=-64bit | \
   16  ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mcpu=g4 | \
   18  ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mcpu=g4 | \

vsx-p9.ll
  149  %0 = tail call <4 x float> @llvm.ppc.vsx.xviexpsp(<4 x i32> %a, <4 x i32> %b)
  156  declare <4 x float> @llvm.ppc.vsx.xviexpsp(<4 x i32>, <4 x i32>)
  161  %0 = tail call <2 x double> @llvm.ppc.vsx.xviexpdp(<2 x i64> %a, <2 x i64> %b)
  168  declare <2 x double> @llvm.ppc.vsx.xviexpdp(<2 x i64>, <2 x i64>)
  199  %0 = tail call <4 x float> @llvm.ppc.vsx.xvcvsphp(<4 x float> %a)
  249  declare <4 x float> @llvm.ppc.vsx.xvcvsphp(<4 x float>)
  265  %0 = tail call <4 x i32> @llvm.ppc.vsx.xvxexpsp(<4 x float> %a)
  272  declare <4 x i32> @llvm.ppc.vsx.xvxexpsp(<4 x float>)
  277  %0 = tail call <2 x i64> @llvm.ppc.vsx.xvxexpdp(<2 x double> %a)
  284  declare <2 x i64> @llvm.ppc.vsx.xvxexpdp(<2 x double>)
  [all …]

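vsx-p9.ll covers the ISA 3.0 exponent insert/extract operations. As I understand the per-lane semantics of xvxexpdp/xviexpdp, they read and write the 11-bit biased exponent field of each double lane; a scalar reference sketch of that reading (my reconstruction, not code from the test):

```c
#include <stdint.h>

/* Scalar reference for one lane of xvxexpdp: return the 11-bit biased
 * exponent field of an IEEE-754 double. (Reconstruction of the ISA 3.0
 * semantics; not code from the test.) */
uint64_t xexp_dp(uint64_t bits) {
  return (bits >> 52) & 0x7FF;
}

/* Scalar reference for one lane of xviexpdp: keep sign and fraction
 * from a, take the exponent field from b. */
uint64_t iexp_dp(uint64_t a, uint64_t b) {
  return (a & 0x800FFFFFFFFFFFFFULL) | ((b & 0x7FF) << 52);
}
```
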
vsx-div.ll
    1  ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=+vsx -O1 -mtriple=powerpc64-unknown-linux-gnu < …
   11  %1 = tail call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %0)
   21  %1 = tail call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %0)
   28  declare <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double>, <2 x double>)
   29  declare <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float>, <4 x float>)

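vsx-div.ll declares the division intrinsics directly; from C the same operations are normally spelled vec_div. A minimal sketch, assuming altivec.h and a VSX-enabled compile:

```c
/* Minimal sketch, assuming altivec.h and -mvsx; vec_div is expected to
 * lower to the xvdivsp/xvdivdp intrinsics declared in the test. */
#include <altivec.h>

vector float div_sp(vector float a, vector float b) {
  return vec_div(a, b);    /* xvdivsp */
}

vector double div_dp(vector double a, vector double b) {
  return vec_div(a, b);    /* xvdivdp */
}
```
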
fsqrt.ll
    4  ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=powerpc-apple-darwin8 -mattr=+fsqrt | \
    6  ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=powerpc-apple-darwin8 -mcpu=g5 | \
    8  ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=powerpc-apple-darwin8 -mattr=-fsqrt | \
   10  ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=powerpc-apple-darwin8 -mcpu=g4 | \

mcm-4.ll
    2  ; RUN: -fast-isel=false -mattr=-vsx <%s | FileCheck -check-prefix=MEDIUM %s
    4  ; RUN: -fast-isel=false -mattr=+vsx <%s | FileCheck -check-prefix=MEDIUM-VSX %s
    6  ; RUN: -fast-isel=false -mattr=-vsx <%s | FileCheck -check-prefix=LARGE %s
    8  ; RUN: -fast-isel=false -mattr=+vsx <%s | FileCheck -check-prefix=LARGE-VSX %s
   10  ; RUN: -fast-isel=false -mattr=+vsx <%s | FileCheck -check-prefix=MEDIUM-P9 %s
   12  ; RUN: -fast-isel=false -mattr=+vsx <%s | FileCheck -check-prefix=LARGE-P9 %s

/external/llvm/test/CodeGen/PowerPC/
vsx-ldst-builtin-le.ll
    1  ; RUN: llc -mcpu=pwr8 -mattr=+vsx -O2 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
   20  %0 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vsi to i8*))
   24  %1 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vui to i8*))
   28  %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x float>* @vf to i8*))
   33  %4 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vsll to i8*))
   38  %6 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vull to i8*))
   43  %8 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x double>* @vd to i8*))
   49  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %9, i8* bitcast (<4 x i32>* @res_vsi to i8*))
   53  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %10, i8* bitcast (<4 x i32>* @res_vui to i8*))
   58  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %12, i8* bitcast (<4 x float>* @res_vf to i8*))
  [all …]

vsx-minmax.ll
    1  ; RUN: llc -mcpu=pwr7 -mattr=+vsx -O0 -fast-isel=0 -mtriple=powerpc64-unknown-linux-gnu < %s | File…
   23  %2 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %0, <4 x float> %1)
   27  %4 = tail call <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double> %3, <2 x double> %3)
   32  %7 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %5, <4 x float> %6)
   37  %10 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %8, <4 x float> %9)
   41  %12 = tail call <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double> %11, <2 x double> %11)
   46  %15 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %13, <4 x float> %14)
   50  %17 = tail call double @llvm.ppc.vsx.xsmaxdp(double %16, double %16)
   53  %18 = tail call double @llvm.ppc.vsx.xsmindp(double %16, double %16)
   60  declare double @llvm.ppc.vsx.xsmaxdp(double, double)
  [all …]

i64_fp.ll
    4  ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mattr=+64bit | \
    6  ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mattr=+64bit | \
    8  ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mcpu=g5 | \
   10  ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mcpu=g5 | \
   12  ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mattr=-64bit | \
   14  ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mattr=-64bit | \
   16  ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mcpu=g4 | \
   18  ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mcpu=g4 | \

builtins-ppc-elf2-abi.ll
   22  %2 = call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %1)
   34  %2 = call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %1)
   70  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %0, <2 x double> %1)
   82  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %0, <4 x float> %1)
   94  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %0, <2 x double> %1)
  106  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %0, <4 x float> %1)
  118  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %0, <2 x double> %1)
  130  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %0, <4 x float> %1)
  143  %1 = call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float> %0)
  155  %1 = call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double> %0)
  [all …]

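The comparison intrinsics this test matches (xvcmpeq/xvcmpge/xvcmpgt in both element widths) correspond to the generic altivec.h comparison functions, which return an all-ones/all-zeros lane mask. A hedged sketch, assuming altivec.h and -mvsx; function names are illustrative:

```c
/* Hedged sketch: each comparison yields a per-lane mask in the
 * documented vector bool types. */
#include <altivec.h>

vector bool long long ge_dp(vector double a, vector double b) {
  return vec_cmpge(a, b);   /* expected to select xvcmpgedp */
}

vector bool int gt_sp(vector float a, vector float b) {
  return vec_cmpgt(a, b);   /* expected to select xvcmpgtsp */
}
```
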
vsx-div.ll
    1  ; RUN: llc -mcpu=pwr7 -mattr=+vsx -O1 -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s
   11  %1 = tail call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %0)
   21  %1 = tail call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %0)
   28  declare <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double>, <2 x double>)
   29  declare <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float>, <4 x float>)

fsqrt.ll
    4  ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mtriple=powerpc-apple-darwin8 -mattr=+fsqrt | \
    6  ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mtriple=powerpc-apple-darwin8 -mcpu=g5 | \
    8  ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mtriple=powerpc-apple-darwin8 -mattr=-fsqrt | \
   10  ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mtriple=powerpc-apple-darwin8 -mcpu=g4 | \

mcm-4.ll
    1  ; RUN: llc -mcpu=pwr7 -O0 -code-model=medium -fast-isel=false -mattr=-vsx <%s | FileCheck -check-pr…
    2  ; RUN: llc -mcpu=pwr7 -O0 -code-model=medium -fast-isel=false -mattr=+vsx <%s | FileCheck -check-pr…
    3  ; RUN: llc -mcpu=pwr7 -O0 -code-model=large -fast-isel=false -mattr=-vsx <%s | FileCheck -check-pre…
    4  ; RUN: llc -mcpu=pwr7 -O0 -code-model=large -fast-isel=false -mattr=+vsx <%s | FileCheck -check-pre…

/external/linux-kselftest/tools/testing/selftests/powerpc/ptrace/
ptrace-vsx.c
   22  void vsx(void) in vsx() function
   43  unsigned long vsx[VSX_MAX]; in trace_vsx() local
   47  FAIL_IF(show_vsx(child, vsx)); in trace_vsx()
   48  FAIL_IF(validate_vsx(vsx, fp_load)); in trace_vsx()
   52  memset(vsx, 0, sizeof(vsx)); in trace_vsx()
   54  load_vsx_vmx(fp_load_new, vsx, vmx); in trace_vsx()
   56  FAIL_IF(write_vsx(child, vsx)); in trace_vsx()
   83  vsx(); in ptrace_vsx()

ptrace-tm-vsx.c
   91  unsigned long vsx[VSX_MAX]; in trace_tm_vsx() local
   95  FAIL_IF(show_vsx(child, vsx)); in trace_tm_vsx()
   96  FAIL_IF(validate_vsx(vsx, fp_load)); in trace_tm_vsx()
   99  FAIL_IF(show_vsx_ckpt(child, vsx)); in trace_tm_vsx()
  100  FAIL_IF(validate_vsx(vsx, fp_load_ckpt)); in trace_tm_vsx()
  103  memset(vsx, 0, sizeof(vsx)); in trace_tm_vsx()
  106  load_vsx_vmx(fp_load_ckpt_new, vsx, vmx); in trace_tm_vsx()
  108  FAIL_IF(write_vsx_ckpt(child, vsx)); in trace_tm_vsx()

ptrace-tm-spd-vsx.c
  103  unsigned long vsx[VSX_MAX]; in trace_tm_spd_vsx() local
  107  FAIL_IF(show_vsx(child, vsx)); in trace_tm_spd_vsx()
  108  FAIL_IF(validate_vsx(vsx, fp_load)); in trace_tm_spd_vsx()
  111  FAIL_IF(show_vsx_ckpt(child, vsx)); in trace_tm_spd_vsx()
  112  FAIL_IF(validate_vsx(vsx, fp_load_ckpt)); in trace_tm_spd_vsx()
  116  memset(vsx, 0, sizeof(vsx)); in trace_tm_spd_vsx()
  119  load_vsx_vmx(fp_load_ckpt_new, vsx, vmx); in trace_tm_spd_vsx()
  121  FAIL_IF(write_vsx_ckpt(child, vsx)); in trace_tm_spd_vsx()

ptrace-vsx.h
   17  int validate_vsx(unsigned long *vsx, unsigned long *load) in validate_vsx() argument
   22  if (vsx[i] != load[2 * i + 1]) { in validate_vsx()
   24  i, vsx[i], 2 * i + 1, load[2 * i + 1]); in validate_vsx()
  112  void load_vsx_vmx(unsigned long *load, unsigned long *vsx, in load_vsx_vmx() argument
  118  vsx[i] = load[1 + 2 * i]; in load_vsx_vmx()

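The fragments above give the checking pattern these ptrace selftests share: each 64-bit element of the VSX state fetched from the child is compared against every other doubleword of the 128-bit load pattern. A hedged reconstruction of validate_vsx() under that reading; VSX_MAX, the failure code, and the exact message format are assumptions:

```c
#include <stdio.h>

#define VSX_MAX 32	/* assumed bound; the real value lives in the header */

/* Hedged reconstruction from the matched fragments: vsx[i] must equal
 * the odd doubleword load[2*i+1] of each 128-bit pattern entry. */
int validate_vsx(unsigned long *vsx, unsigned long *load)
{
	int i;

	for (i = 0; i < VSX_MAX; i++) {
		if (vsx[i] != load[2 * i + 1]) {
			printf("vsx[%d]: %lx load[%d] %lx\n",
			       i, vsx[i], 2 * i + 1, load[2 * i + 1]);
			return 1;	/* assumed failure code */
		}
	}
	return 0;
}
```
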
.gitignore
    7  ptrace-vsx
    8  ptrace-tm-vsx
    9  ptrace-tm-spd-vsx

Makefile
    3  ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
    4  ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \

/external/libaom/libaom/av1/common/ppc/
cfl_ppc.c
  113  CFL_SUB_AVG_X(vsx, 8, 4, 16, 5)
  114  CFL_SUB_AVG_X(vsx, 8, 8, 32, 6)
  115  CFL_SUB_AVG_X(vsx, 8, 16, 64, 7)
  116  CFL_SUB_AVG_X(vsx, 8, 32, 128, 8)
  117  CFL_SUB_AVG_X(vsx, 16, 4, 32, 6)
  118  CFL_SUB_AVG_X(vsx, 16, 8, 64, 7)
  119  CFL_SUB_AVG_X(vsx, 16, 16, 128, 8)
  120  CFL_SUB_AVG_X(vsx, 16, 32, 256, 9)
  121  CFL_SUB_AVG_X(vsx, 32, 8, 128, 8)
  122  CFL_SUB_AVG_X(vsx, 32, 16, 256, 9)
  [all …]

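The CFL_SUB_AVG_X(vsx, w, h, round, shift) instantiations follow a fixed pattern: round = w*h/2 and shift = log2(w*h) (e.g. 8x4 gives 32 pixels, round 16, shift 5), so each kernel bakes in a rounded mean. A scalar sketch of the subtract-average step these VSX kernels vectorize; the flat-buffer layout is a simplifying assumption:

```c
#include <stdint.h>

/* Scalar sketch of CfL subtract-average (simplified to a flat buffer;
 * the real kernels operate on a strided prediction buffer). */
static void sub_avg(int16_t *buf, int w, int h,
                    int round_offset, int num_pel_log2) {
  int32_t sum = 0;
  for (int i = 0; i < w * h; i++)
    sum += buf[i];
  const int32_t avg = (sum + round_offset) >> num_pel_log2;  /* rounded mean */
  for (int i = 0; i < w * h; i++)
    buf[i] -= avg;
}
```
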
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/PowerPC/
vsx-unaligned.ll
   19  %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %1)
   23  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %2, i8* %4)
   27  %7 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %6)
   31  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %7, i8* %9)
   41  declare <4 x i32> @llvm.ppc.vsx.lxvw4x(i8*)
   42  declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, i8*)
   43  declare <2 x double> @llvm.ppc.vsx.lxvd2x(i8*)
   44  declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, i8*)

/external/llvm/test/Transforms/InstCombine/
vsx-unaligned.ll
   19  %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %1)
   23  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %2, i8* %4)
   27  %7 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %6)
   31  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %7, i8* %9)
   41  declare <4 x i32> @llvm.ppc.vsx.lxvw4x(i8*)
   42  declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, i8*)
   43  declare <2 x double> @llvm.ppc.vsx.lxvd2x(i8*)
   44  declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, i8*)

/external/clang/test/CodeGen/
builtins-ppc-p8vector.c
    34  vector signed __int128 vsx = { 1 }; variable
   103  res_vsx = vec_addc(vsx, vsx); in test1()
   112  res_vsx = vec_adde(vsx, vsx, vsx); in test1()
   121  res_vsx = vec_addec(vsx, vsx, vsx); in test1()
  1470  res_vsx = vec_sub(vsx, vsx); in test1()

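builtins-ppc-p8vector.c exercises the POWER8 quadword add/carry builtins on vector signed __int128. A minimal sketch of the same calls, assuming altivec.h and -mcpu=power8; function and variable names are illustrative:

```c
/* Minimal sketch, assuming altivec.h and -mcpu=power8. */
#include <altivec.h>

vector signed __int128 add_q(vector signed __int128 a,
                             vector signed __int128 b,
                             vector signed __int128 cin) {
  return vec_adde(a, b, cin);   /* a + b + (cin & 1) */
}

vector signed __int128 carry_q(vector signed __int128 a,
                               vector signed __int128 b) {
  return vec_addc(a, b);        /* 1 if a + b carries out of 128 bits */
}
```
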