
Searched refs:vsx (Results 1 – 25 of 234) sorted by relevance


/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/
builtins-ppc-elf2-abi.ll
22 %2 = call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %1)
34 %2 = call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %1)
70 %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %0, <2 x double> %1)
82 %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %0, <4 x float> %1)
94 %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %0, <2 x double> %1)
106 %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %0, <4 x float> %1)
118 %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %0, <2 x double> %1)
130 %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %0, <4 x float> %1)
143 %1 = call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float> %0)
155 %1 = call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double> %0)
[all …]
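The matches above are the llvm.ppc.vsx.* intrinsics that the ELFv2-ABI vector builtins are expected to lower to. As a rough illustration, a minimal C sketch of the usual source-level counterparts via altivec.h; the function names are illustrative, and the target/CPU flags are one plausible choice, not the test's own build line:

#include <altivec.h>

/* Compile with e.g. clang -target powerpc64le-linux-gnu -mcpu=pwr8 -c vsx_div.c */
vector double div_dp(vector double a, vector double b) {
  return vec_div(a, b);    /* expected to lower to xvdivdp */
}

vector float div_sp(vector float a, vector float b) {
  return vec_div(a, b);    /* expected to lower to xvdivsp */
}

vector bool long long cmpeq_dp(vector double a, vector double b) {
  return vec_cmpeq(a, b);  /* expected to lower to xvcmpeqdp */
}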
vsx-minmax.ll
1 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=+vsx -O0 -fast-isel=0 -mtriple=powerpc64-unknown…
23 %2 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %0, <4 x float> %1)
27 %4 = tail call <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double> %3, <2 x double> %3)
32 %7 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %5, <4 x float> %6)
37 %10 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %8, <4 x float> %9)
41 %12 = tail call <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double> %11, <2 x double> %11)
46 %15 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %13, <4 x float> %14)
50 %17 = tail call double @llvm.ppc.vsx.xsmaxdp(double %16, double %16)
53 %18 = tail call double @llvm.ppc.vsx.xsmindp(double %16, double %16)
60 declare double @llvm.ppc.vsx.xsmaxdp(double, double)
[all …]
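vsx-minmax.ll exercises the VSX min/max intrinsics, both vector and scalar. A minimal C sketch of the corresponding calls, assuming a VSX-enabled target (names illustrative):

#include <altivec.h>

vector float max_sp(vector float a, vector float b) {
  return vec_max(a, b);    /* xvmaxsp */
}

vector double min_dp(vector double a, vector double b) {
  return vec_min(a, b);    /* xvmindp */
}

/* Scalar forms: __builtin_vsx_xsmaxdp/xsmindp are the GCC/Clang
 * builtins matching the declarations in the test. */
double max_dp_scalar(double a, double b) {
  return __builtin_vsx_xsmaxdp(a, b);  /* xsmaxdp */
}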
vsx-ldst-builtin-le.ll
1 ; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mattr=+vsx -O2 \
30 %0 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vsi to i8*))
36 %1 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vui to i8*))
42 %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x float>* @vf to i8*))
49 %4 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vsll to i8*))
56 %6 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vull to i8*))
63 %8 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x double>* @vd to i8*))
72 call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %9, i8* bitcast (<4 x i32>* @res_vsi to i8*))
78 call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %10, i8* bitcast (<4 x i32>* @res_vui to i8*))
85 call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %12, i8* bitcast (<4 x float>* @res_vf to i8*))
[all …]
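These lxvw4x/stxvw4x and lxvd2x hits come from the VSX load/store builtins. A minimal sketch of the usual source-level entry points, vec_vsx_ld/vec_vsx_st (unaligned-capable VSX loads and stores; the zero offsets and names are illustrative):

#include <altivec.h>

vector signed int load_vsi(const signed int *p) {
  return vec_vsx_ld(0, p);   /* lxvw4x (plus the LE swap handling this test checks) */
}

void store_vsi(vector signed int v, signed int *p) {
  vec_vsx_st(v, 0, p);       /* stxvw4x */
}

vector double load_vd(const double *p) {
  return vec_vsx_ld(0, p);   /* lxvd2x */
}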
i64_fp.ll
4 ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mattr=+64bit | \
6 ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mattr=+64bit | \
8 ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mcpu=g5 | \
10 ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mcpu=g5 | \
12 ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mattr=-64bit | \
14 ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mattr=-64bit | \
16 ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mcpu=g4 | \
18 ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=ppc32-- -mcpu=g4 | \
vsx-p9.ll
149 %0 = tail call <4 x float> @llvm.ppc.vsx.xviexpsp(<4 x i32> %a, <4 x i32> %b)
156 declare <4 x float> @llvm.ppc.vsx.xviexpsp(<4 x i32>, <4 x i32>)
161 %0 = tail call <2 x double> @llvm.ppc.vsx.xviexpdp(<2 x i64> %a, <2 x i64> %b)
168 declare <2 x double> @llvm.ppc.vsx.xviexpdp(<2 x i64>, <2 x i64>)
199 %0 = tail call <4 x float> @llvm.ppc.vsx.xvcvsphp(<4 x float> %a)
249 declare <4 x float> @llvm.ppc.vsx.xvcvsphp(<4 x float>)
265 %0 = tail call <4 x i32> @llvm.ppc.vsx.xvxexpsp(<4 x float> %a)
272 declare <4 x i32> @llvm.ppc.vsx.xvxexpsp(<4 x float>)
277 %0 = tail call <2 x i64> @llvm.ppc.vsx.xvxexpdp(<2 x double> %a)
284 declare <2 x i64> @llvm.ppc.vsx.xvxexpdp(<2 x double>)
[all …]
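vsx-p9.ll covers the POWER9 exponent insert/extract intrinsics (xviexpdp/xvxexpdp and the single-precision variants). A hedged sketch of the altivec.h wrappers, assuming -mcpu=pwr9; the exact overload set varies by compiler version:

#include <altivec.h>

/* Build a vector double from significand and biased-exponent fields. */
vector double insert_exp(vector unsigned long long sig,
                         vector unsigned long long exp) {
  return vec_insert_exp(sig, exp);   /* xviexpdp */
}

/* Pull the biased exponent field back out of each double. */
vector unsigned long long extract_exp(vector double v) {
  return vec_extract_exp(v);         /* xvxexpdp */
}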
vsx-div.ll
1 ; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=+vsx -O1 -mtriple=powerpc64-unknown-linux-gnu < …
11 %1 = tail call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %0)
21 %1 = tail call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %0)
28 declare <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double>, <2 x double>)
29 declare <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float>, <4 x float>)
fsqrt.ll
4 ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=powerpc-apple-darwin8 -mattr=+fsqrt | \
6 ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=powerpc-apple-darwin8 -mcpu=g5 | \
8 ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=powerpc-apple-darwin8 -mattr=-fsqrt | \
10 ; RUN: llc -verify-machineinstrs < %s -mattr=-vsx -mtriple=powerpc-apple-darwin8 -mcpu=g4 | \
mcm-4.ll
2 ; RUN: -fast-isel=false -mattr=-vsx <%s | FileCheck -check-prefix=MEDIUM %s
4 ; RUN: -fast-isel=false -mattr=+vsx <%s | FileCheck -check-prefix=MEDIUM-VSX %s
6 ; RUN: -fast-isel=false -mattr=-vsx <%s | FileCheck -check-prefix=LARGE %s
8 ; RUN: -fast-isel=false -mattr=+vsx <%s | FileCheck -check-prefix=LARGE-VSX %s
10 ; RUN: -fast-isel=false -mattr=+vsx <%s | FileCheck -check-prefix=MEDIUM-P9 %s
12 ; RUN: -fast-isel=false -mattr=+vsx <%s | FileCheck -check-prefix=LARGE-P9 %s
/external/llvm/test/CodeGen/PowerPC/
vsx-ldst-builtin-le.ll
1 ; RUN: llc -mcpu=pwr8 -mattr=+vsx -O2 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
20 %0 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vsi to i8*))
24 %1 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vui to i8*))
28 %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x float>* @vf to i8*))
33 %4 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vsll to i8*))
38 %6 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vull to i8*))
43 %8 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x double>* @vd to i8*))
49 call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %9, i8* bitcast (<4 x i32>* @res_vsi to i8*))
53 call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %10, i8* bitcast (<4 x i32>* @res_vui to i8*))
58 call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %12, i8* bitcast (<4 x float>* @res_vf to i8*))
[all …]
vsx-minmax.ll
1 ; RUN: llc -mcpu=pwr7 -mattr=+vsx -O0 -fast-isel=0 -mtriple=powerpc64-unknown-linux-gnu < %s | File…
23 %2 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %0, <4 x float> %1)
27 %4 = tail call <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double> %3, <2 x double> %3)
32 %7 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %5, <4 x float> %6)
37 %10 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %8, <4 x float> %9)
41 %12 = tail call <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double> %11, <2 x double> %11)
46 %15 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %13, <4 x float> %14)
50 %17 = tail call double @llvm.ppc.vsx.xsmaxdp(double %16, double %16)
53 %18 = tail call double @llvm.ppc.vsx.xsmindp(double %16, double %16)
60 declare double @llvm.ppc.vsx.xsmaxdp(double, double)
[all …]
i64_fp.ll
4 ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mattr=+64bit | \
6 ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mattr=+64bit | \
8 ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mcpu=g5 | \
10 ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mcpu=g5 | \
12 ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mattr=-64bit | \
14 ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mattr=-64bit | \
16 ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mcpu=g4 | \
18 ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mcpu=g4 | \
builtins-ppc-elf2-abi.ll
22 %2 = call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %1)
34 %2 = call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %1)
70 %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %0, <2 x double> %1)
82 %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %0, <4 x float> %1)
94 %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %0, <2 x double> %1)
106 %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %0, <4 x float> %1)
118 %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %0, <2 x double> %1)
130 %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %0, <4 x float> %1)
143 %1 = call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float> %0)
155 %1 = call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double> %0)
[all …]
vsx-div.ll
1 ; RUN: llc -mcpu=pwr7 -mattr=+vsx -O1 -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s
11 %1 = tail call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %0)
21 %1 = tail call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %0)
28 declare <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double>, <2 x double>)
29 declare <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float>, <4 x float>)
fsqrt.ll
4 ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mtriple=powerpc-apple-darwin8 -mattr=+fsqrt | \
6 ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mtriple=powerpc-apple-darwin8 -mcpu=g5 | \
8 ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mtriple=powerpc-apple-darwin8 -mattr=-fsqrt | \
10 ; RUN: llc < %s -mattr=-vsx -march=ppc32 -mtriple=powerpc-apple-darwin8 -mcpu=g4 | \
mcm-4.ll
1 ; RUN: llc -mcpu=pwr7 -O0 -code-model=medium -fast-isel=false -mattr=-vsx <%s | FileCheck -check-pr…
2 ; RUN: llc -mcpu=pwr7 -O0 -code-model=medium -fast-isel=false -mattr=+vsx <%s | FileCheck -check-pr…
3 ; RUN: llc -mcpu=pwr7 -O0 -code-model=large -fast-isel=false -mattr=-vsx <%s | FileCheck -check-pre…
4 ; RUN: llc -mcpu=pwr7 -O0 -code-model=large -fast-isel=false -mattr=+vsx <%s | FileCheck -check-pre…
/external/linux-kselftest/tools/testing/selftests/powerpc/ptrace/
ptrace-vsx.c
22 void vsx(void) in vsx() function
43 unsigned long vsx[VSX_MAX]; in trace_vsx() local
47 FAIL_IF(show_vsx(child, vsx)); in trace_vsx()
48 FAIL_IF(validate_vsx(vsx, fp_load)); in trace_vsx()
52 memset(vsx, 0, sizeof(vsx)); in trace_vsx()
54 load_vsx_vmx(fp_load_new, vsx, vmx); in trace_vsx()
56 FAIL_IF(write_vsx(child, vsx)); in trace_vsx()
83 vsx(); in ptrace_vsx()
ptrace-tm-vsx.c
91 unsigned long vsx[VSX_MAX]; in trace_tm_vsx() local
95 FAIL_IF(show_vsx(child, vsx)); in trace_tm_vsx()
96 FAIL_IF(validate_vsx(vsx, fp_load)); in trace_tm_vsx()
99 FAIL_IF(show_vsx_ckpt(child, vsx)); in trace_tm_vsx()
100 FAIL_IF(validate_vsx(vsx, fp_load_ckpt)); in trace_tm_vsx()
103 memset(vsx, 0, sizeof(vsx)); in trace_tm_vsx()
106 load_vsx_vmx(fp_load_ckpt_new, vsx, vmx); in trace_tm_vsx()
108 FAIL_IF(write_vsx_ckpt(child, vsx)); in trace_tm_vsx()
ptrace-tm-spd-vsx.c
103 unsigned long vsx[VSX_MAX]; in trace_tm_spd_vsx() local
107 FAIL_IF(show_vsx(child, vsx)); in trace_tm_spd_vsx()
108 FAIL_IF(validate_vsx(vsx, fp_load)); in trace_tm_spd_vsx()
111 FAIL_IF(show_vsx_ckpt(child, vsx)); in trace_tm_spd_vsx()
112 FAIL_IF(validate_vsx(vsx, fp_load_ckpt)); in trace_tm_spd_vsx()
116 memset(vsx, 0, sizeof(vsx)); in trace_tm_spd_vsx()
119 load_vsx_vmx(fp_load_ckpt_new, vsx, vmx); in trace_tm_spd_vsx()
121 FAIL_IF(write_vsx_ckpt(child, vsx)); in trace_tm_spd_vsx()
ptrace-vsx.h
17 int validate_vsx(unsigned long *vsx, unsigned long *load) in validate_vsx() argument
22 if (vsx[i] != load[2 * i + 1]) { in validate_vsx()
24 i, vsx[i], 2 * i + 1, load[2 * i + 1]); in validate_vsx()
112 void load_vsx_vmx(unsigned long *load, unsigned long *vsx, in load_vsx_vmx() argument
118 vsx[i] = load[1 + 2 * i]; in load_vsx_vmx()
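The ptrace-vsx.h fragments show the layout convention the tests rely on: each VSX register contributes two 64-bit words to the load buffer, and the half being checked sits at the odd index (load[2*i + 1]). A self-contained sketch of that check; the function shape is taken from the fragments, while the value of VSX_MAX is an assumption:

#include <stdio.h>

#define VSX_MAX 32   /* assumed; the real value lives in the test headers */

static int validate_vsx(const unsigned long *vsx, const unsigned long *load)
{
    for (int i = 0; i < VSX_MAX; i++) {
        /* Compare against the odd word of each pair, mirroring the
         * vsx[i] != load[2 * i + 1] check quoted above. */
        if (vsx[i] != load[2 * i + 1]) {
            fprintf(stderr, "vsx[%d]: %lx != load[%d]: %lx\n",
                    i, vsx[i], 2 * i + 1, load[2 * i + 1]);
            return 1;
        }
    }
    return 0;
}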
.gitignore
7 ptrace-vsx
8 ptrace-tm-vsx
9 ptrace-tm-spd-vsx
Makefile
3 ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
4 ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
/external/libaom/libaom/av1/common/ppc/
cfl_ppc.c
113 CFL_SUB_AVG_X(vsx, 8, 4, 16, 5)
114 CFL_SUB_AVG_X(vsx, 8, 8, 32, 6)
115 CFL_SUB_AVG_X(vsx, 8, 16, 64, 7)
116 CFL_SUB_AVG_X(vsx, 8, 32, 128, 8)
117 CFL_SUB_AVG_X(vsx, 16, 4, 32, 6)
118 CFL_SUB_AVG_X(vsx, 16, 8, 64, 7)
119 CFL_SUB_AVG_X(vsx, 16, 16, 128, 8)
120 CFL_SUB_AVG_X(vsx, 16, 32, 256, 9)
121 CFL_SUB_AVG_X(vsx, 32, 8, 128, 8)
122 CFL_SUB_AVG_X(vsx, 32, 16, 256, 9)
[all …]
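Each CFL_SUB_AVG_X(vsx, W, H, round, shift) line instantiates a VSX subtract-average kernel for chroma-from-luma prediction; the arguments are consistent with round = W*H/2 and shift = log2(W*H). A scalar reference sketch of the operation being vectorized; the function name and types here are illustrative, not libaom's exact signature:

#include <stdint.h>

static void cfl_subtract_average_ref(const uint16_t *src, int16_t *dst,
                                     int width, int height,
                                     int round_offset, int num_pel_log2)
{
    int32_t sum = 0;
    for (int i = 0; i < width * height; i++)
        sum += src[i];
    /* e.g. CFL_SUB_AVG_X(vsx, 8, 4, 16, 5): 32 pixels, round 16, shift 5 */
    const int avg = (sum + round_offset) >> num_pel_log2;
    for (int i = 0; i < width * height; i++)
        dst[i] = (int16_t)(src[i] - avg);
}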
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/PowerPC/
vsx-unaligned.ll
19 %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %1)
23 call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %2, i8* %4)
27 %7 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %6)
31 call void @llvm.ppc.vsx.stxvd2x(<2 x double> %7, i8* %9)
41 declare <4 x i32> @llvm.ppc.vsx.lxvw4x(i8*)
42 declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, i8*)
43 declare <2 x double> @llvm.ppc.vsx.lxvd2x(i8*)
44 declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, i8*)
/external/llvm/test/Transforms/InstCombine/
vsx-unaligned.ll
19 %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %1)
23 call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %2, i8* %4)
27 %7 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %6)
31 call void @llvm.ppc.vsx.stxvd2x(<2 x double> %7, i8* %9)
41 declare <4 x i32> @llvm.ppc.vsx.lxvw4x(i8*)
42 declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, i8*)
43 declare <2 x double> @llvm.ppc.vsx.lxvd2x(i8*)
44 declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, i8*)
/external/clang/test/CodeGen/
builtins-ppc-p8vector.c
34 vector signed __int128 vsx = { 1 }; variable
103 res_vsx = vec_addc(vsx, vsx); in test1()
112 res_vsx = vec_adde(vsx, vsx, vsx); in test1()
121 res_vsx = vec_addec(vsx, vsx, vsx); in test1()
1470 res_vsx = vec_sub(vsx, vsx); in test1()
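builtins-ppc-p8vector.c tests the POWER8 quadword carry builtins on vector signed __int128 (vec_addc/vec_adde/vec_addec, which map to vaddcuq/vaddeuqm/vaddecuq). A minimal sketch of a 128-bit add with explicit carry propagation, assuming a pwr8 target; the function name is illustrative:

#include <altivec.h>

vector signed __int128 add128(vector signed __int128 a,
                              vector signed __int128 b,
                              vector signed __int128 carry_in,
                              vector signed __int128 *carry_out)
{
  *carry_out = vec_addec(a, b, carry_in);  /* carry-out of a + b + carry_in */
  return vec_adde(a, b, carry_in);         /* low 128 bits of a + b + carry_in */
}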
