Searched refs:ppc (Results 1 – 25 of 660) sorted by relevance

/external/freetype/builds/amiga/
makefile
48 all: libft2_ppc.a ftsystem.ppc.o ftsystempure.ppc.o
55 CC = ppc-morphos-gcc
56 AR = ppc-morphos-ar rc
57 RANLIB = ppc-morphos-ranlib
58 LD = ppc-morphos-ld
64 ftbase.ppc.o: $(FTSRC)/base/ftbase.c
67 ftinit.ppc.o: $(FTSRC)/base/ftinit.c
70 ftsystem.ppc.o: $(FTSRC)/base/ftsystem.c
74 ftsystempure.ppc.o: src/base/ftsystem.c
77 ftdebug.ppc.o: $(FTSRC)/base/ftdebug.c
[all …]
makefile.os4
45 all: assign libft2_ppc.a ftsystem.ppc.o ftsystempure.ppc.o
50 CC = ppc-amigaos-gcc
51 AR = ppc-amigaos-ar
52 RANLIB = ppc-amigaos-ranlib
67 ftbase.ppc.o: FT:src/base/ftbase.c
70 ftinit.ppc.o: FT:src/base/ftinit.c
73 ftsystem.ppc.o: FT:src/base/ftsystem.c
77 ftsystempure.ppc.o: src/base/ftsystem.c
83 ftbbox.ppc.o: FT:src/base/ftbbox.c
86 ftbdf.ppc.o: FT:src/base/ftbdf.c
[all …]
/external/llvm/test/CodeGen/PowerPC/
htm.ll
7 %0 = tail call i32 @llvm.ppc.tbegin(i32 0)
17 declare i32 @llvm.ppc.tbegin(i32) #1
22 %0 = tail call i32 @llvm.ppc.tend(i32 0)
29 declare i32 @llvm.ppc.tend(i32)
34 %0 = tail call i32 @llvm.ppc.tabort(i32 0)
35 %1 = tail call i32 @llvm.ppc.tabortdc(i32 0, i32 1, i32 2)
36 %2 = tail call i32 @llvm.ppc.tabortdci(i32 0, i32 1, i32 2)
37 %3 = tail call i32 @llvm.ppc.tabortwc(i32 0, i32 1, i32 2)
38 %4 = tail call i32 @llvm.ppc.tabortwci(i32 0, i32 1, i32 2)
48 declare i32 @llvm.ppc.tabort(i32)
[all …]
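
These htm.ll matches pin down the IR names of the PowerPC hardware-transactional-memory intrinsics. As a rough illustration, the sketch below shows the C-level builtins that clang lowers to @llvm.ppc.tbegin/@llvm.ppc.tend on a POWER8+ target built with -mhtm; it follows the usual tbegin/tend usage pattern and is not taken from the test itself.

/* Compile with: clang -mcpu=power8 -mhtm */
static int counter;

int add_transactionally(int delta)
{
    /* __builtin_tbegin(0) returns nonzero when transaction state is
     * successfully initiated; it lowers to @llvm.ppc.tbegin(i32 0). */
    if (__builtin_tbegin(0)) {
        counter += delta;
        __builtin_tend(0);  /* commit: @llvm.ppc.tend(i32 0) */
        return 0;
    }
    return -1;              /* transaction failed or aborted */
}
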
vsx-ldst-builtin-le.ll
20 %0 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vsi to i8*))
24 %1 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vui to i8*))
28 %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x float>* @vf to i8*))
33 %4 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vsll to i8*))
38 %6 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vull to i8*))
43 %8 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x double>* @vd to i8*))
49 call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %9, i8* bitcast (<4 x i32>* @res_vsi to i8*))
53 call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %10, i8* bitcast (<4 x i32>* @res_vui to i8*))
58 call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %12, i8* bitcast (<4 x float>* @res_vf to i8*))
63 call void @llvm.ppc.vsx.stxvd2x(<2 x double> %14, i8* bitcast (<2 x i64>* @res_vsll to i8*))
[all …]
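
The lxvw4x/lxvd2x loads and stxvw4x/stxvd2x stores being checked here are what clang of this vintage emitted for the VSX vector load/store builtins on little-endian PowerPC. A minimal sketch, assuming -mvsx and clang's <altivec.h>; the globals loosely mirror the test's @vsi/@res_vsi/@vd names:

#include <altivec.h>

vector signed int vsi, res_vsi;
vector double vd, res_vd;

void copy_vectors(void)
{
    res_vsi = vec_vsx_ld(0, &vsi);     /* word elements  -> lxvw4x  */
    vec_vsx_st(res_vsi, 0, &res_vsi);  /*                -> stxvw4x */
    res_vd = vec_vsx_ld(0, &vd);       /* dword elements -> lxvd2x  */
    vec_vsx_st(res_vd, 0, &res_vd);    /*                -> stxvd2x */
}
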
2007-09-04-AltivecDST.ll
5 tail call void @llvm.ppc.altivec.dst( i8* %image, i32 8, i32 0 )
6 tail call void @llvm.ppc.altivec.dstt( i8* %image, i32 8, i32 0 )
7 tail call void @llvm.ppc.altivec.dstst( i8* %image, i32 8, i32 0 )
8 tail call void @llvm.ppc.altivec.dststt( i8* %image, i32 8, i32 0 )
12 declare void @llvm.ppc.altivec.dst(i8*, i32, i32)
13 declare void @llvm.ppc.altivec.dstt(i8*, i32, i32)
14 declare void @llvm.ppc.altivec.dstst(i8*, i32, i32)
15 declare void @llvm.ppc.altivec.dststt(i8*, i32, i32)
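dst/dstt/dstst/dststt are the AltiVec data-stream-touch prefetch hints. A minimal sketch of the corresponding <altivec.h> builtins, assuming -maltivec; the control word 8 mirrors the i32 8 in the test, and the final literal selects one of the four stream channels:

#include <altivec.h>

void prefetch_image(const unsigned char *image)
{
    vec_dst(image, 8, 0);     /* @llvm.ppc.altivec.dst       */
    vec_dstt(image, 8, 0);    /* transient variant           */
    vec_dstst(image, 8, 0);   /* store variant               */
    vec_dststt(image, 8, 0);  /* transient store variant     */
    vec_dss(0);               /* stop the stream on channel 0 */
}
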
builtins-ppc-elf2-abi.ll
22 %2 = call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %1)
34 %2 = call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %1)
70 %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %0, <2 x double> %1)
82 %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %0, <4 x float> %1)
94 %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %0, <2 x double> %1)
106 %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %0, <4 x float> %1)
118 %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %0, <2 x double> %1)
130 %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %0, <4 x float> %1)
143 %1 = call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float> %0)
155 %1 = call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double> %0)
[all …]
vsx-minmax.ll
23 %2 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %0, <4 x float> %1)
27 %4 = tail call <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double> %3, <2 x double> %3)
32 %7 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %5, <4 x float> %6)
37 %10 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %8, <4 x float> %9)
41 %12 = tail call <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double> %11, <2 x double> %11)
46 %15 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %13, <4 x float> %14)
50 %17 = tail call double @llvm.ppc.vsx.xsmaxdp(double %16, double %16)
53 %18 = tail call double @llvm.ppc.vsx.xsmindp(double %16, double %16)
60 declare double @llvm.ppc.vsx.xsmaxdp(double, double)
63 declare double @llvm.ppc.vsx.xsmindp(double, double)
[all …]
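
xvmaxsp/xvmindp and the scalar xsmaxdp/xsmindp are the VSX min/max instructions. As a hedged sketch: with VSX enabled (-mvsx), clang's generic vec_max/vec_min from <altivec.h> lower to the vector forms named above:

#include <altivec.h>

vector float clamp_sp(vector float v, vector float lo, vector float hi)
{
    return vec_min(vec_max(v, lo), hi);  /* xvmaxsp then xvminsp */
}

vector double clamp_dp(vector double v, vector double lo, vector double hi)
{
    return vec_min(vec_max(v, lo), hi);  /* xvmaxdp then xvmindp */
}
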
vec_mul_even_odd.ll
6 declare <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32>, <4 x i32>) nounwind readnone
7 declare <2 x i64> @llvm.ppc.altivec.vmulesw(<4 x i32>, <4 x i32>) nounwind readnone
8 declare <2 x i64> @llvm.ppc.altivec.vmulouw(<4 x i32>, <4 x i32>) nounwind readnone
9 declare <2 x i64> @llvm.ppc.altivec.vmulosw(<4 x i32>, <4 x i32>) nounwind readnone
10 declare <4 x i32> @llvm.ppc.altivec.vmuluwm(<4 x i32>, <4 x i32>) nounwind readnone
13 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32> %x, <4 x i32> %y)
19 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulesw(<4 x i32> %x, <4 x i32> %y)
25 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulouw(<4 x i32> %x, <4 x i32> %y)
31 %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulosw(<4 x i32> %x, <4 x i32> %y)
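
vmuleuw/vmulesw/vmulouw/vmulosw are the POWER8 even/odd 32x32->64-bit multiplies. A sketch of the matching <altivec.h> builtins; the word-sized forms of vec_mule/vec_mulo are assumed to require -mcpu=power8 or later:

#include <altivec.h>

vector unsigned long long mul_even(vector unsigned int x, vector unsigned int y)
{
    return vec_mule(x, y);  /* @llvm.ppc.altivec.vmuleuw */
}

vector signed long long mul_odd(vector signed int x, vector signed int y)
{
    return vec_mulo(x, y);  /* @llvm.ppc.altivec.vmulosw */
}
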
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/
htm.ll
7 %0 = tail call i32 @llvm.ppc.tbegin(i32 0)
17 declare i32 @llvm.ppc.tbegin(i32) #1
22 %0 = tail call i32 @llvm.ppc.tend(i32 0)
29 declare i32 @llvm.ppc.tend(i32)
34 %0 = tail call i32 @llvm.ppc.tabort(i32 0)
35 %1 = tail call i32 @llvm.ppc.tabortdc(i32 0, i32 1, i32 2)
36 %2 = tail call i32 @llvm.ppc.tabortdci(i32 0, i32 1, i32 2)
37 %3 = tail call i32 @llvm.ppc.tabortwc(i32 0, i32 1, i32 2)
38 %4 = tail call i32 @llvm.ppc.tabortwci(i32 0, i32 1, i32 2)
48 declare i32 @llvm.ppc.tabort(i32)
[all …]
builtins-ppc-p9-f128.ll
1 ; RUN: llc -verify-machineinstrs -mcpu=pwr9 -enable-ppc-quad-precision \
2 ; RUN: -mtriple=powerpc64le-unknown-unknown -ppc-vsr-nums-as-vr \
3 ; RUN: -ppc-asm-full-reg-names < %s | FileCheck %s
12 %0 = call fp128 @llvm.ppc.sqrtf128.round.to.odd(fp128 %a)
19 declare fp128 @llvm.ppc.sqrtf128.round.to.odd(fp128)
23 %0 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %c)
26 %1 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %sub)
28 %2 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %c)
32 %3 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %sub2)
44 declare fp128 @llvm.ppc.fmaf128.round.to.odd(fp128, fp128, fp128)
[all …]
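
The *f128.round.to.odd intrinsics back the POWER9 quad-precision round-to-odd operations. A sketch assuming clang with -mcpu=power9 -mfloat128; the __builtin_*f128_round_to_odd names mirror the intrinsic names above, but treat them as an assumption if your toolchain differs:

__float128 sqrt_then_fma(__float128 a, __float128 b, __float128 c)
{
    __float128 r = __builtin_sqrtf128_round_to_odd(a);  /* xssqrtqpo */
    return __builtin_fmaf128_round_to_odd(r, b, c);     /* xsmaddqpo */
}
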
builtins-ppc-elf2-abi.ll
22 %2 = call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %1)
34 %2 = call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %1)
70 %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %0, <2 x double> %1)
82 %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %0, <4 x float> %1)
94 %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %0, <2 x double> %1)
106 %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %0, <4 x float> %1)
118 %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %0, <2 x double> %1)
130 %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %0, <4 x float> %1)
143 %1 = call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float> %0)
155 %1 = call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double> %0)
[all …]
2007-09-04-AltivecDST.ll
5 tail call void @llvm.ppc.altivec.dst( i8* %image, i32 8, i32 0 )
6 tail call void @llvm.ppc.altivec.dstt( i8* %image, i32 8, i32 0 )
7 tail call void @llvm.ppc.altivec.dstst( i8* %image, i32 8, i32 0 )
8 tail call void @llvm.ppc.altivec.dststt( i8* %image, i32 8, i32 0 )
12 declare void @llvm.ppc.altivec.dst(i8*, i32, i32)
13 declare void @llvm.ppc.altivec.dstt(i8*, i32, i32)
14 declare void @llvm.ppc.altivec.dstst(i8*, i32, i32)
15 declare void @llvm.ppc.altivec.dststt(i8*, i32, i32)
p9-vector-compares-and-counts.ll
32 %0 = tail call <16 x i8> @llvm.ppc.altivec.vcmpneb(<16 x i8> %a, <16 x i8> %b)
39 declare <16 x i8> @llvm.ppc.altivec.vcmpneb(<16 x i8>, <16 x i8>)
44 %0 = tail call <16 x i8> @llvm.ppc.altivec.vcmpnezb(<16 x i8> %a, <16 x i8> %b)
51 declare <16 x i8> @llvm.ppc.altivec.vcmpnezb(<16 x i8>, <16 x i8>)
56 %0 = tail call <8 x i16> @llvm.ppc.altivec.vcmpneh(<8 x i16> %a, <8 x i16> %b)
63 declare <8 x i16> @llvm.ppc.altivec.vcmpneh(<8 x i16>, <8 x i16>)
68 %0 = tail call <8 x i16> @llvm.ppc.altivec.vcmpnezh(<8 x i16> %a, <8 x i16> %b)
75 declare <8 x i16> @llvm.ppc.altivec.vcmpnezh(<8 x i16>, <8 x i16>)
80 %0 = tail call <4 x i32> @llvm.ppc.altivec.vcmpnew(<4 x i32> %a, <4 x i32> %b)
87 declare <4 x i32> @llvm.ppc.altivec.vcmpnew(<4 x i32>, <4 x i32>)
[all …]
vsx-p9.ll
149 %0 = tail call <4 x float> @llvm.ppc.vsx.xviexpsp(<4 x i32> %a, <4 x i32> %b)
156 declare <4 x float> @llvm.ppc.vsx.xviexpsp(<4 x i32>, <4 x i32>)
161 %0 = tail call <2 x double> @llvm.ppc.vsx.xviexpdp(<2 x i64> %a, <2 x i64> %b)
168 declare <2 x double> @llvm.ppc.vsx.xviexpdp(<2 x i64>, <2 x i64>)
172 %0 = tail call <16 x i8> @llvm.ppc.altivec.vslv(<16 x i8> %a, <16 x i8> %b)
179 declare <16 x i8> @llvm.ppc.altivec.vslv(<16 x i8>, <16 x i8>)
184 %0 = tail call <16 x i8> @llvm.ppc.altivec.vsrv(<16 x i8> %a, <16 x i8> %b)
191 declare <16 x i8> @llvm.ppc.altivec.vsrv(<16 x i8>, <16 x i8>)
199 %0 = tail call <4 x float> @llvm.ppc.vsx.xvcvsphp(<4 x float> %a)
210 %0 = tail call <4 x i32> @llvm.ppc.altivec.vrlwmi(<4 x i32> %a, <4 x i32> %c, <4 x i32> %b)
[all …]
vsx-ldst-builtin-le.ll
30 %0 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vsi to i8*))
36 %1 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x i32>* @vui to i8*))
42 %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* bitcast (<4 x float>* @vf to i8*))
49 %4 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vsll to i8*))
56 %6 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x i64>* @vull to i8*))
63 %8 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* bitcast (<2 x double>* @vd to i8*))
72 call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %9, i8* bitcast (<4 x i32>* @res_vsi to i8*))
78 call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %10, i8* bitcast (<4 x i32>* @res_vui to i8*))
85 call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %12, i8* bitcast (<4 x float>* @res_vf to i8*))
92 call void @llvm.ppc.vsx.stxvd2x(<2 x double> %14, i8* bitcast (<2 x i64>* @res_vsll to i8*))
[all …]
vsx-minmax.ll
23 %2 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %0, <4 x float> %1)
27 %4 = tail call <2 x double> @llvm.ppc.vsx.xvmaxdp(<2 x double> %3, <2 x double> %3)
32 %7 = tail call <4 x float> @llvm.ppc.vsx.xvmaxsp(<4 x float> %5, <4 x float> %6)
37 %10 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %8, <4 x float> %9)
41 %12 = tail call <2 x double> @llvm.ppc.vsx.xvmindp(<2 x double> %11, <2 x double> %11)
46 %15 = tail call <4 x float> @llvm.ppc.vsx.xvminsp(<4 x float> %13, <4 x float> %14)
50 %17 = tail call double @llvm.ppc.vsx.xsmaxdp(double %16, double %16)
53 %18 = tail call double @llvm.ppc.vsx.xsmindp(double %16, double %16)
60 declare double @llvm.ppc.vsx.xsmaxdp(double, double)
63 declare double @llvm.ppc.vsx.xsmindp(double, double)
[all …]
ppc64-get-cache-line-size.ll
1 ; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -enable-ppc-prefetching=true | File…
2 ; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -enable-ppc-prefetching=true -ppc-l…
3 ; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -enable-ppc-prefetching=true | Fi…
4 ; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -enable-ppc-prefetching=true -ppc
5 ; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -enable-ppc-prefetching=true | Fi…
6 ; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -enable-ppc-prefetching=true -ppc
7 ; RUN: llc < %s -mtriple=ppc64-- -mcpu=a2 -enable-ppc-prefetching=true | FileCheck %s -check-prefix…
/external/capstone/arch/PowerPC/
PPCInstPrinter.c
52 MI->flat_insn->detail->ppc.operands[MI->flat_insn->detail->ppc.op_count].type = PPC_OP_MEM; in set_mem_access()
53 …MI->flat_insn->detail->ppc.operands[MI->flat_insn->detail->ppc.op_count].mem.base = PPC_REG_INVALI… in set_mem_access()
54 MI->flat_insn->detail->ppc.operands[MI->flat_insn->detail->ppc.op_count].mem.disp = 0; in set_mem_access()
57 MI->flat_insn->detail->ppc.op_count++; in set_mem_access()
68 insn->detail->ppc.bh = PPC_BH_PLUS; in PPC_post_printer()
70 insn->detail->ppc.bh = PPC_BH_MINUS; in PPC_post_printer()
116 cs_ppc *ppc = &MI->flat_insn->detail->ppc; in PPC_printInst() local
118 ppc->operands[ppc->op_count].type = PPC_OP_IMM; in PPC_printInst()
119 ppc->operands[ppc->op_count].imm = SH; in PPC_printInst()
120 ++ppc->op_count; in PPC_printInst()
[all …]
/external/capstone/cstool/
cstool_ppc.c
41 cs_ppc *ppc; in print_insn_detail_ppc() local
48 ppc = &(ins->detail->ppc); in print_insn_detail_ppc()
49 if (ppc->op_count) in print_insn_detail_ppc()
50 printf("\top_count: %u\n", ppc->op_count); in print_insn_detail_ppc()
52 for (i = 0; i < ppc->op_count; i++) { in print_insn_detail_ppc()
53 cs_ppc_op *op = &(ppc->operands[i]); in print_insn_detail_ppc()
81 if (ppc->bc != 0) in print_insn_detail_ppc()
82 printf("\tBranch code: %u\n", ppc->bc); in print_insn_detail_ppc()
84 if (ppc->bh != 0) in print_insn_detail_ppc()
85 printf("\tBranch hint: %u\n", ppc->bh); in print_insn_detail_ppc()
[all …]
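
cstool_ppc.c iterates the operand details that Capstone records when CS_OPT_DETAIL is enabled. A minimal, self-contained sketch of that API; the instruction bytes are illustrative (0x7c221a14 is big-endian "add r1, r2, r3"):

#include <inttypes.h>
#include <stdio.h>
#include <capstone/capstone.h>

int main(void)
{
    csh handle;
    cs_insn *insn;
    const uint8_t code[] = { 0x7c, 0x22, 0x1a, 0x14 };

    if (cs_open(CS_ARCH_PPC, CS_MODE_BIG_ENDIAN, &handle) != CS_ERR_OK)
        return 1;
    cs_option(handle, CS_OPT_DETAIL, CS_OPT_ON);  /* fill insn->detail */

    size_t count = cs_disasm(handle, code, sizeof(code), 0x1000, 0, &insn);
    for (size_t i = 0; i < count; i++) {
        cs_ppc *ppc = &insn[i].detail->ppc;  /* same struct cstool_ppc.c reads */
        printf("%s %s\n", insn[i].mnemonic, insn[i].op_str);
        for (int n = 0; n < ppc->op_count; n++) {
            cs_ppc_op *op = &ppc->operands[n];
            if (op->type == PPC_OP_REG)
                printf("\top%d: reg = %s\n", n, cs_reg_name(handle, op->reg));
            else if (op->type == PPC_OP_IMM)
                printf("\top%d: imm = %" PRId64 "\n", n, (int64_t)op->imm);
        }
    }
    cs_free(insn, count);
    cs_close(&handle);
    return 0;
}
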
/external/llvm/test/Transforms/InstCombine/
aligned-altivec.ll
5 declare <4 x i32> @llvm.ppc.altivec.lvx(i8*) #1
11 %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
14 ; CHECK: @llvm.ppc.altivec.lvx
26 %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
29 ; CHECK-NOT: @llvm.ppc.altivec.lvx
37 declare void @llvm.ppc.altivec.stvx(<4 x i32>, i8*) #0
43 call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
49 ; CHECK: @llvm.ppc.altivec.stvx
57 call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
63 ; CHECK-NOT: @llvm.ppc.altivec.stvx
[all …]
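
These InstCombine tests check that @llvm.ppc.altivec.lvx/stvx calls whose pointer has provable 16-byte alignment are rewritten into plain vector loads and stores (hence the CHECK-NOT lines). At the C level those intrinsics come from vec_ld/vec_st; a sketch assuming -maltivec:

#include <altivec.h>

vector signed int buf;  /* vector globals are 16-byte aligned */

vector signed int roundtrip(vector signed int v)
{
    vec_st(v, 0, &buf);      /* stvx; alignment is provable here       */
    return vec_ld(0, &buf);  /* lvx -> ordinary aligned load after opt */
}
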
aligned-qpx.ll
5 declare <4 x double> @llvm.ppc.qpx.qvlfs(i8*) #1
11 %vl = call <4 x double> @llvm.ppc.qpx.qvlfs(i8* %hv)
14 ; CHECK: @llvm.ppc.qpx.qvlfs
27 %vl = call <4 x double> @llvm.ppc.qpx.qvlfs(i8* %hv)
30 ; CHECK-NOT: @llvm.ppc.qpx.qvlfs
40 declare void @llvm.ppc.qpx.qvstfs(<4 x double>, i8*) #0
46 call void @llvm.ppc.qpx.qvstfs(<4 x double> %d, i8* %hv)
52 ; CHECK: @llvm.ppc.qpx.qvstfs
60 call void @llvm.ppc.qpx.qvstfs(<4 x double> %d, i8* %hv)
67 ; CHECK-NOT: @llvm.ppc.qpx.qvstfs
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/PowerPC/
aligned-altivec.ll
5 declare <4 x i32> @llvm.ppc.altivec.lvx(i8*) #1
11 %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
14 ; CHECK: @llvm.ppc.altivec.lvx
26 %vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
29 ; CHECK-NOT: @llvm.ppc.altivec.lvx
37 declare void @llvm.ppc.altivec.stvx(<4 x i32>, i8*) #0
43 call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
49 ; CHECK: @llvm.ppc.altivec.stvx
57 call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
63 ; CHECK-NOT: @llvm.ppc.altivec.stvx
[all …]
aligned-qpx.ll
5 declare <4 x double> @llvm.ppc.qpx.qvlfs(i8*) #1
11 %vl = call <4 x double> @llvm.ppc.qpx.qvlfs(i8* %hv)
14 ; CHECK: @llvm.ppc.qpx.qvlfs
27 %vl = call <4 x double> @llvm.ppc.qpx.qvlfs(i8* %hv)
30 ; CHECK-NOT: @llvm.ppc.qpx.qvlfs
40 declare void @llvm.ppc.qpx.qvstfs(<4 x double>, i8*) #0
46 call void @llvm.ppc.qpx.qvstfs(<4 x double> %d, i8* %hv)
52 ; CHECK: @llvm.ppc.qpx.qvstfs
60 call void @llvm.ppc.qpx.qvstfs(<4 x double> %d, i8* %hv)
67 ; CHECK-NOT: @llvm.ppc.qpx.qvstfs
[all …]
/external/capstone/tests/
test_ppc.c
62 cs_ppc *ppc; in print_insn_detail() local
69 ppc = &(ins->detail->ppc); in print_insn_detail()
70 if (ppc->op_count) in print_insn_detail()
71 printf("\top_count: %u\n", ppc->op_count); in print_insn_detail()
73 for (i = 0; i < ppc->op_count; i++) { in print_insn_detail()
74 cs_ppc_op *op = &(ppc->operands[i]); in print_insn_detail()
102 if (ppc->bc != 0) in print_insn_detail()
103 printf("\tBranch code: %u\n", ppc->bc); in print_insn_detail()
105 if (ppc->bh != 0) in print_insn_detail()
106 printf("\tBranch hint: %u\n", ppc->bh); in print_insn_detail()
[all …]
/external/swiftshader/third_party/LLVM/test/CodeGen/PowerPC/
2007-09-04-AltivecDST.ll
5 tail call void @llvm.ppc.altivec.dst( i8* %image, i32 8, i32 0 )
6 tail call void @llvm.ppc.altivec.dstt( i8* %image, i32 8, i32 0 )
7 tail call void @llvm.ppc.altivec.dstst( i8* %image, i32 8, i32 0 )
8 tail call void @llvm.ppc.altivec.dststt( i8* %image, i32 8, i32 0 )
12 declare void @llvm.ppc.altivec.dst(i8*, i32, i32)
13 declare void @llvm.ppc.altivec.dstt(i8*, i32, i32)
14 declare void @llvm.ppc.altivec.dstst(i8*, i32, i32)
15 declare void @llvm.ppc.altivec.dststt(i8*, i32, i32)