; RUN: llc -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
; RUN: llc -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE

target triple = "powerpc64-unknown-linux-gnu"

; Trivial patchpoint codegen
;
define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: trivial_patchpoint_codegen:

; CHECK: li 12, -8531
; CHECK-NEXT: rldic 12, 12, 32, 16
; CHECK-NEXT: oris 12, 12, 48879
; CHECK-NEXT: ori 12, 12, 51966
; CHECK-LE-NEXT: std 2, 24(1)
; CHECK-BE-NEXT: std 2, 40(1)
; CHECK-BE-NEXT: ld 2, 8(12)
; CHECK-BE-NEXT: ld 12, 0(12)
; CHECK-NEXT: mtctr 12
; CHECK-NEXT: bctrl
; CHECK-LE-NEXT: ld 2, 24(1)
; CHECK-BE-NEXT: ld 2, 40(1)

; CHECK: li 12, -8531
; CHECK-NEXT: rldic 12, 12, 32, 16
; CHECK-NEXT: oris 12, 12, 48879
; CHECK-NEXT: ori 12, 12, 51967
; CHECK-LE-NEXT: std 2, 24(1)
; CHECK-BE-NEXT: std 2, 40(1)
; CHECK-BE-NEXT: ld 2, 8(12)
; CHECK-BE-NEXT: ld 12, 0(12)
; CHECK-NEXT: mtctr 12
; CHECK-NEXT: bctrl
; CHECK-LE-NEXT: ld 2, 24(1)
; CHECK-BE-NEXT: ld 2, 40(1)

; CHECK: blr

  %resolveCall2 = inttoptr i64 244837814094590 to i8*
  %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 40, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
  %resolveCall3 = inttoptr i64 244837814094591 to i8*
  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 3, i32 40, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
  ret i64 %result
}

; Caller frame metadata with stackmaps. This should not be optimized
; as a leaf function.
;
; CHECK-LABEL: caller_meta_leaf
; CHECK-BE: stdu 1, -80(1)
; CHECK-LE: stdu 1, -64(1)
; CHECK: Ltmp
; CHECK-BE: addi 1, 1, 80
; CHECK-LE: addi 1, 1, 64
; CHECK: blr

define void @caller_meta_leaf() {
entry:
  %metadata = alloca i64, i32 3, align 8
  store i64 11, i64* %metadata
  store i64 12, i64* %metadata
  store i64 13, i64* %metadata
  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
  ret void
}

; Test patchpoints reusing the same TargetConstant.
; <rdar:15390785> Assertion failed: (CI.getNumArgOperands() >= NumArgs + 4)
; There is no way to verify this, since it depends on memory allocation.
; But I think it's useful to include as a working example.
define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, i64* %tmp33, i64 %tmp79) {
entry:
  %tmp80 = add i64 %tmp79, -16
  %tmp81 = inttoptr i64 %tmp80 to i64*
  %tmp82 = load i64, i64* %tmp81, align 8
  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 15, i32 48, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
  %tmp83 = load i64, i64* %tmp33, align 8
  %tmp84 = add i64 %tmp83, -24
  %tmp85 = inttoptr i64 %tmp84 to i64*
  %tmp86 = load i64, i64* %tmp85, align 8
  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
  tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 18, i32 48, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
  ret i64 10
}

; Test small patchpoints that don't emit calls.
define void @small_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: small_patchpoint_codegen:
; CHECK: Ltmp
; CHECK: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NOT: nop
; CHECK: blr
  %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2)
  ret void
}

; Trivial symbolic patchpoint codegen.

declare i64 @foo(i64 %p1, i64 %p2)
define i64 @trivial_symbolic_patchpoint_codegen(i64 %p1, i64 %p2) {
entry:
; CHECK-LABEL: trivial_symbolic_patchpoint_codegen:
; CHECK: bl foo
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NOT: nop
; CHECK: blr
  %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 9, i32 12, i8* bitcast (i64 (i64, i64)* @foo to i8*), i32 2, i64 %p1, i64 %p2)
  ret i64 %result
}

declare void @llvm.experimental.stackmap(i64, i32, ...)
declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)