; RUN: llc < %s -march=bpfeb -show-mc-encoding | FileCheck %s
; Test big-endian (bpfeb) code generation: verifies immediate-operand byte
; order and store instruction encodings via -show-mc-encoding.

define void @test() #0 {
entry:
; CHECK: test:

; CHECK: r1 = 123 # encoding: [0xb7,0x10,0x00,0x00,0x00,0x00,0x00,0x7b]
; CHECK: call f_i16
  call void @f_i16(i16 123)

; CHECK: r1 = 12345678 # encoding: [0xb7,0x10,0x00,0x00,0x00,0xbc,0x61,0x4e]
; CHECK: call f_i32
  call void @f_i32(i32 12345678)

; CHECK: r1 = 72623859790382856 ll # encoding: [0x18,0x10,0x00,0x00,0x05,0x06,0x07,0x08,0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04]
; CHECK: call f_i64
  call void @f_i64(i64 72623859790382856)

; CHECK: r1 = 1234
; CHECK: r2 = 5678
; CHECK: call f_i32_i32
  call void @f_i32_i32(i32 1234, i32 5678)

; CHECK: r1 = 2
; CHECK: r2 = 3
; CHECK: r3 = 4
; CHECK: call f_i16_i32_i16
  call void @f_i16_i32_i16(i16 2, i32 3, i16 4)

; CHECK: r1 = 5
; CHECK: r2 = 7262385979038285 ll
; CHECK: r3 = 6
; CHECK: call f_i16_i64_i16
  call void @f_i16_i64_i16(i16 5, i64 7262385979038285, i16 6)

  ret void
}

@g_i16 = common global i16 0, align 2
@g_i32 = common global i32 0, align 2
@g_i64 = common global i64 0, align 4

define void @f_i16(i16 %a) #0 {
; CHECK: f_i16:
; CHECK: *(u16 *)(r2 + 0) = r1 # encoding: [0x6b,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
  store volatile i16 %a, i16* @g_i16, align 2
  ret void
}

define void @f_i32(i32 %a) #0 {
; CHECK: f_i32:
; CHECK: *(u16 *)(r2 + 2) = r1 # encoding: [0x6b,0x21,0x00,0x02,0x00,0x00,0x00,0x00]
; CHECK: *(u16 *)(r2 + 0) = r1 # encoding: [0x6b,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
  store volatile i32 %a, i32* @g_i32, align 2
  ret void
}

define void @f_i64(i64 %a) #0 {
; CHECK: f_i64:
; CHECK: *(u32 *)(r2 + 4) = r1 # encoding: [0x63,0x21,0x00,0x04,0x00,0x00,0x00,0x00]
; CHECK: *(u32 *)(r2 + 0) = r1
  store volatile i64 %a, i64* @g_i64, align 2
  ret void
}

define void @f_i32_i32(i32 %a, i32 %b) #0 {
; CHECK: f_i32_i32:
; CHECK: *(u32 *)(r3 + 0) = r1
  store volatile i32 %a, i32* @g_i32, align 4
; CHECK: *(u32 *)(r3 + 0) = r2
  store volatile i32 %b, i32* @g_i32, align 4
  ret void
}

define void @f_i16_i32_i16(i16 %a, i32 %b, i16 %c) #0 {
; CHECK: f_i16_i32_i16:
; CHECK: *(u16 *)(r4 + 0) = r1
  store volatile i16 %a, i16* @g_i16, align 2
; CHECK: *(u32 *)(r1 + 0) = r2
  store volatile i32 %b, i32* @g_i32, align 4
; CHECK: *(u16 *)(r4 + 0) = r3
  store volatile i16 %c, i16* @g_i16, align 2
  ret void
}

define void @f_i16_i64_i16(i16 %a, i64 %b, i16 %c) #0 {
; CHECK: f_i16_i64_i16:
; CHECK: *(u16 *)(r4 + 0) = r1
  store volatile i16 %a, i16* @g_i16, align 2
; CHECK: *(u64 *)(r1 + 0) = r2 # encoding: [0x7b,0x12,0x00,0x00,0x00,0x00,0x00,0x00]
  store volatile i64 %b, i64* @g_i64, align 8
; CHECK: *(u16 *)(r4 + 0) = r3
  store volatile i16 %c, i16* @g_i16, align 2
  ret void
}