; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -show-mc-encoding -verify-machineinstrs < %s | FileCheck %s

;;;==========================================================================;;;
;;; MUBUF LOAD TESTS
;;;==========================================================================;;;

; MUBUF load with an immediate byte offset that fits into 12 bits
; CHECK-LABEL: {{^}}mubuf_load0:
; CHECK: buffer_load_dword v{{[0-9]}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4 ; encoding: [0x04,0x00,0x30,0xe0
define amdgpu_kernel void @mubuf_load0(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
  %0 = getelementptr i32, i32 addrspace(1)* %in, i64 1
  %1 = load i32, i32 addrspace(1)* %0
  store i32 %1, i32 addrspace(1)* %out
  ret void
}

; MUBUF load with the largest possible immediate offset
; CHECK-LABEL: {{^}}mubuf_load1:
; CHECK: buffer_load_ubyte v{{[0-9]}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe0
define amdgpu_kernel void @mubuf_load1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
  %0 = getelementptr i8, i8 addrspace(1)* %in, i64 4095
  %1 = load i8, i8 addrspace(1)* %0
  store i8 %1, i8 addrspace(1)* %out
  ret void
}

; MUBUF load with an immediate byte offset that doesn't fit into 12 bits
; CHECK-LABEL: {{^}}mubuf_load2:
; CHECK: s_movk_i32 [[SOFFSET:s[0-9]+]], 0x1000
; CHECK: buffer_load_dword v{{[0-9]}}, off, s[{{[0-9]+:[0-9]+}}], [[SOFFSET]] ; encoding: [0x00,0x00,0x30,0xe0
define amdgpu_kernel void @mubuf_load2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
  %0 = getelementptr i32, i32 addrspace(1)* %in, i64 1024
  %1 = load i32, i32 addrspace(1)* %0
  store i32 %1, i32 addrspace(1)* %out
  ret void
}

; MUBUF load with a 12-bit immediate offset and a register offset
; CHECK-LABEL: {{^}}mubuf_load3:
; CHECK-NOT: ADD
; CHECK: buffer_load_dword v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:4 ; encoding: [0x04,0x80,0x30,0xe0
define amdgpu_kernel void @mubuf_load3(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i64 %offset) {
entry:
  %0 = getelementptr i32, i32 addrspace(1)* %in, i64 %offset
  %1 = getelementptr i32, i32 addrspace(1)* %0, i64 1
  %2 = load i32, i32 addrspace(1)* %1
  store i32 %2, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}soffset_max_imm:
; CHECK: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 64 offen glc
define amdgpu_gs void @soffset_max_imm([6 x <4 x i32>] addrspace(4)* inreg, [17 x <4 x i32>] addrspace(4)* inreg, [16 x <4 x i32>] addrspace(4)* inreg, [32 x <8 x i32>] addrspace(4)* inreg, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) {
main_body:
  %tmp0 = getelementptr [6 x <4 x i32>], [6 x <4 x i32>] addrspace(4)* %0, i32 0, i32 0
  %tmp1 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp0
  %tmp2 = shl i32 %6, 2
  %tmp3 = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %tmp1, i32 %tmp2, i32 64, i32 1)
  %tmp4 = add i32 %6, 16
  %tmp1.4xi32 = bitcast <4 x i32> %tmp1 to <4 x i32>
  call void @llvm.amdgcn.raw.tbuffer.store.i32(i32 %tmp3, <4 x i32> %tmp1.4xi32, i32 %tmp4, i32 %4, i32 68, i32 3)
  ret void
}

; Make sure immediates that aren't inline constants don't get folded into
; the soffset operand.
; FIXME: for this test we should be smart enough to shift the immediate into
; the offset field.
; CHECK-LABEL: {{^}}soffset_no_fold:
; CHECK: s_movk_i32 [[SOFFSET:s[0-9]+]], 0x41
; CHECK: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], [[SOFFSET]] offen glc
define amdgpu_gs void @soffset_no_fold([6 x <4 x i32>] addrspace(4)* inreg, [17 x <4 x i32>] addrspace(4)* inreg, [16 x <4 x i32>] addrspace(4)* inreg, [32 x <8 x i32>] addrspace(4)* inreg, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) {
main_body:
  %tmp0 = getelementptr [6 x <4 x i32>], [6 x <4 x i32>] addrspace(4)* %0, i32 0, i32 0
  %tmp1 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp0
  %tmp2 = shl i32 %6, 2
  %tmp3 = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %tmp1, i32 %tmp2, i32 65, i32 1)
  %tmp4 = add i32 %6, 16
  %tmp1.4xi32 = bitcast <4 x i32> %tmp1 to <4 x i32>
  call void @llvm.amdgcn.raw.tbuffer.store.i32(i32 %tmp3, <4 x i32> %tmp1.4xi32, i32 %tmp4, i32 %4, i32 68, i32 3)
  ret void
}

;;;==========================================================================;;;
;;; MUBUF STORE TESTS
;;;==========================================================================;;;

; MUBUF store with an immediate byte offset that fits into 12 bits
; CHECK-LABEL: {{^}}mubuf_store0:
; CHECK: buffer_store_dword v{{[0-9]}}, off, s[{{[0-9]:[0-9]}}], 0 offset:4 ; encoding: [0x04,0x00,0x70,0xe0
define amdgpu_kernel void @mubuf_store0(i32 addrspace(1)* %out) {
entry:
  %0 = getelementptr i32, i32 addrspace(1)* %out, i64 1
  store i32 0, i32 addrspace(1)* %0
  ret void
}

; MUBUF store with the largest possible immediate offset
; CHECK-LABEL: {{^}}mubuf_store1:
; CHECK: buffer_store_byte v{{[0-9]}}, off, s[{{[0-9]:[0-9]}}], 0 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0
define amdgpu_kernel void @mubuf_store1(i8 addrspace(1)* %out) {
entry:
  %0 = getelementptr i8, i8 addrspace(1)* %out, i64 4095
  store i8 0, i8 addrspace(1)* %0
  ret void
}

; MUBUF store with an immediate byte offset that doesn't fit into 12 bits
; CHECK-LABEL: {{^}}mubuf_store2:
; CHECK: s_movk_i32 [[SOFFSET:s[0-9]+]], 0x1000
; CHECK: buffer_store_dword v{{[0-9]}}, off, s[{{[0-9]:[0-9]}}], [[SOFFSET]] ; encoding: [0x00,0x00,0x70,0xe0
define amdgpu_kernel void @mubuf_store2(i32 addrspace(1)* %out) {
entry:
  %0 = getelementptr i32, i32 addrspace(1)* %out, i64 1024
  store i32 0, i32 addrspace(1)* %0
  ret void
}

; MUBUF store with a 12-bit immediate offset and a register offset
; CHECK-LABEL: {{^}}mubuf_store3:
; CHECK-NOT: ADD
; CHECK: buffer_store_dword v{{[0-9]}}, v[{{[0-9]:[0-9]}}], s[{{[0-9]:[0-9]}}], 0 addr64 offset:4 ; encoding: [0x04,0x80,0x70,0xe0
define amdgpu_kernel void @mubuf_store3(i32 addrspace(1)* %out, i64 %offset) {
entry:
  %0 = getelementptr i32, i32 addrspace(1)* %out, i64 %offset
  %1 = getelementptr i32, i32 addrspace(1)* %0, i64 1
  store i32 0, i32 addrspace(1)* %1
  ret void
}

; CHECK-LABEL: {{^}}store_sgpr_ptr:
; CHECK: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0
define amdgpu_kernel void @store_sgpr_ptr(i32 addrspace(1)* %out) {
  store i32 99, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: {{^}}store_sgpr_ptr_offset:
; CHECK: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:40
define amdgpu_kernel void @store_sgpr_ptr_offset(i32 addrspace(1)* %out) {
  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 10
  store i32 99, i32 addrspace(1)* %out.gep, align 4
  ret void
}

; CHECK-LABEL: {{^}}store_sgpr_ptr_large_offset:
; CHECK: s_mov_b32 [[SOFFSET:s[0-9]+]], 0x20000
; CHECK: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, [[SOFFSET]]
define amdgpu_kernel void @store_sgpr_ptr_large_offset(i32 addrspace(1)* %out) {
  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 32768
  store i32 99, i32 addrspace(1)* %out.gep, align 4
  ret void
}

; CHECK-LABEL: {{^}}store_sgpr_ptr_large_offset_atomic:
; CHECK: s_mov_b32 [[SOFFSET:s[0-9]+]], 0x20000
; CHECK: buffer_atomic_add v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, [[SOFFSET]]
define amdgpu_kernel void @store_sgpr_ptr_large_offset_atomic(i32 addrspace(1)* %out) {
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 32768
  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 5 seq_cst
  ret void
}

; CHECK-LABEL: {{^}}store_vgpr_ptr:
; CHECK: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
define amdgpu_kernel void @store_vgpr_ptr(i32 addrspace(1)* %out) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  store i32 99, i32 addrspace(1)* %out.gep, align 4
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1
declare void @llvm.amdgcn.raw.tbuffer.store.i32(i32, <4 x i32>, i32, i32, i32 immarg, i32 immarg) #2
declare i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32>, i32, i32, i32 immarg) #3

attributes #0 = { nounwind readonly }
attributes #1 = { nounwind readnone speculatable willreturn }
attributes #2 = { nounwind willreturn writeonly }
attributes #3 = { nounwind readonly willreturn }
attributes #4 = { readnone }