; RUN: llc -march=amdgcn -mcpu=fiji < %s | FileCheck %s

; Check transformation shl (or|add x, c2), c1 => or|add (shl x, c1), (c2 << c1)
; Only one shift is expected, GEP shall not produce a separate shift

; The GEP scales the i32 index by 4, so the add-of-200 folds through the
; combined shift of 4 into a single immediate: 200 * 16 = 0xc80.
; CHECK-LABEL: {{^}}add_const_offset:
; CHECK: v_lshlrev_b32_e32 v[[SHL:[0-9]+]], 4, v0
; CHECK: v_add_u32_e32 v[[ADD:[0-9]+]], vcc, 0xc80, v[[SHL]]
; CHECK-NOT: v_lshl
; CHECK: v_add_u32_e32 v[[ADDRLO:[0-9]+]], vcc, s{{[0-9]+}}, v[[ADD]]
; CHECK: load_dword v{{[0-9]+}}, v{{\[}}[[ADDRLO]]:
define amdgpu_kernel void @add_const_offset(i32 addrspace(1)* nocapture %arg) {
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %add = add i32 %id, 200
  %shl = shl i32 %add, 2
  %ptr = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %shl
  %val = load i32, i32 addrspace(1)* %ptr, align 4
  store i32 %val, i32 addrspace(1)* %arg, align 4
  ret void
}

; Same fold for 'or': the or-of-256 moves through the combined shift of 4
; as a single immediate: 256 * 16 = 0x1000.
; CHECK-LABEL: {{^}}or_const_offset:
; CHECK: v_lshlrev_b32_e32 v[[SHL:[0-9]+]], 4, v0
; CHECK: v_or_b32_e32 v[[OR:[0-9]+]], 0x1000, v[[SHL]]
; CHECK-NOT: v_lshl
; CHECK: v_add_u32_e32 v[[ADDRLO:[0-9]+]], vcc, s{{[0-9]+}}, v[[OR]]
; CHECK: load_dword v{{[0-9]+}}, v{{\[}}[[ADDRLO]]:
define amdgpu_kernel void @or_const_offset(i32 addrspace(1)* nocapture %arg) {
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %add = or i32 %id, 256
  %shl = shl i32 %add, 2
  %ptr = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %shl
  %val = load i32, i32 addrspace(1)* %ptr, align 4
  store i32 %val, i32 addrspace(1)* %arg, align 4
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x()