; Tests codegen for the llvm.ssub.with.overflow.{i32,i64} intrinsics on AMDGPU
; targets (SI/tahiti, VI/tonga, GFX9, and r600/cypress compile-only).
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs< %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -verify-machineinstrs< %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs< %s

declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
declare { i64, i1 } @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone

; i64 ssub.with.overflow where the overflow bit is zero-extended and added
; back into the result before the store.
; FUNC-LABEL: {{^}}ssubo_i64_zext:
define amdgpu_kernel void @ssubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
  %ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
  %val = extractvalue { i64, i1 } %ssub, 0
  %carry = extractvalue { i64, i1 } %ssub, 1
  %ext = zext i1 %carry to i64
  %add2 = add i64 %val, %ext
  store i64 %add2, i64 addrspace(1)* %out, align 8
  ret void
}

; i32 ssub.with.overflow with scalar (kernel-argument) operands; both the
; difference and the overflow bit are stored.
; FUNC-LABEL: {{^}}s_ssubo_i32:
define amdgpu_kernel void @s_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 %a, i32 %b) nounwind {
  %ssub = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) nounwind
  %val = extractvalue { i32, i1 } %ssub, 0
  %carry = extractvalue { i32, i1 } %ssub, 1
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}

; i32 ssub.with.overflow with operands loaded from global memory (VALU path).
; FUNC-LABEL: {{^}}v_ssubo_i32:
define amdgpu_kernel void @v_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)* %carryout, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
  %a = load i32, i32 addrspace(1)* %aptr, align 4
  %b = load i32, i32 addrspace(1)* %bptr, align 4
  %ssub = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b) nounwind
  %val = extractvalue { i32, i1 } %ssub, 0
  %carry = extractvalue { i32, i1 } %ssub, 1
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}

; i64 ssub.with.overflow with scalar operands: expect a scalar sub/sub-with-
; borrow pair for the 64-bit difference.
; FUNC-LABEL: {{^}}s_ssubo_i64:
; GCN: s_sub_u32
; GCN: s_subb_u32
define amdgpu_kernel void @s_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
  %ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
  %val = extractvalue { i64, i1 } %ssub, 0
  %carry = extractvalue { i64, i1 } %ssub, 1
  store i64 %val, i64 addrspace(1)* %out, align 8
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}

; i64 ssub.with.overflow with loaded operands: expect the per-target vector
; sub/sub-with-borrow instruction pair (mnemonics differ by generation).
; FUNC-LABEL: {{^}}v_ssubo_i64:
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
; SI: v_subb_u32_e32 v{{[0-9]+}}, vcc,

; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_subb_u32_e32 v{{[0-9]+}}, vcc,

; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_subb_co_u32_e32 v{{[0-9]+}}, vcc,
define amdgpu_kernel void @v_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
  %a = load i64, i64 addrspace(1)* %aptr, align 4
  %b = load i64, i64 addrspace(1)* %bptr, align 4
  %ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
  %val = extractvalue { i64, i1 } %ssub, 0
  %carry = extractvalue { i64, i1 } %ssub, 1
  store i64 %val, i64 addrspace(1)* %out, align 8
  store i1 %carry, i1 addrspace(1)* %carryout
  ret void
}