; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s

; The code generated by urem is long and complex and may frequently
; change. The goal of this test is to make sure the ISel doesn't fail
; when it gets a v2i32/v4i32 urem

; FUNC-LABEL: {{^}}test_urem_i32:
; SI: s_endpgm
; EG: CF_END
define amdgpu_kernel void @test_urem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
  %a = load i32, i32 addrspace(1)* %in
  %b = load i32, i32 addrspace(1)* %b_ptr
  %result = urem i32 %a, %b
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test_urem_i32_7:
; SI: s_mov_b32 [[MAGIC:s[0-9]+]], 0x24924925
; SI: v_mul_hi_u32 {{v[0-9]+}}, {{v[0-9]+}}, [[MAGIC]]
; SI: v_subrev_{{[iu]}}32
; SI: v_mul_lo_u32
; SI: v_sub_{{[iu]}}32
; SI: buffer_store_dword
; SI: s_endpgm
define amdgpu_kernel void @test_urem_i32_7(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %num = load i32, i32 addrspace(1) * %in
  %result = urem i32 %num, 7
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test_urem_v2i32:
; SI: s_endpgm
; EG: CF_END
define amdgpu_kernel void @test_urem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  %a = load <2 x i32>, <2 x i32> addrspace(1)* %in
  %b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
  %result = urem <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test_urem_v4i32:
; SI: s_endpgm
; EG: CF_END
define amdgpu_kernel void @test_urem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
  %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
  %result = urem <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test_urem_i64:
; SI: s_endpgm
; EG: CF_END
define amdgpu_kernel void @test_urem_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
  %a = load i64, i64 addrspace(1)* %in
  %b = load i64, i64 addrspace(1)* %b_ptr
  %result = urem i64 %a, %b
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test_urem_v2i64:
; SI: s_endpgm
; EG: CF_END
define amdgpu_kernel void @test_urem_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
  %a = load <2 x i64>, <2 x i64> addrspace(1)* %in
  %b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
  %result = urem <2 x i64> %a, %b
  store <2 x i64> %result, <2 x i64> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test_urem_v4i64:
; SI: s_endpgm
; EG: CF_END
define amdgpu_kernel void @test_urem_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
  %a = load <4 x i64>, <4 x i64> addrspace(1)* %in
  %b = load <4 x i64>, <4 x i64> addrspace(1)* %b_ptr
  %result = urem <4 x i64> %a, %b
  store <4 x i64> %result, <4 x i64> addrspace(1)* %out
  ret void
}