; Codegen tests for `trunc` on AMDGPU (SI) and R600 (cypress):
; i64->i32 truncating stores, trunc-of-shift patterns, and i32/i64 -> i1
; truncation feeding selects, for both SGPR and VGPR operands.
; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI %s
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG %s

declare i32 @llvm.r600.read.tidig.x() nounwind readnone

define void @trunc_i64_to_i32_store(i32 addrspace(1)* %out, i64 %in) {
; SI-LABEL: {{^}}trunc_i64_to_i32_store:
; SI: s_load_dword [[SLOAD:s[0-9]+]], s[0:1], 0xb
; SI: v_mov_b32_e32 [[VLOAD:v[0-9]+]], [[SLOAD]]
; SI: buffer_store_dword [[VLOAD]]

; EG-LABEL: {{^}}trunc_i64_to_i32_store:
; EG: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG: LSHR
; EG-NEXT: 2(

  %result = trunc i64 %in to i32
  store i32 %result, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}trunc_load_shl_i64:
; SI-DAG: s_load_dwordx2
; SI-DAG: s_load_dword [[SREG:s[0-9]+]],
; SI: s_lshl_b32 [[SHL:s[0-9]+]], [[SREG]], 2
; SI: v_mov_b32_e32 [[VSHL:v[0-9]+]], [[SHL]]
; SI: buffer_store_dword [[VSHL]],
define void @trunc_load_shl_i64(i32 addrspace(1)* %out, i64 %a) {
  %b = shl i64 %a, 2
  %result = trunc i64 %b to i32
  store i32 %result, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}trunc_shl_i64:
; SI: s_load_dwordx2 s{{\[}}[[LO_SREG:[0-9]+]]:{{[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI: s_lshl_b64 s{{\[}}[[LO_SHL:[0-9]+]]:{{[0-9]+\]}}, s{{\[}}[[LO_SREG]]:{{[0-9]+\]}}, 2
; SI: s_add_u32 s[[LO_SREG2:[0-9]+]], s[[LO_SHL]],
; SI: s_addc_u32
; SI: v_mov_b32_e32
; SI: v_mov_b32_e32 v[[LO_VREG:[0-9]+]], s[[LO_SREG2]]
; SI: v_mov_b32_e32
; SI: buffer_store_dword v[[LO_VREG]],
define void @trunc_shl_i64(i64 addrspace(1)* %out2, i32 addrspace(1)* %out, i64 %a) {
  %aa = add i64 %a, 234 ; Prevent shrinking store.
  %b = shl i64 %aa, 2
  %result = trunc i64 %b to i32
  store i32 %result, i32 addrspace(1)* %out, align 4
  store i64 %b, i64 addrspace(1)* %out2, align 8 ; Prevent reducing ops to 32-bits
  ret void
}

; SI-LABEL: {{^}}trunc_i32_to_i1:
; SI: v_and_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
; SI: v_cmp_eq_i32
define void @trunc_i32_to_i1(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) {
  %a = load i32, i32 addrspace(1)* %ptr, align 4
  %trunc = trunc i32 %a to i1
  %result = select i1 %trunc, i32 1, i32 0
  store i32 %result, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}sgpr_trunc_i32_to_i1:
; SI: s_and_b32 s{{[0-9]+}}, 1, s{{[0-9]+}}
; SI: v_cmp_eq_i32
define void @sgpr_trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a) {
  %trunc = trunc i32 %a to i1
  %result = select i1 %trunc, i32 1, i32 0
  store i32 %result, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}s_trunc_i64_to_i1:
; SI: s_load_dwordx2 s{{\[}}[[SLO:[0-9]+]]:{{[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0xb
; SI: s_and_b32 [[MASKED:s[0-9]+]], 1, s[[SLO]]
; SI: v_cmp_eq_i32_e64 s{{\[}}[[VLO:[0-9]+]]:[[VHI:[0-9]+]]], 1, [[MASKED]]
; SI: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, s{{\[}}[[VLO]]:[[VHI]]]
define void @s_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 %x) {
  %trunc = trunc i64 %x to i1
  %sel = select i1 %trunc, i32 63, i32 -12
  store i32 %sel, i32 addrspace(1)* %out
  ret void
}

; SI-LABEL: {{^}}v_trunc_i64_to_i1:
; SI: buffer_load_dwordx2 v{{\[}}[[VLO:[0-9]+]]:{{[0-9]+\]}}
; SI: v_and_b32_e32 [[MASKED:v[0-9]+]], 1, v[[VLO]]
; SI: v_cmp_eq_i32_e32 vcc, 1, [[MASKED]]
; SI: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, vcc
define void @v_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
  %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %x = load i64, i64 addrspace(1)* %gep

  %trunc = trunc i64 %x to i1
  %sel = select i1 %trunc, i32 63, i32 -12
  store i32 %sel, i32 addrspace(1)* %out.gep
  ret void
}