; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; Extract the high bit of the 1st quarter
; GCN-LABEL: {{^}}v_uextract_bit_31_i128:
; GCN: buffer_load_dword [[VAL:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}

; GCN: v_mov_b32_e32 v[[ZERO0:[0-9]+]], 0{{$}}
; GCN: v_mov_b32_e32 v[[ZERO1:[0-9]+]], v[[ZERO0]]{{$}}
; GCN: v_mov_b32_e32 v[[ZERO2:[0-9]+]], v[[ZERO0]]{{$}}
; GCN: v_lshrrev_b32_e32 v[[SHIFT:[0-9]+]], 31, [[VAL]]

; GCN: buffer_store_dwordx4 v{{\[}}[[SHIFT]]:[[ZERO2]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN: s_endpgm
define amdgpu_kernel void @v_uextract_bit_31_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
  %id.x = tail call i32 @llvm.amdgcn.workgroup.id.x()
  %in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
  %out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
  %ld.64 = load i128, i128 addrspace(1)* %in.gep
  %srl = lshr i128 %ld.64, 31
  %bit = and i128 %srl, 1
  store i128 %bit, i128 addrspace(1)* %out.gep
  ret void
}

; Extract the high bit of the 2nd quarter
; GCN-LABEL: {{^}}v_uextract_bit_63_i128:
; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}}

; GCN-DAG: v_mov_b32_e32 v[[ZERO0:[0-9]+]], 0{{$}}
; GCN: v_mov_b32_e32 v[[ZERO1:[0-9]+]], v[[ZERO0]]{{$}}
; GCN: v_mov_b32_e32 v[[ZERO2:[0-9]+]], v[[ZERO0]]{{$}}
; GCN: v_mov_b32_e32 v[[ZERO3:[0-9]+]], v[[ZERO0]]{{$}}
; GCN-DAG: v_lshrrev_b32_e32 v[[SHIFT:[0-9]+]], 31, [[VAL]]

; GCN-DAG: buffer_store_dwordx4 v{{\[}}[[SHIFT]]:[[ZERO3]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN: s_endpgm
define amdgpu_kernel void @v_uextract_bit_63_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
  %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
  %out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
  %ld.64 = load i128, i128 addrspace(1)* %in.gep
  %srl = lshr i128 %ld.64, 63
  %bit = and i128 %srl, 1
  store i128 %bit, i128 addrspace(1)* %out.gep
  ret void
}

; Extract the high bit of the 3rd quarter
; GCN-LABEL: {{^}}v_uextract_bit_95_i128:
; GCN: buffer_load_dword [[VAL:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}

; GCN-DAG: v_mov_b32_e32 v[[ZERO0:[0-9]+]], 0{{$}}
; GCN: v_mov_b32_e32 v[[ZERO1:[0-9]+]], v[[ZERO0]]{{$}}
; GCN: v_mov_b32_e32 v[[ZERO2:[0-9]+]], v[[ZERO0]]{{$}}
; GCN-DAG: v_lshrrev_b32_e32 v[[SHIFT:[0-9]+]], 31, [[VAL]]

; GCN-DAG: buffer_store_dwordx4 v{{\[}}[[SHIFT]]:[[ZERO2]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN: s_endpgm
define amdgpu_kernel void @v_uextract_bit_95_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
  %id.x = tail call i32 @llvm.amdgcn.workgroup.id.x()
  %in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
  %out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
  %ld.64 = load i128, i128 addrspace(1)* %in.gep
  %srl = lshr i128 %ld.64, 95
  %bit = and i128 %srl, 1
  store i128 %bit, i128 addrspace(1)* %out.gep
  ret void
}

; Extract the high bit of the 4th quarter
; GCN-LABEL: {{^}}v_uextract_bit_127_i128:
; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}

; GCN-DAG: v_mov_b32_e32 v[[ZERO0:[0-9]+]], 0{{$}}
; GCN: v_mov_b32_e32 v[[ZERO1:[0-9]+]], v[[ZERO0]]{{$}}
; GCN: v_mov_b32_e32 v[[ZERO2:[0-9]+]], v[[ZERO0]]{{$}}
; GCN: v_mov_b32_e32 v[[ZERO3:[0-9]+]], v[[ZERO0]]{{$}}
; GCN-DAG: v_lshrrev_b32_e32 v[[SHIFT:[0-9]+]], 31, [[VAL]]

; GCN-DAG: buffer_store_dwordx4 v{{\[}}[[SHIFT]]:[[ZERO3]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN: s_endpgm
define amdgpu_kernel void @v_uextract_bit_127_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
  %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
  %out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
  %ld.64 = load i128, i128 addrspace(1)* %in.gep
  %srl = lshr i128 %ld.64, 127
  %bit = and i128 %srl, 1
  store i128 %bit, i128 addrspace(1)* %out.gep
  ret void
}

; Extract bits [34, 100): the field spans more than 2 dword boundaries
; GCN-LABEL: {{^}}v_uextract_bit_34_100_i128:
; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[VAL0:[0-9]+]]:[[VAL3:[0-9]+]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}

; GCN-DAG: v_lshl_b64 v{{\[}}[[SHLLO:[0-9]+]]:[[SHLHI:[0-9]+]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, 30
; GCN-DAG: v_lshrrev_b32_e32 v[[ELT1PART:[0-9]+]], 2, v{{[0-9]+}}
; GCN-DAG: v_bfe_u32 v[[ELT2PART:[0-9]+]], v[[VAL3]], 2, 2{{$}}
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN-DAG: v_or_b32_e32 v[[OR0:[0-9]+]], v[[ELT1PART]], v[[SHLLO]]
; GCN-DAG: v_mov_b32_e32 v[[ZERO1:[0-9]+]], v[[ZERO]]{{$}}

; GCN-DAG: buffer_store_dwordx4 v{{\[}}[[OR0]]:[[ZERO1]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; GCN: s_endpgm
define amdgpu_kernel void @v_uextract_bit_34_100_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %in) #1 {
  %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
  %in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
  %out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
  %ld.64 = load i128, i128 addrspace(1)* %in.gep
  %srl = lshr i128 %ld.64, 34
  %bit = and i128 %srl, 73786976294838206463 ; (1 << 66) - 1, i.e. a 66-bit mask
  store i128 %bit, i128 addrspace(1)* %out.gep
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #0

declare i32 @llvm.amdgcn.workgroup.id.x() #0

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }