; Test lowering of the 'select' IR instruction for integer and pointer types
; across MIPS ISA revisions: branch-based (mips2/mips3), conditional-move
; (movn, mips4-r5), and select (seleqz/selnez, r6), plus microMIPS variants.
; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
; RUN:    -check-prefixes=ALL,M2,M2-M3
; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-32,CMOV-32R1
; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-32,CMOV-32R2-R5
; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-32,CMOV-32R2-R5
; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-32,CMOV-32R2-R5
; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
; RUN:    -check-prefixes=ALL,SEL,SEL-32
; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
; RUN:    -check-prefixes=ALL,M3,M2-M3
; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-64
; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-64
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-64
; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-64
; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
; RUN:    -check-prefixes=ALL,CMOV,CMOV-64
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
; RUN:    -check-prefixes=ALL,SEL,SEL-64
; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -asm-show-inst | FileCheck %s \
; RUN:    -check-prefixes=ALL,MM32R3
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
; RUN:    -check-prefixes=ALL,MMR6,MM32R6

define signext i1 @tst_select_i1_i1(i1 signext %s,
                                    i1 signext %x, i1 signext %y) {
entry:
  ; ALL-LABEL: tst_select_i1_i1:

  ; M2-M3:  andi    $[[T0:[0-9]+]], $4, 1
  ; M2:     bnez    $[[T0]], [[BB0:\$BB[0-9_]+]]
  ; M3:     bnez    $[[T0]], [[BB0:\.LBB[0-9_]+]]
  ; M2-M3:  nop
  ; M2-M3:  move    $5, $6
  ; M2-M3:  [[BB0]]:
  ; M2-M3:  jr      $ra
  ; M2-M3:  move    $2, $5

  ; CMOV:   andi    $[[T0:[0-9]+]], $4, 1
  ; CMOV:   movn    $6, $5, $[[T0]]
  ; CMOV:   move    $2, $6

  ; SEL:    andi    $[[T0:[0-9]+]], $4, 1
  ; SEL:    seleqz  $[[T1:[0-9]+]], $6, $[[T0]]
  ; SEL:    selnez  $[[T2:[0-9]+]], $5, $[[T0]]
  ; SEL:    or      $2, $[[T2]], $[[T1]]

  ; MM32R3: andi16  $[[T0:[0-9]+]], $4, 1
  ; MM32R3: movn    $[[T1:[0-9]+]], $5, $[[T0]] # <MCInst #{{[0-9]+}} MOVN_I_MM
  ; MM32R3: move    $2, $[[T1]]

  ; MMR6:   andi16  $[[T0:[0-9]+]], $4, 1
  ; MMR6:   seleqz  $[[T1:[0-9]+]], $6, $[[T0]]
  ; MMR6:   selnez  $[[T2:[0-9]+]], $5, $[[T0]]
  ; MMR6:   or      $2, $[[T2]], $[[T1]]

  %r = select i1 %s, i1 %x, i1 %y
  ret i1 %r
}

define signext i8 @tst_select_i1_i8(i1 signext %s,
                                    i8 signext %x, i8 signext %y) {
entry:
  ; ALL-LABEL: tst_select_i1_i8:

  ; M2-M3:  andi    $[[T0:[0-9]+]], $4, 1
  ; M2:     bnez    $[[T0]], [[BB0:\$BB[0-9_]+]]
  ; M3:     bnez    $[[T0]], [[BB0:\.LBB[0-9_]+]]
  ; M2-M3:  nop
  ; M2-M3:  move    $5, $6
  ; M2-M3:  [[BB0]]:
  ; M2-M3:  jr      $ra
  ; M2-M3:  move    $2, $5

  ; CMOV:   andi    $[[T0:[0-9]+]], $4, 1
  ; CMOV:   movn    $6, $5, $[[T0]]
  ; CMOV:   move    $2, $6

  ; SEL:    andi    $[[T0:[0-9]+]], $4, 1
  ; SEL:    seleqz  $[[T1:[0-9]+]], $6, $[[T0]]
  ; SEL:    selnez  $[[T2:[0-9]+]], $5, $[[T0]]
  ; SEL:    or      $2, $[[T2]], $[[T1]]

  ; MM32R3: andi16  $[[T0:[0-9]+]], $4, 1
  ; MM32R3: movn    $[[T1:[0-9]+]], $5, $[[T0]] # <MCInst #{{[0-9]+}} MOVN_I_MM
  ; MM32R3: move    $2, $[[T1]]

  ; MMR6:   andi16  $[[T0:[0-9]+]], $4, 1
  ; MMR6:   seleqz  $[[T1:[0-9]+]], $6, $[[T0]]
  ; MMR6:   selnez  $[[T2:[0-9]+]], $5, $[[T0]]
  ; MMR6:   or      $2, $[[T2]], $[[T1]]

  %r = select i1 %s, i8 %x, i8 %y
  ret i8 %r
}

define signext i32 @tst_select_i1_i32(i1 signext %s,
                                      i32 signext %x, i32 signext %y) {
entry:
  ; ALL-LABEL: tst_select_i1_i32:

  ; M2-M3:  andi    $[[T0:[0-9]+]], $4, 1
  ; M2:     bnez    $[[T0]], [[BB0:\$BB[0-9_]+]]
  ; M3:     bnez    $[[T0]], [[BB0:\.LBB[0-9_]+]]
  ; M2-M3:  nop
  ; M2-M3:  move    $5, $6
  ; M2-M3:  [[BB0]]:
  ; M2-M3:  jr      $ra
  ; M3:     sll     $2, $5, 0

  ; CMOV:   andi    $[[T0:[0-9]+]], $4, 1
  ; CMOV:   movn    $6, $5, $[[T0]]
  ; CMOV-64: sll    $2, $6, 0

  ; SEL:    andi    $[[T0:[0-9]+]], $4, 1
  ; SEL:    seleqz  $[[T1:[0-9]+]], $6, $[[T0]]
  ; SEL:    selnez  $[[T2:[0-9]+]], $5, $[[T0]]
  ; SEL:    or      $[[T3:[0-9]+]], $[[T2]], $[[T1]]
  ; SEL-64: sll     $2, $[[T3]], 0

  ; MM32R3: andi16  $[[T0:[0-9]+]], $4, 1
  ; MM32R3: movn    $[[T1:[0-9]+]], $5, $[[T0]] # <MCInst #{{[0-9]+}} MOVN_I_MM
  ; MM32R3: move    $2, $[[T1]]

  ; MMR6:   andi16  $[[T0:[0-9]+]], $4, 1
  ; MMR6:   seleqz  $[[T1:[0-9]+]], $6, $[[T0]]
  ; MMR6:   selnez  $[[T2:[0-9]+]], $5, $[[T0]]
  ; MMR6:   or      $2, $[[T2]], $[[T1]]

  %r = select i1 %s, i32 %x, i32 %y
  ret i32 %r
}

define signext i64 @tst_select_i1_i64(i1 signext %s,
                                      i64 signext %x, i64 signext %y) {
entry:
  ; ALL-LABEL: tst_select_i1_i64:

  ; M2:     andi    $[[T0:[0-9]+]], $4, 1
  ; M2:     bnez    $[[T0]], $[[BB0:BB[0-9_]+]]
  ; M2:     nop
  ; M2:     lw      $[[T1:[0-9]+]], 16($sp)
  ; M2:     $[[BB0]]:
  ; FIXME: This branch is redundant
  ; M2:     bnez    $[[T0]], $[[BB1:BB[0-9_]+]]
  ; M2:     nop
  ; M2:     lw      $[[T2:[0-9]+]], 20($sp)
  ; M2:     $[[BB1]]:
  ; M2:     move    $2, $[[T1]]
  ; M2:     jr      $ra
  ; M2:     move    $3, $[[T2]]

  ; CMOV-32: andi   $[[T0:[0-9]+]], $4, 1
  ; CMOV-32: lw     $2, 16($sp)
  ; CMOV-32: movn   $2, $6, $[[T0]]
  ; CMOV-32: lw     $3, 20($sp)
  ; CMOV-32: movn   $3, $7, $[[T0]]

  ; SEL-32: andi    $[[T0:[0-9]+]], $4, 1
  ; SEL-32: lw      $[[T1:[0-9]+]], 16($sp)
  ; SEL-32: seleqz  $[[T2:[0-9]+]], $[[T1]], $[[T0]]
  ; SEL-32: selnez  $[[T3:[0-9]+]], $6, $[[T0]]
  ; SEL-32: or      $2, $[[T3]], $[[T2]]
  ; SEL-32: lw      $[[T4:[0-9]+]], 20($sp)
  ; SEL-32: seleqz  $[[T5:[0-9]+]], $[[T4]], $[[T0]]
  ; SEL-32: selnez  $[[T6:[0-9]+]], $7, $[[T0]]
  ; SEL-32: or      $3, $[[T6]], $[[T5]]

  ; M3:     andi    $[[T0:[0-9]+]], $4, 1
  ; M3:     bnez    $[[T0]], [[BB0:\.LBB[0-9_]+]]
  ; M3:     nop
  ; M3:     move    $5, $6
  ; M3:     [[BB0]]:
  ; M3:     jr      $ra
  ; M3:     move    $2, $5

  ; CMOV-64: andi   $[[T0:[0-9]+]], $4, 1
  ; CMOV-64: movn   $6, $5, $[[T0]]
  ; CMOV-64: move   $2, $6

  ; SEL-64: andi    $[[T0:[0-9]+]], $4, 1
  ; FIXME: This shift is redundant
  ; SEL-64: sll     $[[T0]], $[[T0]], 0
  ; SEL-64: seleqz  $[[T1:[0-9]+]], $6, $[[T0]]
  ; SEL-64: selnez  $[[T0]], $5, $[[T0]]
  ; SEL-64: or      $2, $[[T0]], $[[T1]]

  ; MM32R3: andi16  $[[T0:[0-9]+]], $4, 1
  ; MM32R3: lw      $2, 16($sp)
  ; MM32R3: movn    $2, $6, $[[T0]] # <MCInst #{{[0-9]+}} MOVN_I_MM
  ; MM32R3: lw      $3, 20($sp)
  ; MM32R3: movn    $3, $7, $[[T0]] # <MCInst #{{[0-9]+}} MOVN_I_MM

  ; MM32R6: andi16  $[[T0:[0-9]+]], $4, 1
  ; MM32R6: lw      $[[T2:[0-9]+]], 16($sp)
  ; MM32R6: seleqz  $[[T3:[0-9]+]], $[[T2]], $[[T0]]
  ; MM32R6: selnez  $[[T1:[0-9]+]], $6, $[[T0]]
  ; MM32R6: or      $2, $[[T1]], $[[T3]]
  ; MM32R6: lw      $[[T4:[0-9]+]], 20($sp)
  ; MM32R6: seleqz  $[[T5:[0-9]+]], $[[T4]], $[[T0]]
  ; MM32R6: selnez  $[[T6:[0-9]+]], $7, $[[T0]]
  ; MM32R6: or      $3, $[[T6]], $[[T5]]

  %r = select i1 %s, i64 %x, i64 %y
  ret i64 %r
}

define i8* @tst_select_word_cst(i8* %a, i8* %b) {
  ; ALL-LABEL: tst_select_word_cst:

  ; M2:     addiu   $[[T0:[0-9]+]], $zero, -1
  ; M2:     xor     $[[T1:[0-9]+]], $5, $[[T0]]
  ; M2:     sltu    $[[T2:[0-9]+]], $zero, $[[T1]]
  ; M2:     bnez    $[[T2]], [[BB0:\$BB[0-9_]+]]
  ; M2:     addiu   $2, $zero, 0
  ; M2:     move    $2, $4
  ; M2:     [[BB0]]:
  ; M2:     jr      $ra

  ; M3:     daddiu  $[[T0:[0-9]+]], $zero, -1
  ; M3:     xor     $[[T1:[0-9]+]], $5, $[[T0]]
  ; M3:     sltu    $[[T2:[0-9]+]], $zero, $[[T1]]
  ; M3:     bnez    $[[T2]], [[BB0:\.LBB[0-9_]+]]
  ; M3:     daddiu  $2, $zero, 0
  ; M3:     move    $2, $4
  ; M3:     [[BB0]]:
  ; M3:     jr      $ra

  ; CMOV-32: addiu  $[[T0:[0-9]+]], $zero, -1
  ; CMOV-32: xor    $[[T1:[0-9]+]], $5, $[[T0]]
  ; CMOV-32: movn   $[[T2:[0-9]+]], $zero, $[[T1]]
  ; CMOV-32: jr     $ra
  ; CMOV-32: move   $2, $[[T2]]

  ; SEL-32: addiu   $[[T0:[0-9]+]], $zero, -1
  ; SEL-32: xor     $[[T1:[0-9]+]], $5, $[[T0]]
  ; SEL-32: sltu    $[[T2:[0-9]+]], $zero, $[[T1]]
  ; SEL-32: jr      $ra
  ; SEL-32: seleqz  $2, $4, $[[T2]]

  ; CMOV-64: daddiu $[[T0:[0-9]+]], $zero, -1
  ; CMOV-64: xor    $[[T1:[0-9]+]], $5, $[[T0]]
  ; CMOV-64: movn   $[[T2:[0-9]+]], $zero, $[[T1]]
  ; CMOV-64: move   $2, $[[T2]]

  ; SEL-64: daddiu  $[[T0:[0-9]+]], $zero, -1
  ; SEL-64: xor     $[[T1:[0-9]+]], $5, $[[T0]]
  ; SEL-64: sltu    $[[T2:[0-9]+]], $zero, $[[T1]]
  ; FIXME: This shift is redundant.
  ; SEL-64: sll     $[[T2]], $[[T2]], 0
  ; SEL-64: seleqz  $2, $4, $[[T2]]

  ; MM32R3: li16    $[[T0:[0-9]+]], -1
  ; MM32R3: xor     $[[T1:[0-9]+]], $5, $[[T0]]
  ; MM32R3: li16    $[[T2:[0-9]+]], 0
  ; MM32R3: movn    $[[T3:[0-9]+]], $[[T2]], $[[T1]] # <MCInst #{{[0-9]+}} MOVN_I_MM
  ; MM32R3: move    $2, $[[T3]]

  ; MM32R6: li16    $[[T0:[0-9]+]], -1
  ; MM32R6: xor     $[[T1:[0-9]+]], $5, $[[T0]]
  ; MM32R6: sltu    $[[T2:[0-9]+]], $zero, $[[T1]]
  ; MM32R6: seleqz  $2, $4, $[[T2]]

  %cmp = icmp eq i8* %b, inttoptr (i64 -1 to i8*)
  %r = select i1 %cmp, i8* %a, i8* null
  ret i8* %r
}