%def bindiv(result="", second="", wide="", suffix="", rem="0", ext="cdq"):
/*
 * 32/64-bit binary div/rem operation. Handles special case of op1=-1.
 */
    /* div/rem vAA, vBB, vCC */
    movzbq  2(rPC), %rax                    # rax <- BB
    movzbq  3(rPC), %rcx                    # rcx <- CC
    .if $wide
    GET_WIDE_VREG %rax, %rax                # rax <- vBB
    GET_WIDE_VREG $second, %rcx             # rcx <- vCC
    .else
    GET_VREG %eax, %rax                     # eax <- vBB
    GET_VREG $second, %rcx                  # ecx <- vCC
    .endif
    test${suffix}   $second, $second
    jz      common_errDivideByZero
    cmp${suffix}  $$-1, $second
    je      2f
    $ext                                    # rdx:rax <- sign-extended of rax
    idiv${suffix}   $second
1:
    .if $wide
    SET_WIDE_VREG $result, rINSTq           # vAA <- result
    .else
    SET_VREG $result, rINSTq                # vAA <- result
    .endif
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
2:
    .if $rem
    xor${suffix} $result, $result
    .else
    neg${suffix} $result
    .endif
    jmp     1b
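    /*
     * The -1 check above exists because idiv raises a divide-error fault
     * when the quotient overflows (minint / -1), whereas the Dalvik spec
     * follows Java semantics:
     *     Integer.MIN_VALUE / -1 == Integer.MIN_VALUE
     *     Integer.MIN_VALUE % -1 == 0
     * The 2: handler therefore skips idiv entirely for a -1 divisor:
     * negating the dividend gives the correct quotient for every input
     * (minint wraps back to minint), and the remainder is always zero.
     * The 2addr and literal variants below use the same pattern.
     */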

%def bindiv2addr(result="", second="", wide="", suffix="", rem="0", ext="cdq"):
/*
 * 32/64-bit binary div/rem operation. Handles special case of op1=-1.
 */
    /* div/rem/2addr vA, vB */
    movl    rINST, %ecx                     # rcx <- BA
    sarl    $$4, %ecx                       # rcx <- B
    andb    $$0xf, rINSTbl                  # rINST <- A
    .if $wide
    GET_WIDE_VREG %rax, rINSTq              # rax <- vA
    GET_WIDE_VREG $second, %rcx             # rcx <- vB
    .else
    GET_VREG %eax, rINSTq                   # eax <- vA
    GET_VREG $second, %rcx                  # ecx <- vB
    .endif
    test${suffix}   $second, $second
    jz      common_errDivideByZero
    cmp${suffix}  $$-1, $second
    je      2f
    $ext                                    # rdx:rax <- sign-extended of rax
    idiv${suffix}   $second
1:
    .if $wide
    SET_WIDE_VREG $result, rINSTq           # vA <- result
    .else
    SET_VREG $result, rINSTq                # vA <- result
    .endif
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
2:
    .if $rem
    xor${suffix} $result, $result
    .else
    neg${suffix} $result
    .endif
    jmp     1b

%def bindivLit16(result="", rem="0"):
/*
 * 32-bit binary div/rem operation. Handles special case of op1=-1.
 */
    /* div/rem/lit16 vA, vB, #+CCCC */
    /* Need A in rINST, ssssCCCC in ecx, vB in eax */
    movl    rINST, %eax                     # rax <- 000000BA
    sarl    $$4, %eax                       # eax <- B
    GET_VREG %eax, %rax                     # eax <- vB
    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
    andb    $$0xf, rINSTbl                  # rINST <- A
    testl   %ecx, %ecx
    jz      common_errDivideByZero
    cmpl    $$-1, %ecx
    je      2f
    cdq                                     # edx:eax <- sign-extended of eax
    idivl   %ecx
1:
    SET_VREG $result, rINSTq                # vA <- result
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
2:
    .if $rem
    xorl    $result, $result
    .else
    negl    $result
    .endif
    jmp     1b

%def bindivLit8(result="", rem="0"):
/*
 * 32-bit div/rem "lit8" binary operation. Handles special case of
 * op0=minint & op1=-1
 */
    /* div/rem/lit8 vAA, vBB, #+CC */
    movzbq  2(rPC), %rax                    # rax <- BB
    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
    GET_VREG %eax, %rax                     # eax <- vBB
    testl   %ecx, %ecx
    je      common_errDivideByZero
    cmpl    $$-1, %ecx
    je      2f
    cdq                                     # edx:eax <- sign-extended of eax
    idivl   %ecx
1:
    SET_VREG $result, rINSTq                # vAA <- result
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
2:
    .if $rem
    xorl    $result, $result
    .else
    negl    $result
    .endif
    jmp     1b

%def binop(result="%eax", instr=""):
/*
 * Generic 32-bit binary operation. Provide an "instr" line that
 * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
 * This could be an x86 instruction or a function call. (If the result
 * comes back in a register other than eax, you can override "result".)
 *
 * For: add-int, sub-int, and-int, or-int,
 *      xor-int, shl-int, shr-int, ushr-int
 */
    /* binop vAA, vBB, vCC */
    movzbq  2(rPC), %rax                    # rax <- BB
    movzbq  3(rPC), %rcx                    # rcx <- CC
    GET_VREG %eax, %rax                     # eax <- vBB
    $instr VREG_ADDRESS(%rcx),%eax
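    /*
     * For example, op_add_int supplies instr="addl", so the line above
     * expands to
     *     addl    VREG_ADDRESS(%rcx),%eax
     * adding vCC straight from its vreg slot into vBB (already in eax).
     */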
    SET_VREG $result, rINSTq
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def binop1(wide="0", instr=""):
/*
 * Generic 32-bit binary operation in which both operands are loaded into
 * registers (op0 in eax, op1 in ecx).
 */
    /* binop vAA, vBB, vCC */
    movzbq  2(rPC), %rax                    # rax <- BB
    movzbq  3(rPC), %rcx                    # rcx <- CC
    GET_VREG %ecx, %rcx                     # ecx <- vCC
    .if $wide
    GET_WIDE_VREG %rax, %rax                # rax <- vBB
    $instr                                  # ex: addl %ecx,%eax
    SET_WIDE_VREG %rax, rINSTq
    .else
    GET_VREG %eax, %rax                     # eax <- vBB
    $instr                                  # ex: addl %ecx,%eax
    SET_VREG %eax, rINSTq
    .endif
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def binop2addr(result="%eax", instr=""):
/*
 * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
 * that specifies an instruction that performs "vA = vA op eax", with
 * the vA slot (VREG_ADDRESS(%rcx)) as its memory destination. The
 * instruction must accept a register source and a memory destination
 * (e.g. "addl").
 *
 * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
 *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
 *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
 *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
 */
    /* binop/2addr vA, vB */
    movl    rINST, %ecx                     # rcx <- A+
    sarl    $$4, rINST                      # rINST <- B
    andb    $$0xf, %cl                      # ecx <- A
    GET_VREG %eax, rINSTq                   # eax <- vB
    $instr %eax, VREG_ADDRESS(%rcx)
    CLEAR_REF %rcx
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def binopLit16(result="%eax", instr=""):
/*
 * Generic 32-bit "lit16" binary operation. Provide an "instr" line
 * that specifies an instruction that performs "result = eax op ecx".
 * This could be an x86 instruction or a function call. (If the result
 * comes back in a register other than eax, you can override "result".)
 *
 * For: add-int/lit16, rsub-int,
 *      and-int/lit16, or-int/lit16, xor-int/lit16
 */
    /* binop/lit16 vA, vB, #+CCCC */
    movl    rINST, %eax                     # rax <- 000000BA
    sarl    $$4, %eax                       # eax <- B
    GET_VREG %eax, %rax                     # eax <- vB
    andb    $$0xf, rINSTbl                  # rINST <- A
    movswl  2(rPC), %ecx                    # ecx <- ssssCCCC
    $instr                                  # for example: addl %ecx, %eax
    SET_VREG $result, rINSTq
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def binopLit8(result="%eax", instr=""):
/*
 * Generic 32-bit "lit8" binary operation. Provide an "instr" line
 * that specifies an instruction that performs "result = eax op ecx".
 * This could be an x86 instruction or a function call. (If the result
 * comes back in a register other than eax, you can override "result".)
 *
 * For: add-int/lit8, rsub-int/lit8
 *      and-int/lit8, or-int/lit8, xor-int/lit8,
 *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
 */
    /* binop/lit8 vAA, vBB, #+CC */
    movzbq  2(rPC), %rax                    # rax <- BB
    movsbl  3(rPC), %ecx                    # ecx <- ssssssCC
    GET_VREG %eax, %rax                     # eax <- vBB
    $instr                                  # ex: addl %ecx,%eax
    SET_VREG $result, rINSTq
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def binopWide(instr=""):
/*
 * Generic 64-bit binary operation.
 */
    /* binop vAA, vBB, vCC */
    movzbq  2(rPC), %rax                    # rax <- BB
    movzbq  3(rPC), %rcx                    # rcx <- CC
    GET_WIDE_VREG %rax, %rax                # rax <- v[BB]
    $instr VREG_ADDRESS(%rcx),%rax
    SET_WIDE_VREG %rax, rINSTq              # v[AA] <- rax
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def binopWide2addr(instr=""):
/*
 * Generic 64-bit "/2addr" binary operation.
 */
    /* binop/2addr vA, vB */
    movl    rINST, %ecx                     # rcx <- A+
    sarl    $$4, rINST                      # rINST <- B
    andb    $$0xf, %cl                      # ecx <- A
    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
    $instr %rax,VREG_ADDRESS(%rcx)
    CLEAR_WIDE_REF %rcx
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def cvtfp_int(fp_suffix="", i_suffix="", max_const="", result_reg="", wide=""):
/* On fp to int conversions, Java requires that
 * if the result > maxint, it should be clamped to maxint. If it is less
 * than minint, it should be clamped to minint. If it is a nan, the result
 * should be zero. Further, the rounding mode is to truncate.
 */
    /* float/double to int/long vA, vB */
    movl    rINST, %ecx                     # rcx <- A+
    sarl    $$4, rINST                      # rINST <- B
    andb    $$0xf, %cl                      # ecx <- A
    GET_VREG_XMM${fp_suffix} %xmm0, rINSTq
    mov${i_suffix} ${max_const}, ${result_reg}
    cvtsi2s${fp_suffix}${i_suffix} ${result_reg}, %xmm1
    comis${fp_suffix} %xmm1, %xmm0
    jae     1f
    jp      2f
    cvtts${fp_suffix}2si${i_suffix} %xmm0, ${result_reg}
    jmp     1f
2:
    xor${i_suffix} ${result_reg}, ${result_reg}
1:
    .if $wide
    SET_WIDE_VREG ${result_reg}, %rcx
    .else
    SET_VREG ${result_reg}, %rcx
    .endif
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
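    /*
     * The clamping above leans on two x86 details: comis* reports an
     * unordered (NaN) compare through the parity flag, which the jp
     * catches to produce zero, and cvtts*2si returns the "integer
     * indefinite" value (0x80000000 or 0x8000000000000000, i.e. exactly
     * minint/minlong) for inputs below the representable range, so only
     * the NaN and >= maxint cases need explicit handling.
     */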

%def shop2addr(wide="0", instr=""):
/*
 * Generic 32-bit "shift/2addr" operation.
 */
    /* shift/2addr vA, vB */
    movl    rINST, %ecx                     # ecx <- BA
    sarl    $$4, %ecx                       # ecx <- B
    GET_VREG %ecx, %rcx                     # ecx <- vB
    andb    $$0xf, rINSTbl                  # rINST <- A
    .if $wide
    GET_WIDE_VREG %rax, rINSTq              # rax <- vA
    $instr                                  # ex: sarl %cl, %eax
    SET_WIDE_VREG %rax, rINSTq
    .else
    GET_VREG %eax, rINSTq                   # eax <- vA
    $instr                                  # ex: sarl %cl, %eax
    SET_VREG %eax, rINSTq
    .endif
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def unop(preinstr="", instr="", wide="0"):
/*
 * Generic 32/64-bit unary operation. Provide an "instr" line that
 * specifies an instruction that performs "result = op eax".
 */
    /* unop vA, vB */
    movl    rINST, %ecx                     # rcx <- A+
    sarl    $$4,rINST                       # rINST <- B
    .if ${wide}
    GET_WIDE_VREG %rax, rINSTq              # rax <- vB
    .else
    GET_VREG %eax, rINSTq                   # eax <- vB
    .endif
    andb    $$0xf,%cl                       # ecx <- A
$preinstr
$instr
    .if ${wide}
    SET_WIDE_VREG %rax, %rcx
    .else
    SET_VREG %eax, %rcx
    .endif
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def op_add_int():
% binop(instr="addl")

%def op_add_int_2addr():
% binop2addr(instr="addl")

%def op_add_int_lit16():
% binopLit16(instr="addl %ecx, %eax")

%def op_add_int_lit8():
% binopLit8(instr="addl %ecx, %eax")

%def op_add_long():
% binopWide(instr="addq")

%def op_add_long_2addr():
% binopWide2addr(instr="addq")

%def op_and_int():
% binop(instr="andl")

%def op_and_int_2addr():
% binop2addr(instr="andl")

%def op_and_int_lit16():
% binopLit16(instr="andl %ecx, %eax")

%def op_and_int_lit8():
% binopLit8(instr="andl %ecx, %eax")

%def op_and_long():
% binopWide(instr="andq")

%def op_and_long_2addr():
% binopWide2addr(instr="andq")

%def op_cmp_long():
/*
 * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
 * register based on the results of the comparison.
 */
    /* cmp-long vAA, vBB, vCC */
    movzbq  2(rPC), %rdx                    # rdx <- BB
    movzbq  3(rPC), %rcx                    # rcx <- CC
    GET_WIDE_VREG %rdx, %rdx                # rdx <- v[BB]
    xorl    %eax, %eax
    xorl    %edi, %edi
    addb    $$1, %al
    movl    $$-1, %esi
    cmpq    VREG_ADDRESS(%rcx), %rdx
    cmovl   %esi, %edi
    cmovg   %eax, %edi
    SET_VREG %edi, rINSTq
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

%def op_div_int():
% bindiv(result="%eax", second="%ecx", wide="0", suffix="l")

%def op_div_int_2addr():
% bindiv2addr(result="%eax", second="%ecx", wide="0", suffix="l")

%def op_div_int_lit16():
% bindivLit16(result="%eax")

%def op_div_int_lit8():
% bindivLit8(result="%eax")

%def op_div_long():
% bindiv(result="%rax", second="%rcx", wide="1", suffix="q", ext="cqo")

%def op_div_long_2addr():
% bindiv2addr(result="%rax", second="%rcx", wide="1", suffix="q", ext="cqo")

%def op_int_to_byte():
% unop(instr="movsbl %al, %eax")

%def op_int_to_char():
% unop(instr="movzwl %ax,%eax")

%def op_int_to_long():
    /* int to long vA, vB */
    movzbq  rINSTbl, %rax                   # rax <- +A
    sarl    $$4, %eax                       # eax <- B
    andb    $$0xf, rINSTbl                  # rINST <- A
    movslq  VREG_ADDRESS(%rax), %rax
    SET_WIDE_VREG %rax, rINSTq              # v[A] <- %rax
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1


%def op_int_to_short():
% unop(instr="movswl %ax, %eax")

%def op_long_to_int():
/* we ignore the high word, making this equivalent to a 32-bit reg move */
% op_move()

%def op_mul_int():
% binop(instr="imull")

%def op_mul_int_2addr():
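/*
 * mul-int/2addr cannot go through binop2addr: two-operand imul only
 * takes a register destination, so the vA slot cannot be updated in
 * place in memory. Instead vA is loaded, multiplied by the vB slot,
 * and stored back. mul-long/2addr below is open-coded for the same
 * reason.
 */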
    /* mul vA, vB */
    movl    rINST, %ecx                     # rcx <- A+
    sarl    $$4, rINST                      # rINST <- B
    andb    $$0xf, %cl                      # ecx <- A
    GET_VREG %eax, %rcx                     # eax <- vA
    imull   (rFP,rINSTq,4), %eax
    SET_VREG %eax, %rcx
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def op_mul_int_lit16():
% binopLit16(instr="imull %ecx, %eax")

%def op_mul_int_lit8():
% binopLit8(instr="imull %ecx, %eax")

%def op_mul_long():
% binopWide(instr="imulq")

%def op_mul_long_2addr():
    /* mul vA, vB */
    movl    rINST, %ecx                     # rcx <- A+
    sarl    $$4, rINST                      # rINST <- B
    andb    $$0xf, %cl                      # ecx <- A
    GET_WIDE_VREG %rax, %rcx                # rax <- vA
    imulq   (rFP,rINSTq,4), %rax
    SET_WIDE_VREG %rax, %rcx
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1

%def op_neg_int():
% unop(instr=" negl %eax")

%def op_neg_long():
% unop(instr=" negq %rax", wide="1")

%def op_not_int():
% unop(instr=" notl %eax")

%def op_not_long():
% unop(instr=" notq %rax", wide="1")

%def op_or_int():
% binop(instr="orl")

%def op_or_int_2addr():
% binop2addr(instr="orl")

%def op_or_int_lit16():
% binopLit16(instr="orl %ecx, %eax")

%def op_or_int_lit8():
% binopLit8(instr="orl %ecx, %eax")

%def op_or_long():
% binopWide(instr="orq")

%def op_or_long_2addr():
% binopWide2addr(instr="orq")

%def op_rem_int():
% bindiv(result="%edx", second="%ecx", wide="0", suffix="l", rem="1")

%def op_rem_int_2addr():
% bindiv2addr(result="%edx", second="%ecx", wide="0", suffix="l", rem="1")

%def op_rem_int_lit16():
% bindivLit16(result="%edx", rem="1")

%def op_rem_int_lit8():
% bindivLit8(result="%edx", rem="1")

%def op_rem_long():
% bindiv(result="%rdx", second="%rcx", wide="1", suffix="q", ext="cqo", rem="1")

%def op_rem_long_2addr():
% bindiv2addr(result="%rdx", second="%rcx", wide="1", suffix="q", rem="1", ext="cqo")

%def op_rsub_int():
/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
% binopLit16(instr="subl %eax, %ecx", result="%ecx")

%def op_rsub_int_lit8():
% binopLit8(instr="subl %eax, %ecx", result="%ecx")

%def op_shl_int():
% binop1(instr="sall %cl, %eax")

%def op_shl_int_2addr():
% shop2addr(instr="sall %cl, %eax")

%def op_shl_int_lit8():
% binopLit8(instr="sall %cl, %eax")

%def op_shl_long():
% binop1(instr="salq %cl, %rax", wide="1")

%def op_shl_long_2addr():
% shop2addr(instr="salq %cl, %rax", wide="1")

%def op_shr_int():
% binop1(instr="sarl %cl, %eax")

%def op_shr_int_2addr():
% shop2addr(instr="sarl %cl, %eax")

%def op_shr_int_lit8():
% binopLit8(instr="sarl %cl, %eax")

%def op_shr_long():
% binop1(instr="sarq %cl, %rax", wide="1")

%def op_shr_long_2addr():
% shop2addr(instr="sarq %cl, %rax", wide="1")

%def op_sub_int():
% binop(instr="subl")

%def op_sub_int_2addr():
% binop2addr(instr="subl")

%def op_sub_long():
% binopWide(instr="subq")

%def op_sub_long_2addr():
% binopWide2addr(instr="subq")

%def op_ushr_int():
% binop1(instr="shrl %cl, %eax")

%def op_ushr_int_2addr():
% shop2addr(instr="shrl %cl, %eax")

%def op_ushr_int_lit8():
% binopLit8(instr="shrl %cl, %eax")

%def op_ushr_long():
% binop1(instr="shrq %cl, %rax", wide="1")

%def op_ushr_long_2addr():
% shop2addr(instr="shrq %cl, %rax", wide="1")

%def op_xor_int():
% binop(instr="xorl")

%def op_xor_int_2addr():
% binop2addr(instr="xorl")

%def op_xor_int_lit16():
% binopLit16(instr="xorl %ecx, %eax")

%def op_xor_int_lit8():
% binopLit8(instr="xorl %ecx, %eax")

%def op_xor_long():
% binopWide(instr="xorq")

%def op_xor_long_2addr():
% binopWide2addr(instr="xorq")