Lines Matching refs:r9
1209 mov r9, r0, asl #1 @ r9<- byte offset
1213 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1219 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1245 movs r9, r0, asl #1 @ r9<- branch byte offset, check sign
1250 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1256 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1284 movs r9, r0, asl #1 @ r9<- branch byte offset, check sign
1289 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1295 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1340 GET_VREG(r9, r2) @ r9<- vBB
1342 mov r0, r9 @ copy to arg registers
1394 GET_VREG(r9, r2) @ r9<- vBB
1396 mov r0, r9 @ copy to arg registers
1427 and r9, r0, #255 @ r9<- BB
1429 add r9, rFP, r9, lsl #2 @ r9<- &fp[BB]
1431 ldmia r9, {r0-r1} @ r0/r1<- vBB/vBB+1
1462 and r9, r0, #255 @ r9<- BB
1464 add r9, rFP, r9, lsl #2 @ r9<- &fp[BB]
1466 ldmia r9, {r0-r1} @ r0/r1<- vBB/vBB+1
1506 mov r9, rINST, lsr #8 @ r9<- AA
1539 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1542 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1543 movs r9, r9, asl #1 @ convert to bytes, check sign
1548 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1551 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1576 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1579 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1580 movs r9, r9, asl #1 @ convert to bytes, check sign
1585 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1588 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1613 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1616 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1617 movs r9, r9, asl #1 @ convert to bytes, check sign
1622 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1625 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1650 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1653 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1654 movs r9, r9, asl #1 @ convert to bytes, check sign
1659 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1662 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1687 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1690 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1691 movs r9, r9, asl #1 @ convert to bytes, check sign
1696 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1699 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1724 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1727 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1728 movs r9, r9, asl #1 @ convert to bytes, check sign
1733 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1736 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1758 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1761 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1762 movs r9, r9, asl #1 @ convert to bytes, check sign
1767 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1773 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1795 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1798 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1799 movs r9, r9, asl #1 @ convert to bytes, check sign
1804 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1810 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1832 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1835 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1836 movs r9, r9, asl #1 @ convert to bytes, check sign
1841 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1847 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1869 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1872 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1873 movs r9, r9, asl #1 @ convert to bytes, check sign
1878 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1884 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1906 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1909 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1910 movs r9, r9, asl #1 @ convert to bytes, check sign
1915 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1921 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1943 mov r9, #4 @ r9<- BYTE branch dist for not-taken
1946 FETCH_S(r9, 1) @ r9<- branch offset, in code units
1947 movs r9, r9, asl #1 @ convert to bytes, check sign
1952 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
1958 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
2033 mov r9, rINST, lsr #8 @ r9<- AA
2046 SET_VREG(r2, r9) @ vAA<- r2
2061 mov r9, rINST, lsr #8 @ r9<- AA
2092 mov r9, rINST, lsr #8 @ r9<- AA
2105 SET_VREG(r2, r9) @ vAA<- r2
2125 mov r9, rINST, lsr #8 @ r9<- AA
2138 SET_VREG(r2, r9) @ vAA<- r2
2158 mov r9, rINST, lsr #8 @ r9<- AA
2171 SET_VREG(r2, r9) @ vAA<- r2
2191 mov r9, rINST, lsr #8 @ r9<- AA
2204 SET_VREG(r2, r9) @ vAA<- r2
2224 mov r9, rINST, lsr #8 @ r9<- AA
2237 SET_VREG(r2, r9) @ vAA<- r2
2256 mov r9, rINST, lsr #8 @ r9<- AA
2267 GET_VREG(r2, r9) @ r2<- vAA
2282 mov r9, rINST, lsr #8 @ r9<- AA
2292 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
2311 mov r9, rINST, lsr #8 @ r9<- AA
2317 GET_VREG(r9, r9) @ r9<- vAA
3410 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method
3413 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz
3547 mov r9, rINST, lsr #8 @ r9<- A+
3549 and r9, r9, #15
3554 SET_VREG(r0, r9) @ vAA<- r0
3574 mov r9, rINST, lsr #8 @ r9<- A+
3576 and r9, r9, #15
3581 SET_VREG(r0, r9) @ vAA<- r0
3599 mov r9, rINST, lsr #8 @ r9<- A+
3601 and r9, r9, #15
3603 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3609 stmia r9, {r0-r1} @ vAA<- r0/r1
3628 mov r9, rINST, lsr #8 @ r9<- A+
3630 and r9, r9, #15
3632 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3638 stmia r9, {r0-r1} @ vAA<- r0/r1
3659 mov r9, rINST, lsr #8 @ r9<- A+
3661 and r9, r9, #15
3666 SET_VREG(r0, r9) @ vAA<- r0
3684 mov r9, rINST, lsr #8 @ r9<- A+
3686 and r9, r9, #15
3688 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3694 stmia r9, {r0-r1} @ vAA<- r0/r1
3713 mov r9, rINST, lsr #8 @ r9<- A+
3715 and r9, r9, #15
3717 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3722 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
3742 mov r9, rINST, lsr #8 @ r9<- A+
3744 and r9, r9, #15
3749 SET_VREG(r0, r9) @ vAA<- r0
3767 mov r9, rINST, lsr #8 @ r9<- A+
3769 and r9, r9, #15
3771 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3776 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
3817 mov r9, rINST, lsr #8 @ r9<- A+
3819 and r9, r9, #15
3825 SET_VREG(r0, r9) @ vA<- r0
3843 mov r9, rINST, lsr #8 @ r9<- A+
3845 and r9, r9, #15
3847 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3853 stmia r9, {r0-r1} @ vAA<- r0/r1
3875 mov r9, rINST, lsr #8 @ r9<- A+
3877 and r9, r9, #15
3882 SET_VREG(r0, r9) @ vAA<- r0
3939 mov r9, rINST, lsr #8 @ r9<- A+
3941 and r9, r9, #15
3943 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3948 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
3967 mov r9, rINST, lsr #8 @ r9<- A+
3969 and r9, r9, #15
3971 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
3976 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
3999 mov r9, rINST, lsr #8 @ r9<- A+
4001 and r9, r9, #15
4007 SET_VREG(r0, r9) @ vA<- r0
4078 mov r9, rINST, lsr #8 @ r9<- A+
4080 and r9, r9, #15
4082 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
4088 stmia r9, {r0-r1} @ vAA<- r0/r1
4112 mov r9, rINST, lsr #8 @ r9<- A+
4114 and r9, r9, #15
4120 SET_VREG(r0, r9) @ vA<- r0
4140 mov r9, rINST, lsr #8 @ r9<- A+
4142 and r9, r9, #15
4147 SET_VREG(r0, r9) @ vAA<- r0
4167 mov r9, rINST, lsr #8 @ r9<- A+
4169 and r9, r9, #15
4174 SET_VREG(r0, r9) @ vAA<- r0
4194 mov r9, rINST, lsr #8 @ r9<- A+
4196 and r9, r9, #15
4201 SET_VREG(r0, r9) @ vAA<- r0
4228 mov r9, rINST, lsr #8 @ r9<- AA
4242 SET_VREG(r0, r9) @ vAA<- r0
4270 mov r9, rINST, lsr #8 @ r9<- AA
4284 SET_VREG(r0, r9) @ vAA<- r0
4313 mov r9, rINST, lsr #8 @ r9<- AA
4327 SET_VREG(r0, r9) @ vAA<- r0
4355 mov r9, rINST, lsr #8 @ r9<- AA
4369 SET_VREG(r0, r9) @ vAA<- r0
4398 mov r9, rINST, lsr #8 @ r9<- AA
4412 SET_VREG(r1, r9) @ vAA<- r1
4440 mov r9, rINST, lsr #8 @ r9<- AA
4454 SET_VREG(r0, r9) @ vAA<- r0
4482 mov r9, rINST, lsr #8 @ r9<- AA
4496 SET_VREG(r0, r9) @ vAA<- r0
4524 mov r9, rINST, lsr #8 @ r9<- AA
4538 SET_VREG(r0, r9) @ vAA<- r0
4566 mov r9, rINST, lsr #8 @ r9<- AA
4580 SET_VREG(r0, r9) @ vAA<- r0
4608 mov r9, rINST, lsr #8 @ r9<- AA
4622 SET_VREG(r0, r9) @ vAA<- r0
4650 mov r9, rINST, lsr #8 @ r9<- AA
4664 SET_VREG(r0, r9) @ vAA<- r0
4692 mov r9, rINST, lsr #8 @ r9<- AA
4695 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4709 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
4737 mov r9, rINST, lsr #8 @ r9<- AA
4740 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4754 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
4791 umull r9, r10, r2, r0 @ r9/r10 <- ZxX
4821 mov r9, rINST, lsr #8 @ r9<- AA
4824 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4838 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
4867 mov r9, rINST, lsr #8 @ r9<- AA
4870 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4884 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
4912 mov r9, rINST, lsr #8 @ r9<- AA
4915 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4929 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
4957 mov r9, rINST, lsr #8 @ r9<- AA
4960 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
4974 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
5002 mov r9, rINST, lsr #8 @ r9<- AA
5005 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
5019 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
5037 mov r9, rINST, lsr #8 @ r9<- AA
5044 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
5066 mov r9, rINST, lsr #8 @ r9<- AA
5073 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
5095 mov r9, rINST, lsr #8 @ r9<- AA
5102 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
5134 mov r9, rINST, lsr #8 @ r9<- AA
5148 SET_VREG(r0, r9) @ vAA<- r0
5176 mov r9, rINST, lsr #8 @ r9<- AA
5190 SET_VREG(r0, r9) @ vAA<- r0
5218 mov r9, rINST, lsr #8 @ r9<- AA
5232 SET_VREG(r0, r9) @ vAA<- r0
5260 mov r9, rINST, lsr #8 @ r9<- AA
5274 SET_VREG(r0, r9) @ vAA<- r0
5303 mov r9, rINST, lsr #8 @ r9<- AA
5317 SET_VREG(r0, r9) @ vAA<- r0
5345 mov r9, rINST, lsr #8 @ r9<- AA
5348 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
5362 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
5390 mov r9, rINST, lsr #8 @ r9<- AA
5393 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
5407 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
5435 mov r9, rINST, lsr #8 @ r9<- AA
5438 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
5452 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
5480 mov r9, rINST, lsr #8 @ r9<- AA
5483 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
5497 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
5526 mov r9, rINST, lsr #8 @ r9<- AA
5529 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
5543 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
5569 mov r9, rINST, lsr #8 @ r9<- A+
5571 and r9, r9, #15
5573 GET_VREG(r0, r9) @ r0<- vA
5583 SET_VREG(r0, r9) @ vAA<- r0
5609 mov r9, rINST, lsr #8 @ r9<- A+
5611 and r9, r9, #15
5613 GET_VREG(r0, r9) @ r0<- vA
5623 SET_VREG(r0, r9) @ vAA<- r0
5650 mov r9, rINST, lsr #8 @ r9<- A+
5652 and r9, r9, #15
5654 GET_VREG(r0, r9) @ r0<- vA
5664 SET_VREG(r0, r9) @ vAA<- r0
5690 mov r9, rINST, lsr #8 @ r9<- A+
5692 and r9, r9, #15
5694 GET_VREG(r0, r9) @ r0<- vA
5704 SET_VREG(r0, r9) @ vAA<- r0
5731 mov r9, rINST, lsr #8 @ r9<- A+
5733 and r9, r9, #15
5735 GET_VREG(r0, r9) @ r0<- vA
5745 SET_VREG(r1, r9) @ vAA<- r1
5771 mov r9, rINST, lsr #8 @ r9<- A+
5773 and r9, r9, #15
5775 GET_VREG(r0, r9) @ r0<- vA
5785 SET_VREG(r0, r9) @ vAA<- r0
5811 mov r9, rINST, lsr #8 @ r9<- A+
5813 and r9, r9, #15
5815 GET_VREG(r0, r9) @ r0<- vA
5825 SET_VREG(r0, r9) @ vAA<- r0
5851 mov r9, rINST, lsr #8 @ r9<- A+
5853 and r9, r9, #15
5855 GET_VREG(r0, r9) @ r0<- vA
5865 SET_VREG(r0, r9) @ vAA<- r0
5891 mov r9, rINST, lsr #8 @ r9<- A+
5893 and r9, r9, #15
5895 GET_VREG(r0, r9) @ r0<- vA
5905 SET_VREG(r0, r9) @ vAA<- r0
5931 mov r9, rINST, lsr #8 @ r9<- A+
5933 and r9, r9, #15
5935 GET_VREG(r0, r9) @ r0<- vA
5945 SET_VREG(r0, r9) @ vAA<- r0
5971 mov r9, rINST, lsr #8 @ r9<- A+
5973 and r9, r9, #15
5975 GET_VREG(r0, r9) @ r0<- vA
5985 SET_VREG(r0, r9) @ vAA<- r0
6011 mov r9, rINST, lsr #8 @ r9<- A+
6013 and r9, r9, #15
6015 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6017 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6027 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
6053 mov r9, rINST, lsr #8 @ r9<- A+
6055 and r9, r9, #15
6057 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6059 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6069 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
6088 mov r9, rINST, lsr #8 @ r9<- A+
6090 and r9, r9, #15
6092 add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A]
6096 umull r9, r10, r2, r0 @ r9/r10 <- ZxX
6102 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
6126 mov r9, rINST, lsr #8 @ r9<- A+
6128 and r9, r9, #15
6130 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6132 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6142 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
6169 mov r9, rINST, lsr #8 @ r9<- A+
6171 and r9, r9, #15
6173 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6175 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6185 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
6211 mov r9, rINST, lsr #8 @ r9<- A+
6213 and r9, r9, #15
6215 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6217 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6227 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
6253 mov r9, rINST, lsr #8 @ r9<- A+
6255 and r9, r9, #15
6257 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6259 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6269 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
6295 mov r9, rINST, lsr #8 @ r9<- A+
6297 and r9, r9, #15
6299 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6301 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6311 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
6326 mov r9, rINST, lsr #8 @ r9<- A+
6328 and r9, r9, #15
6330 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6332 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6352 mov r9, rINST, lsr #8 @ r9<- A+
6354 and r9, r9, #15
6356 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6358 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6378 mov r9, rINST, lsr #8 @ r9<- A+
6380 and r9, r9, #15
6382 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6384 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6415 mov r9, rINST, lsr #8 @ r9<- A+
6417 and r9, r9, #15
6419 GET_VREG(r0, r9) @ r0<- vA
6429 SET_VREG(r0, r9) @ vAA<- r0
6455 mov r9, rINST, lsr #8 @ r9<- A+
6457 and r9, r9, #15
6459 GET_VREG(r0, r9) @ r0<- vA
6469 SET_VREG(r0, r9) @ vAA<- r0
6495 mov r9, rINST, lsr #8 @ r9<- A+
6497 and r9, r9, #15
6499 GET_VREG(r0, r9) @ r0<- vA
6509 SET_VREG(r0, r9) @ vAA<- r0
6535 mov r9, rINST, lsr #8 @ r9<- A+
6537 and r9, r9, #15
6539 GET_VREG(r0, r9) @ r0<- vA
6549 SET_VREG(r0, r9) @ vAA<- r0
6576 mov r9, rINST, lsr #8 @ r9<- A+
6578 and r9, r9, #15
6580 GET_VREG(r0, r9) @ r0<- vA
6590 SET_VREG(r0, r9) @ vAA<- r0
6616 mov r9, rINST, lsr #8 @ r9<- A+
6618 and r9, r9, #15
6620 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6622 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6632 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
6658 mov r9, rINST, lsr #8 @ r9<- A+
6660 and r9, r9, #15
6662 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6664 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6674 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
6700 mov r9, rINST, lsr #8 @ r9<- A+
6702 and r9, r9, #15
6704 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6706 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6716 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
6742 mov r9, rINST, lsr #8 @ r9<- A+
6744 and r9, r9, #15
6746 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6748 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6758 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
6785 mov r9, rINST, lsr #8 @ r9<- A+
6787 and r9, r9, #15
6789 add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
6791 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
6801 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
6827 mov r9, rINST, lsr #8 @ r9<- A+
6829 and r9, r9, #15
6838 SET_VREG(r0, r9) @ vAA<- r0
6865 mov r9, rINST, lsr #8 @ r9<- A+
6867 and r9, r9, #15
6876 SET_VREG(r0, r9) @ vAA<- r0
6903 mov r9, rINST, lsr #8 @ r9<- A+
6905 and r9, r9, #15
6914 SET_VREG(r0, r9) @ vAA<- r0
6940 mov r9, rINST, lsr #8 @ r9<- A+
6942 and r9, r9, #15
6951 SET_VREG(r0, r9) @ vAA<- r0
6978 mov r9, rINST, lsr #8 @ r9<- A+
6980 and r9, r9, #15
6989 SET_VREG(r1, r9) @ vAA<- r1
7015 mov r9, rINST, lsr #8 @ r9<- A+
7017 and r9, r9, #15
7026 SET_VREG(r0, r9) @ vAA<- r0
7052 mov r9, rINST, lsr #8 @ r9<- A+
7054 and r9, r9, #15
7063 SET_VREG(r0, r9) @ vAA<- r0
7089 mov r9, rINST, lsr #8 @ r9<- A+
7091 and r9, r9, #15
7100 SET_VREG(r0, r9) @ vAA<- r0
7126 mov r9, rINST, lsr #8 @ r9<- AA
7139 SET_VREG(r0, r9) @ vAA<- r0
7165 mov r9, rINST, lsr #8 @ r9<- AA
7178 SET_VREG(r0, r9) @ vAA<- r0
7205 mov r9, rINST, lsr #8 @ r9<- AA
7218 SET_VREG(r0, r9) @ vAA<- r0
7244 mov r9, rINST, lsr #8 @ r9<- AA
7257 SET_VREG(r0, r9) @ vAA<- r0
7284 mov r9, rINST, lsr #8 @ r9<- AA
7297 SET_VREG(r1, r9) @ vAA<- r1
7323 mov r9, rINST, lsr #8 @ r9<- AA
7336 SET_VREG(r0, r9) @ vAA<- r0
7362 mov r9, rINST, lsr #8 @ r9<- AA
7375 SET_VREG(r0, r9) @ vAA<- r0
7401 mov r9, rINST, lsr #8 @ r9<- AA
7414 SET_VREG(r0, r9) @ vAA<- r0
7440 mov r9, rINST, lsr #8 @ r9<- AA
7453 SET_VREG(r0, r9) @ vAA<- r0
7479 mov r9, rINST, lsr #8 @ r9<- AA
7492 SET_VREG(r0, r9) @ vAA<- r0
7518 mov r9, rINST, lsr #8 @ r9<- AA
7531 SET_VREG(r0, r9) @ vAA<- r0
7733 add r9, r3, r1 @ r9<- object + offset
7734 ldmia r9, {r0-r1} @ r0/r1<- obj.field (64 bits, aligned)
8255 mov r9, r1 @ save length in r9
8264 subs r9, r9, #1 @ length--, check for neg
8269 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
8273 subs r9, r9, #1 @ count--
8278 cmp r9, #4 @ length was initially 5?
8282 sub r9, r9, #1 @ count--
8287 subs r9, r9, #1 @ count--
8334 mov r9, r1 @ save length in r9
8343 subs r9, r9, #1 @ length--, check for neg
8348 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
8352 subs r9, r9, #1 @ count--
8357 cmp r9, #4 @ length was initially 5?
8361 sub r9, r9, #1 @ count--
8366 subs r9, r9, #1 @ count--
8464 mov r1, r9 @ reverse order
8478 GET_VREG(r9, r2) @ r9<- vBB
8480 mov r0, r9 @ r0<- vBB
8486 mov r0, r9 @ r0<- vBB
8496 mov r0, r9 @ r0<- vBB
8545 ldmia r9, {r2-r3}
9377 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
9386 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
9395 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
9404 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
9412 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
9420 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
9428 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
9448 FETCH(r9, 2) @ r9<- FEDC
9451 4: and ip, r9, #0xf000 @ isolate F
9453 3: and ip, r9, #0x0f00 @ isolate E
9455 2: and ip, r9, #0x00f0 @ isolate D
9457 1: and ip, r9, #0x000f @ isolate C
9460 ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
9669 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
9675 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST
9736 add rPC, rPC, r9 @ update rPC
9780 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize
9800 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize
9804 @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs
9831 .LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
9836 sub r1, r1, r9, lsl #2 @ r1<- newFp (old savearea - regsSize)
9839 ldr r9, [rGLUE, #offGlue_interpStackEnd] @ r9<- interpStackEnd
9841 cmp r3, r9 @ bottom < interpStackEnd?
9853 mov r9, #0
9854 str r9, [r10, #offStackSaveArea_returnAddr]
9875 ldrh r9, [r2] @ r9 <- load INST from new PC
9881 @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
9887 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9
9888 mov rINST, r9 @ publish new rINST
9895 GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9
9896 mov rINST, r9 @ publish new rINST
9905 ldr r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
9907 str r9, [r10, #offStackSaveArea_localRefCookie] @ newFp->localRefCookie=top
9908 mov r9, r3 @ r9<- glue->self (preserve)
9929 @ native return; r9=self, r10=newSaveArea
9932 ldr r1, [r9, #offThread_exception] @ check for exception
9933 str rFP, [r9, #offThread_curFrame] @ self->curFrame = fp
9935 str r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
9969 mov r1, r9 @ A1<- methodCallRange
9987 mov r9, #0
9992 ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
10074 mov r2, r9 @ r2<- exception
10108 mov r0, r9 @ r0<- exception
10116 streq r9, [r10, #offThread_exception] @ yes, restore the exception
10119 .LnotCaughtLocally: @ r9=exception, r10=self
10142 ldr r3, [r9, #offObject_clazz]
10150 str r9, [r10, #offThread_exception] @ restore exception
10151 mov r0, r9 @ r0<- exception
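
Taken together, the r9 references above reduce to a few recurring decode patterns in the ARM mterp handlers: extracting the 8-bit AA register index from rINST (lsr #8), extracting the 4-bit A index (lsr #8 followed by and #15), doubling a signed 16-bit branch offset from code units to a byte displacement (FETCH_S then movs ... asl #1, which also sets the flags for the sign check), and forming a frame-pointer-relative slot address &fp[N] = rFP + N*4 for the 32-bit vregs. The C below is a minimal sketch of those decodes with hypothetical helper names; it illustrates the pattern and is not code taken from the interpreter itself.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helpers mirroring the r9 decode patterns listed above. */

    /* AA: full 8-bit register index ("mov r9, rINST, lsr #8"). */
    static uint32_t decode_AA(uint16_t inst) {
        return inst >> 8;
    }

    /* A: low nibble of the high byte ("lsr #8" then "and r9, r9, #15"). */
    static uint32_t decode_A(uint16_t inst) {
        return (inst >> 8) & 0xf;
    }

    /* Branch displacement: signed 16-bit offset in code units, doubled to a
     * byte offset ("movs r9, r9, asl #1"); the movs also sets N/Z for the
     * taken/not-taken sign test. */
    static int32_t branch_byte_offset(int16_t code_units) {
        return (int32_t)code_units * 2;
    }

    /* &fp[N]: vreg slots are 32 bits wide, so the address is rFP + (N << 2). */
    static uint32_t *vreg_addr(uint32_t *fp, uint32_t n) {
        return fp + n;
    }

    int main(void) {
        uint16_t inst = 0x3B01;                /* example 16-bit instruction word */
        uint32_t frame[64] = {0};              /* stand-in for the vreg frame at rFP */

        *vreg_addr(frame, decode_A(inst)) = 7; /* SET_VREG-style store into vA */
        printf("AA=%u A=%u vA=%u\n",
               (unsigned)decode_AA(inst), (unsigned)decode_A(inst),
               (unsigned)frame[decode_A(inst)]);
        printf("branch bytes (offset -3 code units) = %d\n",
               (int)branch_byte_offset(-3));
        return 0;
    }

The not-taken path of the if-* handlers loads the constant 4 into r9 because each compare-and-branch instruction is two code units (four bytes) long, so falling through is handled as a +4 byte branch fed into the same FETCH_ADVANCE_INST_RB(r9) sequence as a taken branch.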