Lines matching references to Rm (class ArmToArm64Assembler):
375 uint32_t Rm; in dataProcessingCommon() local
381 Rm = mAddrMode.reg_imm_Rm; in dataProcessingCommon()
387 Rm = Op2; in dataProcessingCommon()
397 case opADD: *mPC++ = A64_ADD_W(Rd, Rn, Rm, shift, amount); break; in dataProcessingCommon()
398 case opAND: *mPC++ = A64_AND_W(Rd, Rn, Rm, shift, amount); break; in dataProcessingCommon()
399 case opORR: *mPC++ = A64_ORR_W(Rd, Rn, Rm, shift, amount); break; in dataProcessingCommon()
400 case opMVN: *mPC++ = A64_ORN_W(Rd, Rn, Rm, shift, amount); break; in dataProcessingCommon()
401 case opSUB: *mPC++ = A64_SUB_W(Rd, Rn, Rm, shift, amount, s); break; in dataProcessingCommon()
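The switch above maps each ARM data-processing opcode onto one A64 "data-processing (shifted register)" W-form encoder (A64_ADD_W, A64_AND_W, ...). A minimal sketch of the layout those encoders share, assuming the imm6/Rn/Rd terms that the truncated returns further down omit sit at their architectural positions:

    // Hypothetical helper, not in the source: the field layout common to
    // the A64_*_W shifted-register encoders listed below.
    static uint32_t a64_dp_shifted_w(uint32_t op8,    // bits [31:24], e.g. 0x0B for ADD
                                     uint32_t shift,  // shift-type selector, bits [23:22]
                                     uint32_t Rm, uint32_t amount,
                                     uint32_t Rn, uint32_t Rd)
    {
        return (op8 << 24) | (shift << 22) | (Rm << 16) |
               (amount << 10) | (Rn << 5) | Rd;
    }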
471 int Rm = mAddrMode.reg_imm_Rm; in ADDR_ADD() local
473 *mPC++ = A64_ADD_X_Wm_SXTW(Rd, Rn, Rm, amount); in ADDR_ADD()
477 int Rm = Op2; in ADDR_ADD() local
479 *mPC++ = A64_ADD_X_Wm_SXTW(Rd, Rn, Rm, amount); in ADDR_ADD()
487 int Rm = mTmpReg1; in ADDR_ADD() local
489 *mPC++ = A64_ADD_X_Wm_SXTW(Rd, Rn, Rm, amount); in ADDR_ADD()
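All three ADDR_ADD code paths funnel into the same A64_ADD_X_Wm_SXTW emission: a 64-bit add of a sign-extended 32-bit index. A semantics sketch, with the helper name hypothetical:

    // What ADD Xd, Xn, Wm, SXTW #amount computes (amount in 0..4).
    static int64_t add_x_wm_sxtw(int64_t Xn, int32_t Wm, unsigned amount)
    {
        return Xn + ((int64_t)Wm << amount);  // sign-extend Wm, then shift left
    }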
518 void ArmToArm64Assembler::MLA(int cc, int s, int Rd, int Rm, int Rs, int Rn) in MLA() argument
522 *mPC++ = A64_MADD_W(Rd, Rm, Rs, Rn); in MLA()
526 void ArmToArm64Assembler::MUL(int cc, int s, int Rd, int Rm, int Rs) in MUL() argument
530 *mPC++ = A64_MADD_W(Rd, Rm, Rs, mZeroReg); in MUL()
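MLA and MUL both lower onto a single A64 MADD; MUL just passes mZeroReg as the addend. A sketch of the shared semantics (helper name hypothetical):

    // MADD Wd, Wn, Wm, Wa computes Wd = Wa + Wn * Wm,
    // so MUL is the Wa == WZR special case seen at line 530.
    static uint32_t madd_w(uint32_t Wn, uint32_t Wm, uint32_t Wa)
    {
        return Wa + Wn * Wm;
    }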
774 int Rd, int Rm, int Rs) in SMUL() argument
779 *mPC++ = A64_SBFM_W(mTmpReg1, Rm, 16, 31); in SMUL()
781 *mPC++ = A64_SBFM_W(mTmpReg1, Rm, 0, 15); in SMUL()
793 void ArmToArm64Assembler::SMULW(int cc, int y, int Rd, int Rm, int Rs) in SMULW() argument
802 *mPC++ = A64_SBFM_W(mTmpReg2, Rm, 0, 31); in SMULW()
809 void ArmToArm64Assembler::SMLA(int cc, int xy, int Rd, int Rm, int Rs, int Rn) in SMLA() argument
814 *mPC++ = A64_SBFM_W(mTmpReg1, Rm, 0, 15); in SMLA()
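ARM's halfword multiplies have no single A64 counterpart, so SMUL and SMLA first extract the requested signed 16-bit half of Rm with SBFM: immr=16, imms=31 acts as ASR #16 (top half), immr=0, imms=15 as SXTH (bottom half), and SMULW's immr=0, imms=31 is a plain sign-preserving copy. A sketch of the SMULBB case, assuming the temporaries then feed a signed multiply (the multiply itself does not reference Rm, so it is absent from the matches):

    static int32_t smulbb(int32_t Rm, int32_t Rs)
    {
        int32_t m = (int16_t)Rm;  // SBFM Wt1, Wm, #0, #15 (SXTH)
        int32_t s = (int16_t)Rs;  // SBFM Wt2, Ws, #0, #15
        return m * s;             // multiply of the sign-extended halves
    }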
836 void ArmToArm64Assembler::UXTB16(int cc, int Rd, int Rm, int rotate) in UXTB16() argument
840 *mPC++ = A64_EXTR_W(mTmpReg1, Rm, Rm, rotate * 8); in UXTB16()
884 uint32_t ArmToArm64Assembler::reg_imm(int Rm, int type, uint32_t shift) in reg_imm() argument
886 mAddrMode.reg_imm_Rm = Rm; in reg_imm()
924 uint32_t ArmToArm64Assembler::reg_scale_pre(int Rm, int type, in reg_scale_pre() argument
934 mAddrMode.reg_offset = Rm; in reg_scale_pre()
963 uint32_t ArmToArm64Assembler::reg_pre(int Rm, int W) in reg_pre() argument
972 mAddrMode.reg_offset = Rm; in reg_pre()
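reg_imm(), reg_scale_pre() and reg_pre() emit nothing themselves; they record the register operand in mAddrMode for whichever data-transfer emitter runs next. A sketch of that state, with only the two fields visible above taken from the listing:

    struct AddrMode {
        int reg_imm_Rm;  // written by reg_imm()
        int reg_offset;  // written by reg_scale_pre() and reg_pre()
        // shift type/amount, writeback flag, etc. presumably live here too
    };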
1003 uint32_t Rn, uint32_t Rm) in A64_LDRSTR_Wm_SXTW_0() argument
1008 dataTransferOpName[op], Rt, Rn, Rm); in A64_LDRSTR_Wm_SXTW_0()
1009 return (dataTransferOpCode[op] | (Rm << 16) | (Rn << 5) | Rt); in A64_LDRSTR_Wm_SXTW_0()
1014 dataTransferOpName[op], Rt, Rn, Rm); in A64_LDRSTR_Wm_SXTW_0()
1015 return (dataTransferOpCode[op] | (0x1 << 30) | (Rm << 16) | (Rn << 5) | Rt); in A64_LDRSTR_Wm_SXTW_0()
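Both returns in A64_LDRSTR_Wm_SXTW_0 build a register-offset transfer (Wm, SXTW, shift #0); the second OR-s in (0x1 << 30), the low bit of the size field, turning the 32-bit (Wt) form into the 64-bit (Xt) form of the same opcode. A sketch, assuming dataTransferOpCode[op] already carries the option/extend bits:

    static uint32_t ldrstr_wm_sxtw_0(uint32_t base, bool is64,
                                     uint32_t Rm, uint32_t Rn, uint32_t Rt)
    {
        // bit 30 selects Xt over Wt within the same base opcode
        return base | (is64 ? (0x1u << 30) : 0) | (Rm << 16) | (Rn << 5) | Rt;
    }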
1046 uint32_t Rm, in A64_ADD_X_Wm_SXTW() argument
1049 LOG_INSTR("ADD X%d, X%d, W%d, SXTW #%d\n", Rd, Rn, Rm, amount); in A64_ADD_X_Wm_SXTW()
1050 return ((0x8B << 24) | (0x1 << 21) | (Rm << 16) | in A64_ADD_X_Wm_SXTW()
1057 uint32_t Rm, in A64_SUB_X_Wm_SXTW() argument
1060 LOG_INSTR("SUB X%d, X%d, W%d, SXTW #%d\n", Rd, Rn, Rm, amount); in A64_SUB_X_Wm_SXTW()
1061 return ((0xCB << 24) | (0x1 << 21) | (Rm << 16) | in A64_SUB_X_Wm_SXTW()
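A64_ADD_X_Wm_SXTW and A64_SUB_X_Wm_SXTW differ only in the opcode byte, 0x8B vs 0xCB, i.e. in bit 30, the add/subtract selector; bit 21 marks the extended-register form in both. The truncated tails of the two returns are assumed here to hold the SXTW option (0b110) and the shift amount:

    // Hypothetical completion of the truncated returns above.
    static uint32_t addsub_x_wm_sxtw(bool sub, uint32_t Rd, uint32_t Rn,
                                     uint32_t Rm, uint32_t amount)
    {
        return (((sub ? 0xCBu : 0x8Bu) << 24) | (0x1 << 21) | (Rm << 16) |
                (0x6 << 13) |  // option = SXTW (assumed)
                (amount << 10) | (Rn << 5) | Rd);
    }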
1073 uint32_t Rm, uint32_t shift, in A64_ADD_X() argument
1077 Rd, Rn, Rm, shift_codes[shift], amount); in A64_ADD_X()
1078 return ((0x8B << 24) | (shift << 22) | (Rm << 16) | in A64_ADD_X()
1096 uint32_t Rm, uint32_t shift, in A64_ADD_W() argument
1100 Rd, Rn, Rm, shift_codes[shift], amount); in A64_ADD_W()
1101 return ((0x0B << 24) | (shift << 22) | (Rm << 16) | in A64_ADD_W()
1106 uint32_t Rm, uint32_t shift, in A64_SUB_W() argument
1113 Rd, Rn, Rm, shift_codes[shift], amount); in A64_SUB_W()
1114 return ((0x4B << 24) | (shift << 22) | (Rm << 16) | in A64_SUB_W()
1120 Rd, Rn, Rm, shift_codes[shift], amount); in A64_SUB_W()
1121 return ((0x6B << 24) | (shift << 22) | (Rm << 16) | in A64_SUB_W()
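A64_SUB_W carries two returns because ARM SUB may or may not set flags: 0x4B << 24 encodes SUB, while 0x6B << 24 also sets bit 29 (the S flag) and so encodes SUBS; presumably the s argument seen at line 401 picks between them. Sketch:

    static uint32_t sub_w_opcode(bool set_flags)
    {
        // 0x4B and 0x6B differ only in bit 29 of the instruction (S).
        return (set_flags ? 0x6Bu : 0x4Bu) << 24;
    }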
1127 uint32_t Rm, uint32_t shift, in A64_AND_W() argument
1131 Rd, Rn, Rm, shift_codes[shift], amount); in A64_AND_W()
1132 return ((0x0A << 24) | (shift << 22) | (Rm << 16) | in A64_AND_W()
1137 uint32_t Rm, uint32_t shift, in A64_ORR_W() argument
1141 Rd, Rn, Rm, shift_codes[shift], amount); in A64_ORR_W()
1142 return ((0x2A << 24) | (shift << 22) | (Rm << 16) | in A64_ORR_W()
1147 uint32_t Rm, uint32_t shift, in A64_ORN_W() argument
1151 Rd, Rn, Rm, shift_codes[shift], amount); in A64_ORN_W()
1152 return ((0x2A << 24) | (shift << 22) | (0x1 << 21) | (Rm << 16) | in A64_ORN_W()
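A64_ORN_W is ORR (0x2A) with the N bit (bit 21) set, which inverts the shifted Rm operand; that is what lets the switch at line 400 lower ARM MVN to ORN, presumably with the zero register as Rn so that Rd = ~Rm. A sketch under that assumption:

    // MVN Rd, Rm as ORN Wd, WZR, Wm (no shift); zero_reg assumed to be WZR's number (31).
    static uint32_t mvn_w(uint32_t Rd, uint32_t Rm, uint32_t zero_reg)
    {
        return (0x2A << 24) | (0x1 << 21) | (Rm << 16) | (zero_reg << 5) | Rd;
    }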
1157 uint32_t Rm, uint32_t cond) in A64_CSEL_X() argument
1159 LOG_INSTR("CSEL X%d, X%d, X%d, %s\n", Rd, Rn, Rm, cc_codes[cond]); in A64_CSEL_X()
1160 return ((0x9A << 24) | (0x1 << 23) | (Rm << 16) | (cond << 12) | (Rn << 5) | Rd); in A64_CSEL_X()
1164 uint32_t Rm, uint32_t cond) in A64_CSEL_W() argument
1166 LOG_INSTR("CSEL W%d, W%d, W%d, %s\n", Rd, Rn, Rm, cc_codes[cond]); in A64_CSEL_W()
1167 return ((0x1A << 24) | (0x1 << 23) | (Rm << 16) | (cond << 12) | (Rn << 5) | Rd); in A64_CSEL_W()
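The two CSEL encoders differ only in bit 31 (sf): 0x9A << 24 gives the X form, 0x1A << 24 the W form, with cond at bits 15:12. The selection semantics:

    // CSEL Rd, Rn, Rm, cond : Rd = cond ? Rn : Rm
    static uint64_t csel(bool cond_holds, uint64_t Rn, uint64_t Rm)
    {
        return cond_holds ? Rn : Rm;
    }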
1198 uint32_t Rm, uint32_t Ra) in A64_SMADDL() argument
1200 LOG_INSTR("SMADDL X%d, W%d, W%d, X%d\n",Rd, Rn, Rm, Ra); in A64_SMADDL()
1201 return ((0x9B << 24) | (0x1 << 21) | (Rm << 16)|(Ra << 10)|(Rn << 5) | Rd); in A64_SMADDL()
1205 uint32_t Rm, uint32_t Ra) in A64_MADD_W() argument
1207 LOG_INSTR("MADD W%d, W%d, W%d, W%d\n",Rd, Rn, Rm, Ra); in A64_MADD_W()
1208 return ((0x1B << 24) | (Rm << 16) | (Ra << 10) |(Rn << 5) | Rd); in A64_MADD_W()
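A64_SMADDL is the widening form that A64_MADD_W lacks: both W sources are sign-extended to 64 bits before the multiply-add, which is presumably why the halfword emulations above route through temporaries it can consume. Semantics sketch:

    // SMADDL Xd, Wn, Wm, Xa : Xd = Xa + sxt64(Wn) * sxt64(Wm)
    static int64_t smaddl(int32_t Wn, int32_t Wm, int64_t Xa)
    {
        return Xa + (int64_t)Wn * (int64_t)Wm;
    }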
1234 uint32_t Rm, uint32_t lsb) in A64_EXTR_W() argument
1236 LOG_INSTR("EXTR W%d, W%d, W%d, #%d\n", Rd, Rn, Rm, lsb); in A64_EXTR_W()
1237 return (0x13 << 24) | (0x1 << 23) | (Rm << 16) | (lsb << 10) | (Rn << 5) | Rd; in A64_EXTR_W()
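EXTR extracts the 32-bit window starting at bit lsb from the concatenation Wn:Wm; with both sources equal, as in the UXTB16 lowering at line 840, it degenerates to ROR Wm, #lsb. Semantics sketch:

    // EXTR Wd, Wn, Wm, #lsb : bits [lsb+31 : lsb] of Wn:Wm (lsb in 0..31)
    static uint32_t extr_w(uint32_t Wn, uint32_t Wm, unsigned lsb)
    {
        return lsb ? ((Wm >> lsb) | (Wn << (32 - lsb))) : Wm;
    }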