/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


/*! \file LowerHelper.cpp
    \brief This file implements helper functions for lowering.

With NCG O0, all registers are hard-coded.
With NCG O1, the lowering module uses variables that are later bound to physical registers by the register allocator.

Register types: FS: 32-bit or 64-bit;
                XMM: SS (32-bit) or SD (64-bit);
                GPR: 8-bit, 16-bit, or 32-bit.
LowOpndRegType tells whether an operand is gpr, xmm, or fs;
OpndSize can be OpndSize_8, OpndSize_16, OpndSize_32, or OpndSize_64.

A single native instruction can use multiple physical registers.
We can't call freeReg in the middle of emitting a native instruction,
since it may free a physical register that is still used by an operand and
cause two operands to be allocated to the same physical register.

When allocating a physical register for an operand, we must not spill the
operands that are already allocated. To guarantee that, we call
startNativeCode before each native instruction, which sets the flag
"canSpill" to true for each physical register;
when a physical register is allocated, we set its "canSpill" flag to false;
at the end of each native instruction, endNativeCode sets "canSpill" back to
true for each physical register.
*/
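
/* A minimal sketch of the protocol above (illustrative only, not compiled);
   it mirrors the pattern used by dump_reg_reg and friends in this file: */
#if 0
void example_two_operand_instruction(int srcReg, bool srcPhysical,
                                     int dstReg, bool dstPhysical,
                                     LowOpndRegType type) {
    startNativeCode(-1, -1);   //canSpill = true for every physical register
    freeReg(true);             //safe: nothing emitted for this instruction yet
    int srcAll = registerAlloc(type, srcReg, srcPhysical, true);
    donotSpillReg(srcAll);     //src is in use: canSpill = false
    int dstAll = registerAlloc(type, dstReg, dstPhysical, true); //cannot evict src
    lower_reg_reg(Mnemonic_MOV, ATOM_NORMAL, OpndSize_32, srcAll, dstAll, type);
    endNativeCode();           //canSpill = true again for every physical register
}
#endif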

#include "libdex/DexOpcodes.h"
#include "libdex/DexFile.h"
#include "Lower.h"
#include "NcgAot.h"
#include "enc_wrapper.h"
#include "vm/mterp/Mterp.h"
#include "NcgHelper.h"
#include <math.h>
#include "interp/InterpState.h"

extern "C" int64_t __divdi3(int64_t, int64_t);
extern "C" int64_t __moddi3(int64_t, int64_t);
bool isScratchPhysical;
LowOp* lirTable[200];
int num_lirs_in_table = 0;

//Four tables are defined: GPR integer ALU ops, ALU ops in FPU, SSE 32-bit, SSE 64-bit.
//The index into each table is the ALU opcode:
//add_opc,    or_opc,     adc_opc,    sbb_opc,
//and_opc,    sub_opc,    xor_opc,    cmp_opc,
//mul_opc,    imul_opc,   div_opc,    idiv_opc,
//sll_opc,    srl_opc,    sra, (SSE)
//shl_opc,    shr_opc,    sal_opc,    sar_opc, //integer shift
//neg_opc,    not_opc,    andn_opc, (SSE)
//n_alu
//!mnemonic for integer ALU operations
const Mnemonic map_of_alu_opcode_2_mnemonic[] = {
    Mnemonic_ADD,  Mnemonic_OR,   Mnemonic_ADC,  Mnemonic_SBB,
    Mnemonic_AND,  Mnemonic_SUB,  Mnemonic_XOR,  Mnemonic_CMP,
    Mnemonic_MUL,  Mnemonic_IMUL, Mnemonic_DIV,  Mnemonic_IDIV,
    Mnemonic_Null, Mnemonic_Null, Mnemonic_Null,
    Mnemonic_SHL,  Mnemonic_SHR,  Mnemonic_SAL,  Mnemonic_SAR,
    Mnemonic_NEG,  Mnemonic_NOT,  Mnemonic_Null,
    Mnemonic_Null
};
//!mnemonic for ALU operations in FPU
const Mnemonic map_of_fpu_opcode_2_mnemonic[] = {
    Mnemonic_FADD,  Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null,  Mnemonic_FSUB,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_FMUL,  Mnemonic_Null,  Mnemonic_FDIV,  Mnemonic_Null,
    Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null
};
//!mnemonic for SSE 32-bit
const Mnemonic map_of_sse_opcode_2_mnemonic[] = {
    Mnemonic_ADDSD,  Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null,   Mnemonic_SUBSD, Mnemonic_XORPD, Mnemonic_Null,
    Mnemonic_MULSD,  Mnemonic_Null,  Mnemonic_DIVSD, Mnemonic_Null,
    Mnemonic_Null,   Mnemonic_Null,
    Mnemonic_Null,   Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null,   Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null
};
//!mnemonic for SSE 64-bit integer
const Mnemonic map_of_64_opcode_2_mnemonic[] = {
    Mnemonic_PADDQ, Mnemonic_POR,   Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_PAND,  Mnemonic_PSUBQ, Mnemonic_PXOR,  Mnemonic_Null,
    Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_PSLLQ, Mnemonic_PSRLQ, Mnemonic_Null,
    Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null,  Mnemonic_Null,  Mnemonic_PANDN,
    Mnemonic_Null
};
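
/* Illustrative only (not compiled), assuming the ALU_Opcode enum values
   follow the ordering listed above (add_opc, or_opc, ...): */
#if 0
Mnemonic m1 = map_of_alu_opcode_2_mnemonic[add_opc];  //Mnemonic_ADD
Mnemonic m2 = map_of_64_opcode_2_mnemonic[sll_opc];   //Mnemonic_PSLLQ
#endif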

////////////////////////////////////////////////
//!update fields of LowOpndReg

//!
void set_reg_opnd(LowOpndReg* op_reg, int reg, bool isPhysical, LowOpndRegType type) {
    op_reg->regType = type;
    if(isPhysical) {
        op_reg->logicalReg = -1;
        op_reg->physicalReg = reg;
    }
    else
        op_reg->logicalReg = reg;
    return;
}
//!update fields of LowOpndMem

//!
void set_mem_opnd(LowOpndMem* mem, int disp, int base, bool isPhysical) {
    mem->m_disp.value = disp;
    mem->hasScale = false;
    mem->m_base.regType = LowOpndRegType_gp;
    if(isPhysical) {
        mem->m_base.logicalReg = -1;
        mem->m_base.physicalReg = base;
    } else {
        mem->m_base.logicalReg = base;
    }
    return;
}
//!update fields of LowOpndMem

//!
void set_mem_opnd_scale(LowOpndMem* mem, int base, bool isPhysical, int disp, int index, bool indexPhysical, int scale) {
    mem->hasScale = true;
    mem->m_base.regType = LowOpndRegType_gp;
    if(isPhysical) {
        mem->m_base.logicalReg = -1;
        mem->m_base.physicalReg = base;
    } else {
        mem->m_base.logicalReg = base;
    }
    if(indexPhysical) {
        mem->m_index.logicalReg = -1;
        mem->m_index.physicalReg = index;
    } else {
        mem->m_index.logicalReg = index;
    }
    mem->m_disp.value = disp;
    mem->m_scale.value = scale;
    return;
}
//!return either LowOpndRegType_xmm or LowOpndRegType_gp

//!
inline LowOpndRegType getTypeFromIntSize(OpndSize size) {
    return size == OpndSize_64 ? LowOpndRegType_xmm : LowOpndRegType_gp;
}

// copied from the JIT compiler
typedef struct AtomMemBlock {
    size_t bytesAllocated;
    struct AtomMemBlock *next;
    char ptr[0];
} AtomMemBlock;

#define ATOMBLOCK_DEFAULT_SIZE 4096
AtomMemBlock *atomMemHead = NULL;
AtomMemBlock *currentAtomMem = NULL;
void * atomNew(size_t size) {
    lowOpTimeStamp++; //one LowOp constructed
    if(atomMemHead == NULL) {
        atomMemHead = (AtomMemBlock*)malloc(sizeof(AtomMemBlock) + ATOMBLOCK_DEFAULT_SIZE);
        if(atomMemHead == NULL) {
            ALOGE("Memory allocation failed");
            return NULL;
        }
        currentAtomMem = atomMemHead;
        currentAtomMem->bytesAllocated = 0;
        currentAtomMem->next = NULL;
    }
    size = (size + 3) & ~3;
    if (size > ATOMBLOCK_DEFAULT_SIZE) {
        ALOGE("Requesting %d bytes which exceed the maximal size allowed", size);
        return NULL;
    }
retry:
    if (size + currentAtomMem->bytesAllocated <= ATOMBLOCK_DEFAULT_SIZE) {
        void *ptr;
        ptr = &currentAtomMem->ptr[currentAtomMem->bytesAllocated];
        currentAtomMem->bytesAllocated += size; //advance the cursor so successive requests don't overlap
        return ptr;
    }
    if (currentAtomMem->next) {
        currentAtomMem = currentAtomMem->next;
        goto retry;
    }
    /* Time to allocate a new arena */
    AtomMemBlock *newAtomMem = (AtomMemBlock*)malloc(sizeof(AtomMemBlock) + ATOMBLOCK_DEFAULT_SIZE);
    if(newAtomMem == NULL) {
        ALOGE("Memory allocation failed");
        return NULL;
    }
    newAtomMem->bytesAllocated = 0;
    newAtomMem->next = NULL;
    currentAtomMem->next = newAtomMem;
    currentAtomMem = newAtomMem;
    goto retry;
}

void freeAtomMem() {
    //LOGI("free all atom memory");
    AtomMemBlock * tmpMem = atomMemHead;
    while(tmpMem != NULL) {
        tmpMem->bytesAllocated = 0;
        tmpMem = tmpMem->next;
    }
    currentAtomMem = atomMemHead;
}
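
/* Illustrative only (not compiled): how the arena is used by the dump_*
   helpers below, and recycled wholesale between compilation units: */
#if 0
LowOpImm* op = (LowOpImm*)atomNew(sizeof(LowOpImm)); //carve a 4-byte-aligned chunk
//... fill in op ...
freeAtomMem(); //reset every block's cursor; the blocks themselves are reused
#endif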

LowOpImm* dump_special(AtomOpCode cc, int imm) {
    LowOpImm* op = (LowOpImm*)atomNew(sizeof(LowOpImm));
    op->lop.opCode = Mnemonic_NULL;
    op->lop.opCode2 = cc;
    op->lop.opnd1.type = LowOpndType_Imm;
    op->lop.numOperands = 1;
    op->immOpnd.value = imm;
    //stream = encoder_imm(m, size, imm, stream);
    return op;
}

LowOpLabel* lower_label(Mnemonic m, OpndSize size, int imm, const char* label, bool isLocal) {
    stream = encoder_imm(m, size, imm, stream);
    return NULL;
}

LowOpLabel* dump_label(Mnemonic m, OpndSize size, int imm,
               const char* label, bool isLocal) {
    return lower_label(m, size, imm, label, isLocal);
}

LowOpNCG* dump_ncg(Mnemonic m, OpndSize size, int imm) {
    stream = encoder_imm(m, size, imm, stream);
    return NULL;
}

//!update fields of LowOp and generate an x86 instruction with a single immediate operand

//!
LowOpImm* lower_imm(Mnemonic m, OpndSize size, int imm, bool updateTable) {
    stream = encoder_imm(m, size, imm, stream);
    return NULL;
}

LowOpImm* dump_imm(Mnemonic m, OpndSize size, int imm) {
    return lower_imm(m, size, imm, true);
}

LowOpImm* dump_imm_with_codeaddr(Mnemonic m, OpndSize size,
               int imm, char* codePtr) {
    encoder_imm(m, size, imm, codePtr);
    return NULL;
}

//!update fields of LowOp and generate an x86 instruction that takes a single memory operand

//!With NCG O1, we call freeReg to free up physical registers, then call registerAlloc to allocate a physical register for the memory base
LowOpMem* lower_mem(Mnemonic m, AtomOpCode m2, OpndSize size,
               int disp, int base_reg) {
    stream = encoder_mem(m, size, disp, base_reg, true, stream);
    return NULL;
}

LowOpMem* dump_mem(Mnemonic m, AtomOpCode m2, OpndSize size,
               int disp, int base_reg, bool isBasePhysical) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        freeReg(true);
        //type of the base is gpr
        int regAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        return lower_mem(m, m2, size, disp, regAll);
    } else {
        stream = encoder_mem(m, size, disp, base_reg, isBasePhysical, stream);
        return NULL;
    }
}
//!update fields of LowOp and generate an x86 instruction that takes a single reg operand

//!With NCG O1, we call freeReg to free up physical registers, then call registerAlloc to allocate a physical register for the single operand
LowOpReg* lower_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
               int reg, LowOpndRegType type) {
    stream = encoder_reg(m, size, reg, true, type, stream);
    return NULL;
}

LowOpReg* dump_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
               int reg, bool isPhysical, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        freeReg(true);
        if(m == Mnemonic_MUL || m == Mnemonic_IDIV) {
            //these two instructions use eax & edx implicitly
            touchEax();
            touchEdx();
        }
        int regAll = registerAlloc(type, reg, isPhysical, true);
        return lower_reg(m, m2, size, regAll, type);
    } else {
        stream = encoder_reg(m, size, reg, isPhysical, type, stream);
        return NULL;
    }
}
LowOpReg* dump_reg_noalloc(Mnemonic m, OpndSize size,
               int reg, bool isPhysical, LowOpndRegType type) {
    return lower_reg(m, ATOM_NORMAL, size, reg, type);
}

LowOpRegReg* lower_reg_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
                 int reg, int reg2, LowOpndRegType type) {
    if(m == Mnemonic_FUCOMP || m == Mnemonic_FUCOM) {
        stream = encoder_compare_fp_stack(m == Mnemonic_FUCOMP,
                                          reg-reg2, size==OpndSize_64, stream);
    }
    else {
        stream = encoder_reg_reg(m, size, reg, true, reg2, true, type, stream);
    }
    return NULL;
}

//!update fields of LowOp and generate an x86 instruction that takes two reg operands

//!Here, both registers are physical
LowOpRegReg* dump_reg_reg_noalloc(Mnemonic m, OpndSize size,
                           int reg, bool isPhysical,
                           int reg2, bool isPhysical2, LowOpndRegType type) {
    return lower_reg_reg(m, ATOM_NORMAL, size, reg, reg2, type);
}

inline bool isMnemonicMove(Mnemonic m) {
    return (m == Mnemonic_MOV || m == Mnemonic_MOVQ ||
            m == Mnemonic_MOVSS || m == Mnemonic_MOVSD);
}
//!update fields of LowOp and generate an x86 instruction that takes two reg operands

//!here the dst reg is already allocated to a physical reg
//!we should not spill the physical register for dst when allocating for src
LowOpRegReg* dump_reg_reg_noalloc_dst(Mnemonic m, OpndSize size,
                               int reg, bool isPhysical,
                               int reg2, bool isPhysical2, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        int regAll = registerAlloc(type, reg, isPhysical, true);
        /* elide a move from a register to itself */
        if(isMnemonicMove(m) && regAll == reg2) return NULL;
        return lower_reg_reg(m, ATOM_NORMAL, size, regAll, reg2, type);
    } else {
        stream = encoder_reg_reg(m, size, reg, isPhysical, reg2, isPhysical2, type, stream);
        return NULL;
    }
}
//!update fields of LowOp and generate an x86 instruction that takes two reg operands

//!here the src reg is already allocated to a physical reg
LowOpRegReg* dump_reg_reg_noalloc_src(Mnemonic m, AtomOpCode m2, OpndSize size,
                               int reg, bool isPhysical,
                               int reg2, bool isPhysical2, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        int regAll2;
        if(isMnemonicMove(m) && checkTempReg2(reg2, type, isPhysical2, reg)) { //dst reg is logical
            //only from get_virtual_reg_all
            regAll2 = registerAllocMove(reg2, type, isPhysical2, reg);
        } else {
            regAll2 = registerAlloc(type, reg2, isPhysical2, true);
            return lower_reg_reg(m, m2, size, reg, regAll2, type);
        }
    } else {
        stream = encoder_reg_reg(m, size, reg, isPhysical, reg2, isPhysical2, type, stream);
        return NULL;
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes two reg operands

//!
LowOpRegReg* dump_reg_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
                   int reg, bool isPhysical,
                   int reg2, bool isPhysical2, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        //reg is the source if m is MOV
        freeReg(true);
        int regAll = registerAlloc(type, reg, isPhysical, true);
        int regAll2;
        LowOpRegReg* op = NULL;
#ifdef MOVE_OPT2
        if(isMnemonicMove(m) &&
           ((reg != PhysicalReg_EDI && reg != PhysicalReg_ESP && reg != PhysicalReg_EBP) || (!isPhysical)) &&
           isPhysical2 == false) { //dst reg is logical
            //called from move_reg_to_reg
            regAll2 = registerAllocMove(reg2, type, isPhysical2, regAll);
        } else {
#endif
            donotSpillReg(regAll);
            regAll2 = registerAlloc(type, reg2, isPhysical2, true);
            op = lower_reg_reg(m, m2, size, regAll, regAll2, type);
#ifdef MOVE_OPT2
        }
#endif
        endNativeCode();
        return op;
    }
    else {
        stream = encoder_reg_reg(m, size, reg, isPhysical, reg2, isPhysical2, type, stream);
    }
    return NULL;
}

LowOpRegMem* lower_mem_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
                 int disp, int base_reg,
                 MemoryAccessType mType, int mIndex,
                 int reg, LowOpndRegType type, bool isMoves) {
    if(m == Mnemonic_MOVSX) {
        stream = encoder_moves_mem_to_reg(size, disp, base_reg, true,
                                          reg, true, stream);
    }
    else if(m == Mnemonic_MOVZX) {
        stream = encoder_movez_mem_to_reg(size, disp, base_reg, true,
                                          reg, true, stream);
    }
    else {
        stream = encoder_mem_reg(m, size, disp, base_reg, true,
                                 reg, true, type, stream);
    }
    return NULL;
}

//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!Here, operands are already allocated to physical registers
LowOpRegMem* dump_mem_reg_noalloc(Mnemonic m, OpndSize size,
                           int disp, int base_reg, bool isBasePhysical,
                           MemoryAccessType mType, int mIndex,
                           int reg, bool isPhysical, LowOpndRegType type) {
    return lower_mem_reg(m, ATOM_NORMAL, size, disp, base_reg, mType, mIndex, reg, type, false);
}
//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!Here, the memory operand is already allocated to a physical register
LowOpRegMem* dump_mem_reg_noalloc_mem(Mnemonic m, AtomOpCode m2, OpndSize size,
                               int disp, int base_reg, bool isBasePhysical,
                               MemoryAccessType mType, int mIndex,
                               int reg, bool isPhysical, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        int regAll = registerAlloc(type, reg, isPhysical, true);
        return lower_mem_reg(m, m2, size, disp, base_reg, mType, mIndex, regAll, type, false);
    } else {
        stream = encoder_mem_reg(m, size, disp, base_reg, isBasePhysical,
                                 reg, isPhysical, type, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!
LowOpRegMem* dump_mem_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
                   int disp, int base_reg, bool isBasePhysical,
                   MemoryAccessType mType, int mIndex,
                   int reg, bool isPhysical, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        //it is okay to use the same physical register
        if(isMnemonicMove(m)) {
            freeReg(true);
        } else {
            donotSpillReg(baseAll);
        }
        int regAll = registerAlloc(type, reg, isPhysical, true);
        endNativeCode();
        return lower_mem_reg(m, m2, size, disp, baseAll, mType, mIndex, regAll, type, false);
    } else {
        stream = encoder_mem_reg(m, size, disp, base_reg, isBasePhysical,
                                 reg, isPhysical, type, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!
LowOpRegMem* dump_moves_mem_reg(Mnemonic m, OpndSize size,
                         int disp, int base_reg, bool isBasePhysical,
             int reg, bool isPhysical) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        donotSpillReg(baseAll);
        int regAll = registerAlloc(LowOpndRegType_gp, reg, isPhysical, true);
        endNativeCode();
        return lower_mem_reg(m, ATOM_NORMAL, size, disp, baseAll, MemoryAccess_Unknown, -1,
            regAll, LowOpndRegType_gp, true/*moves*/);
    } else {
        stream = encoder_moves_mem_to_reg(size, disp, base_reg, isBasePhysical, reg, isPhysical, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!
LowOpRegMem* dump_movez_mem_reg(Mnemonic m, OpndSize size,
             int disp, int base_reg, bool isBasePhysical,
             int reg, bool isPhysical) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        donotSpillReg(baseAll);
        int regAll = registerAlloc(LowOpndRegType_gp, reg, isPhysical, true);
        endNativeCode();
        return lower_mem_reg(m, ATOM_NORMAL, size, disp, baseAll, MemoryAccess_Unknown, -1,
            regAll, LowOpndRegType_gp, true/*moves*/);
    } else {
        stream = encoder_movez_mem_to_reg(size, disp, base_reg, isBasePhysical, reg, isPhysical, stream);
    }
    return NULL;
}

//!update fields of LowOp and generate an x86 instruction that takes two reg operands

//!
LowOpRegReg* dump_movez_reg_reg(Mnemonic m, OpndSize size,
             int reg, bool isPhysical,
             int reg2, bool isPhysical2) {
    LowOpRegReg* op = (LowOpRegReg*)atomNew(sizeof(LowOpRegReg));
    op->lop.opCode = m;
    op->lop.opnd1.size = OpndSize_32;
    op->lop.opnd1.type = LowOpndType_Reg;
    op->lop.opnd2.size = size;
    op->lop.opnd2.type = LowOpndType_Reg;
    set_reg_opnd(&(op->regOpnd1), reg2, isPhysical2, LowOpndRegType_gp);
    set_reg_opnd(&(op->regOpnd2), reg, isPhysical, LowOpndRegType_gp);
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        //reg is the source if m is MOV
        freeReg(true);
        int regAll = registerAlloc(LowOpndRegType_gp, reg, isPhysical, true);
        donotSpillReg(regAll);
        int regAll2 = registerAlloc(LowOpndRegType_gp, reg2, isPhysical2, true);
        stream = encoder_movez_reg_to_reg(size, regAll, true, regAll2, true,
                                          LowOpndRegType_gp, stream);
        endNativeCode();
    }
    else {
        stream = encoder_movez_reg_to_reg(size, reg, isPhysical, reg2,
                                        isPhysical2, LowOpndRegType_gp, stream);
    }
    return NULL;
}

//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!
LowOpRegMem* lower_mem_scale_reg(Mnemonic m, OpndSize size, int base_reg, int disp, int index_reg,
                 int scale, int reg, LowOpndRegType type) {
    bool isMovzs = (m == Mnemonic_MOVZX || m == Mnemonic_MOVSX);
    if(isMovzs)
        stream = encoder_movzs_mem_disp_scale_reg(m, size, base_reg, true, disp, index_reg, true,
                                                  scale, reg, true, type, stream);
    else {
        if(disp == 0)
            stream = encoder_mem_scale_reg(m, size, base_reg, true, index_reg, true,
                                           scale, reg, true, type, stream);
        else
            stream = encoder_mem_disp_scale_reg(m, size, base_reg, true, disp, index_reg, true,
                                                scale, reg, true, type, stream);
    }
    return NULL;
}

LowOpRegMem* dump_mem_scale_reg(Mnemonic m, OpndSize size,
                         int base_reg, bool isBasePhysical, int disp, int index_reg, bool isIndexPhysical, int scale,
                         int reg, bool isPhysical, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        donotSpillReg(baseAll); //make sure the index will not use the same physical reg
        int indexAll = registerAlloc(LowOpndRegType_gp, index_reg, isIndexPhysical, true);
        if(isMnemonicMove(m)) {
            freeReg(true);
            doSpillReg(baseAll); //base can be used now
        } else {
            donotSpillReg(indexAll);
        }
        bool isMovzs = (m == Mnemonic_MOVZX || m == Mnemonic_MOVSX);
        int regAll = registerAlloc(isMovzs ? LowOpndRegType_gp : type, reg, isPhysical, true);
        endNativeCode();
        return lower_mem_scale_reg(m, size, baseAll, disp, indexAll, scale, regAll, type);
    } else {
        stream = encoder_mem_scale_reg(m, size, base_reg, isBasePhysical, index_reg,
                                       isIndexPhysical, scale, reg, isPhysical, type, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!
LowOpMemReg* lower_reg_mem_scale(Mnemonic m, OpndSize size, int reg,
                 int base_reg, int disp, int index_reg, int scale, LowOpndRegType type) {
    if(disp == 0)
        stream = encoder_reg_mem_scale(m, size, reg, true, base_reg, true,
                                       index_reg, true, scale, type, stream);
    else
        stream = encoder_reg_mem_disp_scale(m, size, reg, true, base_reg, true,
                                            disp, index_reg, true, scale, type, stream);
    return NULL;
}

LowOpMemReg* dump_reg_mem_scale(Mnemonic m, OpndSize size,
                         int reg, bool isPhysical,
                         int base_reg, bool isBasePhysical, int disp, int index_reg, bool isIndexPhysical, int scale,
                         LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        donotSpillReg(baseAll);
        int indexAll = registerAlloc(LowOpndRegType_gp, index_reg, isIndexPhysical, true);
        donotSpillReg(indexAll);
        int regAll = registerAlloc(type, reg, isPhysical, true);
        endNativeCode();
        return lower_reg_mem_scale(m, size, regAll, baseAll, disp, indexAll, scale, type);
    } else {
        stream = encoder_reg_mem_scale(m, size, reg, isPhysical, base_reg, isBasePhysical,
                                       index_reg, isIndexPhysical, scale, type, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!Here operands are already allocated
LowOpMemReg* lower_reg_mem(Mnemonic m, AtomOpCode m2, OpndSize size, int reg,
                 int disp, int base_reg, MemoryAccessType mType, int mIndex,
                 LowOpndRegType type) {
    stream = encoder_reg_mem(m, size, reg, true, disp, base_reg, true, type, stream);
    return NULL;
}

LowOpMemReg* dump_reg_mem_noalloc(Mnemonic m, OpndSize size,
                           int reg, bool isPhysical,
                           int disp, int base_reg, bool isBasePhysical,
                           MemoryAccessType mType, int mIndex, LowOpndRegType type) {
    return lower_reg_mem(m, ATOM_NORMAL, size, reg, disp, base_reg, mType, mIndex, type);
}
//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!
LowOpMemReg* dump_reg_mem(Mnemonic m, AtomOpCode m2, OpndSize size,
                   int reg, bool isPhysical,
                   int disp, int base_reg, bool isBasePhysical,
                   MemoryAccessType mType, int mIndex, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        donotSpillReg(baseAll);
        int regAll = registerAlloc(type, reg, isPhysical, true);
        endNativeCode();
        return lower_reg_mem(m, m2, size, regAll, disp, baseAll, mType, mIndex, type);
    } else {
        stream = encoder_reg_mem(m, size, reg, isPhysical, disp, base_reg, isBasePhysical, type, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes one immediate and one reg operand

//!The reg operand is allocated already
LowOpRegImm* lower_imm_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
                 int imm, int reg, LowOpndRegType type, bool chaining) {
    stream = encoder_imm_reg(m, size, imm, reg, true, type, stream);
    return NULL;
}

LowOpRegImm* dump_imm_reg_noalloc(Mnemonic m, OpndSize size,
                           int imm, int reg, bool isPhysical, LowOpndRegType type) {
    return lower_imm_reg(m, ATOM_NORMAL, size, imm, reg, type, false);
}
//!update fields of LowOp and generate an x86 instruction that takes one immediate and one reg operand

//!
LowOpRegImm* dump_imm_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
                   int imm, int reg, bool isPhysical, LowOpndRegType type, bool chaining) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        freeReg(true);
        int regAll = registerAlloc(type, reg, isPhysical, true);
        return lower_imm_reg(m, m2, size, imm, regAll, type, chaining);
    } else {
        stream = encoder_imm_reg(m, size, imm, reg, isPhysical, type, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes one immediate and one mem operand

//!The mem operand is already allocated
LowOpMemImm* lower_imm_mem(Mnemonic m, AtomOpCode m2, OpndSize size, int imm,
                 int disp, int base_reg, MemoryAccessType mType, int mIndex,
                 bool chaining) {
    stream = encoder_imm_mem(m, size, imm, disp, base_reg, true, stream);
    return NULL;
}

LowOpMemImm* dump_imm_mem_noalloc(Mnemonic m, OpndSize size,
                           int imm,
                           int disp, int base_reg, bool isBasePhysical,
                           MemoryAccessType mType, int mIndex) {
    return lower_imm_mem(m, ATOM_NORMAL, size, imm, disp, base_reg, mType, mIndex, false);
}
//!update fields of LowOp and generate an x86 instruction that takes one immediate and one mem operand

//!
LowOpMemImm* dump_imm_mem(Mnemonic m, AtomOpCode m2, OpndSize size,
                   int imm,
                   int disp, int base_reg, bool isBasePhysical,
                   MemoryAccessType mType, int mIndex, bool chaining) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        /* do not free registers if the base is %edi, %esp, or %ebp, to
           make sure dump_imm_mem generates only a single instruction */
        if(!isBasePhysical || (base_reg != PhysicalReg_EDI &&
                               base_reg != PhysicalReg_ESP &&
                               base_reg != PhysicalReg_EBP)) {
            freeReg(true);
        }
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        return lower_imm_mem(m, m2, size, imm, disp, baseAll, mType, mIndex, chaining);
    } else {
        stream = encoder_imm_mem(m, size, imm, disp, base_reg, isBasePhysical, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that uses the FP stack and takes one mem operand

//!
LowOpMemReg* lower_fp_mem(Mnemonic m, OpndSize size, int reg,
                  int disp, int base_reg, MemoryAccessType mType, int mIndex) {
    stream = encoder_fp_mem(m, size, reg, disp, base_reg, true, stream);
    return NULL;
}

LowOpMemReg* dump_fp_mem(Mnemonic m, OpndSize size, int reg,
                  int disp, int base_reg, bool isBasePhysical,
                  MemoryAccessType mType, int mIndex) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        return lower_fp_mem(m, size, reg, disp, baseAll, mType, mIndex);
    } else {
        stream = encoder_fp_mem(m, size, reg, disp, base_reg, isBasePhysical, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that uses the FP stack and takes one mem operand

//!
LowOpRegMem* lower_mem_fp(Mnemonic m, OpndSize size, int disp, int base_reg,
                 MemoryAccessType mType, int mIndex, int reg) {
    stream = encoder_mem_fp(m, size, disp, base_reg, true, reg, stream);
    return NULL;
}

LowOpRegMem* dump_mem_fp(Mnemonic m, OpndSize size,
                  int disp, int base_reg, bool isBasePhysical,
                  MemoryAccessType mType, int mIndex,
                  int reg) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        return lower_mem_fp(m, size, disp, baseAll, mType, mIndex, reg);
    } else {
        stream = encoder_mem_fp(m, size, disp, base_reg, isBasePhysical, reg, stream);
    }
    return NULL;
}
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
//OPERAND ORDER:
//LowOp, like EncoderBase, lists the destination operand first;
//the parameter order of the helper functions below lists the source(s) first.
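
/* Illustrative only (not compiled): the wrappers below take the source
   operand(s) first; e.g. comparing the 32-bit value at 8(%ebp) against EAX: */
#if 0
compare_mem_reg(OpndSize_32, 8, PhysicalReg_EBP, true,  //source: memory operand
                PhysicalReg_EAX, true);                 //destination: register
#endif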

////////////////////////////////// IA32 native instructions //////////////
//! generate a native instruction lea

//!
void load_effective_addr(int disp, int base_reg, bool isBasePhysical,
                          int reg, bool isPhysical) {
    Mnemonic m = Mnemonic_LEA;
    dump_mem_reg(m, ATOM_NORMAL, OpndSize_32, disp, base_reg, isBasePhysical,
        MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_gp);
}
//! generate a native instruction lea

//!
void load_effective_addr_scale(int base_reg, bool isBasePhysical,
                int index_reg, bool isIndexPhysical, int scale,
                int reg, bool isPhysical) {
    Mnemonic m = Mnemonic_LEA;
    dump_mem_scale_reg(m, OpndSize_32,
                              base_reg, isBasePhysical, 0/*disp*/, index_reg, isIndexPhysical, scale,
                              reg, isPhysical, LowOpndRegType_gp);
}
//!fldcw

//!
void load_fpu_cw(int disp, int base_reg, bool isBasePhysical) {
    Mnemonic m = Mnemonic_FLDCW;
    dump_mem(m, ATOM_NORMAL, OpndSize_16, disp, base_reg, isBasePhysical);
}
//!fnstcw

//!
void store_fpu_cw(bool checkException, int disp, int base_reg, bool isBasePhysical) {
    assert(!checkException);
    Mnemonic m = Mnemonic_FNSTCW;
    dump_mem(m, ATOM_NORMAL, OpndSize_16, disp, base_reg, isBasePhysical);
}
//!cdq

//!
void convert_integer(OpndSize srcSize, OpndSize dstSize) { //cbw, cwd, cdq
    assert(srcSize == OpndSize_32 && dstSize == OpndSize_64);
    Mnemonic m = Mnemonic_CDQ;
    dump_reg_reg(m, ATOM_NORMAL, OpndSize_32, PhysicalReg_EAX, true, PhysicalReg_EDX, true, LowOpndRegType_gp);
}
//!fld: load from memory (float or double) to stack

//!
void load_fp_stack(LowOp* op, OpndSize size, int disp, int base_reg, bool isBasePhysical) {//fld(s|l)
    Mnemonic m = Mnemonic_FLD;
    dump_mem_fp(m, size, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, 0); //ST0
}
//! fild: load from memory (int or long) to stack

//!
void load_int_fp_stack(OpndSize size, int disp, int base_reg, bool isBasePhysical) {//fild(ll|l)
    Mnemonic m = Mnemonic_FILD;
    dump_mem_fp(m, size, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, 0); //ST0
}
//!fild: load from memory (absolute addr)

//!
void load_int_fp_stack_imm(OpndSize size, int imm) {//fild(ll|l)
    return load_int_fp_stack(size, imm, PhysicalReg_Null, true);
}
//!fst: store from stack to memory (float or double)

//!
void store_fp_stack(LowOp* op, bool pop, OpndSize size, int disp, int base_reg, bool isBasePhysical) {//fst(p)(s|l)
    Mnemonic m = pop ? Mnemonic_FSTP : Mnemonic_FST;
    dump_fp_mem(m, size, 0, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1);
}
//!fist: store from stack to memory (int or long)

//!
void store_int_fp_stack(LowOp* op, bool pop, OpndSize size, int disp, int base_reg, bool isBasePhysical) {//fist(p)(l)
    Mnemonic m = pop ? Mnemonic_FISTP : Mnemonic_FIST;
    dump_fp_mem(m, size, 0, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1);
}
//!cmp reg, mem

//!
void compare_reg_mem(LowOp* op, OpndSize size, int reg, bool isPhysical,
              int disp, int base_reg, bool isBasePhysical) {
    Mnemonic m = Mnemonic_CMP;
    dump_reg_mem(m, ATOM_NORMAL, size, reg, isPhysical, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, getTypeFromIntSize(size));
}
//!cmp mem, reg

//!
void compare_mem_reg(OpndSize size,
              int disp, int base_reg, bool isBasePhysical,
              int reg, bool isPhysical) {
    Mnemonic m = Mnemonic_CMP;
    dump_mem_reg(m, ATOM_NORMAL, size, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, reg, isPhysical, getTypeFromIntSize(size));
}
//! compare a VR with a temporary variable

//!
void compare_VR_reg_all(OpndSize size,
             int vA,
             int reg, bool isPhysical, Mnemonic m) {
    LowOpndRegType type = getTypeFromIntSize(size);
    LowOpndRegType pType = type;
    if(m == Mnemonic_COMISS) {
        size = OpndSize_32;
        type = LowOpndRegType_ss;
        pType = LowOpndRegType_xmm;
    }
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        int tmpValue[2];
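        /* isVirtualRegConstant encodes constness as a bitmask (an inference
           from the uses below): 1 = low 32 bits constant, 2 = high 32 bits
           constant, 3 = both halves constant */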
        int isConst = isVirtualRegConstant(vA, type, tmpValue, true/*updateRefCount*/);
        if(isConst == 3) {
            if(m == Mnemonic_COMISS) {
#ifdef DEBUG_NCG_O1
                LOGI("VR is const and SS in compare_VR_reg");
#endif
                dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
                //dumpImmToMem(vA+1, OpndSize_32, 0); //CHECK necessary? will overwrite vA+1!!!
                dump_mem_reg(m, ATOM_NORMAL, size, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA, reg, isPhysical, pType);
                return;
            }
            else if(size != OpndSize_64) {
#ifdef DEBUG_NCG_O1
                LOGI("VR is const and 32 bits in compare_VR_reg");
#endif
                dump_imm_reg(m, ATOM_NORMAL, size, tmpValue[0], reg, isPhysical, pType, false);
                return;
            }
            else if(size == OpndSize_64) {
#ifdef DEBUG_NCG_O1
                LOGI("VR is const and 64 bits in compare_VR_reg");
#endif
                dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
                dumpImmToMem(vA+1, OpndSize_32, tmpValue[1]);
                dump_mem_reg(m, ATOM_NORMAL, size, 4*vA, PhysicalReg_FP, true,
                    MemoryAccess_VR, vA, reg, isPhysical, pType);
                return;
            }
        }
        if(isConst == 1) dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
        if(isConst == 2) dumpImmToMem(vA+1, OpndSize_32, tmpValue[1]);
        freeReg(true);
        int regAll = checkVirtualReg(vA, type, 0/*do not update*/);
        if(regAll != PhysicalReg_Null) { //do not spill regAll when allocating register for dst
            startNativeCode(-1, -1);
            donotSpillReg(regAll);
            dump_reg_reg_noalloc_src(m, ATOM_NORMAL, size, regAll, true, reg, isPhysical, pType);
            endNativeCode();
        }
        else {
            //virtual register is not allocated to a physical register
            dump_mem_reg_noalloc_mem(m, ATOM_NORMAL, size, 4*vA, PhysicalReg_FP, true,
                MemoryAccess_VR, vA, reg, isPhysical, pType);
        }
        updateRefCount(vA, type);
        return;
    } else {
        dump_mem_reg(m, ATOM_NORMAL, size, 4*vA, PhysicalReg_FP, true,
            MemoryAccess_VR, vA, reg, isPhysical, pType);
        return;
    }
}
void compare_VR_reg(OpndSize size,
             int vA,
             int reg, bool isPhysical) {
    Mnemonic m = Mnemonic_CMP;
    return compare_VR_reg_all(size, vA, reg, isPhysical, m);
}
void compare_VR_ss_reg(int vA, int reg, bool isPhysical) {
    Mnemonic m = Mnemonic_COMISS;
    return compare_VR_reg_all(OpndSize_32, vA, reg, isPhysical, m);
}
void compare_VR_sd_reg(int vA, int reg, bool isPhysical) {
    Mnemonic m = Mnemonic_COMISD;
    return compare_VR_reg_all(OpndSize_64, vA, reg, isPhysical, m);
}
//!load VR to stack

//!
void load_fp_stack_VR_all(OpndSize size, int vB, Mnemonic m) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        //can't load from immediate to fp stack
        int tmpValue[2];
        int isConst = isVirtualRegConstant(vB, getTypeFromIntSize(size), tmpValue, false/*updateRefCount*/);
        if(isConst > 0) {
            if(size != OpndSize_64) {
#ifdef DEBUG_NCG_O1
                LOGI("VR is const and 32 bits in load_fp_stack");
#endif
                dumpImmToMem(vB, OpndSize_32, tmpValue[0]);
            }
            else {
#ifdef DEBUG_NCG_O1
                LOGI("VR is const and 64 bits in load_fp_stack_VR");
#endif
                if(isConst == 1 || isConst == 3) dumpImmToMem(vB, OpndSize_32, tmpValue[0]);
                if(isConst == 2 || isConst == 3) dumpImmToMem(vB+1, OpndSize_32, tmpValue[1]);
            }
        }
        else { //if VR was updated by a def of gp, a xfer point was inserted
            //if VR was updated by a def of xmm, a xfer point was inserted
#if 0
            int regAll = checkVirtualReg(vB, size, 1);
            if(regAll != PhysicalReg_Null) //dump from register to memory
                dump_reg_mem_noalloc(m, size, regAll, true, 4*vB, PhysicalReg_FP, true,
                    MemoryAccess_VR, vB, getTypeFromIntSize(size));
#endif
        }
        dump_mem_fp(m, size, 4*vB, PhysicalReg_FP, true, MemoryAccess_VR, vB, 0);
    } else {
        dump_mem_fp(m, size, 4*vB, PhysicalReg_FP, true, MemoryAccess_VR, vB, 0);
    }
}
//!load VR (float or double) to stack

//!
void load_fp_stack_VR(OpndSize size, int vA) {//fld(s|l)
    Mnemonic m = Mnemonic_FLD;
    return load_fp_stack_VR_all(size, vA, m);
}
//!load VR (int or long) to stack

//!
void load_int_fp_stack_VR(OpndSize size, int vA) {//fild(ll|l)
    Mnemonic m = Mnemonic_FILD;
    return load_fp_stack_VR_all(size, vA, m);
}
//!store from stack to VR (float or double)

//!
void store_fp_stack_VR(bool pop, OpndSize size, int vA) {//fst(p)(s|l)
    Mnemonic m = pop ? Mnemonic_FSTP : Mnemonic_FST;
    dump_fp_mem(m, size, 0, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA);
    if(gDvm.executionMode == kExecutionModeNcgO1) {
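        /* assumption, inferred from the size check below: LowOpndRegType_fs_s
           marks a 32-bit (single-precision) FP-stack value, LowOpndRegType_fs
           a 64-bit one */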
        if(size == OpndSize_32)
            updateVirtualReg(vA, LowOpndRegType_fs_s);
        else
            updateVirtualReg(vA, LowOpndRegType_fs);
    }
}
//!store from stack to VR (int or long)

//!
void store_int_fp_stack_VR(bool pop, OpndSize size, int vA) {//fist(p)(l)
    Mnemonic m = pop ? Mnemonic_FISTP : Mnemonic_FIST;
    dump_fp_mem(m, size, 0, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA);
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        if(size == OpndSize_32)
            updateVirtualReg(vA, LowOpndRegType_fs_s);
        else
            updateVirtualReg(vA, LowOpndRegType_fs);
    }
}
//! ALU ops in FPU, one operand is a VR

//!
void fpu_VR(ALU_Opcode opc, OpndSize size, int vA) {
    Mnemonic m = map_of_fpu_opcode_2_mnemonic[opc];
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        int tmpValue[2];
        int isConst = isVirtualRegConstant(vA, getTypeFromIntSize(size), tmpValue, false/*updateRefCount*/);
        if(isConst > 0) {
            if(size != OpndSize_64) {
                //allocate a register for dst
                dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
            }
            else {
                if((isConst == 1 || isConst == 3) && size == OpndSize_64) {
                    dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
                }
                if((isConst == 2 || isConst == 3) && size == OpndSize_64) {
                    dumpImmToMem(vA+1, OpndSize_32, tmpValue[1]);
                }
            }
        }
        if(!isInMemory(vA, size)) {
            ALOGE("fpu_VR");
        }
        dump_mem_fp(m, size, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA, 0);
    } else {
        dump_mem_fp(m, size, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA, 0);
    }
}
//! cmp imm reg

//!
void compare_imm_reg(OpndSize size, int imm,
              int reg, bool isPhysical) {
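    /* cmp reg, 0 is strength-reduced to test reg, reg below: both set ZF/SF
       the same way for the comparisons that follow, and TEST avoids encoding
       an immediate */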
1079     if(imm == 0) {
1080         LowOpndRegType type = getTypeFromIntSize(size);
1081         Mnemonic m = Mnemonic_TEST;
1082         if(gDvm.executionMode == kExecutionModeNcgO1) {
1083             freeReg(true);
1084             int regAll = registerAlloc(type, reg, isPhysical, true);
1085             lower_reg_reg(m, ATOM_NORMAL, size, regAll, regAll, type);
1086         } else {
1087             stream = encoder_reg_reg(m, size, reg, isPhysical, reg, isPhysical, type, stream);
1088         }
1089         return;
1090     }
1091     Mnemonic m = Mnemonic_CMP;
1092     dump_imm_reg(m, ATOM_NORMAL, size, imm, reg, isPhysical, getTypeFromIntSize(size), false);
1093 }
1094 //! cmp imm mem
1095 
1096 //!
compare_imm_mem(OpndSize size,int imm,int disp,int base_reg,bool isBasePhysical)1097 void compare_imm_mem(OpndSize size, int imm,
1098               int disp, int base_reg, bool isBasePhysical) {
1099     Mnemonic m = Mnemonic_CMP;
1100     dump_imm_mem(m, ATOM_NORMAL, size, imm, disp,
1101                         base_reg, isBasePhysical, MemoryAccess_Unknown, -1, false);
1102 }
1103 //! cmp imm VR
1104 
1105 //!
compare_imm_VR(OpndSize size,int imm,int vA)1106 void compare_imm_VR(OpndSize size, int imm,
1107              int vA) {
1108     Mnemonic m = Mnemonic_CMP;
1109     if(gDvm.executionMode == kExecutionModeNcgO1) {
1110         if(size != OpndSize_32) ALOGE("only 32 bits supported in compare_imm_VR");
1111         int tmpValue[2];
1112         int isConst = isVirtualRegConstant(vA, getTypeFromIntSize(size), tmpValue, false/*updateRefCount*/);
1113         if(isConst > 0) {
1114             dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
1115         }
1116         int regAll = checkVirtualReg(vA, getTypeFromIntSize(size), 0);
1117         if(regAll != PhysicalReg_Null)
1118             dump_imm_reg_noalloc(m, size, imm, regAll, true, LowOpndRegType_gp);
1119         else
1120             dump_imm_mem_noalloc(m, size, imm, 4*vA, PhysicalReg_FP, true,
1121                 MemoryAccess_VR, vA);
1122         updateRefCount(vA, getTypeFromIntSize(size));
1123     } else {
1124         dump_imm_mem(m, ATOM_NORMAL, size, imm, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA, false);
1125     }
1126 }
1127 //! cmp reg reg
1128 
1129 //!
compare_reg_reg(int reg1,bool isPhysical1,int reg2,bool isPhysical2)1130 void compare_reg_reg(int reg1, bool isPhysical1,
1131               int reg2, bool isPhysical2) {
1132     Mnemonic m = Mnemonic_CMP;
1133     dump_reg_reg(m, ATOM_NORMAL, OpndSize_32, reg1, isPhysical1, reg2, isPhysical2, LowOpndRegType_gp);
1134 }
compare_reg_reg_16(int reg1,bool isPhysical1,int reg2,bool isPhysical2)1135 void compare_reg_reg_16(int reg1, bool isPhysical1,
1136               int reg2, bool isPhysical2) {
1137     Mnemonic m = Mnemonic_CMP;
1138     dump_reg_reg(m, ATOM_NORMAL, OpndSize_16, reg1, isPhysical1, reg2, isPhysical2, LowOpndRegType_gp);
1139 }
1140 
1141 //! comiss mem reg
1142 
1143 //!SSE, XMM: comparison of floating point numbers
1144 void compare_ss_mem_reg(LowOp* op, int disp, int base_reg, bool isBasePhysical,
1145              int reg, bool isPhysical) {
1146     Mnemonic m = Mnemonic_COMISS;
1147     dump_mem_reg(m, ATOM_NORMAL, OpndSize_32, disp, base_reg, isBasePhysical,
1148         MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_xmm);
1149 }
1150 //! comiss reg reg
1151 
1152 //!
1153 void compare_ss_reg_with_reg(LowOp* op, int reg1, bool isPhysical1,
1154                   int reg2, bool isPhysical2) {
1155     Mnemonic m = Mnemonic_COMISS;
1156     dump_reg_reg(m,  ATOM_NORMAL, OpndSize_32, reg1, isPhysical1, reg2, isPhysical2, LowOpndRegType_xmm);
1157 }
1158 //! comisd mem reg
1159 
1160 //!
1161 void compare_sd_mem_with_reg(LowOp* op, int disp, int base_reg, bool isBasePhysical,
1162                   int reg, bool isPhysical) {
1163     Mnemonic m = Mnemonic_COMISD;
1164     dump_mem_reg(m, ATOM_NORMAL, OpndSize_64, disp, base_reg, isBasePhysical,
1165         MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_xmm);
1166 }
1167 //! comisd reg reg
1168 
1169 //!
1170 void compare_sd_reg_with_reg(LowOp* op, int reg1, bool isPhysical1,
1171                   int reg2, bool isPhysical2) {
1172     Mnemonic m = Mnemonic_COMISD;
1173     dump_reg_reg(m, ATOM_NORMAL, OpndSize_64, reg1, isPhysical1, reg2, isPhysical2, LowOpndRegType_xmm);
1174 }
1175 //! fucom[p]
1176 
1177 //!
1178 void compare_fp_stack(bool pop, int reg, bool isDouble) { //compare ST(0) with ST(reg)
1179     Mnemonic m = pop ? Mnemonic_FUCOMP : Mnemonic_FUCOM;
1180     lower_reg_reg(m, ATOM_NORMAL, isDouble ? OpndSize_64 : OpndSize_32,
1181                   PhysicalReg_ST0+reg, PhysicalReg_ST0, LowOpndRegType_fs);
1182 }
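//! Sketch: compare_fp_stack(true, 1, false) lowers to "fucomp %st(1)", which
//! compares ST(0) against ST(1) and pops the x87 stack; passing pop == false
//! emits "fucom" and leaves the stack depth unchanged.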
1183 /*!
1184 \brief generate a single return instruction
1185 
1186 */
1187 LowOp* lower_return() {
1188     stream = encoder_return(stream);
1189     return NULL;
1190 }
1191 
1192 void x86_return() {
1193     lower_return();
1194 }
1195 
1196 //!test imm reg
1197 
1198 //!
1199 void test_imm_reg(OpndSize size, int imm, int reg, bool isPhysical) {
1200     dump_imm_reg(Mnemonic_TEST, ATOM_NORMAL, size, imm, reg, isPhysical, getTypeFromIntSize(size), false);
1201 }
1202 //!test imm mem
1203 
1204 //!
1205 void test_imm_mem(OpndSize size, int imm, int disp, int reg, bool isPhysical) {
1206     dump_imm_mem(Mnemonic_TEST, ATOM_NORMAL, size, imm, disp, reg, isPhysical, MemoryAccess_Unknown, -1, false);
1207 }
1208 //!alu unary op with one reg operand
1209 
1210 //!
1211 void alu_unary_reg(OpndSize size, ALU_Opcode opc, int reg, bool isPhysical) {
1212     Mnemonic m;
1213     if(size == OpndSize_64)
1214         m = map_of_64_opcode_2_mnemonic[opc];
1215     else
1216         m = map_of_alu_opcode_2_mnemonic[opc];
1217     dump_reg(m, ATOM_NORMAL_ALU, size, reg, isPhysical, getTypeFromIntSize(size));
1218 }
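//! All alu_* helpers below share this dispatch: the ALU_Opcode value indexes a
//! mnemonic table, with a dedicated table for the 64-bit GPR pseudo-ops. A
//! minimal sketch, assuming add_opc selects the ADD entry:
//!     alu_binary_imm_reg(OpndSize_32, add_opc, 1, reg, true); // emits: add reg, 1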
1219 //!alu unary op with one mem operand
1220 
1221 //!
1222 void alu_unary_mem(LowOp* op, OpndSize size, ALU_Opcode opc, int disp, int base_reg, bool isBasePhysical) {
1223     Mnemonic m;
1224     if(size == OpndSize_64)
1225         m = map_of_64_opcode_2_mnemonic[opc];
1226     else
1227         m = map_of_alu_opcode_2_mnemonic[opc];
1228     dump_mem(m, ATOM_NORMAL_ALU, size, disp, base_reg, isBasePhysical);
1229 }
1230 //!alu binary op with immediate and one mem operand
1231 
1232 //!
1233 void alu_binary_imm_mem(OpndSize size, ALU_Opcode opc, int imm, int disp, int base_reg, bool isBasePhysical) {
1234     Mnemonic m;
1235     if(size == OpndSize_64)
1236         m = map_of_64_opcode_2_mnemonic[opc];
1237     else
1238         m = map_of_alu_opcode_2_mnemonic[opc];
1239     dump_imm_mem(m, ATOM_NORMAL_ALU, size, imm, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, false);
1240 }
1241 //!alu binary op with immediate and one reg operand
1242 
1243 //!
1244 void alu_binary_imm_reg(OpndSize size, ALU_Opcode opc, int imm, int reg, bool isPhysical) {
1245     Mnemonic m;
1246     if(size == OpndSize_64)
1247         m = map_of_64_opcode_2_mnemonic[opc];
1248     else
1249         m = map_of_alu_opcode_2_mnemonic[opc];
1250     dump_imm_reg(m, ATOM_NORMAL_ALU, size, imm, reg, isPhysical, getTypeFromIntSize(size), false);
1251 }
1252 //!alu binary op with one mem operand and one reg operand
1253 
1254 //!
1255 void alu_binary_mem_reg(OpndSize size, ALU_Opcode opc,
1256              int disp, int base_reg, bool isBasePhysical,
1257              int reg, bool isPhysical) {
1258     Mnemonic m;
1259     if(size == OpndSize_64)
1260         m = map_of_64_opcode_2_mnemonic[opc];
1261     else
1262         m = map_of_alu_opcode_2_mnemonic[opc];
1263     dump_mem_reg(m, ATOM_NORMAL_ALU, size, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, reg, isPhysical, getTypeFromIntSize(size));
1264 }
1265 
1266 void alu_sd_binary_VR_reg(ALU_Opcode opc, int vA, int reg, bool isPhysical, bool isSD) {
1267     Mnemonic m;
1268     if(isSD) m = map_of_sse_opcode_2_mnemonic[opc];
1269     else m = (Mnemonic)(map_of_sse_opcode_2_mnemonic[opc]+1); //from SD to SS
1270     OpndSize size = isSD ? OpndSize_64 : OpndSize_32;
1271     if(gDvm.executionMode == kExecutionModeNcgO1) {
1272         LowOpndRegType type = isSD ? LowOpndRegType_xmm : LowOpndRegType_ss; //type of the mem operand
1273         int tmpValue[2];
1274         int isConst = isVirtualRegConstant(vA, type, tmpValue,
1275                           true/*updateRefCount*/);
1276         if(isConst == 3 && !isSD) {
1277             //for the ss type, isConst is either 0 or 3; use the 32-bit memory home with an xmm register
1278             dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
1279             dump_mem_reg(m, ATOM_NORMAL_ALU, OpndSize_32, 4*vA, PhysicalReg_FP, true,
1280                        MemoryAccess_VR, vA, reg, isPhysical,
1281                        LowOpndRegType_xmm);
1282             return;
1283         }
1284         if(isConst == 3 && isSD) {
1285             dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
1286             dumpImmToMem(vA+1, OpndSize_32, tmpValue[1]);
1287             dump_mem_reg(m, ATOM_NORMAL_ALU, OpndSize_64, 4*vA, PhysicalReg_FP, true,
1288                        MemoryAccess_VR, vA, reg, isPhysical, LowOpndRegType_xmm);
1289             return;
1290         }
1291         if(isConst == 1) dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
1292         if(isConst == 2) dumpImmToMem(vA+1, OpndSize_32, tmpValue[1]);
1293         freeReg(true);
1294 
1295         int regAll = checkVirtualReg(vA, type, 0/*do not update refCount*/);
1296         if(regAll != PhysicalReg_Null) {
1297             startNativeCode(-1, -1); //should we use vA, type
1298             //CHECK: call updateVRAtUse
1299             donotSpillReg(regAll);
1300             dump_reg_reg_noalloc_src(m, ATOM_NORMAL_ALU, size, regAll, true, reg,
1301                          isPhysical, LowOpndRegType_xmm);
1302             endNativeCode();
1303         }
1304         else {
1305             dump_mem_reg_noalloc_mem(m, ATOM_NORMAL_ALU, size, 4*vA, PhysicalReg_FP, true,
1306                          MemoryAccess_VR, vA, reg, isPhysical, LowOpndRegType_xmm);
1307         }
1308         updateRefCount(vA, type);
1309     }
1310     else {
1311         dump_mem_reg(m, ATOM_NORMAL, size, 4*vA, PhysicalReg_FP, true,
1312                     MemoryAccess_VR, vA, reg, isPhysical, LowOpndRegType_xmm);
1313     }
1314 }
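//! The "+1" conversion above relies on an enum-layout assumption: each
//! scalar-double mnemonic in the Mnemonic enum is immediately followed by its
//! scalar-single counterpart, so e.g. the entry for ADDSD + 1 yields ADDSS.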
1315 
1316 //!alu binary op with a VR and one reg operand
1317 
1318 //!
1319 void alu_binary_VR_reg(OpndSize size, ALU_Opcode opc, int vA, int reg, bool isPhysical) {
1320     Mnemonic m;
1321     if(size == OpndSize_64)
1322         m = map_of_64_opcode_2_mnemonic[opc];
1323     else
1324         m = map_of_alu_opcode_2_mnemonic[opc];
1325     if(gDvm.executionMode == kExecutionModeNcgO1) {
1326         int tmpValue[2];
1327         int isConst = isVirtualRegConstant(vA, getTypeFromIntSize(size), tmpValue,
1328                           true/*updateRefCount*/);
1329         if(isConst == 3 && size != OpndSize_64) {
1330             //allocate a register for dst
1331             dump_imm_reg(m, ATOM_NORMAL_ALU, size, tmpValue[0], reg, isPhysical,
1332                        getTypeFromIntSize(size), false);
1333             return;
1334         }
1335         if(isConst == 3 && size == OpndSize_64) {
1336             dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
1337             dumpImmToMem(vA+1, OpndSize_32, tmpValue[1]);
1338             dump_mem_reg(m, ATOM_NORMAL_ALU, size, 4*vA, PhysicalReg_FP, true,
1339                 MemoryAccess_VR, vA, reg, isPhysical, getTypeFromIntSize(size));
1340             return;
1341         }
1342         if(isConst == 1) dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
1343         if(isConst == 2) dumpImmToMem(vA+1, OpndSize_32, tmpValue[1]);
1344 
1345         freeReg(true);
1346         int regAll = checkVirtualReg(vA, getTypeFromIntSize(size), 0);
1347         if(regAll != PhysicalReg_Null) {
1348             startNativeCode(-1, -1);
1349             donotSpillReg(regAll);
1350             dump_reg_reg_noalloc_src(m, ATOM_NORMAL_ALU, size, regAll, true, reg,
1351                          isPhysical, getTypeFromIntSize(size));
1352             endNativeCode();
1353         }
1354         else {
1355             dump_mem_reg_noalloc_mem(m, ATOM_NORMAL_ALU, size, 4*vA, PhysicalReg_FP, true,
1356                 MemoryAccess_VR, vA, reg, isPhysical, getTypeFromIntSize(size));
1357         }
1358         updateRefCount(vA, getTypeFromIntSize(size));
1359     }
1360     else {
1361         dump_mem_reg(m, ATOM_NORMAL, size, 4*vA, PhysicalReg_FP, true,
1362             MemoryAccess_VR, vA, reg, isPhysical, getTypeFromIntSize(size));
1363     }
1364 }
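//! Sketch of the constant-folding path above: when both halves of a 32-bit VR
//! are known constants (isConst == 3), no load of the VR is emitted at all;
//! the operation folds into an immediate form, roughly
//!     add reg, <tmpValue[0]>    // instead of: add reg, [FP + 4*vA]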
1365 //!alu binary op with two reg operands
1366 
1367 //!
1368 void alu_binary_reg_reg(OpndSize size, ALU_Opcode opc,
1369                          int reg1, bool isPhysical1,
1370                          int reg2, bool isPhysical2) {
1371     Mnemonic m;
1372     if(size == OpndSize_64)
1373         m = map_of_64_opcode_2_mnemonic[opc];
1374     else
1375         m = map_of_alu_opcode_2_mnemonic[opc];
1376     dump_reg_reg(m, ATOM_NORMAL_ALU, size, reg1, isPhysical1, reg2, isPhysical2, getTypeFromIntSize(size));
1377 }
1378 //!alu binary op with one reg operand and one mem operand
1379 
1380 //!
1381 void alu_binary_reg_mem(OpndSize size, ALU_Opcode opc,
1382              int reg, bool isPhysical,
1383              int disp, int base_reg, bool isBasePhysical) { //destination is mem!!
1384     Mnemonic m;
1385     if(size == OpndSize_64)
1386         m = map_of_64_opcode_2_mnemonic[opc];
1387     else
1388         m = map_of_alu_opcode_2_mnemonic[opc];
1389     dump_reg_mem(m, ATOM_NORMAL_ALU, size, reg, isPhysical, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, getTypeFromIntSize(size));
1390 }
1391 //!FPU ops with one mem operand
1392 
1393 //!
1394 void fpu_mem(LowOp* op, ALU_Opcode opc, OpndSize size, int disp, int base_reg, bool isBasePhysical) {
1395     Mnemonic m = map_of_fpu_opcode_2_mnemonic[opc];
1396     dump_mem_fp(m, size, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, 0);
1397 }
1398 //!SSE 32-bit ALU
1399 
1400 //!
1401 void alu_ss_binary_reg_reg(ALU_Opcode opc, int reg, bool isPhysical,
1402                 int reg2, bool isPhysical2) {
1403     Mnemonic m = (Mnemonic)(map_of_sse_opcode_2_mnemonic[opc]+1); //from SD to SS
1404     dump_reg_reg(m, ATOM_NORMAL_ALU, OpndSize_32, reg, isPhysical, reg2, isPhysical2, LowOpndRegType_xmm);
1405 }
1406 //!SSE 64-bit ALU
1407 
1408 //!
1409 void alu_sd_binary_reg_reg(ALU_Opcode opc, int reg, bool isPhysical,
1410                 int reg2, bool isPhysical2) {
1411     Mnemonic m = map_of_sse_opcode_2_mnemonic[opc];
1412     dump_reg_reg(m, ATOM_NORMAL_ALU, OpndSize_64, reg, isPhysical, reg2, isPhysical2, LowOpndRegType_xmm);
1413 }
1414 //!push reg to native stack
1415 
1416 //!
1417 void push_reg_to_stack(OpndSize size, int reg, bool isPhysical) {
1418     dump_reg(Mnemonic_PUSH, ATOM_NORMAL, size, reg, isPhysical, getTypeFromIntSize(size));
1419 }
1420 //!push mem to native stack
1421 
1422 //!
1423 void push_mem_to_stack(OpndSize size, int disp, int base_reg, bool isBasePhysical) {
1424     dump_mem(Mnemonic_PUSH, ATOM_NORMAL, size, disp, base_reg, isBasePhysical);
1425 }
1426 //!move from reg to memory
1427 
1428 //!
1429 void move_reg_to_mem(OpndSize size,
1430                       int reg, bool isPhysical,
1431                       int disp, int base_reg, bool isBasePhysical) {
1432     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1433     dump_reg_mem(m, ATOM_NORMAL, size, reg, isPhysical, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, getTypeFromIntSize(size));
1434 }
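//! The MOV/MOVQ split above recurs in every move_* helper: 8/16/32-bit data
//! moves through a GPR "mov", while 64-bit data moves through an xmm "movq",
//! matching the register class chosen by getTypeFromIntSize(). Sketch (tmp is
//! a placeholder temporary):
//!     move_reg_to_mem(OpndSize_64, tmp, false, 8, PhysicalReg_EBP, true);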
1435 //!move from reg to memory
1436 
1437 //!Operands are already allocated
1438 void move_reg_to_mem_noalloc(OpndSize size,
1439                   int reg, bool isPhysical,
1440                   int disp, int base_reg, bool isBasePhysical,
1441                   MemoryAccessType mType, int mIndex) {
1442     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1443     dump_reg_mem_noalloc(m, size, reg, isPhysical, disp, base_reg, isBasePhysical, mType, mIndex, getTypeFromIntSize(size));
1444 }
1445 //!move from memory to reg
1446 
1447 //!
1448 LowOpRegMem* move_mem_to_reg(OpndSize size,
1449                       int disp, int base_reg, bool isBasePhysical,
1450                       int reg, bool isPhysical) {
1451     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1452     return dump_mem_reg(m, ATOM_NORMAL, size, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, reg, isPhysical, getTypeFromIntSize(size));
1453 }
1454 //!move from memory to reg
1455 
1456 //!Operands are already allocated
1457 LowOpRegMem* move_mem_to_reg_noalloc(OpndSize size,
1458                   int disp, int base_reg, bool isBasePhysical,
1459                   MemoryAccessType mType, int mIndex,
1460                   int reg, bool isPhysical) {
1461     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1462     return dump_mem_reg_noalloc(m, size, disp, base_reg, isBasePhysical, mType, mIndex, reg, isPhysical, getTypeFromIntSize(size));
1463 }
1464 //!movss from memory to reg
1465 
1466 //!Operands are already allocated
1467 LowOpRegMem* move_ss_mem_to_reg_noalloc(int disp, int base_reg, bool isBasePhysical,
1468                  MemoryAccessType mType, int mIndex,
1469                  int reg, bool isPhysical) {
1470     return dump_mem_reg_noalloc(Mnemonic_MOVSS, OpndSize_32, disp, base_reg, isBasePhysical, mType, mIndex, reg, isPhysical, LowOpndRegType_xmm);
1471 }
1472 //!movss from reg to memory
1473 
1474 //!Operands are already allocated
1475 LowOpMemReg* move_ss_reg_to_mem_noalloc(int reg, bool isPhysical,
1476                  int disp, int base_reg, bool isBasePhysical,
1477                  MemoryAccessType mType, int mIndex) {
1478     return dump_reg_mem_noalloc(Mnemonic_MOVSS, OpndSize_32, reg, isPhysical, disp, base_reg, isBasePhysical, mType, mIndex, LowOpndRegType_xmm);
1479 }
1480 //!movzx from memory to reg
1481 
1482 //!
1483 void movez_mem_to_reg(OpndSize size,
1484                int disp, int base_reg, bool isBasePhysical,
1485                int reg, bool isPhysical) {
1486     Mnemonic m = Mnemonic_MOVZX;
1487     dump_movez_mem_reg(m, size, disp, base_reg, isBasePhysical, reg, isPhysical);
1488 }
1489 
1490 //!movzx from one reg to another reg
1491 
1492 //!
1493 void movez_reg_to_reg(OpndSize size,
1494                       int reg, bool isPhysical,
1495                       int reg2, bool isPhysical2) {
1496     Mnemonic m = Mnemonic_MOVZX;
1497     dump_movez_reg_reg(m, size, reg, isPhysical, reg2, isPhysical2);
1498 }
1499 
1500 void movez_mem_disp_scale_to_reg(OpndSize size,
1501                  int base_reg, bool isBasePhysical,
1502                  int disp, int index_reg, bool isIndexPhysical, int scale,
1503                  int reg, bool isPhysical) {
1504     dump_mem_scale_reg(Mnemonic_MOVZX, size, base_reg, isBasePhysical,
1505                  disp, index_reg, isIndexPhysical, scale,
1506                  reg, isPhysical, LowOpndRegType_gp);
1507 }
1508 void moves_mem_disp_scale_to_reg(OpndSize size,
1509                   int base_reg, bool isBasePhysical,
1510                   int disp, int index_reg, bool isIndexPhysical, int scale,
1511                   int reg, bool isPhysical) {
1512     dump_mem_scale_reg(Mnemonic_MOVSX, size, base_reg, isBasePhysical,
1513                   disp, index_reg, isIndexPhysical, scale,
1514                   reg, isPhysical, LowOpndRegType_gp);
1515 }
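//! movez_* zero-extends (movzx) while moves_* sign-extends (movsx). The choice
//! matters for sub-word array loads: a 16-bit char element wants the movez_*
//! form, a 16-bit short element the moves_* form. Sketch with placeholder
//! operands (base/idx/dst):
//!     movez_mem_disp_scale_to_reg(OpndSize_16, base, true, off, idx, true, 2, dst, false);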
1516 
1517 //!movsx from memory to reg
1518 
1519 //!
1520 void moves_mem_to_reg(LowOp* op, OpndSize size,
1521                int disp, int base_reg, bool isBasePhysical,
1522                int reg, bool isPhysical) {
1523     Mnemonic m = Mnemonic_MOVSX;
1524     dump_moves_mem_reg(m, size, disp, base_reg, isBasePhysical, reg, isPhysical);
1525 }
1526 //!mov from one reg to another reg
1527 
1528 //!
1529 void move_reg_to_reg(OpndSize size,
1530                       int reg, bool isPhysical,
1531                       int reg2, bool isPhysical2) {
1532     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1533     dump_reg_reg(m, ATOM_NORMAL, size, reg, isPhysical, reg2, isPhysical2, getTypeFromIntSize(size));
1534 }
1535 //!mov from one reg to another reg
1536 
1537 //!Operands are already allocated
1538 void move_reg_to_reg_noalloc(OpndSize size,
1539                   int reg, bool isPhysical,
1540                   int reg2, bool isPhysical2) {
1541     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1542     dump_reg_reg_noalloc(m, size, reg, isPhysical, reg2, isPhysical2, getTypeFromIntSize(size));
1543 }
1544 //!move from memory to reg
1545 
1546 //!
1547 void move_mem_scale_to_reg(OpndSize size,
1548                 int base_reg, bool isBasePhysical, int index_reg, bool isIndexPhysical, int scale,
1549                 int reg, bool isPhysical) {
1550     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1551     dump_mem_scale_reg(m, size, base_reg, isBasePhysical, 0/*disp*/, index_reg, isIndexPhysical, scale,
1552                               reg, isPhysical, getTypeFromIntSize(size));
1553 }
1554 void move_mem_disp_scale_to_reg(OpndSize size,
1555                 int base_reg, bool isBasePhysical, int disp, int index_reg, bool isIndexPhysical, int scale,
1556                 int reg, bool isPhysical) {
1557     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1558     dump_mem_scale_reg(m, size, base_reg, isBasePhysical, disp, index_reg, isIndexPhysical, scale,
1559                               reg, isPhysical, getTypeFromIntSize(size));
1560 }
1561 //!move from reg to memory
1562 
1563 //!
1564 void move_reg_to_mem_scale(OpndSize size,
1565                 int reg, bool isPhysical,
1566                 int base_reg, bool isBasePhysical, int index_reg, bool isIndexPhysical, int scale) {
1567     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1568     dump_reg_mem_scale(m, size, reg, isPhysical,
1569                               base_reg, isBasePhysical, 0/*disp*/, index_reg, isIndexPhysical, scale,
1570                               getTypeFromIntSize(size));
1571 }
1572 void move_reg_to_mem_disp_scale(OpndSize size,
1573                 int reg, bool isPhysical,
1574                 int base_reg, bool isBasePhysical, int disp, int index_reg, bool isIndexPhysical, int scale) {
1575     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1576     dump_reg_mem_scale(m, size, reg, isPhysical,
1577                               base_reg, isBasePhysical, disp, index_reg, isIndexPhysical, scale,
1578                               getTypeFromIntSize(size));
1579 }
1580 
1581 void move_chain_to_mem(OpndSize size, int imm,
1582                         int disp, int base_reg, bool isBasePhysical) {
1583     dump_imm_mem(Mnemonic_MOV, ATOM_NORMAL, size, imm, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, true);
1584 }
1585 
1586 //!move an immediate to memory
1587 
1588 //!
1589 void move_imm_to_mem(OpndSize size, int imm,
1590                       int disp, int base_reg, bool isBasePhysical) {
1591     assert(size != OpndSize_64);
1592     if(size == OpndSize_64) ALOGE("move_imm_to_mem with 64 bits");
1593     dump_imm_mem(Mnemonic_MOV, ATOM_NORMAL, size, imm, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, false);
1594 }
1595 //! set a VR to an immediate
1596 
1597 //!
1598 void set_VR_to_imm(u2 vA, OpndSize size, int imm) {
1599     assert(size != OpndSize_64);
1600     if(size == OpndSize_64) ALOGE("set_VR_to_imm with 64 bits");
1601     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1602     if(gDvm.executionMode == kExecutionModeNcgO1) {
1603         int regAll = checkVirtualReg(vA, getTypeFromIntSize(size), 0);
1604         if(regAll != PhysicalReg_Null) {
1605             dump_imm_reg_noalloc(m, size, imm, regAll, true, LowOpndRegType_gp);
1606             updateRefCount(vA, getTypeFromIntSize(size));
1607             updateVirtualReg(vA, getTypeFromIntSize(size));
1608             return;
1609         }
1610         //will call freeReg
1611         freeReg(true);
1612         regAll = registerAlloc(LowOpndRegType_virtual | getTypeFromIntSize(size), vA, false/*dummy*/, true);
1613         if(regAll == PhysicalReg_Null) {
1614             dump_imm_mem_noalloc(m, size, imm, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA);
1615             return;
1616         }
1617         dump_imm_reg_noalloc(m, size, imm, regAll, true, LowOpndRegType_gp);
1618         updateVirtualReg(vA, getTypeFromIntSize(size));
1619     }
1620     else {
1621         dump_imm_mem(m, ATOM_NORMAL, size, imm, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA, false);
1622     }
1623 }
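//! The three outcomes of set_VR_to_imm under O1, sketched:
//!     mov regAll, imm        // VR already held in a physical register
//!     mov newReg, imm        // VR freshly allocated by registerAlloc
//!     mov [FP + 4*vA], imm   // no register available: write the home slot
//! In the register cases, updateVirtualReg() decides when the value is written
//! back to the VR's memory home.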
1624 void set_VR_to_imm_noupdateref(LowOp* op, u2 vA, OpndSize size, int imm) {
1625     return;
1626 }
1627 //! set a VR to an immediate
1628 
1629 //! Do not allocate a physical register for the VR
1630 void set_VR_to_imm_noalloc(u2 vA, OpndSize size, int imm) {
1631     assert(size != OpndSize_64);
1632     if(size == OpndSize_64) ALOGE("set_VR_to_imm_noalloc with 64 bits");
1633     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1634     dump_imm_mem_noalloc(m, size, imm, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA);
1635 }
1636 
1637 void move_chain_to_reg(OpndSize size, int imm, int reg, bool isPhysical) {
1638     dump_imm_reg(Mnemonic_MOV, ATOM_NORMAL, size, imm, reg, isPhysical, LowOpndRegType_gp, true);
1639 }
1640 
1641 //! move an immediate to reg
1642 
1643 //!
1644 void move_imm_to_reg(OpndSize size, int imm, int reg, bool isPhysical) {
1645     assert(size != OpndSize_64);
1646     if(size == OpndSize_64) ALOGE("move_imm_to_reg with 64 bits");
1647     Mnemonic m = Mnemonic_MOV;
1648     dump_imm_reg(m, ATOM_NORMAL, size, imm, reg, isPhysical, LowOpndRegType_gp, false);
1649 }
1650 //! move an immediate to reg
1651 
1652 //! The operand is already allocated
1653 void move_imm_to_reg_noalloc(OpndSize size, int imm, int reg, bool isPhysical) {
1654     assert(size != OpndSize_64);
1655     if(size == OpndSize_64) ALOGE("move_imm_to_reg with 64 bits");
1656     Mnemonic m = Mnemonic_MOV;
1657     dump_imm_reg_noalloc(m, size, imm, reg, isPhysical, LowOpndRegType_gp);
1658 }
1659 //!cmov from reg to reg
1660 
1661 //!
1662 void conditional_move_reg_to_reg(OpndSize size, ConditionCode cc, int reg1, bool isPhysical1, int reg, bool isPhysical) {
1663     Mnemonic m = (Mnemonic)(Mnemonic_CMOVcc+cc);
1664     dump_reg_reg(m, ATOM_NORMAL, size, reg1, isPhysical1, reg, isPhysical, LowOpndRegType_gp);
1665 }
1666 //!movss from memory to reg
1667 
1668 //!
1669 void move_ss_mem_to_reg(LowOp* op, int disp, int base_reg, bool isBasePhysical,
1670                          int reg, bool isPhysical) {
1671     dump_mem_reg(Mnemonic_MOVSS, ATOM_NORMAL, OpndSize_32, disp, base_reg, isBasePhysical,
1672         MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_xmm);
1673 }
1674 //!movss from reg to memory
1675 
1676 //!
1677 void move_ss_reg_to_mem(LowOp* op, int reg, bool isPhysical,
1678                          int disp, int base_reg, bool isBasePhysical) {
1679     dump_reg_mem(Mnemonic_MOVSS, ATOM_NORMAL, OpndSize_32, reg, isPhysical, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, LowOpndRegType_xmm);
1680 }
1681 //!movsd from memory to reg
1682 
1683 //!
1684 void move_sd_mem_to_reg(int disp, int base_reg, bool isBasePhysical,
1685                          int reg, bool isPhysical) {
1686     dump_mem_reg(Mnemonic_MOVSD, ATOM_NORMAL, OpndSize_64, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_xmm);
1687 }
1688 //!movsd from reg to memory
1689 
1690 //!
1691 void move_sd_reg_to_mem(LowOp* op, int reg, bool isPhysical,
1692                          int disp, int base_reg, bool isBasePhysical) {
1693     dump_reg_mem(Mnemonic_MOVSD, ATOM_NORMAL, OpndSize_64, reg, isPhysical,
1694                         disp, base_reg, isBasePhysical,
1695                         MemoryAccess_Unknown, -1, LowOpndRegType_xmm);
1696 }
1697 //!load from VR to a temporary
1698 
1699 //!
1700 void get_virtual_reg_all(u2 vB, OpndSize size, int reg, bool isPhysical, Mnemonic m) {
1701     LowOpndRegType type = getTypeFromIntSize(size);
1702     LowOpndRegType pType = type;//gp or xmm
1703     OpndSize size2 = size;
1704     Mnemonic m2 = m;
1705     if(m == Mnemonic_MOVSS) {
1706         size = OpndSize_32;
1707         size2 = OpndSize_64;
1708         type = LowOpndRegType_ss;
1709         pType = LowOpndRegType_xmm;
1710         m2 = Mnemonic_MOVQ; //to move from one xmm register to another
1711     }
1712     if(gDvm.executionMode == kExecutionModeNcgO1) {
1713         int tmpValue[2];
1714         int isConst;
1715         isConst = isVirtualRegConstant(vB, type, tmpValue, true/*updateRefCount*/);
1716         if(isConst == 3) {
1717             if(m == Mnemonic_MOVSS) { //load 32 bits from VR
1718                 //VR is not mapped to a register but in memory
1719                 dumpImmToMem(vB, OpndSize_32, tmpValue[0]);
1720                 //temporary reg has "pType" (which is xmm)
1721                 dump_mem_reg(m, ATOM_NORMAL, size, 4*vB, PhysicalReg_FP, true,
1722                     MemoryAccess_VR, vB, reg, isPhysical, pType);
1723                 return;
1724             }
1725             else if(m == Mnemonic_MOVSD || size == OpndSize_64) {
1726                 //VR is not mapped to a register but in memory
1727                 dumpImmToMem(vB, OpndSize_32, tmpValue[0]);
1728                 dumpImmToMem(vB+1, OpndSize_32, tmpValue[1]);
1729                 dump_mem_reg(m, ATOM_NORMAL, size, 4*vB, PhysicalReg_FP, true,
1730                     MemoryAccess_VR, vB, reg, isPhysical, pType);
1731                 return;
1732             }
1733             else if(size != OpndSize_64) {
1734                 //VR is not mapped to a register
1735                 dump_imm_reg(m, ATOM_NORMAL, size, tmpValue[0], reg, isPhysical, pType, false);
1736                 return;
1737             }
1738         }
1739         if(isConst == 1) dumpImmToMem(vB, OpndSize_32, tmpValue[0]);
1740         if(isConst == 2) dumpImmToMem(vB+1, OpndSize_32, tmpValue[1]);
1741         freeReg(true);
1742         int regAll = checkVirtualReg(vB, type, 0);
1743         if(regAll != PhysicalReg_Null) {
1744             startNativeCode(vB, type);
1745             donotSpillReg(regAll);
1746             //check XFER_MEM_TO_XMM
1747             updateVRAtUse(vB, type, regAll);
1748             //temporary reg has "pType"
1749             dump_reg_reg_noalloc_src(m2, ATOM_NORMAL, size2, regAll, true, reg, isPhysical, pType); //register allocator handles assembly move
1750             endNativeCode();
1751             updateRefCount(vB, type);
1752             return;
1753         }
1754         //not allocated to a register yet, no need to check XFER_MEM_TO_XMM
1755         regAll = registerAlloc(LowOpndRegType_virtual | type, vB, false/*dummy*/, false);
1756         if(regAll == PhysicalReg_Null) {
1757             dump_mem_reg_noalloc(m, size, 4*vB, PhysicalReg_FP, true,
1758                 MemoryAccess_VR, vB, reg, isPhysical, pType);
1759             return;
1760         }
1761 
1762         //temporary reg has pType
1763         if(checkTempReg2(reg, pType, isPhysical, regAll)) {
1764             registerAllocMove(reg, pType, isPhysical, regAll);
1765             dump_mem_reg_noalloc(m, size, 4*vB, PhysicalReg_FP, true,
1766                 MemoryAccess_VR, vB, regAll, true, pType);
1767             updateRefCount(vB, type);
1768             return;
1769         }
1770         else {
1771             dump_mem_reg_noalloc(m, size, 4*vB, PhysicalReg_FP, true,
1772                 MemoryAccess_VR, vB, regAll, true, pType);
1773             //xmm with 32 bits
1774             startNativeCode(vB, type);
1775             donotSpillReg(regAll);
1776             dump_reg_reg_noalloc_src(m2, ATOM_NORMAL, size2, regAll, true, reg, isPhysical, pType);
1777             endNativeCode();
1778             updateRefCount(vB, type);
1779             return;
1780         }
1781     }
1782     else {
1783         dump_mem_reg(m, ATOM_NORMAL, size, 4*vB, PhysicalReg_FP, true,
1784             MemoryAccess_VR, vB, reg, isPhysical, pType);
1785     }
1786 }
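//! Rough decision tree for get_virtual_reg_all under O1:
//!     constant + 32-bit GPR     -> mov reg, imm (VR memory never touched)
//!     constant + SS/SD/64-bit   -> flush halves to the home slot, then load
//!     VR already in a register  -> reg-to-reg move using m2/size2
//!     otherwise                 -> allocate, load [FP + 4*vB], possibly
//!                                  coalescing via registerAllocMove()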
1787 void get_virtual_reg(u2 vB, OpndSize size, int reg, bool isPhysical) {
1788     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1789     return get_virtual_reg_all(vB, size, reg, isPhysical, m);
1790 }
1791 void get_virtual_reg_noalloc(u2 vB, OpndSize size, int reg, bool isPhysical) {
1792     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1793     dump_mem_reg_noalloc(m, size, 4*vB, PhysicalReg_FP, true,
1794         MemoryAccess_VR, vB, reg, isPhysical, getTypeFromIntSize(size));
1795 }
1796 //3 cases: gp, xmm, ss
1797 //ss: the temporary register is xmm
1798 //!load from a temporary to a VR
1799 
1800 //!
1801 void set_virtual_reg_all(u2 vA, OpndSize size, int reg, bool isPhysical, Mnemonic m) {
1802     LowOpndRegType type = getTypeFromIntSize(size);
1803     LowOpndRegType pType = type;//gp or xmm
1804     OpndSize size2 = size;
1805     Mnemonic m2 = m;
1806     if(m == Mnemonic_MOVSS) {
1807         size = OpndSize_32;
1808         size2 = OpndSize_64;
1809         type = LowOpndRegType_ss;
1810         pType = LowOpndRegType_xmm;
1811         m2 = Mnemonic_MOVQ;
1812     }
1813     if(gDvm.executionMode == kExecutionModeNcgO1) {
1814         //3 cases
1815         //1: virtual register is already allocated to a physical register
1816         //   call dump_reg_reg_noalloc_dst
1817         //2: src reg is already allocated, VR is not yet allocated
1818         //   allocate VR to the same physical register used by src reg
1819         //   [call registerAllocMove]
1820         //3: both not yet allocated
1821         //   allocate a physical register for the VR
1822         //   then call dump_reg_reg_noalloc_dst
1823         //may need to convert from gp to xmm or the other way
1824         freeReg(true);
1825         int regAll = checkVirtualReg(vA, type, 0);
1826         if(regAll != PhysicalReg_Null)  { //case 1
1827             startNativeCode(-1, -1);
1828             donotSpillReg(regAll);
1829             dump_reg_reg_noalloc_dst(m2, size2, reg, isPhysical, regAll, true, pType); //temporary reg is "pType"
1830             endNativeCode();
1831             updateRefCount(vA, type);
1832             updateVirtualReg(vA, type); //will dump VR to memory, should happen afterwards
1833             return;
1834         }
1835         regAll = checkTempReg(reg, pType, isPhysical, vA); //vA is not used inside
1836         if(regAll != PhysicalReg_Null) { //case 2
1837             registerAllocMove(vA, LowOpndRegType_virtual | type, false, regAll);
1838             updateVirtualReg(vA, type); //will dump VR to memory, should happen afterwards
1839             return; //next native instruction starts at op
1840         }
1841         //case 3
1842         regAll = registerAlloc(LowOpndRegType_virtual | type, vA, false/*dummy*/, false);
1843         if(regAll == PhysicalReg_Null) {
1844             dump_reg_mem_noalloc(m, size, reg, isPhysical, 4*vA, PhysicalReg_FP, true,
1845                 MemoryAccess_VR, vA, pType);
1846             return;
1847         }
1848         startNativeCode(-1, -1);
1849         donotSpillReg(regAll);
1850         dump_reg_reg_noalloc_dst(m2, size2, reg, isPhysical, regAll, true, pType);
1851         endNativeCode();
1852         updateRefCount(vA, type);
1853         updateVirtualReg(vA, type);
1854     }
1855     else {
1856         dump_reg_mem(m, ATOM_NORMAL, size, reg, isPhysical, 4*vA, PhysicalReg_FP, true,
1857             MemoryAccess_VR, vA, pType);
1858     }
1859 }
1860 void set_virtual_reg(u2 vA, OpndSize size, int reg, bool isPhysical) {
1861     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1862     return set_virtual_reg_all(vA, size, reg, isPhysical, m);
1863 }
1864 void set_virtual_reg_noalloc(u2 vA, OpndSize size, int reg, bool isPhysical) {
1865     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1866     dump_reg_mem_noalloc(m, size, reg, isPhysical, 4*vA, PhysicalReg_FP, true,
1867         MemoryAccess_VR, vA, getTypeFromIntSize(size));
1868 }
1869 void get_VR_ss(int vB, int reg, bool isPhysical) {
1870     return get_virtual_reg_all(vB, OpndSize_64, reg, isPhysical, Mnemonic_MOVSS);
1871 }
1872 void set_VR_ss(int vA, int reg, bool isPhysical) {
1873     return set_virtual_reg_all(vA, OpndSize_64, reg, isPhysical, Mnemonic_MOVSS);
1874 }
1875 void get_VR_sd(int vB, int reg, bool isPhysical) {
1876     return get_virtual_reg_all(vB, OpndSize_64, reg, isPhysical, Mnemonic_MOVSD);
1877 }
1878 void set_VR_sd(int vA, int reg, bool isPhysical) {
1879     return set_virtual_reg_all(vA, OpndSize_64, reg, isPhysical, Mnemonic_MOVSD);
1880 }
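//! Note: the OpndSize_64 passed by the ss wrappers above is effectively
//! ignored -- get/set_virtual_reg_all reset the size to OpndSize_32 for the
//! MOVSS memory access and keep 64 bits only for the xmm-to-xmm MOVQ copy.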
1881 ////////////////////////////////// END: IA32 native instructions //////////////
1882 //! generate native instructions to get current PC in the stack frame
1883 
1884 //!
1885 int get_currentpc(int reg, bool isPhysical) {
1886     move_mem_to_reg(OpndSize_32, -sizeofStackSaveArea+offStackSaveArea_localRefTop, PhysicalReg_FP, true, reg, isPhysical);
1887     return 1;
1888 }
1889 //!generate native code to perform null check
1890 
1891 //!This function does not export PC
1892 int simpleNullCheck(int reg, bool isPhysical, int vr) {
1893     if(isVRNullCheck(vr, OpndSize_32)) {
1894         updateRefCount2(reg, LowOpndRegType_gp, isPhysical);
1895         num_removed_nullCheck++;
1896         return 0;
1897     }
1898     compare_imm_reg(OpndSize_32, 0, reg, isPhysical);
1899     conditional_jump_global_API(Condition_E, "common_errNullObject", false);
1900     setVRNullCheck(vr, OpndSize_32);
1901     return 0;
1902 }
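//! Code shape emitted by simpleNullCheck when the VR is not yet known non-null
//! (later checks on the same VR are elided via setVRNullCheck):
//!     test reg, reg
//!     je   common_errNullObject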
1903 
1904 /* only for O1 code generator */
1905 int boundCheck(int vr_array, int reg_array, bool isPhysical_array,
1906                int vr_index, int reg_index, bool isPhysical_index,
1907                int exceptionNum) {
1908 #ifdef BOUNDCHECK_OPT
1909     if(isVRBoundCheck(vr_array, vr_index)) {
1910         updateRefCount2(reg_array, LowOpndRegType_gp, isPhysical_array);
1911         updateRefCount2(reg_index, LowOpndRegType_gp, isPhysical_index);
1912         return 0;
1913     }
1914 #endif
1915     compare_mem_reg(OpndSize_32, offArrayObject_length,
1916                     reg_array, isPhysical_array,
1917                     reg_index, isPhysical_index);
1918 
1919     char errName[256];
1920     snprintf(errName, sizeof(errName), "common_errArrayIndex");
1921     handlePotentialException(
1922                                        Condition_NC, Condition_C,
1923                                        exceptionNum, errName);
1924 #ifdef BOUNDCHECK_OPT
1925     setVRBoundCheck(vr_array, vr_index);
1926 #endif
1927     return 0;
1928 }
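//! The bound check is an unsigned-comparison trick: after comparing the index
//! against [array + offArrayObject_length], the carry flag is set exactly when
//! index < length as unsigned values. Condition_C is therefore the in-bounds
//! path, and Condition_NC (out of range, including negative indices viewed as
//! huge unsigned values) raises common_errArrayIndex.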
1929 
1930 //!generate native code to perform null check
1931 
1932 //!
1933 int nullCheck(int reg, bool isPhysical, int exceptionNum, int vr) {
1934     char label[LABEL_SIZE];
1935 
1936     if(gDvm.executionMode == kExecutionModeNcgO1) {
1937         //nullCheck optimization is available in O1 mode only
1938         if(isVRNullCheck(vr, OpndSize_32)) {
1939             updateRefCount2(reg, LowOpndRegType_gp, isPhysical);
1940             if(exceptionNum <= 1) {
1941                 updateRefCount2(PhysicalReg_EDX, LowOpndRegType_gp, true);
1942                 updateRefCount2(PhysicalReg_EDX, LowOpndRegType_gp, true);
1943             }
1944             num_removed_nullCheck++;
1945             return 0;
1946         }
1947         compare_imm_reg(OpndSize_32, 0, reg, isPhysical);
1948         rememberState(exceptionNum);
1949         snprintf(label, LABEL_SIZE, "after_exception_%d", exceptionNum);
1950         conditional_jump(Condition_NE, label, true);
1951         if(exceptionNum > 1)
1952             nextVersionOfHardReg(PhysicalReg_EDX, 2); //next version has 2 ref count
1953         export_pc(); //use %edx
1954         constVREndOfBB();
1955         beforeCall("exception"); //dump GG, GL VRs
1956         unconditional_jump_global_API("common_errNullObject", false);
1957         insertLabel(label, true);
1958         goToState(exceptionNum);
1959         setVRNullCheck(vr, OpndSize_32);
1960     } else {
1961         compare_imm_reg(OpndSize_32, 0, reg, isPhysical);
1962         snprintf(label, LABEL_SIZE, "after_exception_%d", exceptionNum);
1963         conditional_jump(Condition_NE, label, true);
1964         export_pc(); //use %edx
1965         unconditional_jump_global_API("common_errNullObject", false);
1966         insertLabel(label, true);
1967     }
1968     return 0;
1969 }
1970 //!generate native code to handle potential exception
1971 
1972 //!
1973 int handlePotentialException(
1974                              ConditionCode code_excep, ConditionCode code_okay,
1975                              int exceptionNum, const char* errName) {
1976     char label[LABEL_SIZE];
1977 
1978     if(gDvm.executionMode == kExecutionModeNcgO1) {
1979         rememberState(exceptionNum);
1980         snprintf(label, LABEL_SIZE, "after_exception_%d", exceptionNum);
1981         conditional_jump(code_okay, label, true);
1982         if(exceptionNum > 1)
1983             nextVersionOfHardReg(PhysicalReg_EDX, 2); //next version has 2 ref count
1984         export_pc(); //use %edx
1985         constVREndOfBB();
1986         beforeCall("exception"); //dump GG, GL VRs
1987         if(!strcmp(errName, "common_throw_message")) {
1988             move_imm_to_reg(OpndSize_32, LstrInstantiationErrorPtr, PhysicalReg_ECX, true);
1989         }
1990         unconditional_jump_global_API(errName, false);
1991         insertLabel(label, true);
1992         goToState(exceptionNum);
1993     } else {
1994         snprintf(label, LABEL_SIZE, "after_exception_%d", exceptionNum);
1995         conditional_jump(code_okay, label, true);
1996         export_pc(); //use %edx
1997         if(!strcmp(errName, "common_throw_message")) {
1998             move_imm_to_reg(OpndSize_32, LstrInstantiationErrorPtr, PhysicalReg_ECX, true);
1999         }
2000         unconditional_jump_global_API(errName, false);
2001         insertLabel(label, true);
2002     }
2003     return 0;
2004 }
2005 //!generate native code to get the self pointer from glue
2006 
2007 //!It uses one scratch register
2008 int get_self_pointer(int reg, bool isPhysical) {
2009     move_mem_to_reg(OpndSize_32, offEBP_self, PhysicalReg_EBP, true, reg, isPhysical);
2010     return 0;
2011 }
2012 //!generate native code to get ResStrings from glue
2013 
2014 //!It uses two scratch registers
2015 int get_res_strings(int reg, bool isPhysical) {
2016     //if spill_loc_index > 0 || reg != NULL, use registerAlloc
2017     if(isGlueHandled(PhysicalReg_GLUE_DVMDEX)) {
2018         //if spill_loc_index > 0
2019         //  load from spilled location, update spill_loc_index & physicalReg
2020 #if 0
2021         updateRefCount2(C_SCRATCH_1, LowOpndRegType_gp, isScratchPhysical);
2022         updateRefCount2(C_SCRATCH_1, LowOpndRegType_gp, isScratchPhysical);
2023         updateRefCount2(C_SCRATCH_2, LowOpndRegType_gp, isScratchPhysical);
2024         updateRefCount2(C_SCRATCH_2, LowOpndRegType_gp, isScratchPhysical);
2025 #endif
2026         startNativeCode(-1, -1);
2027         freeReg(true);
2028         int regAll = registerAlloc(LowOpndRegType_gp, PhysicalReg_GLUE_DVMDEX, false, false/*updateRefCount*/);
2029         donotSpillReg(regAll);
2030         dump_mem_reg_noalloc_mem(Mnemonic_MOV, ATOM_NORMAL, OpndSize_32, offDvmDex_pResStrings, regAll, true, MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_gp);
2031         endNativeCode();
2032     }
2033     else
2034         {
2035             get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2036             move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.methodClassDex), C_SCRATCH_1, isScratchPhysical, C_SCRATCH_2, isScratchPhysical);
2037             //glue is not in a physical reg nor in a spilled location
2038             updateGlue(C_SCRATCH_2, isScratchPhysical, PhysicalReg_GLUE_DVMDEX); //spill_loc_index is -1, set physicalReg
2039             move_mem_to_reg(OpndSize_32, offDvmDex_pResStrings, C_SCRATCH_2, isScratchPhysical, reg, isPhysical);
2040         }
2041     return 0;
2042 }
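//! Equivalent C for the slow path above, when the DvmDex pointer is not cached
//! under PhysicalReg_GLUE_DVMDEX:
//!     reg = self->interpSave.methodClassDex->pResStrings;
//! The same cache-or-reload pattern repeats for classes, fields and methods
//! below.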
2043 int get_res_classes(int reg, bool isPhysical) {
2044     //if spill_loc_index > 0 || reg != NULL, use registerAlloc
2045     if(isGlueHandled(PhysicalReg_GLUE_DVMDEX)) {
2046         //if spill_loc_index > 0
2047         //  load from spilled location, update spill_loc_index & physicalReg
2048         startNativeCode(-1, -1);
2049         freeReg(true);
2050         int regAll = registerAlloc(LowOpndRegType_gp, PhysicalReg_GLUE_DVMDEX, false, false/*updateRefCount*/);
2051         donotSpillReg(regAll);
2052         dump_mem_reg_noalloc_mem(Mnemonic_MOV, ATOM_NORMAL, OpndSize_32, offDvmDex_pResClasses, regAll, true, MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_gp);
2053         endNativeCode();
2054     }
2055     else
2056         {
2057             get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2058             move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.methodClassDex), C_SCRATCH_1, isScratchPhysical, C_SCRATCH_2, isScratchPhysical);
2059             //glue is not in a physical reg nor in a spilled location
2060             updateGlue(C_SCRATCH_2, isScratchPhysical, PhysicalReg_GLUE_DVMDEX); //spill_loc_index is -1, set physicalReg
2061             move_mem_to_reg(OpndSize_32, offDvmDex_pResClasses, C_SCRATCH_2, isScratchPhysical, reg, isPhysical);
2062         }
2063     return 0;
2064 }
2065 //!generate native code to get ResFields from glue
2066 
2067 //!It uses two scratch registers
2068 int get_res_fields(int reg, bool isPhysical) {
2069     //if spill_loc_index > 0 || reg != NULL, use registerAlloc
2070     if(isGlueHandled(PhysicalReg_GLUE_DVMDEX)) {
2071         //if spill_loc_index > 0
2072         //  load from spilled location, update spill_loc_index & physicalReg
2073         startNativeCode(-1, -1);
2074         freeReg(true);
2075         int regAll = registerAlloc(LowOpndRegType_gp, PhysicalReg_GLUE_DVMDEX, false, false/*updateRefCount*/);
2076         donotSpillReg(regAll);
2077         dump_mem_reg_noalloc_mem(Mnemonic_MOV, ATOM_NORMAL, OpndSize_32, offDvmDex_pResFields, regAll, true, MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_gp);
2078         endNativeCode();
2079     }
2080     else
2081         {
2082             get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2083             move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.methodClassDex), C_SCRATCH_1, isScratchPhysical, C_SCRATCH_2, isScratchPhysical);
2084             //glue is not in a physical reg nor in a spilled location
2085             updateGlue(C_SCRATCH_2, isScratchPhysical, PhysicalReg_GLUE_DVMDEX); //spill_loc_index is -1, set physicalReg
2086             move_mem_to_reg(OpndSize_32, offDvmDex_pResFields, C_SCRATCH_2, isScratchPhysical, reg, isPhysical);
2087         }
2088     return 0;
2089 }
2090 //!generate native code to get ResMethods from glue
2091 
2092 //!It uses two scratch registers
2093 int get_res_methods(int reg, bool isPhysical) {
2094     //if spill_loc_index > 0 || reg != NULL, use registerAlloc
2095     if(isGlueHandled(PhysicalReg_GLUE_DVMDEX)) {
2096         //if spill_loc_index > 0
2097         //  load from spilled location, update spill_loc_index & physicalReg
2098         startNativeCode(-1, -1);
2099         freeReg(true);
2100         int regAll = registerAlloc(LowOpndRegType_gp, PhysicalReg_GLUE_DVMDEX, false, false/*updateRefCount*/);
2101         donotSpillReg(regAll);
2102         dump_mem_reg_noalloc_mem(Mnemonic_MOV, ATOM_NORMAL, OpndSize_32, offDvmDex_pResMethods, regAll, true, MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_gp);
2103         endNativeCode();
2104     }
2105     else
2106         {
2107             get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2108             move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.methodClassDex), C_SCRATCH_1, isScratchPhysical, C_SCRATCH_2, isScratchPhysical);
2109             //glue is not in a physical reg nor in a spilled location
2110             updateGlue(C_SCRATCH_2, isScratchPhysical, PhysicalReg_GLUE_DVMDEX); //spill_loc_index is -1, set physicalReg
2111             move_mem_to_reg(OpndSize_32, offDvmDex_pResMethods, C_SCRATCH_2, isScratchPhysical, reg, isPhysical);
2112         }
2113     return 0;
2114 }
2115 //!generate native code to get the current class object from glue
2116 
2117 //!It uses two scratch registers
2118 int get_glue_method_class(int reg, bool isPhysical) {
2119     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2120     move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.method), C_SCRATCH_1, isScratchPhysical, C_SCRATCH_2, isScratchPhysical);
2121     move_mem_to_reg(OpndSize_32, offMethod_clazz, C_SCRATCH_2, isScratchPhysical, reg, isPhysical);
2122     return 0;
2123 }
2124 //!generate native code to get the current method from glue
2125 
2126 //!It uses one scratch register
2127 int get_glue_method(int reg, bool isPhysical) {
2128     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2129     move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.method), C_SCRATCH_1, isScratchPhysical, reg, isPhysical);
2130     return 0;
2131 }
2132 //!generate native code to set the current method in glue
2133 
2134 //!It uses one scratch register
2135 int set_glue_method(int reg, bool isPhysical) {
2136     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2137     move_reg_to_mem(OpndSize_32, reg, isPhysical, offsetof(Thread, interpSave.method), C_SCRATCH_1, isScratchPhysical);
2138     return 0;
2139 }
2140 
2141 //!generate native code to get DvmDex from glue
2142 
2143 //!It uses one scratch register
2144 int get_glue_dvmdex(int reg, bool isPhysical) {
2145     //if spill_loc_index > 0 || reg != NULL, use registerAlloc
2146     if(isGlueHandled(PhysicalReg_GLUE_DVMDEX)) {
2147         //if spill_loc_index > 0
2148         //  load from spilled location, update spill_loc_index & physicalReg
2149         startNativeCode(-1, -1);
2150         freeReg(true);
2151         int regAll = registerAlloc(LowOpndRegType_gp, PhysicalReg_GLUE_DVMDEX, false, false/*updateRefCount*/);
2152         donotSpillReg(regAll);
2153         dump_reg_reg_noalloc_src(Mnemonic_MOV, ATOM_NORMAL, OpndSize_32, regAll, true,
2154                                           reg, isPhysical, LowOpndRegType_gp);
2155         endNativeCode();
2156     }
2157     else
2158         {
2159             get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2160             move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.methodClassDex), C_SCRATCH_1, isScratchPhysical, reg, isPhysical);
2161             //glue is not in a physical reg nor in a spilled location
2162             updateGlue(reg, isPhysical, PhysicalReg_GLUE_DVMDEX); //spill_loc_index is -1, set physicalReg
2163         }
2164     return 0;
2165 }
2166 //!generate native code to set DvmDex in glue
2167 
2168 //!It uses one scratch register
2169 int set_glue_dvmdex(int reg, bool isPhysical) {
2170     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2171     move_reg_to_mem(OpndSize_32, reg, isPhysical, offsetof(Thread, interpSave.methodClassDex), C_SCRATCH_1, isScratchPhysical);
2172     return 0;
2173 }
2174 //!generate native code to get SuspendCount from glue
2175 
2176 //!It uses one scratch register
2177 int get_suspendCount(int reg, bool isPhysical) {
2178     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2179     move_mem_to_reg(OpndSize_32, offsetof(Thread, suspendCount), C_SCRATCH_1, isScratchPhysical, reg, isPhysical);
2180     return 0;
2181 }
2182 
2183 //!generate native code to get retval from glue
2184 
2185 //!It uses one scratch register
2186 int get_return_value(OpndSize size, int reg, bool isPhysical) {
2187     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2188     move_mem_to_reg(size, offsetof(Thread, interpSave.retval), C_SCRATCH_1, isScratchPhysical, reg, isPhysical);
2189     return 0;
2190 }
2191 //!generate native code to set retval in glue
2192 
2193 //!It uses one scratch register
2194 int set_return_value(OpndSize size, int reg, bool isPhysical) {
2195     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2196     move_reg_to_mem(size, reg, isPhysical, offsetof(Thread, interpSave.retval), C_SCRATCH_1, isScratchPhysical);
2197     return 0;
2198 }
2199 //!generate native code to clear exception object in glue
2200 
2201 //!It uses two scratch registers
2202 int clear_exception() {
2203     get_self_pointer(C_SCRATCH_2, isScratchPhysical);
2204     move_imm_to_mem(OpndSize_32, 0, offsetof(Thread, exception), C_SCRATCH_2, isScratchPhysical);
2205     return 0;
2206 }
2207 //!generate native code to get exception object in glue
2208 
2209 //!It uses two scratch registers
get_exception(int reg,bool isPhysical)2210 int get_exception(int reg, bool isPhysical) {
2211     get_self_pointer(C_SCRATCH_2, isScratchPhysical);
2212     move_mem_to_reg(OpndSize_32, offsetof(Thread, exception), C_SCRATCH_2, isScratchPhysical, reg, isPhysical);
2213     return 0;
2214 }
2215 //!generate native code to set exception object in glue
2216 
2217 //!It uses two scratch registers
set_exception(int reg,bool isPhysical)2218 int set_exception(int reg, bool isPhysical) {
2219     get_self_pointer(C_SCRATCH_2, isScratchPhysical);
2220     move_reg_to_mem(OpndSize_32, reg, isPhysical, offsetof(Thread, exception), C_SCRATCH_2, isScratchPhysical);
2221     return 0;
2222 }
2223 //!generate native code to save frame pointer and current PC in stack frame to glue
2224 
2225 //!It uses two scratch registers
save_pc_fp_to_glue()2226 int save_pc_fp_to_glue() {
2227     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2228     move_reg_to_mem(OpndSize_32, PhysicalReg_FP, true, offsetof(Thread, interpSave.curFrame), C_SCRATCH_1, isScratchPhysical);
2229 
2230     //from stack-save currentPc
2231     move_mem_to_reg(OpndSize_32, -sizeofStackSaveArea+offStackSaveArea_localRefTop, PhysicalReg_FP, true, C_SCRATCH_2, isScratchPhysical);
2232     move_reg_to_mem(OpndSize_32, C_SCRATCH_2, isScratchPhysical, offsetof(Thread, interpSave.pc), C_SCRATCH_1, isScratchPhysical);
2233     return 0;
2234 }
2235 //! get SaveArea pointer
2236 
2237 //!
savearea_from_fp(int reg,bool isPhysical)2238 int savearea_from_fp(int reg, bool isPhysical) {
2239     load_effective_addr(-sizeofStackSaveArea, PhysicalReg_FP, true, reg, isPhysical);
2240     return 0;
2241 }
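//A minimal usage sketch for savearea_from_fp (illustrative only, not called
//from this file): the StackSaveArea sits immediately below the Dalvik frame
//pointer, so a field can be read once the base has been computed. The helper
//name is hypothetical; the offset constant is the one used above.
#if 0
int get_saved_pc_example(int reg, bool isPhysical) {
    savearea_from_fp(reg, isPhysical); //reg = FP - sizeofStackSaveArea
    //read the saved Dalvik PC slot out of the save area
    move_mem_to_reg(OpndSize_32, offStackSaveArea_localRefTop, reg, isPhysical, reg, isPhysical);
    return 0;
}
#endif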

#ifdef DEBUG_CALL_STACK3
int call_debug_dumpSwitch() {
    typedef void (*vmHelper)(int);
    vmHelper funcPtr = debug_dumpSwitch;
    callFuncPtr((int)funcPtr, "debug_dumpSwitch");
    return 0;
}
#endif

int call_dvmQuasiAtomicSwap64() {
    typedef int64_t (*vmHelper)(int64_t, volatile int64_t*);
    vmHelper funcPtr = dvmQuasiAtomicSwap64;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmQuasiAtomicSwap64");
        callFuncPtr((int)funcPtr, "dvmQuasiAtomicSwap64");
        afterCall("dvmQuasiAtomicSwap64");
    } else {
        callFuncPtr((int)funcPtr, "dvmQuasiAtomicSwap64");
    }
    return 0;
}

int call_dvmQuasiAtomicRead64() {
    typedef int64_t (*vmHelper)(volatile const int64_t*);
    vmHelper funcPtr = dvmQuasiAtomicRead64;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmQuasiAtomicRead64");
        callFuncPtr((int)funcPtr, "dvmQuasiAtomicRead64");
        afterCall("dvmQuasiAtomicRead64");
        touchEax(); //for return value
        touchEdx();
    } else {
        callFuncPtr((int)funcPtr, "dvmQuasiAtomicRead64");
    }
    return 0;
}
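//dvmQuasiAtomicRead64 returns a 64-bit value, which the ia32 cdecl ABI
//places in edx:eax; touchEax/touchEdx above tell the register allocator
//that those registers are written by the call, so no live value may be
//kept in them across it.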

int call_dvmJitToInterpPunt() {
    typedef void (*vmHelper)(int);
    vmHelper funcPtr = dvmJitToInterpPunt;
    callFuncPtr((int)funcPtr, "dvmJitToInterpPunt");
    return 0;
}

int call_dvmJitToInterpNormal() {
    typedef void (*vmHelper)(int);
    vmHelper funcPtr = dvmJitToInterpNormal;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmJitToInterpNormal");
        callFuncPtr((int)funcPtr, "dvmJitToInterpNormal");
        afterCall("dvmJitToInterpNormal");
        touchEbx();
    } else {
        callFuncPtr((int)funcPtr, "dvmJitToInterpNormal");
    }
    return 0;
}

int call_dvmJitToInterpTraceSelectNoChain() {
    typedef void (*vmHelper)(int);
    vmHelper funcPtr = dvmJitToInterpTraceSelectNoChain;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmJitToInterpTraceSelectNoChain");
        callFuncPtr((int)funcPtr, "dvmJitToInterpTraceSelectNoChain");
        afterCall("dvmJitToInterpTraceSelectNoChain");
        touchEbx();
    } else {
        callFuncPtr((int)funcPtr, "dvmJitToInterpTraceSelectNoChain");
    }
    return 0;
}

int call_dvmJitToInterpTraceSelect() {
    typedef void (*vmHelper)(int);
    vmHelper funcPtr = dvmJitToInterpTraceSelect;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmJitToInterpTraceSelect");
        callFuncPtr((int)funcPtr, "dvmJitToInterpTraceSelect");
        afterCall("dvmJitToInterpTraceSelect");
        touchEbx();
    } else {
        callFuncPtr((int)funcPtr, "dvmJitToInterpTraceSelect");
    }
    return 0;
}

int call_dvmJitToPatchPredictedChain() {
    typedef const Method * (*vmHelper)(const Method *method,
                                       Thread *self,
                                       PredictedChainingCell *cell,
                                       const ClassObject *clazz);
    vmHelper funcPtr = dvmJitToPatchPredictedChain;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmJitToPatchPredictedChain");
        callFuncPtr((int)funcPtr, "dvmJitToPatchPredictedChain");
        afterCall("dvmJitToPatchPredictedChain");
    } else {
        callFuncPtr((int)funcPtr, "dvmJitToPatchPredictedChain");
    }
    return 0;
}
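//All of the call_* helpers in this file share one shape: under NCG O1 the
//call is bracketed by beforeCall/afterCall so the register allocator can
//spill caller-save state and resynchronize afterwards; under O0 the call is
//emitted directly. A hedged template of that shape (hypothetical helper,
//not used by this file):
#if 0
static int call_vm_helper_example(void* funcPtr, const char* name) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall(name);
        callFuncPtr((int)funcPtr, name);
        afterCall(name);
    } else {
        callFuncPtr((int)funcPtr, name);
    }
    return 0;
}
#endif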

//!generate native code to call __moddi3

//!
int call_moddi3() {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("moddi3");
        callFuncPtr((intptr_t)__moddi3, "__moddi3");
        afterCall("moddi3");
    } else {
        callFuncPtr((intptr_t)__moddi3, "__moddi3");
    }
    return 0;
}
//!generate native code to call __divdi3

//!
int call_divdi3() {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("divdi3");
        callFuncPtr((intptr_t)__divdi3, "__divdi3");
        afterCall("divdi3");
    } else {
        callFuncPtr((intptr_t)__divdi3, "__divdi3");
    }
    return 0;
}

//!generate native code to call fmod

//!
int call_fmod() {
    typedef double (*libHelper)(double, double);
    libHelper funcPtr = fmod;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("fmod");
        callFuncPtr((int)funcPtr, "fmod");
        afterCall("fmod");
    } else {
        callFuncPtr((int)funcPtr, "fmod");
    }
    return 0;
}
//!generate native code to call fmodf

//!
int call_fmodf() {
    typedef float (*libHelper)(float, float);
    libHelper funcPtr = fmodf;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("fmodf");
        callFuncPtr((int)funcPtr, "fmodf");
        afterCall("fmodf");
    } else {
        callFuncPtr((int)funcPtr, "fmodf");
    }
    return 0;
}
//!generate native code to call dvmFindCatchBlock

//!
int call_dvmFindCatchBlock() {
    //int dvmFindCatchBlock(Thread* self, int relPc, Object* exception,
    //bool doUnroll, void** newFrame)
    typedef int (*vmHelper)(Thread*, int, Object*, bool, void**);
    vmHelper funcPtr = dvmFindCatchBlock;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmFindCatchBlock");
        callFuncPtr((int)funcPtr, "dvmFindCatchBlock");
        afterCall("dvmFindCatchBlock");
    } else {
        callFuncPtr((int)funcPtr, "dvmFindCatchBlock");
    }
    return 0;
}
//!generate native code to call dvmThrowVerificationError

//!
int call_dvmThrowVerificationError() {
    typedef void (*vmHelper)(const Method*, int, int);
    vmHelper funcPtr = dvmThrowVerificationError;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmThrowVerificationError");
        callFuncPtr((int)funcPtr, "dvmThrowVerificationError");
        afterCall("dvmThrowVerificationError");
    } else {
        callFuncPtr((int)funcPtr, "dvmThrowVerificationError");
    }
    return 0;
}

//!generate native code to call dvmResolveMethod

//!
int call_dvmResolveMethod() {
    //Method* dvmResolveMethod(const ClassObject* referrer, u4 methodIdx, MethodType methodType);
    typedef Method* (*vmHelper)(const ClassObject*, u4, MethodType);
    vmHelper funcPtr = dvmResolveMethod;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmResolveMethod");
        callFuncPtr((int)funcPtr, "dvmResolveMethod");
        afterCall("dvmResolveMethod");
    } else {
        callFuncPtr((int)funcPtr, "dvmResolveMethod");
    }
    return 0;
}
//!generate native code to call dvmResolveClass

//!
int call_dvmResolveClass() {
    //ClassObject* dvmResolveClass(const ClassObject* referrer, u4 classIdx, bool fromUnverifiedConstant)
    typedef ClassObject* (*vmHelper)(const ClassObject*, u4, bool);
    vmHelper funcPtr = dvmResolveClass;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmResolveClass");
        callFuncPtr((int)funcPtr, "dvmResolveClass");
        afterCall("dvmResolveClass");
    } else {
        callFuncPtr((int)funcPtr, "dvmResolveClass");
    }
    return 0;
}

//!generate native code to call dvmInstanceofNonTrivial

//!
int call_dvmInstanceofNonTrivial() {
    typedef int (*vmHelper)(const ClassObject*, const ClassObject*);
    vmHelper funcPtr = dvmInstanceofNonTrivial;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmInstanceofNonTrivial");
        callFuncPtr((int)funcPtr, "dvmInstanceofNonTrivial");
        afterCall("dvmInstanceofNonTrivial");
    } else {
        callFuncPtr((int)funcPtr, "dvmInstanceofNonTrivial");
    }
    return 0;
}
//!generate native code to call dvmThrowException

//!
int call_dvmThrow() {
    typedef void (*vmHelper)(ClassObject* exceptionClass, const char*);
    vmHelper funcPtr = dvmThrowException;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmThrowException");
        callFuncPtr((int)funcPtr, "dvmThrowException");
        afterCall("dvmThrowException");
    } else {
        callFuncPtr((int)funcPtr, "dvmThrowException");
    }
    return 0;
}
//!generate native code to call dvmThrowExceptionWithClassMessage

//!
int call_dvmThrowWithMessage() {
    typedef void (*vmHelper)(ClassObject* exceptionClass, const char*);
    vmHelper funcPtr = dvmThrowExceptionWithClassMessage;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmThrowExceptionWithClassMessage");
        callFuncPtr((int)funcPtr, "dvmThrowExceptionWithClassMessage");
        afterCall("dvmThrowExceptionWithClassMessage");
    } else {
        callFuncPtr((int)funcPtr, "dvmThrowExceptionWithClassMessage");
    }
    return 0;
}
//!generate native code to call dvmCheckSuspendPending

//!
int call_dvmCheckSuspendPending() {
    typedef bool (*vmHelper)(Thread*);
    vmHelper funcPtr = dvmCheckSuspendPending;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmCheckSuspendPending");
        callFuncPtr((int)funcPtr, "dvmCheckSuspendPending");
        afterCall("dvmCheckSuspendPending");
    } else {
        callFuncPtr((int)funcPtr, "dvmCheckSuspendPending");
    }
    return 0;
}
//!generate native code to call dvmLockObject

//!
int call_dvmLockObject() {
    typedef void (*vmHelper)(struct Thread*, struct Object*);
    vmHelper funcPtr = dvmLockObject;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmLockObject");
        callFuncPtr((int)funcPtr, "dvmLockObject");
        afterCall("dvmLockObject");
    } else {
        callFuncPtr((int)funcPtr, "dvmLockObject");
    }
    return 0;
}
//!generate native code to call dvmUnlockObject

//!
int call_dvmUnlockObject() {
    typedef bool (*vmHelper)(Thread*, Object*);
    vmHelper funcPtr = dvmUnlockObject;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmUnlockObject");
        callFuncPtr((int)funcPtr, "dvmUnlockObject");
        afterCall("dvmUnlockObject");
    } else {
        callFuncPtr((int)funcPtr, "dvmUnlockObject");
    }
    return 0;
}
//!generate native code to call dvmInitClass

//!
int call_dvmInitClass() {
    typedef bool (*vmHelper)(ClassObject*);
    vmHelper funcPtr = dvmInitClass;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmInitClass");
        callFuncPtr((int)funcPtr, "dvmInitClass");
        afterCall("dvmInitClass");
    } else {
        callFuncPtr((int)funcPtr, "dvmInitClass");
    }
    return 0;
}
//!generate native code to call dvmAllocObject

//!
int call_dvmAllocObject() {
    typedef Object* (*vmHelper)(ClassObject*, int);
    vmHelper funcPtr = dvmAllocObject;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmAllocObject");
        callFuncPtr((int)funcPtr, "dvmAllocObject");
        afterCall("dvmAllocObject");
    } else {
        callFuncPtr((int)funcPtr, "dvmAllocObject");
    }
    return 0;
}
//!generate native code to call dvmAllocArrayByClass

//!
int call_dvmAllocArrayByClass() {
    typedef ArrayObject* (*vmHelper)(ClassObject*, size_t, int);
    vmHelper funcPtr = dvmAllocArrayByClass;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmAllocArrayByClass");
        callFuncPtr((int)funcPtr, "dvmAllocArrayByClass");
        afterCall("dvmAllocArrayByClass");
    } else {
        callFuncPtr((int)funcPtr, "dvmAllocArrayByClass");
    }
    return 0;
}
//!generate native code to call dvmAllocPrimitiveArray

//!
int call_dvmAllocPrimitiveArray() {
    typedef ArrayObject* (*vmHelper)(char, size_t, int);
    vmHelper funcPtr = dvmAllocPrimitiveArray;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmAllocPrimitiveArray");
        callFuncPtr((int)funcPtr, "dvmAllocPrimitiveArray");
        afterCall("dvmAllocPrimitiveArray");
    } else {
        callFuncPtr((int)funcPtr, "dvmAllocPrimitiveArray");
    }
    return 0;
}
//!generate native code to call dvmInterpHandleFillArrayData

//!
int call_dvmInterpHandleFillArrayData() {
    typedef bool (*vmHelper)(ArrayObject*, const u2*);
    vmHelper funcPtr = dvmInterpHandleFillArrayData;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmInterpHandleFillArrayData"); //before move_imm_to_reg to avoid spilling C_SCRATCH_1
        callFuncPtr((int)funcPtr, "dvmInterpHandleFillArrayData");
        afterCall("dvmInterpHandleFillArrayData");
    } else {
        callFuncPtr((int)funcPtr, "dvmInterpHandleFillArrayData");
    }
    return 0;
}

//!generate native code to call dvmNcgHandlePackedSwitch

//!
int call_dvmNcgHandlePackedSwitch() {
    typedef s4 (*vmHelper)(const s4*, s4, u2, s4);
    vmHelper funcPtr = dvmNcgHandlePackedSwitch;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmNcgHandlePackedSwitch");
        callFuncPtr((int)funcPtr, "dvmNcgHandlePackedSwitch");
        afterCall("dvmNcgHandlePackedSwitch");
    } else {
        callFuncPtr((int)funcPtr, "dvmNcgHandlePackedSwitch");
    }
    return 0;
}

int call_dvmJitHandlePackedSwitch() {
    typedef s4 (*vmHelper)(const s4*, s4, u2, s4);
    vmHelper funcPtr = dvmJitHandlePackedSwitch;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmJitHandlePackedSwitch");
        callFuncPtr((int)funcPtr, "dvmJitHandlePackedSwitch");
        afterCall("dvmJitHandlePackedSwitch");
    } else {
        callFuncPtr((int)funcPtr, "dvmJitHandlePackedSwitch");
    }
    return 0;
}

//!generate native code to call dvmNcgHandleSparseSwitch

//!
int call_dvmNcgHandleSparseSwitch() {
    typedef s4 (*vmHelper)(const s4*, u2, s4);
    vmHelper funcPtr = dvmNcgHandleSparseSwitch;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmNcgHandleSparseSwitch");
        callFuncPtr((int)funcPtr, "dvmNcgHandleSparseSwitch");
        afterCall("dvmNcgHandleSparseSwitch");
    } else {
        callFuncPtr((int)funcPtr, "dvmNcgHandleSparseSwitch");
    }
    return 0;
}

int call_dvmJitHandleSparseSwitch() {
    typedef s4 (*vmHelper)(const s4*, u2, s4);
    vmHelper funcPtr = dvmJitHandleSparseSwitch;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmJitHandleSparseSwitch");
        callFuncPtr((int)funcPtr, "dvmJitHandleSparseSwitch");
        afterCall("dvmJitHandleSparseSwitch");
    } else {
        callFuncPtr((int)funcPtr, "dvmJitHandleSparseSwitch");
    }
    return 0;
}

//!generate native code to call dvmCanPutArrayElement

//!
int call_dvmCanPutArrayElement() {
    typedef bool (*vmHelper)(const ClassObject*, const ClassObject*);
    vmHelper funcPtr = dvmCanPutArrayElement;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmCanPutArrayElement");
        callFuncPtr((int)funcPtr, "dvmCanPutArrayElement");
        afterCall("dvmCanPutArrayElement");
    } else {
        callFuncPtr((int)funcPtr, "dvmCanPutArrayElement");
    }
    return 0;
}

//!generate native code to call dvmFindInterfaceMethodInCache2

//!
int call_dvmFindInterfaceMethodInCache() {
    typedef Method* (*vmHelper)(ClassObject*, u4, const Method*, DvmDex*);
    vmHelper funcPtr = dvmFindInterfaceMethodInCache2;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmFindInterfaceMethodInCache2");
        callFuncPtr((int)funcPtr, "dvmFindInterfaceMethodInCache2");
        afterCall("dvmFindInterfaceMethodInCache2");
    } else {
        callFuncPtr((int)funcPtr, "dvmFindInterfaceMethodInCache2");
    }
    return 0;
}

//!generate native code to call dvmHandleStackOverflow

//!
int call_dvmHandleStackOverflow() {
    typedef void (*vmHelper)(Thread*, const Method*);
    vmHelper funcPtr = dvmHandleStackOverflow;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmHandleStackOverflow");
        callFuncPtr((int)funcPtr, "dvmHandleStackOverflow");
        afterCall("dvmHandleStackOverflow");
    } else {
        callFuncPtr((int)funcPtr, "dvmHandleStackOverflow");
    }
    return 0;
}
//!generate native code to call dvmResolveString

//!
int call_dvmResolveString() {
    //StringObject* dvmResolveString(const ClassObject* referrer, u4 stringIdx)
    typedef StringObject* (*vmHelper)(const ClassObject*, u4);
    vmHelper funcPtr = dvmResolveString;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmResolveString");
        callFuncPtr((int)funcPtr, "dvmResolveString");
        afterCall("dvmResolveString");
    } else {
        callFuncPtr((int)funcPtr, "dvmResolveString");
    }
    return 0;
}
//!generate native code to call dvmResolveInstField

//!
int call_dvmResolveInstField() {
    //InstField* dvmResolveInstField(const ClassObject* referrer, u4 ifieldIdx)
    typedef InstField* (*vmHelper)(const ClassObject*, u4);
    vmHelper funcPtr = dvmResolveInstField;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmResolveInstField");
        callFuncPtr((int)funcPtr, "dvmResolveInstField");
        afterCall("dvmResolveInstField");
    } else {
        callFuncPtr((int)funcPtr, "dvmResolveInstField");
    }
    return 0;
}
//!generate native code to call dvmResolveStaticField

//!
int call_dvmResolveStaticField() {
    //StaticField* dvmResolveStaticField(const ClassObject* referrer, u4 sfieldIdx)
    typedef StaticField* (*vmHelper)(const ClassObject*, u4);
    vmHelper funcPtr = dvmResolveStaticField;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmResolveStaticField");
        callFuncPtr((int)funcPtr, "dvmResolveStaticField");
        afterCall("dvmResolveStaticField");
    } else {
        callFuncPtr((int)funcPtr, "dvmResolveStaticField");
    }
    return 0;
}

#define P_GPR_2 PhysicalReg_ECX
/*!
\brief This function is used to resolve a string reference

INPUT: const pool index in %eax

OUTPUT: resolved string in %eax

The registers are hard-coded: 2 physical registers, %esi and %edx, are used as scratch registers;
it calls the C function dvmResolveString;
the only register still live after this function is ebx.
*/
int const_string_resolve() {
    scratchRegs[0] = PhysicalReg_ESI; scratchRegs[1] = PhysicalReg_EDX;
    scratchRegs[2] = PhysicalReg_Null; scratchRegs[3] = PhysicalReg_Null;
    insertLabel(".const_string_resolve", false);
    //method stored in glue structure as well as on the interpreted stack
    get_glue_method_class(P_GPR_2, true);
    load_effective_addr(-8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, PhysicalReg_EAX, true, 4, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, P_GPR_2, true, 0, PhysicalReg_ESP, true);
    call_dvmResolveString();
    load_effective_addr(8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    compare_imm_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
    conditional_jump(Condition_E, "common_exceptionThrown", false);
    x86_return();
    return 0;
}
#undef P_GPR_2
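//In C terms the stub above performs the following (illustrative sketch only;
//the function name is hypothetical, and on a NULL result the real stub jumps
//to the shared "common_exceptionThrown" label instead of returning):
#if 0
StringObject* const_string_resolve_equivalent(ClassObject* referrerClass, u4 stringIdx) {
    StringObject* s = dvmResolveString(referrerClass, stringIdx);
    //s == NULL means an exception is pending; the stub branches in that case
    return s;
}
#endif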
/*!
\brief This function is used to resolve a class

INPUT: const pool index in argument "indexReg" (%eax)

OUTPUT: resolved class in %eax

The registers are hard-coded: 3 physical registers (%esi, %edx, startLR:%eax) are used as scratch registers;
it calls the C function dvmResolveClass;
the only register still live after this function is ebx.
*/
int resolve_class2(
           int startLR/*scratch register*/, bool isPhysical, int indexReg/*const pool index*/,
           bool indexPhysical, int thirdArg) {
    insertLabel(".class_resolve", false);
    scratchRegs[0] = PhysicalReg_ESI; scratchRegs[1] = PhysicalReg_EDX;
    scratchRegs[2] = PhysicalReg_Null; scratchRegs[3] = PhysicalReg_Null;

    //push index to stack first, to free indexReg
    load_effective_addr(-12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, indexReg, indexPhysical, 4, PhysicalReg_ESP, true);
    get_glue_method_class(startLR, isPhysical);
    move_imm_to_mem(OpndSize_32, thirdArg, 8, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, startLR, isPhysical, 0, PhysicalReg_ESP, true);
    call_dvmResolveClass();
    load_effective_addr(12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    compare_imm_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
    conditional_jump(Condition_E, "common_exceptionThrown", false);

    x86_return();
    return 0;
}
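//Outgoing cdecl argument frame built by resolve_class2 (and, with a
//MethodType as the third argument, by resolve_method2 below) in the 12 bytes
//reserved on the stack:
//  [esp+0] referrer ClassObject* (from get_glue_method_class)
//  [esp+4] constant pool index   (spilled early to free indexReg)
//  [esp+8] thirdArg              (fromUnverifiedConstant / MethodType)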
/*!
\brief This function is used to resolve a method; it is called once with %eax serving as both indexReg and startLR

INPUT: const pool index in argument "indexReg" (%eax)

OUTPUT: resolved method in %eax

The registers are hard-coded: 3 physical registers (%esi, %edx, startLR:%eax) are used as scratch registers;
it calls the C function dvmResolveMethod;
the only register still live after this function is ebx.
*/
int resolve_method2(
            int startLR/*logical register index*/, bool isPhysical, int indexReg/*const pool index*/,
            bool indexPhysical,
            int thirdArg/*VIRTUAL*/) {
    if(thirdArg == METHOD_VIRTUAL)
        insertLabel(".virtual_method_resolve", false);
    else if(thirdArg == METHOD_DIRECT)
        insertLabel(".direct_method_resolve", false);
    else if(thirdArg == METHOD_STATIC)
        insertLabel(".static_method_resolve", false);

    load_effective_addr(-12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, indexReg, indexPhysical, 4, PhysicalReg_ESP, true);

    scratchRegs[0] = PhysicalReg_ESI; scratchRegs[1] = PhysicalReg_EDX;
    scratchRegs[2] = PhysicalReg_Null; scratchRegs[3] = PhysicalReg_Null;
    get_glue_method_class(startLR, isPhysical);

    move_imm_to_mem(OpndSize_32, thirdArg, 8, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, startLR, isPhysical, 0, PhysicalReg_ESP, true);
    call_dvmResolveMethod();
    load_effective_addr(12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    compare_imm_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
    conditional_jump(Condition_E, "common_exceptionThrown", false);

    x86_return();
    return 0;
}
/*!
\brief This function is used to resolve an instance field

INPUT: const pool index in argument "indexReg" (%eax)

OUTPUT: resolved field in %eax

The registers are hard-coded: 3 physical registers (%esi, %edx, startLR:%eax) are used as scratch registers;
it calls the C function dvmResolveInstField;
the only register still live after this function is ebx.
*/
int resolve_inst_field2(
            int startLR/*logical register index*/, bool isPhysical,
            int indexReg/*const pool index*/, bool indexPhysical) {
    insertLabel(".inst_field_resolve", false);
    scratchRegs[0] = PhysicalReg_ESI; scratchRegs[1] = PhysicalReg_EDX;
    scratchRegs[2] = PhysicalReg_Null; scratchRegs[3] = PhysicalReg_Null;

    load_effective_addr(-8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, indexReg, indexPhysical, 4, PhysicalReg_ESP, true);
    //method stored in glue structure as well as interpreted stack
    get_glue_method_class(startLR, isPhysical);
    move_reg_to_mem(OpndSize_32, startLR, isPhysical, 0, PhysicalReg_ESP, true);
    call_dvmResolveInstField();
    load_effective_addr(8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    compare_imm_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
    conditional_jump(Condition_E, "common_exceptionThrown", false);

    x86_return();
    return 0;
}
/*!
\brief This function is used to resolve a static field

INPUT: const pool index in argument "indexReg" (%eax)

OUTPUT: resolved field in %eax

The registers are hard-coded: 3 physical registers (%esi, %edx, startLR:%eax) are used as scratch registers;
it calls the C function dvmResolveStaticField;
the only register still live after this function is ebx.
*/
int resolve_static_field2(
              int startLR/*logical register index*/, bool isPhysical, int indexReg/*const pool index*/,
              bool indexPhysical) {
    insertLabel(".static_field_resolve", false);
    scratchRegs[0] = PhysicalReg_ESI; scratchRegs[1] = PhysicalReg_EDX;
    scratchRegs[2] = PhysicalReg_Null; scratchRegs[3] = PhysicalReg_Null;

    load_effective_addr(-8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, indexReg, indexPhysical, 4, PhysicalReg_ESP, true);
    get_glue_method_class(startLR, isPhysical);
    move_reg_to_mem(OpndSize_32, startLR, isPhysical, 0, PhysicalReg_ESP, true);
    call_dvmResolveStaticField();
    load_effective_addr(8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    compare_imm_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
    conditional_jump(Condition_E, "common_exceptionThrown", false);

    x86_return();
    return 0;
}

int pushAllRegs() {
    load_effective_addr(-28, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem_noalloc(OpndSize_32, PhysicalReg_EAX, true, 24, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1);
    move_reg_to_mem_noalloc(OpndSize_32, PhysicalReg_EBX, true, 20, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1);
    move_reg_to_mem_noalloc(OpndSize_32, PhysicalReg_ECX, true, 16, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1);
    move_reg_to_mem_noalloc(OpndSize_32, PhysicalReg_EDX, true, 12, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1);
    move_reg_to_mem_noalloc(OpndSize_32, PhysicalReg_ESI, true, 8, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1);
    move_reg_to_mem_noalloc(OpndSize_32, PhysicalReg_EDI, true, 4, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1);
    move_reg_to_mem_noalloc(OpndSize_32, PhysicalReg_EBP, true, 0, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1);
    return 0;
}
int popAllRegs() {
    move_mem_to_reg_noalloc(OpndSize_32, 24, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1, PhysicalReg_EAX, true);
    move_mem_to_reg_noalloc(OpndSize_32, 20, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1, PhysicalReg_EBX, true);
    move_mem_to_reg_noalloc(OpndSize_32, 16, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1, PhysicalReg_ECX, true);
    move_mem_to_reg_noalloc(OpndSize_32, 12, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1, PhysicalReg_EDX, true);
    move_mem_to_reg_noalloc(OpndSize_32, 8, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1, PhysicalReg_ESI, true);
    move_mem_to_reg_noalloc(OpndSize_32, 4, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1, PhysicalReg_EDI, true);
    move_mem_to_reg_noalloc(OpndSize_32, 0, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1, PhysicalReg_EBP, true);
    load_effective_addr(28, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    return 0;
}
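//Register save frame produced by pushAllRegs (28 bytes, seven GPRs; ESP is
//excluded because it is implicit in the frame itself):
//  [esp+24] EAX   [esp+20] EBX   [esp+16] ECX   [esp+12] EDX
//  [esp+8]  ESI   [esp+4]  EDI   [esp+0]  EBP
//popAllRegs restores the same slots and then releases the 28 bytes.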

void dump_nop(int size) {
    switch(size) {
        case 1:
          *stream = 0x90;               //90: one-byte nop
          break;
        case 2:
          *stream = 0x66;               //operand-size prefix
          *(stream + 1) = 0x90;         //66 90: two-byte nop
          break;
        case 3:
          *stream = 0x0f;
          *(stream + 1) = 0x1f;
          *(stream + 2) = 0x00;         //0f 1f 00: three-byte nop
          break;
        default:
          //TODO: add more cases.
          break;
    }
    stream += size;
}
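//A minimal alignment sketch (hypothetical helper, not part of this file):
//pad the code stream to a 4-byte boundary using the encodings above. Pad
//sizes greater than 3 would need the longer NOP forms the TODO refers to.
#if 0
static void align_stream_to_4_example() {
    int pad = (4 - ((int)stream & 3)) & 3;
    if (pad > 0)
        dump_nop(pad); //pad is 1..3, all handled by dump_nop above
}
#endif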