/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


/*! \file LowerHelper.cpp
    \brief This file implements helper functions for lowering.

With NCG O0: all registers are hard-coded;
With NCG O1: the lowering module uses variables that the register allocator later binds to physical registers.

register types: FS: 32-bit or 64-bit;
                XMM: SS (32-bit), SD (64-bit);
                GPR: 8-bit, 16-bit, 32-bit;
LowOpndRegType tells whether an operand is gpr, xmm or fs;
OpndSize can be OpndSize_8, OpndSize_16, OpndSize_32, OpndSize_64.

A single native instruction can use multiple physical registers.
  We can't call freeReg in the middle of emitting a native instruction,
  since it may free the physical register used by one operand and cause two operands to be allocated to the same physical register.

When allocating a physical register for an operand, we must not spill the operands that are already allocated. To avoid that, we call startNativeCode before each native instruction, which sets the flag "canSpill" to true for each physical register;
  when a physical register is allocated, we set its flag "canSpill" to false;
  at the end of each native instruction, we call endNativeCode to set the flag "canSpill" back to true.
*/
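
/* A minimal sketch of the protocol described above, modeled on dump_reg_reg
   later in this file (the helper names and the NCG O1 path are taken from
   this file; the concrete operands are illustrative only):

       startNativeCode(-1, -1);   // every physical register: canSpill = true
       freeReg(true);             // safe: no operand of this instruction emitted yet
       int srcAll = registerAlloc(type, src, isSrcPhysical, true);
       donotSpillReg(srcAll);     // srcAll: canSpill = false
       int dstAll = registerAlloc(type, dst, isDstPhysical, true); // can't evict srcAll
       lower_reg_reg(m, ATOM_NORMAL, size, srcAll, dstAll, type);
       endNativeCode();           // canSpill = true for all registers again
*/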

#include "libdex/DexOpcodes.h"
#include "libdex/DexFile.h"
#include "Lower.h"
#include "NcgAot.h"
#include "enc_wrapper.h"
#include "vm/mterp/Mterp.h"
#include "vm/mterp/common/FindInterface.h"
#include "NcgHelper.h"
#include <math.h>
#include "interp/InterpState.h"

extern "C" int64_t __divdi3(int64_t, int64_t);
extern "C" int64_t __moddi3(int64_t, int64_t);
bool isScratchPhysical;
LowOp* lirTable[200];
int num_lirs_in_table = 0;

//4 tables are defined: GPR integer ALU ops, ALU ops in the FPU, SSE floating point, SSE 64-bit integer
//the index into each table is the ALU opcode:
//add_opc,    or_opc,     adc_opc,    sbb_opc,
//and_opc,    sub_opc,    xor_opc,    cmp_opc,
//mul_opc,    imul_opc,   div_opc,    idiv_opc,
//sll_opc,    srl_opc,    sra, (SSE)
//shl_opc,    shr_opc,    sal_opc,    sar_opc, //integer shift
//neg_opc,    not_opc,    andn_opc, (SSE)
//n_alu
//!mnemonics for integer ALU operations
const Mnemonic map_of_alu_opcode_2_mnemonic[] = {
    Mnemonic_ADD,  Mnemonic_OR,   Mnemonic_ADC,  Mnemonic_SBB,
    Mnemonic_AND,  Mnemonic_SUB,  Mnemonic_XOR,  Mnemonic_CMP,
    Mnemonic_MUL,  Mnemonic_IMUL, Mnemonic_DIV,  Mnemonic_IDIV,
    Mnemonic_Null, Mnemonic_Null, Mnemonic_Null,
    Mnemonic_SHL,  Mnemonic_SHR,  Mnemonic_SAL,  Mnemonic_SAR,
    Mnemonic_NEG,  Mnemonic_NOT,  Mnemonic_Null,
    Mnemonic_Null
};
//!mnemonics for ALU operations in the FPU
const Mnemonic map_of_fpu_opcode_2_mnemonic[] = {
    Mnemonic_FADD,  Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null,  Mnemonic_FSUB,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_FMUL,  Mnemonic_Null,  Mnemonic_FDIV,  Mnemonic_Null,
    Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null
};
//!mnemonics for SSE floating point (the entries are scalar double: ADDSD etc.)
const Mnemonic map_of_sse_opcode_2_mnemonic[] = {
    Mnemonic_ADDSD,  Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null,   Mnemonic_SUBSD, Mnemonic_XORPD, Mnemonic_Null,
    Mnemonic_MULSD,  Mnemonic_Null,  Mnemonic_DIVSD,  Mnemonic_Null,
    Mnemonic_Null,   Mnemonic_Null,
    Mnemonic_Null,   Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null,   Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null
};
//!mnemonics for SSE 64-bit integer
const Mnemonic map_of_64_opcode_2_mnemonic[] = {
    Mnemonic_PADDQ, Mnemonic_POR,   Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_PAND,  Mnemonic_PSUBQ, Mnemonic_PXOR,  Mnemonic_Null,
    Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_PSLLQ, Mnemonic_PSRLQ, Mnemonic_Null,
    Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,  Mnemonic_Null,
    Mnemonic_Null,  Mnemonic_Null,  Mnemonic_PANDN,
    Mnemonic_Null
};
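
/* The tables are indexed by ALU_Opcode in the enum order listed above; e.g.,
   fpu_VR later in this file selects its mnemonic with a plain lookup.
   Illustrative sketch (assuming add_opc is the first enumerator, per the
   order above):

       Mnemonic m = map_of_fpu_opcode_2_mnemonic[add_opc];   // Mnemonic_FADD
*/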

////////////////////////////////////////////////
//!update fields of LowOpndReg

//!
void set_reg_opnd(LowOpndReg* op_reg, int reg, bool isPhysical, LowOpndRegType type) {
    op_reg->regType = type;
    if(isPhysical) {
        op_reg->logicalReg = -1;
        op_reg->physicalReg = reg;
    }
    else
        op_reg->logicalReg = reg;
    return;
}
//!update fields of LowOpndMem

//!
void set_mem_opnd(LowOpndMem* mem, int disp, int base, bool isPhysical) {
    mem->m_disp.value = disp;
    mem->hasScale = false;
    mem->m_base.regType = LowOpndRegType_gp;
    if(isPhysical) {
        mem->m_base.logicalReg = -1;
        mem->m_base.physicalReg = base;
    } else {
        mem->m_base.logicalReg = base;
    }
    return;
}
//!update fields of LowOpndMem

//!
void set_mem_opnd_scale(LowOpndMem* mem, int base, bool isPhysical, int disp, int index, bool indexPhysical, int scale) {
    mem->hasScale = true;
    mem->m_base.regType = LowOpndRegType_gp;
    if(isPhysical) {
        mem->m_base.logicalReg = -1;
        mem->m_base.physicalReg = base;
    } else {
        mem->m_base.logicalReg = base;
    }
    if(indexPhysical) {
        mem->m_index.logicalReg = -1;
        mem->m_index.physicalReg = index;
    } else {
        mem->m_index.logicalReg = index;
    }
    mem->m_disp.value = disp;
    mem->m_scale.value = scale;
    return;
}
//!return either LowOpndRegType_xmm or LowOpndRegType_gp

//!
inline LowOpndRegType getTypeFromIntSize(OpndSize size) {
    return size == OpndSize_64 ? LowOpndRegType_xmm : LowOpndRegType_gp;
}

// copied from JIT compiler
typedef struct AtomMemBlock {
    size_t bytesAllocated;
    struct AtomMemBlock *next;
    char ptr[0];
} AtomMemBlock;

#define ATOMBLOCK_DEFAULT_SIZE 4096
AtomMemBlock *atomMemHead = NULL;
AtomMemBlock *currentAtomMem = NULL;
void * atomNew(size_t size) {
    lowOpTimeStamp++; //one LowOp constructed
    if(atomMemHead == NULL) {
        atomMemHead = (AtomMemBlock*)malloc(sizeof(AtomMemBlock) + ATOMBLOCK_DEFAULT_SIZE);
        if(atomMemHead == NULL) {
            ALOGE("Memory allocation failed");
            return NULL;
        }
        currentAtomMem = atomMemHead;
        currentAtomMem->bytesAllocated = 0;
        currentAtomMem->next = NULL;
    }
    size = (size + 3) & ~3;
    if (size > ATOMBLOCK_DEFAULT_SIZE) {
        ALOGE("Requesting %zu bytes which exceed the maximal size allowed", size);
        return NULL;
    }
retry:
    if (size + currentAtomMem->bytesAllocated <= ATOMBLOCK_DEFAULT_SIZE) {
        void *ptr;
        ptr = &currentAtomMem->ptr[currentAtomMem->bytesAllocated];
        //advance the bump pointer; otherwise successive calls would hand out the same address
        currentAtomMem->bytesAllocated += size;
        return ptr;
    }
    if (currentAtomMem->next) {
        currentAtomMem = currentAtomMem->next;
        goto retry;
    }
    /* Time to allocate a new arena */
    AtomMemBlock *newAtomMem = (AtomMemBlock*)malloc(sizeof(AtomMemBlock) + ATOMBLOCK_DEFAULT_SIZE);
    if(newAtomMem == NULL) {
        ALOGE("Memory allocation failed");
        return NULL;
    }
    newAtomMem->bytesAllocated = 0;
    newAtomMem->next = NULL;
    currentAtomMem->next = newAtomMem;
    currentAtomMem = newAtomMem;
    goto retry; //the fresh block is guaranteed to fit, so the retry returns
}

void freeAtomMem() {
    //LOGI("free all atom memory");
    AtomMemBlock * tmpMem = atomMemHead;
    while(tmpMem != NULL) {
        tmpMem->bytesAllocated = 0;
        tmpMem = tmpMem->next;
    }
    currentAtomMem = atomMemHead;
}
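
/* Lifecycle sketch (illustrative, based on dump_special below and freeAtomMem
   above): LowOps are bump-allocated from the arena and the whole arena is
   recycled in one call, without returning memory to malloc:

       LowOpImm* op = (LowOpImm*)atomNew(sizeof(LowOpImm));
       // ... use op while lowering ...
       freeAtomMem();   // resets bytesAllocated in every block for reuse
*/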

LowOpImm* dump_special(AtomOpCode cc, int imm) {
    LowOpImm* op = (LowOpImm*)atomNew(sizeof(LowOpImm));
    if(op == NULL) return NULL; //atomNew already logged the failure
    op->lop.opCode = Mnemonic_NULL;
    op->lop.opCode2 = cc;
    op->lop.opnd1.type = LowOpndType_Imm;
    op->lop.numOperands = 1;
    op->immOpnd.value = imm;
    //stream = encoder_imm(m, size, imm, stream);
    return op;
}

LowOpLabel* lower_label(Mnemonic m, OpndSize size, int imm, const char* label, bool isLocal) {
    stream = encoder_imm(m, size, imm, stream);
    return NULL;
}

LowOpLabel* dump_label(Mnemonic m, OpndSize size, int imm,
               const char* label, bool isLocal) {
    return lower_label(m, size, imm, label, isLocal);
}

LowOpNCG* dump_ncg(Mnemonic m, OpndSize size, int imm) {
    stream = encoder_imm(m, size, imm, stream);
    return NULL;
}

//!update fields of LowOp and generate an x86 instruction with a single immediate operand

//!
LowOpImm* lower_imm(Mnemonic m, OpndSize size, int imm, bool updateTable) {
    stream = encoder_imm(m, size, imm, stream);
    return NULL;
}

LowOpImm* dump_imm(Mnemonic m, OpndSize size, int imm) {
    return lower_imm(m, size, imm, true);
}

LowOpImm* dump_imm_with_codeaddr(Mnemonic m, OpndSize size,
               int imm, char* codePtr) {
    encoder_imm(m, size, imm, codePtr);
    return NULL;
}

//!update fields of LowOp and generate an x86 instruction that takes a single memory operand

//!With NCG O1, we call freeReg to free up physical registers, then call registerAlloc to allocate a physical register for the memory base
LowOpMem* lower_mem(Mnemonic m, AtomOpCode m2, OpndSize size,
               int disp, int base_reg) {
    stream = encoder_mem(m, size, disp, base_reg, true, stream);
    return NULL;
}

LowOpMem* dump_mem(Mnemonic m, AtomOpCode m2, OpndSize size,
               int disp, int base_reg, bool isBasePhysical) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        freeReg(true);
        //type of the base is gpr
        int regAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        return lower_mem(m, m2, size, disp, regAll);
    } else {
        stream = encoder_mem(m, size, disp, base_reg, isBasePhysical, stream);
        return NULL;
    }
}
//!update fields of LowOp and generate an x86 instruction that takes a single reg operand

//!With NCG O1, we call freeReg to free up physical registers, then call registerAlloc to allocate a physical register for the single operand
LowOpReg* lower_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
               int reg, LowOpndRegType type) {
    stream = encoder_reg(m, size, reg, true, type, stream);
    return NULL;
}

LowOpReg* dump_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
               int reg, bool isPhysical, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        freeReg(true);
        if(m == Mnemonic_MUL || m == Mnemonic_IDIV) {
            //these two instructions use eax & edx implicitly
            touchEax();
            touchEdx();
        }
        int regAll = registerAlloc(type, reg, isPhysical, true);
        return lower_reg(m, m2, size, regAll, type);
    } else {
        stream = encoder_reg(m, size, reg, isPhysical, type, stream);
        return NULL;
    }
}
LowOpReg* dump_reg_noalloc(Mnemonic m, OpndSize size,
               int reg, bool isPhysical, LowOpndRegType type) {
    return lower_reg(m, ATOM_NORMAL, size, reg, type);
}

LowOpRegReg* lower_reg_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
                 int reg, int reg2, LowOpndRegType type) {
    if(m == Mnemonic_FUCOMP || m == Mnemonic_FUCOM) {
        stream = encoder_compare_fp_stack(m == Mnemonic_FUCOMP,
                                          reg-reg2, size==OpndSize_64, stream);
    }
    else {
        stream = encoder_reg_reg(m, size, reg, true, reg2, true, type, stream);
    }
    return NULL;
}
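
/* Note on the FUCOM/FUCOMP branch above: the two "registers" are FP stack
   slots, so reg-reg2 recovers the ST(i) index. compare_fp_stack later in this
   file passes PhysicalReg_ST0+reg and PhysicalReg_ST0, making reg-reg2 == reg
   (an illustrative reading of the existing code, not a behavior change). */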

//!update fields of LowOp and generate an x86 instruction that takes two reg operands

//!Here, both registers are physical
LowOpRegReg* dump_reg_reg_noalloc(Mnemonic m, OpndSize size,
                           int reg, bool isPhysical,
                           int reg2, bool isPhysical2, LowOpndRegType type) {
    return lower_reg_reg(m, ATOM_NORMAL, size, reg, reg2, type);
}

inline bool isMnemonicMove(Mnemonic m) {
    return (m == Mnemonic_MOV || m == Mnemonic_MOVQ ||
            m == Mnemonic_MOVSS || m == Mnemonic_MOVSD);
}
//!update fields of LowOp and generate an x86 instruction that takes two reg operands

//!here the dst reg is already allocated to a physical reg;
//! we should not spill the physical register for dst when allocating for src
LowOpRegReg* dump_reg_reg_noalloc_dst(Mnemonic m, OpndSize size,
                               int reg, bool isPhysical,
                               int reg2, bool isPhysical2, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        int regAll = registerAlloc(type, reg, isPhysical, true);
        /* remove move from one register to the same register */
        if(isMnemonicMove(m) && regAll == reg2) return NULL;
        return lower_reg_reg(m, ATOM_NORMAL, size, regAll, reg2, type);
    } else {
        stream = encoder_reg_reg(m, size, reg, isPhysical, reg2, isPhysical2, type, stream);
        return NULL;
    }
}
//!update fields of LowOp and generate an x86 instruction that takes two reg operands

//!here the src reg is already allocated to a physical reg
LowOpRegReg* dump_reg_reg_noalloc_src(Mnemonic m, AtomOpCode m2, OpndSize size,
                               int reg, bool isPhysical,
                               int reg2, bool isPhysical2, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        int regAll2;
        if(isMnemonicMove(m) && checkTempReg2(reg2, type, isPhysical2, reg)) { //dst reg is logical
            //only from get_virtual_reg_all
            regAll2 = registerAllocMove(reg2, type, isPhysical2, reg);
        } else {
            regAll2 = registerAlloc(type, reg2, isPhysical2, true);
            return lower_reg_reg(m, m2, size, reg, regAll2, type);
        }
    } else {
        stream = encoder_reg_reg(m, size, reg, isPhysical, reg2, isPhysical2, type, stream);
        return NULL;
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes two reg operands

//!
LowOpRegReg* dump_reg_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
                   int reg, bool isPhysical,
                   int reg2, bool isPhysical2, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        //reg is source if m is MOV
        freeReg(true);
        int regAll = registerAlloc(type, reg, isPhysical, true);
        int regAll2;
        LowOpRegReg* op = NULL;
#ifdef MOVE_OPT2
        if(isMnemonicMove(m) &&
           ((reg != PhysicalReg_EDI && reg != PhysicalReg_ESP && reg != PhysicalReg_EBP) || (!isPhysical)) &&
           isPhysical2 == false) { //dst reg is logical
            //called from move_reg_to_reg
            regAll2 = registerAllocMove(reg2, type, isPhysical2, regAll);
        } else {
#endif
            donotSpillReg(regAll);
            regAll2 = registerAlloc(type, reg2, isPhysical2, true);
            op = lower_reg_reg(m, m2, size, regAll, regAll2, type);
#ifdef MOVE_OPT2
        }
#endif
        endNativeCode();
        return op;
    }
    else {
        stream = encoder_reg_reg(m, size, reg, isPhysical, reg2, isPhysical2, type, stream);
    }
    return NULL;
}

LowOpRegMem* lower_mem_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
                 int disp, int base_reg,
                 MemoryAccessType mType, int mIndex,
                 int reg, LowOpndRegType type, bool isMoves) {
    if(m == Mnemonic_MOVSX) {
        stream = encoder_moves_mem_to_reg(size, disp, base_reg, true,
                                          reg, true, stream);
    }
    else if(m == Mnemonic_MOVZX) {
        stream = encoder_movez_mem_to_reg(size, disp, base_reg, true,
                                          reg, true, stream);
    }
    else {
        stream = encoder_mem_reg(m, size, disp, base_reg, true,
                                 reg, true, type, stream);
    }
    return NULL;
}

//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!Here, operands are already allocated to physical registers
LowOpRegMem* dump_mem_reg_noalloc(Mnemonic m, OpndSize size,
                           int disp, int base_reg, bool isBasePhysical,
                           MemoryAccessType mType, int mIndex,
                           int reg, bool isPhysical, LowOpndRegType type) {
    return lower_mem_reg(m, ATOM_NORMAL, size, disp, base_reg, mType, mIndex, reg, type, false);
}
//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!Here, the memory operand is already allocated to a physical register
LowOpRegMem* dump_mem_reg_noalloc_mem(Mnemonic m, AtomOpCode m2, OpndSize size,
                               int disp, int base_reg, bool isBasePhysical,
                               MemoryAccessType mType, int mIndex,
                               int reg, bool isPhysical, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        int regAll = registerAlloc(type, reg, isPhysical, true);
        return lower_mem_reg(m, m2, size, disp, base_reg, mType, mIndex, regAll, type, false);
    } else {
        stream = encoder_mem_reg(m, size, disp, base_reg, isBasePhysical,
                                 reg, isPhysical, type, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!
LowOpRegMem* dump_mem_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
                   int disp, int base_reg, bool isBasePhysical,
                   MemoryAccessType mType, int mIndex,
                   int reg, bool isPhysical, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        //it is okay to use the same physical register
        if(isMnemonicMove(m)) {
            freeReg(true);
        } else {
            donotSpillReg(baseAll);
        }
        int regAll = registerAlloc(type, reg, isPhysical, true);
        endNativeCode();
        return lower_mem_reg(m, m2, size, disp, baseAll, mType, mIndex, regAll, type, false);
    } else {
        stream = encoder_mem_reg(m, size, disp, base_reg, isBasePhysical,
                                 reg, isPhysical, type, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!
LowOpRegMem* dump_moves_mem_reg(Mnemonic m, OpndSize size,
                         int disp, int base_reg, bool isBasePhysical,
             int reg, bool isPhysical) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        donotSpillReg(baseAll);
        int regAll = registerAlloc(LowOpndRegType_gp, reg, isPhysical, true);
        endNativeCode();
        return lower_mem_reg(m, ATOM_NORMAL, size, disp, baseAll, MemoryAccess_Unknown, -1,
            regAll, LowOpndRegType_gp, true/*moves*/);
    } else {
        stream = encoder_moves_mem_to_reg(size, disp, base_reg, isBasePhysical, reg, isPhysical, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!
LowOpRegMem* dump_movez_mem_reg(Mnemonic m, OpndSize size,
             int disp, int base_reg, bool isBasePhysical,
             int reg, bool isPhysical) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        donotSpillReg(baseAll);
        int regAll = registerAlloc(LowOpndRegType_gp, reg, isPhysical, true);
        endNativeCode();
        return lower_mem_reg(m, ATOM_NORMAL, size, disp, baseAll, MemoryAccess_Unknown, -1,
            regAll, LowOpndRegType_gp, true/*moves*/);
    } else {
        stream = encoder_movez_mem_to_reg(size, disp, base_reg, isBasePhysical, reg, isPhysical, stream);
    }
    return NULL;
}

//!update fields of LowOp and generate an x86 instruction that takes two reg operands

//!
LowOpRegReg* dump_movez_reg_reg(Mnemonic m, OpndSize size,
             int reg, bool isPhysical,
             int reg2, bool isPhysical2) {
    LowOpRegReg* op = (LowOpRegReg*)atomNew(sizeof(LowOpRegReg));
    if(op == NULL) return NULL; //atomNew already logged the failure
    op->lop.opCode = m;
    op->lop.opnd1.size = OpndSize_32;
    op->lop.opnd1.type = LowOpndType_Reg;
    op->lop.opnd2.size = size;
    op->lop.opnd2.type = LowOpndType_Reg;
    set_reg_opnd(&(op->regOpnd1), reg2, isPhysical2, LowOpndRegType_gp);
    set_reg_opnd(&(op->regOpnd2), reg, isPhysical, LowOpndRegType_gp);
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        //reg is source if m is MOV
        freeReg(true);
        int regAll = registerAlloc(LowOpndRegType_gp, reg, isPhysical, true);
        donotSpillReg(regAll);
        int regAll2 = registerAlloc(LowOpndRegType_gp, reg2, isPhysical2, true);
        stream = encoder_movez_reg_to_reg(size, regAll, true, regAll2, true,
                                          LowOpndRegType_gp, stream);
        endNativeCode();
    }
    else {
        stream = encoder_movez_reg_to_reg(size, reg, isPhysical, reg2,
                                        isPhysical2, LowOpndRegType_gp, stream);
    }
    return NULL;
}

//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!
LowOpRegMem* lower_mem_scale_reg(Mnemonic m, OpndSize size, int base_reg, int disp, int index_reg,
                 int scale, int reg, LowOpndRegType type) {
    bool isMovzs = (m == Mnemonic_MOVZX || m == Mnemonic_MOVSX);
    if(isMovzs)
        stream = encoder_movzs_mem_disp_scale_reg(m, size, base_reg, true, disp, index_reg, true,
                                                  scale, reg, true, type, stream);
    else {
        if(disp == 0)
            stream = encoder_mem_scale_reg(m, size, base_reg, true, index_reg, true,
                                           scale, reg, true, type, stream);
        else
            stream = encoder_mem_disp_scale_reg(m, size, base_reg, true, disp, index_reg, true,
                                                scale, reg, true, type, stream);
    }
    return NULL;
}

LowOpRegMem* dump_mem_scale_reg(Mnemonic m, OpndSize size,
                         int base_reg, bool isBasePhysical, int disp, int index_reg, bool isIndexPhysical, int scale,
                         int reg, bool isPhysical, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        donotSpillReg(baseAll); //make sure index will not use the same physical reg
        int indexAll = registerAlloc(LowOpndRegType_gp, index_reg, isIndexPhysical, true);
        if(isMnemonicMove(m)) {
            freeReg(true);
            doSpillReg(baseAll); //base can be used now
        } else {
            donotSpillReg(indexAll);
        }
        bool isMovzs = (m == Mnemonic_MOVZX || m == Mnemonic_MOVSX);
        int regAll = registerAlloc(isMovzs ? LowOpndRegType_gp : type, reg, isPhysical, true);
        endNativeCode();
        return lower_mem_scale_reg(m, size, baseAll, disp, indexAll, scale, regAll, type);
    } else {
        stream = encoder_mem_scale_reg(m, size, base_reg, isBasePhysical, index_reg,
                                       isIndexPhysical, scale, reg, isPhysical, type, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!
LowOpMemReg* lower_reg_mem_scale(Mnemonic m, OpndSize size, int reg,
                 int base_reg, int disp, int index_reg, int scale, LowOpndRegType type) {
    if(disp == 0)
        stream = encoder_reg_mem_scale(m, size, reg, true, base_reg, true,
                                       index_reg, true, scale, type, stream);
    else
        stream = encoder_reg_mem_disp_scale(m, size, reg, true, base_reg, true,
                                            disp, index_reg, true, scale, type, stream);
    return NULL;
}

LowOpMemReg* dump_reg_mem_scale(Mnemonic m, OpndSize size,
                         int reg, bool isPhysical,
                         int base_reg, bool isBasePhysical, int disp, int index_reg, bool isIndexPhysical, int scale,
                         LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        donotSpillReg(baseAll);
        int indexAll = registerAlloc(LowOpndRegType_gp, index_reg, isIndexPhysical, true);
        donotSpillReg(indexAll);
        int regAll = registerAlloc(type, reg, isPhysical, true);
        endNativeCode();
        return lower_reg_mem_scale(m, size, regAll, baseAll, disp, indexAll, scale, type);
    } else {
        stream = encoder_reg_mem_scale(m, size, reg, isPhysical, base_reg, isBasePhysical,
                                       index_reg, isIndexPhysical, scale, type, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!Here operands are already allocated
LowOpMemReg* lower_reg_mem(Mnemonic m, AtomOpCode m2, OpndSize size, int reg,
                 int disp, int base_reg, MemoryAccessType mType, int mIndex,
                 LowOpndRegType type) {
    stream = encoder_reg_mem(m, size, reg, true, disp, base_reg, true, type, stream);
    return NULL;
}

LowOpMemReg* dump_reg_mem_noalloc(Mnemonic m, OpndSize size,
                           int reg, bool isPhysical,
                           int disp, int base_reg, bool isBasePhysical,
                           MemoryAccessType mType, int mIndex, LowOpndRegType type) {
    return lower_reg_mem(m, ATOM_NORMAL, size, reg, disp, base_reg, mType, mIndex, type);
}
//!update fields of LowOp and generate an x86 instruction that takes one reg operand and one mem operand

//!
LowOpMemReg* dump_reg_mem(Mnemonic m, AtomOpCode m2, OpndSize size,
                   int reg, bool isPhysical,
                   int disp, int base_reg, bool isBasePhysical,
                   MemoryAccessType mType, int mIndex, LowOpndRegType type) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        startNativeCode(-1, -1);
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        donotSpillReg(baseAll);
        int regAll = registerAlloc(type, reg, isPhysical, true);
        endNativeCode();
        return lower_reg_mem(m, m2, size, regAll, disp, baseAll, mType, mIndex, type);
    } else {
        stream = encoder_reg_mem(m, size, reg, isPhysical, disp, base_reg, isBasePhysical, type, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes one immediate and one reg operand

//!The reg operand is already allocated
LowOpRegImm* lower_imm_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
                 int imm, int reg, LowOpndRegType type, bool chaining) {
    stream = encoder_imm_reg(m, size, imm, reg, true, type, stream);
    return NULL;
}

LowOpRegImm* dump_imm_reg_noalloc(Mnemonic m, OpndSize size,
                           int imm, int reg, bool isPhysical, LowOpndRegType type) {
    return lower_imm_reg(m, ATOM_NORMAL, size, imm, reg, type, false);
}
//!update fields of LowOp and generate an x86 instruction that takes one immediate and one reg operand

//!
LowOpRegImm* dump_imm_reg(Mnemonic m, AtomOpCode m2, OpndSize size,
                   int imm, int reg, bool isPhysical, LowOpndRegType type, bool chaining) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        freeReg(true);
        int regAll = registerAlloc(type, reg, isPhysical, true);
        return lower_imm_reg(m, m2, size, imm, regAll, type, chaining);
    } else {
        stream = encoder_imm_reg(m, size, imm, reg, isPhysical, type, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that takes one immediate and one mem operand

//!The mem operand is already allocated
LowOpMemImm* lower_imm_mem(Mnemonic m, AtomOpCode m2, OpndSize size, int imm,
                 int disp, int base_reg, MemoryAccessType mType, int mIndex,
                 bool chaining) {
    stream = encoder_imm_mem(m, size, imm, disp, base_reg, true, stream);
    return NULL;
}

LowOpMemImm* dump_imm_mem_noalloc(Mnemonic m, OpndSize size,
                           int imm,
                           int disp, int base_reg, bool isBasePhysical,
                           MemoryAccessType mType, int mIndex) {
    return lower_imm_mem(m, ATOM_NORMAL, size, imm, disp, base_reg, mType, mIndex, false);
}
//!update fields of LowOp and generate an x86 instruction that takes one immediate and one mem operand

//!
LowOpMemImm* dump_imm_mem(Mnemonic m, AtomOpCode m2, OpndSize size,
                   int imm,
                   int disp, int base_reg, bool isBasePhysical,
                   MemoryAccessType mType, int mIndex, bool chaining) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        /* do not free registers if the base is %edi, %esp, or %ebp;
           make sure dump_imm_mem will only generate a single instruction */
        if(!isBasePhysical || (base_reg != PhysicalReg_EDI &&
                               base_reg != PhysicalReg_ESP &&
                               base_reg != PhysicalReg_EBP)) {
            freeReg(true);
        }
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        return lower_imm_mem(m, m2, size, imm, disp, baseAll, mType, mIndex, chaining);
    } else {
        stream = encoder_imm_mem(m, size, imm, disp, base_reg, isBasePhysical, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that uses the FP stack and takes one mem operand

//!
LowOpMemReg* lower_fp_mem(Mnemonic m, OpndSize size, int reg,
                  int disp, int base_reg, MemoryAccessType mType, int mIndex) {
    stream = encoder_fp_mem(m, size, reg, disp, base_reg, true, stream);
    return NULL;
}

LowOpMemReg* dump_fp_mem(Mnemonic m, OpndSize size, int reg,
                  int disp, int base_reg, bool isBasePhysical,
                  MemoryAccessType mType, int mIndex) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        return lower_fp_mem(m, size, reg, disp, baseAll, mType, mIndex);
    } else {
        stream = encoder_fp_mem(m, size, reg, disp, base_reg, isBasePhysical, stream);
    }
    return NULL;
}
//!update fields of LowOp and generate an x86 instruction that uses the FP stack and takes one mem operand

//!
LowOpRegMem* lower_mem_fp(Mnemonic m, OpndSize size, int disp, int base_reg,
                 MemoryAccessType mType, int mIndex, int reg) {
    stream = encoder_mem_fp(m, size, disp, base_reg, true, reg, stream);
    return NULL;
}

LowOpRegMem* dump_mem_fp(Mnemonic m, OpndSize size,
                  int disp, int base_reg, bool isBasePhysical,
                  MemoryAccessType mType, int mIndex,
                  int reg) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        freeReg(true);
        int baseAll = registerAlloc(LowOpndRegType_gp, base_reg, isBasePhysical, true);
        return lower_mem_fp(m, size, disp, baseAll, mType, mIndex, reg);
    } else {
        stream = encoder_mem_fp(m, size, disp, base_reg, isBasePhysical, reg, stream);
    }
    return NULL;
}
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
//OPERAND ORDER:
//LowOp uses the same operand order as EncoderBase: destination first;
//the parameter order of the helper functions is: source first

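/* Example (illustrative, using load_effective_addr below): the helper takes
   the memory source first and the destination register last,

       load_effective_addr(disp, base_reg, isBasePhysical, reg, isPhysical);

   while the emitted assembly is written destination-first:
   lea reg, [base_reg+disp]. */
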
////////////////////////////////// IA32 native instructions //////////////
//! generate a native instruction lea

//!
void load_effective_addr(int disp, int base_reg, bool isBasePhysical,
                          int reg, bool isPhysical) {
    Mnemonic m = Mnemonic_LEA;
    dump_mem_reg(m, ATOM_NORMAL, OpndSize_32, disp, base_reg, isBasePhysical,
        MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_gp);
}
//! generate a native instruction lea

//!
void load_effective_addr_scale(int base_reg, bool isBasePhysical,
                int index_reg, bool isIndexPhysical, int scale,
                int reg, bool isPhysical) {
    Mnemonic m = Mnemonic_LEA;
    dump_mem_scale_reg(m, OpndSize_32,
                              base_reg, isBasePhysical, 0/*disp*/, index_reg, isIndexPhysical, scale,
                              reg, isPhysical, LowOpndRegType_gp);
}
//!fldcw

//!
void load_fpu_cw(int disp, int base_reg, bool isBasePhysical) {
    Mnemonic m = Mnemonic_FLDCW;
    dump_mem(m, ATOM_NORMAL, OpndSize_16, disp, base_reg, isBasePhysical);
}
//!fnstcw

//!
void store_fpu_cw(bool checkException, int disp, int base_reg, bool isBasePhysical) {
    assert(!checkException);
    Mnemonic m = Mnemonic_FNSTCW;
    dump_mem(m, ATOM_NORMAL, OpndSize_16, disp, base_reg, isBasePhysical);
}
//!cdq

//!
void convert_integer(OpndSize srcSize, OpndSize dstSize) { //cbw, cwd, cdq
    assert(srcSize == OpndSize_32 && dstSize == OpndSize_64);
    Mnemonic m = Mnemonic_CDQ;
    dump_reg_reg(m, ATOM_NORMAL, OpndSize_32, PhysicalReg_EAX, true, PhysicalReg_EDX, true, LowOpndRegType_gp);
}
//!fld: load from memory (float or double) to the FP stack

//!
void load_fp_stack(LowOp* op, OpndSize size, int disp, int base_reg, bool isBasePhysical) {//fld(s|l)
    Mnemonic m = Mnemonic_FLD;
    dump_mem_fp(m, size, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, 0); //ST0
}
//! fild: load from memory (int or long) to the FP stack

//!
void load_int_fp_stack(OpndSize size, int disp, int base_reg, bool isBasePhysical) {//fild(ll|l)
    Mnemonic m = Mnemonic_FILD;
    dump_mem_fp(m, size, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, 0); //ST0
}
//!fild: load from memory (absolute addr)

//!
void load_int_fp_stack_imm(OpndSize size, int imm) {//fild(ll|l)
    return load_int_fp_stack(size, imm, PhysicalReg_Null, true);
}
//!fst: store from the FP stack to memory (float or double)

//!
void store_fp_stack(LowOp* op, bool pop, OpndSize size, int disp, int base_reg, bool isBasePhysical) {//fst(p)(s|l)
    Mnemonic m = pop ? Mnemonic_FSTP : Mnemonic_FST;
    dump_fp_mem(m, size, 0, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1);
}
//!fist: store from the FP stack to memory (int or long)

//!
void store_int_fp_stack(LowOp* op, bool pop, OpndSize size, int disp, int base_reg, bool isBasePhysical) {//fist(p)(l)
    Mnemonic m = pop ? Mnemonic_FISTP : Mnemonic_FIST;
    dump_fp_mem(m, size, 0, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1);
}
//!cmp reg, mem

//!
void compare_reg_mem(LowOp* op, OpndSize size, int reg, bool isPhysical,
              int disp, int base_reg, bool isBasePhysical) {
    Mnemonic m = Mnemonic_CMP;
    dump_reg_mem(m, ATOM_NORMAL, size, reg, isPhysical, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, getTypeFromIntSize(size));
}
//!cmp mem, reg

//!
void compare_mem_reg(OpndSize size,
              int disp, int base_reg, bool isBasePhysical,
              int reg, bool isPhysical) {
    Mnemonic m = Mnemonic_CMP;
    dump_mem_reg(m, ATOM_NORMAL, size, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, reg, isPhysical, getTypeFromIntSize(size));
}
//! compare a VR with a temporary variable

//!
void compare_VR_reg_all(OpndSize size,
             int vA,
             int reg, bool isPhysical, Mnemonic m) {
    LowOpndRegType type = getTypeFromIntSize(size);
    LowOpndRegType pType = type;
    if(m == Mnemonic_COMISS) {
        size = OpndSize_32;
        type = LowOpndRegType_ss;
        pType = LowOpndRegType_xmm;
    }
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        int tmpValue[2];
        int isConst = isVirtualRegConstant(vA, type, tmpValue, true/*updateRefCount*/);
        if(isConst == 3) {
            if(m == Mnemonic_COMISS) {
#ifdef DEBUG_NCG_O1
                LOGI("VR is const and SS in compare_VR_reg");
#endif
                dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
                //dumpImmToMem(vA+1, OpndSize_32, 0); //CHECK necessary? will overwrite vA+1!!!
                dump_mem_reg(m, ATOM_NORMAL, size, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA, reg, isPhysical, pType);
                return;
            }
            else if(size != OpndSize_64) {
#ifdef DEBUG_NCG_O1
                LOGI("VR is const and 32 bits in compare_VR_reg");
#endif
                dump_imm_reg(m, ATOM_NORMAL, size, tmpValue[0], reg, isPhysical, pType, false);
                return;
            }
            else if(size == OpndSize_64) {
#ifdef DEBUG_NCG_O1
                LOGI("VR is const and 64 bits in compare_VR_reg");
#endif
                dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
                dumpImmToMem(vA+1, OpndSize_32, tmpValue[1]);
                dump_mem_reg(m, ATOM_NORMAL, size, 4*vA, PhysicalReg_FP, true,
                    MemoryAccess_VR, vA, reg, isPhysical, pType);
                return;
            }
        }
        if(isConst == 1) dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
        if(isConst == 2) dumpImmToMem(vA+1, OpndSize_32, tmpValue[1]);
        freeReg(true);
        int regAll = checkVirtualReg(vA, type, 0/*do not update*/);
        if(regAll != PhysicalReg_Null) { //do not spill regAll when allocating register for dst
            startNativeCode(-1, -1);
            donotSpillReg(regAll);
            dump_reg_reg_noalloc_src(m, ATOM_NORMAL, size, regAll, true, reg, isPhysical, pType);
            endNativeCode();
        }
        else {
            //virtual register is not allocated to a physical register
            dump_mem_reg_noalloc_mem(m, ATOM_NORMAL, size, 4*vA, PhysicalReg_FP, true,
                MemoryAccess_VR, vA, reg, isPhysical, pType);
        }
        updateRefCount(vA, type);
        return;
    } else {
        dump_mem_reg(m, ATOM_NORMAL, size, 4*vA, PhysicalReg_FP, true,
            MemoryAccess_VR, vA, reg, isPhysical, pType);
        return;
    }
}
void compare_VR_reg(OpndSize size,
             int vA,
             int reg, bool isPhysical) {
    Mnemonic m = Mnemonic_CMP;
    return compare_VR_reg_all(size, vA, reg, isPhysical, m);
}
void compare_VR_ss_reg(int vA, int reg, bool isPhysical) {
    Mnemonic m = Mnemonic_COMISS;
    return compare_VR_reg_all(OpndSize_32, vA, reg, isPhysical, m);
}
void compare_VR_sd_reg(int vA, int reg, bool isPhysical) {
    Mnemonic m = Mnemonic_COMISD;
    return compare_VR_reg_all(OpndSize_64, vA, reg, isPhysical, m);
}
//!load a VR onto the FP stack

//!
void load_fp_stack_VR_all(OpndSize size, int vB, Mnemonic m) {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        //can't load from an immediate to the FP stack
        int tmpValue[2];
        int isConst = isVirtualRegConstant(vB, getTypeFromIntSize(size), tmpValue, false/*updateRefCount*/);
        if(isConst > 0) {
            if(size != OpndSize_64) {
#ifdef DEBUG_NCG_O1
                LOGI("VR is const and 32 bits in load_fp_stack");
#endif
                dumpImmToMem(vB, OpndSize_32, tmpValue[0]);
            }
            else {
#ifdef DEBUG_NCG_O1
                LOGI("VR is const and 64 bits in load_fp_stack_VR");
#endif
                if(isConst == 1 || isConst == 3) dumpImmToMem(vB, OpndSize_32, tmpValue[0]);
                if(isConst == 2 || isConst == 3) dumpImmToMem(vB+1, OpndSize_32, tmpValue[1]);
            }
        }
        else { //if the VR was updated by a def of gp, a xfer point was inserted
            //if the VR was updated by a def of xmm, a xfer point was inserted
#if 0
            int regAll = checkVirtualReg(vB, size, 1);
            if(regAll != PhysicalReg_Null) //dump from register to memory
                dump_reg_mem_noalloc(m, size, regAll, true, 4*vB, PhysicalReg_FP, true,
                    MemoryAccess_VR, vB, getTypeFromIntSize(size));
#endif
        }
        dump_mem_fp(m, size, 4*vB, PhysicalReg_FP, true, MemoryAccess_VR, vB, 0);
    } else {
        dump_mem_fp(m, size, 4*vB, PhysicalReg_FP, true, MemoryAccess_VR, vB, 0);
    }
}
//!load a VR (float or double) onto the FP stack

//!
void load_fp_stack_VR(OpndSize size, int vA) {//fld(s|l)
    Mnemonic m = Mnemonic_FLD;
    return load_fp_stack_VR_all(size, vA, m);
}
//!load a VR (int or long) onto the FP stack

//!
void load_int_fp_stack_VR(OpndSize size, int vA) {//fild(ll|l)
    Mnemonic m = Mnemonic_FILD;
    return load_fp_stack_VR_all(size, vA, m);
}
//!store from the FP stack to a VR (float or double)

//!
void store_fp_stack_VR(bool pop, OpndSize size, int vA) {//fst(p)(s|l)
    Mnemonic m = pop ? Mnemonic_FSTP : Mnemonic_FST;
    dump_fp_mem(m, size, 0, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA);
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        if(size == OpndSize_32)
            updateVirtualReg(vA, LowOpndRegType_fs_s);
        else
            updateVirtualReg(vA, LowOpndRegType_fs);
    }
}
//!store from the FP stack to a VR (int or long)

//!
void store_int_fp_stack_VR(bool pop, OpndSize size, int vA) {//fist(p)(l)
    Mnemonic m = pop ? Mnemonic_FISTP : Mnemonic_FIST;
    dump_fp_mem(m, size, 0, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA);
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        if(size == OpndSize_32)
            updateVirtualReg(vA, LowOpndRegType_fs_s);
        else
            updateVirtualReg(vA, LowOpndRegType_fs);
    }
}
//! ALU ops in FPU, one operand is a VR

//!
void fpu_VR(ALU_Opcode opc, OpndSize size, int vA) {
    Mnemonic m = map_of_fpu_opcode_2_mnemonic[opc];
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        int tmpValue[2];
        int isConst = isVirtualRegConstant(vA, getTypeFromIntSize(size), tmpValue, false/*updateRefCount*/);
        if(isConst > 0) {
            if(size != OpndSize_64) {
                //allocate a register for dst
                dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
            }
            else {
                if((isConst == 1 || isConst == 3) && size == OpndSize_64) {
                    dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
                }
                if((isConst == 2 || isConst == 3) && size == OpndSize_64) {
                    dumpImmToMem(vA+1, OpndSize_32, tmpValue[1]);
                }
            }
        }
        if(!isInMemory(vA, size)) {
            ALOGE("fpu_VR");
        }
        dump_mem_fp(m, size, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA, 0);
    } else {
        dump_mem_fp(m, size, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA, 0);
    }
}
//! cmp imm reg

//!
void compare_imm_reg(OpndSize size, int imm,
              int reg, bool isPhysical) {
    if(imm == 0) {
        LowOpndRegType type = getTypeFromIntSize(size);
        Mnemonic m = Mnemonic_TEST;
        if(gDvm.executionMode == kExecutionModeNcgO1) {
            freeReg(true);
            int regAll = registerAlloc(type, reg, isPhysical, true);
            lower_reg_reg(m, ATOM_NORMAL, size, regAll, regAll, type);
        } else {
            stream = encoder_reg_reg(m, size, reg, isPhysical, reg, isPhysical, type, stream);
        }
        return;
    }
    Mnemonic m = Mnemonic_CMP;
    dump_imm_reg(m, ATOM_NORMAL, size, imm, reg, isPhysical, getTypeFromIntSize(size), false);
}
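/* Note on the imm == 0 path above: "test reg, reg" sets ZF/SF exactly as
   "cmp reg, 0" does, but with a shorter encoding because no immediate byte is
   emitted, e.g. 85 C0 (test eax, eax) vs. 83 F8 00 (cmp eax, 0). */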
//! cmp imm mem

//!
void compare_imm_mem(OpndSize size, int imm,
              int disp, int base_reg, bool isBasePhysical) {
    Mnemonic m = Mnemonic_CMP;
    dump_imm_mem(m, ATOM_NORMAL, size, imm, disp,
                        base_reg, isBasePhysical, MemoryAccess_Unknown, -1, false);
}
//! cmp imm VR

//!
void compare_imm_VR(OpndSize size, int imm,
             int vA) {
    Mnemonic m = Mnemonic_CMP;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        if(size != OpndSize_32) ALOGE("only 32 bits supported in compare_imm_VR");
        int tmpValue[2];
        int isConst = isVirtualRegConstant(vA, getTypeFromIntSize(size), tmpValue, false/*updateRefCount*/);
        if(isConst > 0) {
            dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
        }
        int regAll = checkVirtualReg(vA, getTypeFromIntSize(size), 0);
        if(regAll != PhysicalReg_Null)
            dump_imm_reg_noalloc(m, size, imm, regAll, true, LowOpndRegType_gp);
        else
            dump_imm_mem_noalloc(m, size, imm, 4*vA, PhysicalReg_FP, true,
                MemoryAccess_VR, vA);
        updateRefCount(vA, getTypeFromIntSize(size));
    } else {
        dump_imm_mem(m, ATOM_NORMAL, size, imm, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA, false);
    }
}
1128 //! cmp reg reg
1129 
1130 //!
compare_reg_reg(int reg1,bool isPhysical1,int reg2,bool isPhysical2)1131 void compare_reg_reg(int reg1, bool isPhysical1,
1132               int reg2, bool isPhysical2) {
1133     Mnemonic m = Mnemonic_CMP;
1134     dump_reg_reg(m, ATOM_NORMAL, OpndSize_32, reg1, isPhysical1, reg2, isPhysical2, LowOpndRegType_gp);
1135 }
compare_reg_reg_16(int reg1,bool isPhysical1,int reg2,bool isPhysical2)1136 void compare_reg_reg_16(int reg1, bool isPhysical1,
1137               int reg2, bool isPhysical2) {
1138     Mnemonic m = Mnemonic_CMP;
1139     dump_reg_reg(m, ATOM_NORMAL, OpndSize_16, reg1, isPhysical1, reg2, isPhysical2, LowOpndRegType_gp);
1140 }
1141 
1142 //! comiss mem reg
1143 
1144 //!SSE, XMM: comparison of floating point numbers
1145 void compare_ss_mem_reg(LowOp* op, int disp, int base_reg, bool isBasePhysical,
1146              int reg, bool isPhysical) {
1147     Mnemonic m = Mnemonic_COMISS;
1148     dump_mem_reg(m, ATOM_NORMAL, OpndSize_32, disp, base_reg, isBasePhysical,
1149         MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_xmm);
1150 }
1151 //! comiss reg reg
1152 
1153 //!
1154 void compare_ss_reg_with_reg(LowOp* op, int reg1, bool isPhysical1,
1155                   int reg2, bool isPhysical2) {
1156     Mnemonic m = Mnemonic_COMISS;
1157     dump_reg_reg(m,  ATOM_NORMAL, OpndSize_32, reg1, isPhysical1, reg2, isPhysical2, LowOpndRegType_xmm);
1158 }
1159 //! comisd mem reg
1160 
1161 //!
1162 void compare_sd_mem_with_reg(LowOp* op, int disp, int base_reg, bool isBasePhysical,
1163                   int reg, bool isPhysical) {
1164     Mnemonic m = Mnemonic_COMISD;
1165     dump_mem_reg(m, ATOM_NORMAL, OpndSize_64, disp, base_reg, isBasePhysical,
1166         MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_xmm);
1167 }
1168 //! comisd reg reg
1169 
1170 //!
1171 void compare_sd_reg_with_reg(LowOp* op, int reg1, bool isPhysical1,
1172                   int reg2, bool isPhysical2) {
1173     Mnemonic m = Mnemonic_COMISD;
1174     dump_reg_reg(m, ATOM_NORMAL, OpndSize_64, reg1, isPhysical1, reg2, isPhysical2, LowOpndRegType_xmm);
1175 }
1176 //! fucom[p]
1177 
1178 //!
1179 void compare_fp_stack(bool pop, int reg, bool isDouble) { //compare ST(0) with ST(reg)
1180     Mnemonic m = pop ? Mnemonic_FUCOMP : Mnemonic_FUCOM;
1181     lower_reg_reg(m, ATOM_NORMAL, isDouble ? OpndSize_64 : OpndSize_32,
1182                   PhysicalReg_ST0+reg, PhysicalReg_ST0, LowOpndRegType_fs);
1183 }
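//FUCOM compares ST(0) with ST(i) and sets the x87 status-word condition bits
//(C0/C2/C3); FUCOMP additionally pops ST(0). The result is not in EFLAGS yet, so a
//caller must transfer it (classically FNSTSW %ax followed by SAHF) before branching.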
1184 /*!
1185 \brief generate a single return instruction
1186 
1187 */
1188 LowOp* lower_return() {
1189     stream = encoder_return(stream);
1190     return NULL;
1191 }
1192 
1193 void x86_return() {
1194     lower_return();
1195 }
1196 
1197 //!test imm reg
1198 
1199 //!
1200 void test_imm_reg(OpndSize size, int imm, int reg, bool isPhysical) {
1201     dump_imm_reg(Mnemonic_TEST, ATOM_NORMAL, size, imm, reg, isPhysical, getTypeFromIntSize(size), false);
1202 }
1203 //!test imm mem
1204 
1205 //!
1206 void test_imm_mem(OpndSize size, int imm, int disp, int reg, bool isPhysical) {
1207     dump_imm_mem(Mnemonic_TEST, ATOM_NORMAL, size, imm, disp, reg, isPhysical, MemoryAccess_Unknown, -1, false);
1208 }
1209 //!alu unary op with one reg operand
1210 
1211 //!
1212 void alu_unary_reg(OpndSize size, ALU_Opcode opc, int reg, bool isPhysical) {
1213     Mnemonic m;
1214     if(size == OpndSize_64)
1215         m = map_of_64_opcode_2_mnemonic[opc];
1216     else
1217         m = map_of_alu_opcode_2_mnemonic[opc];
1218     dump_reg(m, ATOM_NORMAL_ALU, size, reg, isPhysical, getTypeFromIntSize(size));
1219 }
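//All the ALU helpers that follow share this dispatch pattern: index the ALU_Opcode
//into map_of_64_opcode_2_mnemonic for 64-bit operands or map_of_alu_opcode_2_mnemonic
//otherwise, then pass the operands to the matching dump_* routine; the _noalloc
//variants used elsewhere bypass O1 register allocation for pre-allocated operands.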
1220 //!alu unary op with one mem operand
1221 
1222 //!
1223 void alu_unary_mem(LowOp* op, OpndSize size, ALU_Opcode opc, int disp, int base_reg, bool isBasePhysical) {
1224     Mnemonic m;
1225     if(size == OpndSize_64)
1226         m = map_of_64_opcode_2_mnemonic[opc];
1227     else
1228         m = map_of_alu_opcode_2_mnemonic[opc];
1229     dump_mem(m, ATOM_NORMAL_ALU, size, disp, base_reg, isBasePhysical);
1230 }
1231 //!alu binary op with immediate and one mem operand
1232 
1233 //!
1234 void alu_binary_imm_mem(OpndSize size, ALU_Opcode opc, int imm, int disp, int base_reg, bool isBasePhysical) {
1235     Mnemonic m;
1236     if(size == OpndSize_64)
1237         m = map_of_64_opcode_2_mnemonic[opc];
1238     else
1239         m = map_of_alu_opcode_2_mnemonic[opc];
1240     dump_imm_mem(m, ATOM_NORMAL_ALU, size, imm, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, false);
1241 }
1242 //!alu binary op with immediate and one reg operand
1243 
1244 //!
1245 void alu_binary_imm_reg(OpndSize size, ALU_Opcode opc, int imm, int reg, bool isPhysical) {
1246     Mnemonic m;
1247     if(size == OpndSize_64)
1248         m = map_of_64_opcode_2_mnemonic[opc];
1249     else
1250         m = map_of_alu_opcode_2_mnemonic[opc];
1251     dump_imm_reg(m, ATOM_NORMAL_ALU, size, imm, reg, isPhysical, getTypeFromIntSize(size), false);
1252 }
1253 //!alu binary op with one mem operand and one reg operand
1254 
1255 //!
1256 void alu_binary_mem_reg(OpndSize size, ALU_Opcode opc,
1257              int disp, int base_reg, bool isBasePhysical,
1258              int reg, bool isPhysical) {
1259     Mnemonic m;
1260     if(size == OpndSize_64)
1261         m = map_of_64_opcode_2_mnemonic[opc];
1262     else
1263         m = map_of_alu_opcode_2_mnemonic[opc];
1264     dump_mem_reg(m, ATOM_NORMAL_ALU, size, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, reg, isPhysical, getTypeFromIntSize(size));
1265 }
1266 
1267 void alu_sd_binary_VR_reg(ALU_Opcode opc, int vA, int reg, bool isPhysical, bool isSD) {
1268     Mnemonic m;
1269     if(isSD) m = map_of_sse_opcode_2_mnemonic[opc];
1270     else m = (Mnemonic)(map_of_sse_opcode_2_mnemonic[opc]+1); //from SD to SS
1271     OpndSize size = isSD ? OpndSize_64 : OpndSize_32;
1272     if(gDvm.executionMode == kExecutionModeNcgO1) {
1273         LowOpndRegType type = isSD ? LowOpndRegType_xmm : LowOpndRegType_ss; //type of the mem operand
1274         int tmpValue[2];
1275         int isConst = isVirtualRegConstant(vA, type, tmpValue,
1276                           true/*updateRefCount*/);
1277         if(isConst == 3 && !isSD) {
1278             //isConst can be 0 or 3, mem32, use xmm
1279             dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
1280             dump_mem_reg(m, ATOM_NORMAL_ALU, OpndSize_32, 4*vA, PhysicalReg_FP, true,
1281                        MemoryAccess_VR, vA, reg, isPhysical,
1282                        LowOpndRegType_xmm);
1283             return;
1284         }
1285         if(isConst == 3 && isSD) {
1286             dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
1287             dumpImmToMem(vA+1, OpndSize_32, tmpValue[1]);
1288             dump_mem_reg(m, ATOM_NORMAL_ALU, OpndSize_64, 4*vA, PhysicalReg_FP, true,
1289                        MemoryAccess_VR, vA, reg, isPhysical, LowOpndRegType_xmm);
1290             return;
1291         }
1292         if(isConst == 1) dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
1293         if(isConst == 2) dumpImmToMem(vA+1, OpndSize_32, tmpValue[1]);
1294         freeReg(true);
1295 
1296         int regAll = checkVirtualReg(vA, type, 0/*do not update refCount*/);
1297         if(regAll != PhysicalReg_Null) {
1298             startNativeCode(-1, -1); //should we use vA, type
1299             //CHECK: callupdateVRAtUse
1300             donotSpillReg(regAll);
1301             dump_reg_reg_noalloc_src(m, ATOM_NORMAL_ALU, size, regAll, true, reg,
1302                          isPhysical, LowOpndRegType_xmm);
1303             endNativeCode();
1304         }
1305         else {
1306             dump_mem_reg_noalloc_mem(m, ATOM_NORMAL_ALU, size, 4*vA, PhysicalReg_FP, true,
1307                          MemoryAccess_VR, vA, reg, isPhysical, LowOpndRegType_xmm);
1308         }
1309         updateRefCount(vA, type);
1310     }
1311     else {
1312         dump_mem_reg(m, ATOM_NORMAL, size, 4*vA, PhysicalReg_FP, true,
1313                     MemoryAccess_VR, vA, reg, isPhysical, LowOpndRegType_xmm);
1314     }
1315 }
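//The "(Mnemonic)(map_of_sse_opcode_2_mnemonic[opc]+1)" trick used above converts a
//scalar-double mnemonic to its scalar-single counterpart; it assumes the Mnemonic
//enum lists each SS variant immediately after its SD variant, so any reordering of
//the enum would silently select the wrong instruction.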
1316 
1317 //!alu binary op with a VR and one reg operand
1318 
1319 //!
1320 void alu_binary_VR_reg(OpndSize size, ALU_Opcode opc, int vA, int reg, bool isPhysical) {
1321     Mnemonic m;
1322     if(size == OpndSize_64)
1323         m = map_of_64_opcode_2_mnemonic[opc];
1324     else
1325         m = map_of_alu_opcode_2_mnemonic[opc];
1326     if(gDvm.executionMode == kExecutionModeNcgO1) {
1327         int tmpValue[2];
1328         int isConst = isVirtualRegConstant(vA, getTypeFromIntSize(size), tmpValue,
1329                           true/*updateRefCount*/);
1330         if(isConst == 3 && size != OpndSize_64) {
1331             //allocate a register for dst
1332             dump_imm_reg(m, ATOM_NORMAL_ALU, size, tmpValue[0], reg, isPhysical,
1333                        getTypeFromIntSize(size), false);
1334             return;
1335         }
1336         if(isConst == 3 && size == OpndSize_64) {
1337             dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
1338             dumpImmToMem(vA+1, OpndSize_32, tmpValue[1]);
1339             dump_mem_reg(m, ATOM_NORMAL_ALU, size, 4*vA, PhysicalReg_FP, true,
1340                 MemoryAccess_VR, vA, reg, isPhysical, getTypeFromIntSize(size));
1341             return;
1342         }
1343         if(isConst == 1) dumpImmToMem(vA, OpndSize_32, tmpValue[0]);
1344         if(isConst == 2) dumpImmToMem(vA+1, OpndSize_32, tmpValue[1]);
1345 
1346         freeReg(true);
1347         int regAll = checkVirtualReg(vA, getTypeFromIntSize(size), 0);
1348         if(regAll != PhysicalReg_Null) {
1349             startNativeCode(-1, -1);
1350             donotSpillReg(regAll);
1351             dump_reg_reg_noalloc_src(m, ATOM_NORMAL_ALU, size, regAll, true, reg,
1352                          isPhysical, getTypeFromIntSize(size));
1353             endNativeCode();
1354         }
1355         else {
1356             dump_mem_reg_noalloc_mem(m, ATOM_NORMAL_ALU, size, 4*vA, PhysicalReg_FP, true,
1357                 MemoryAccess_VR, vA, reg, isPhysical, getTypeFromIntSize(size));
1358         }
1359         updateRefCount(vA, getTypeFromIntSize(size));
1360     }
1361     else {
1362         dump_mem_reg(m, ATOM_NORMAL, size, 4*vA, PhysicalReg_FP, true,
1363             MemoryAccess_VR, vA, reg, isPhysical, getTypeFromIntSize(size));
1364     }
1365 }
1366 //!alu binary op with two reg operands
1367 
1368 //!
1369 void alu_binary_reg_reg(OpndSize size, ALU_Opcode opc,
1370                          int reg1, bool isPhysical1,
1371                          int reg2, bool isPhysical2) {
1372     Mnemonic m;
1373     if(size == OpndSize_64)
1374         m = map_of_64_opcode_2_mnemonic[opc];
1375     else
1376         m = map_of_alu_opcode_2_mnemonic[opc];
1377     dump_reg_reg(m, ATOM_NORMAL_ALU, size, reg1, isPhysical1, reg2, isPhysical2, getTypeFromIntSize(size));
1378 }
1379 //!alu binary op with one reg operand and one mem operand
1380 
1381 //!
1382 void alu_binary_reg_mem(OpndSize size, ALU_Opcode opc,
1383              int reg, bool isPhysical,
1384              int disp, int base_reg, bool isBasePhysical) { //destination is mem!!
1385     Mnemonic m;
1386     if(size == OpndSize_64)
1387         m = map_of_64_opcode_2_mnemonic[opc];
1388     else
1389         m = map_of_alu_opcode_2_mnemonic[opc];
1390     dump_reg_mem(m, ATOM_NORMAL_ALU, size, reg, isPhysical, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, getTypeFromIntSize(size));
1391 }
1392 //!FPU ops with one mem operand
1393 
1394 //!
1395 void fpu_mem(LowOp* op, ALU_Opcode opc, OpndSize size, int disp, int base_reg, bool isBasePhysical) {
1396     Mnemonic m = map_of_fpu_opcode_2_mnemonic[opc];
1397     dump_mem_fp(m, size, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, 0);
1398 }
1399 //!SSE 32-bit ALU
1400 
1401 //!
1402 void alu_ss_binary_reg_reg(ALU_Opcode opc, int reg, bool isPhysical,
1403                 int reg2, bool isPhysical2) {
1404     Mnemonic m = (Mnemonic)(map_of_sse_opcode_2_mnemonic[opc]+1); //from SD to SS
1405     dump_reg_reg(m, ATOM_NORMAL_ALU, OpndSize_32, reg, isPhysical, reg2, isPhysical2, LowOpndRegType_xmm);
1406 }
1407 //!SSE 64-bit ALU
1408 
1409 //!
1410 void alu_sd_binary_reg_reg(ALU_Opcode opc, int reg, bool isPhysical,
1411                 int reg2, bool isPhysical2) {
1412     Mnemonic m = map_of_sse_opcode_2_mnemonic[opc];
1413     dump_reg_reg(m, ATOM_NORMAL_ALU, OpndSize_64, reg, isPhysical, reg2, isPhysical2, LowOpndRegType_xmm);
1414 }
1415 //!push reg to native stack
1416 
1417 //!
1418 void push_reg_to_stack(OpndSize size, int reg, bool isPhysical) {
1419     dump_reg(Mnemonic_PUSH, ATOM_NORMAL, size, reg, isPhysical, getTypeFromIntSize(size));
1420 }
1421 //!push mem to native stack
1422 
1423 //!
1424 void push_mem_to_stack(OpndSize size, int disp, int base_reg, bool isBasePhysical) {
1425     dump_mem(Mnemonic_PUSH, ATOM_NORMAL, size, disp, base_reg, isBasePhysical);
1426 }
1427 //!move from reg to memory
1428 
1429 //!
1430 void move_reg_to_mem(OpndSize size,
1431                       int reg, bool isPhysical,
1432                       int disp, int base_reg, bool isBasePhysical) {
1433     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1434     dump_reg_mem(m, ATOM_NORMAL, size, reg, isPhysical, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, getTypeFromIntSize(size));
1435 }
1436 //!move from reg to memory
1437 
1438 //!Operands are already allocated
1439 void move_reg_to_mem_noalloc(OpndSize size,
1440                   int reg, bool isPhysical,
1441                   int disp, int base_reg, bool isBasePhysical,
1442                   MemoryAccessType mType, int mIndex) {
1443     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1444     dump_reg_mem_noalloc(m, size, reg, isPhysical, disp, base_reg, isBasePhysical, mType, mIndex, getTypeFromIntSize(size));
1445 }
1446 //!move from memory to reg
1447 
1448 //!
1449 LowOpRegMem* move_mem_to_reg(OpndSize size,
1450                       int disp, int base_reg, bool isBasePhysical,
1451                       int reg, bool isPhysical) {
1452     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1453     return dump_mem_reg(m, ATOM_NORMAL, size, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, reg, isPhysical, getTypeFromIntSize(size));
1454 }
1455 //!move from memory to reg
1456 
1457 //!Operands are already allocated
1458 LowOpRegMem* move_mem_to_reg_noalloc(OpndSize size,
1459                   int disp, int base_reg, bool isBasePhysical,
1460                   MemoryAccessType mType, int mIndex,
1461                   int reg, bool isPhysical) {
1462     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1463     return dump_mem_reg_noalloc(m, size, disp, base_reg, isBasePhysical, mType, mIndex, reg, isPhysical, getTypeFromIntSize(size));
1464 }
1465 //!movss from memory to reg
1466 
1467 //!Operands are already allocated
1468 LowOpRegMem* move_ss_mem_to_reg_noalloc(int disp, int base_reg, bool isBasePhysical,
1469                  MemoryAccessType mType, int mIndex,
1470                  int reg, bool isPhysical) {
1471     return dump_mem_reg_noalloc(Mnemonic_MOVSS, OpndSize_32, disp, base_reg, isBasePhysical, mType, mIndex, reg, isPhysical, LowOpndRegType_xmm);
1472 }
1473 //!movss from reg to memory
1474 
1475 //!Operands are already allocated
1476 LowOpMemReg* move_ss_reg_to_mem_noalloc(int reg, bool isPhysical,
1477                  int disp, int base_reg, bool isBasePhysical,
1478                  MemoryAccessType mType, int mIndex) {
1479     return dump_reg_mem_noalloc(Mnemonic_MOVSS, OpndSize_32, reg, isPhysical, disp, base_reg, isBasePhysical, mType, mIndex, LowOpndRegType_xmm);
1480 }
1481 //!movzx from memory to reg
1482 
1483 //!
1484 void movez_mem_to_reg(OpndSize size,
1485                int disp, int base_reg, bool isBasePhysical,
1486                int reg, bool isPhysical) {
1487     Mnemonic m = Mnemonic_MOVZX;
1488     dump_movez_mem_reg(m, size, disp, base_reg, isBasePhysical, reg, isPhysical);
1489 }
1490 
1491 //!movzx from one reg to another reg
1492 
1493 //!
1494 void movez_reg_to_reg(OpndSize size,
1495                       int reg, bool isPhysical,
1496                       int reg2, bool isPhysical2) {
1497     Mnemonic m = Mnemonic_MOVZX;
1498     dump_movez_reg_reg(m, size, reg, isPhysical, reg2, isPhysical2);
1499 }
1500 
1501 void movez_mem_disp_scale_to_reg(OpndSize size,
1502                  int base_reg, bool isBasePhysical,
1503                  int disp, int index_reg, bool isIndexPhysical, int scale,
1504                  int reg, bool isPhysical) {
1505     dump_mem_scale_reg(Mnemonic_MOVZX, size, base_reg, isBasePhysical,
1506                  disp, index_reg, isIndexPhysical, scale,
1507                  reg, isPhysical, LowOpndRegType_gp);
1508 }
1509 void moves_mem_disp_scale_to_reg(OpndSize size,
1510                   int base_reg, bool isBasePhysical,
1511                   int disp, int index_reg, bool isIndexPhysical, int scale,
1512                   int reg, bool isPhysical) {
1513     dump_mem_scale_reg(Mnemonic_MOVSX, size, base_reg, isBasePhysical,
1514                   disp, index_reg, isIndexPhysical, scale,
1515                   reg, isPhysical, LowOpndRegType_gp);
1516 }
1517 
1518 //!movsx from memory to reg
1519 
1520 //!
1521 void moves_mem_to_reg(LowOp* op, OpndSize size,
1522                int disp, int base_reg, bool isBasePhysical,
1523                int reg, bool isPhysical) {
1524     Mnemonic m = Mnemonic_MOVSX;
1525     dump_moves_mem_reg(m, size, disp, base_reg, isBasePhysical, reg, isPhysical);
1526 }
1527 //!mov from one reg to another reg
1528 
1529 //!
1530 void move_reg_to_reg(OpndSize size,
1531                       int reg, bool isPhysical,
1532                       int reg2, bool isPhysical2) {
1533     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1534     dump_reg_reg(m, ATOM_NORMAL, size, reg, isPhysical, reg2, isPhysical2, getTypeFromIntSize(size));
1535 }
1536 //!mov from one reg to another reg
1537 
1538 //!Operands are already allocated
1539 void move_reg_to_reg_noalloc(OpndSize size,
1540                   int reg, bool isPhysical,
1541                   int reg2, bool isPhysical2) {
1542     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1543     dump_reg_reg_noalloc(m, size, reg, isPhysical, reg2, isPhysical2, getTypeFromIntSize(size));
1544 }
1545 //!move from memory to reg
1546 
1547 //!
1548 void move_mem_scale_to_reg(OpndSize size,
1549                 int base_reg, bool isBasePhysical, int index_reg, bool isIndexPhysical, int scale,
1550                 int reg, bool isPhysical) {
1551     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1552     dump_mem_scale_reg(m, size, base_reg, isBasePhysical, 0/*disp*/, index_reg, isIndexPhysical, scale,
1553                               reg, isPhysical, getTypeFromIntSize(size));
1554 }
1555 void move_mem_disp_scale_to_reg(OpndSize size,
1556                 int base_reg, bool isBasePhysical, int disp, int index_reg, bool isIndexPhysical, int scale,
1557                 int reg, bool isPhysical) {
1558     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1559     dump_mem_scale_reg(m, size, base_reg, isBasePhysical, disp, index_reg, isIndexPhysical, scale,
1560                               reg, isPhysical, getTypeFromIntSize(size));
1561 }
1562 //!move from reg to memory
1563 
1564 //!
1565 void move_reg_to_mem_scale(OpndSize size,
1566                 int reg, bool isPhysical,
1567                 int base_reg, bool isBasePhysical, int index_reg, bool isIndexPhysical, int scale) {
1568     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1569     dump_reg_mem_scale(m, size, reg, isPhysical,
1570                               base_reg, isBasePhysical, 0/*disp*/, index_reg, isIndexPhysical, scale,
1571                               getTypeFromIntSize(size));
1572 }
1573 void move_reg_to_mem_disp_scale(OpndSize size,
1574                 int reg, bool isPhysical,
1575                 int base_reg, bool isBasePhysical, int disp, int index_reg, bool isIndexPhysical, int scale) {
1576     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1577     dump_reg_mem_scale(m, size, reg, isPhysical,
1578                               base_reg, isBasePhysical, disp, index_reg, isIndexPhysical, scale,
1579                               getTypeFromIntSize(size));
1580 }
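//The *_scale helpers emit x86 scaled-index addressing, i.e. operands of the form
//[base_reg + index_reg*scale + disp]. As a hypothetical example, a 32-bit load of
//an array element with scale 4 would assemble to something like:
//    mov 0(%eax,%ecx,4), %edx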
1581 
1582 void move_chain_to_mem(OpndSize size, int imm,
1583                         int disp, int base_reg, bool isBasePhysical) {
1584     dump_imm_mem(Mnemonic_MOV, ATOM_NORMAL, size, imm, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, true);
1585 }
1586 
1587 //!move an immediate to memory
1588 
1589 //!
1590 void move_imm_to_mem(OpndSize size, int imm,
1591                       int disp, int base_reg, bool isBasePhysical) {
1592     assert(size != OpndSize_64);
1593     if(size == OpndSize_64) ALOGE("move_imm_to_mem with 64 bits");
1594     dump_imm_mem(Mnemonic_MOV, ATOM_NORMAL, size, imm, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, false);
1595 }
1596 //! set a VR to an immediate
1597 
1598 //!
1599 void set_VR_to_imm(u2 vA, OpndSize size, int imm) {
1600     assert(size != OpndSize_64);
1601     if(size == OpndSize_64) ALOGE("set_VR_to_imm with 64 bits");
1602     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1603     if(gDvm.executionMode == kExecutionModeNcgO1) {
1604         int regAll = checkVirtualReg(vA, getTypeFromIntSize(size), 0);
1605         if(regAll != PhysicalReg_Null) {
1606             dump_imm_reg_noalloc(m, size, imm, regAll, true, LowOpndRegType_gp);
1607             updateRefCount(vA, getTypeFromIntSize(size));
1608             updateVirtualReg(vA, getTypeFromIntSize(size));
1609             return;
1610         }
1611         //will call freeReg
1612         freeReg(true);
1613         regAll = registerAlloc(LowOpndRegType_virtual | getTypeFromIntSize(size), vA, false/*dummy*/, true);
1614         if(regAll == PhysicalReg_Null) {
1615             dump_imm_mem_noalloc(m, size, imm, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA);
1616             return;
1617         }
1618         dump_imm_reg_noalloc(m, size, imm, regAll, true, LowOpndRegType_gp);
1619         updateVirtualReg(vA, getTypeFromIntSize(size));
1620     }
1621     else {
1622         dump_imm_mem(m, ATOM_NORMAL, size, imm, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA, false);
1623     }
1624 }
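//O1-mode strategy above, in order of preference: write the immediate into the
//physical register already holding the VR; otherwise allocate a register for the
//VR and write into that; only if allocation fails is the immediate stored straight
//into the VR's home slot at 4*vA off the frame pointer. updateVirtualReg is what
//later flushes a register-resident VR back to memory.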
1625 void set_VR_to_imm_noupdateref(LowOp* op, u2 vA, OpndSize size, int imm) {
1626     return;
1627 }
1628 //! set a VR to an immediate
1629 
1630 //! Do not allocate a physical register for the VR
1631 void set_VR_to_imm_noalloc(u2 vA, OpndSize size, int imm) {
1632     assert(size != OpndSize_64);
1633     if(size == OpndSize_64) ALOGE("set_VR_to_imm_noalloc with 64 bits");
1634     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1635     dump_imm_mem_noalloc(m, size, imm, 4*vA, PhysicalReg_FP, true, MemoryAccess_VR, vA);
1636 }
1637 
1638 void move_chain_to_reg(OpndSize size, int imm, int reg, bool isPhysical) {
1639     dump_imm_reg(Mnemonic_MOV, ATOM_NORMAL, size, imm, reg, isPhysical, LowOpndRegType_gp, true);
1640 }
1641 
1642 //! move an immediate to reg
1643 
1644 //!
1645 void move_imm_to_reg(OpndSize size, int imm, int reg, bool isPhysical) {
1646     assert(size != OpndSize_64);
1647     if(size == OpndSize_64) ALOGE("move_imm_to_reg with 64 bits");
1648     Mnemonic m = Mnemonic_MOV;
1649     dump_imm_reg(m, ATOM_NORMAL, size, imm, reg, isPhysical, LowOpndRegType_gp, false);
1650 }
1651 //! move an immediate to reg
1652 
1653 //! The operand is already allocated
1654 void move_imm_to_reg_noalloc(OpndSize size, int imm, int reg, bool isPhysical) {
1655     assert(size != OpndSize_64);
1656     if(size == OpndSize_64) ALOGE("move_imm_to_reg_noalloc with 64 bits");
1657     Mnemonic m = Mnemonic_MOV;
1658     dump_imm_reg_noalloc(m, size, imm, reg, isPhysical, LowOpndRegType_gp);
1659 }
1660 //!cmov from reg to reg
1661 
1662 //!
1663 void conditional_move_reg_to_reg(OpndSize size, ConditionCode cc, int reg1, bool isPhysical1, int reg, bool isPhysical) {
1664     Mnemonic m = (Mnemonic)(Mnemonic_CMOVcc+cc);
1665     dump_reg_reg(m, ATOM_NORMAL, size, reg1, isPhysical1, reg, isPhysical, LowOpndRegType_gp);
1666 }
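//(Mnemonic)(Mnemonic_CMOVcc+cc) assumes the CMOVcc mnemonics are contiguous and
//ordered exactly like the ConditionCode enum, so Condition_E picks CMOVE,
//Condition_NE picks CMOVNE, and so on.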
1667 //!movss from memory to reg
1668 
1669 //!
1670 void move_ss_mem_to_reg(LowOp* op, int disp, int base_reg, bool isBasePhysical,
1671                          int reg, bool isPhysical) {
1672     dump_mem_reg(Mnemonic_MOVSS, ATOM_NORMAL, OpndSize_32, disp, base_reg, isBasePhysical,
1673         MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_xmm);
1674 }
1675 //!movss from reg to memory
1676 
1677 //!
1678 void move_ss_reg_to_mem(LowOp* op, int reg, bool isPhysical,
1679                          int disp, int base_reg, bool isBasePhysical) {
1680     dump_reg_mem(Mnemonic_MOVSS, ATOM_NORMAL, OpndSize_32, reg, isPhysical, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, LowOpndRegType_xmm);
1681 }
1682 //!movsd from memory to reg
1683 
1684 //!
1685 void move_sd_mem_to_reg(int disp, int base_reg, bool isBasePhysical,
1686                          int reg, bool isPhysical) {
1687     dump_mem_reg(Mnemonic_MOVSD, ATOM_NORMAL, OpndSize_64, disp, base_reg, isBasePhysical, MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_xmm);
1688 }
1689 //!movsd from reg to memory
1690 
1691 //!
1692 void move_sd_reg_to_mem(LowOp* op, int reg, bool isPhysical,
1693                          int disp, int base_reg, bool isBasePhysical) {
1694     dump_reg_mem(Mnemonic_MOVSD, ATOM_NORMAL, OpndSize_64, reg, isPhysical,
1695                         disp, base_reg, isBasePhysical,
1696                         MemoryAccess_Unknown, -1, LowOpndRegType_xmm);
1697 }
1698 //!load from VR to a temporary
1699 
1700 //!
1701 void get_virtual_reg_all(u2 vB, OpndSize size, int reg, bool isPhysical, Mnemonic m) {
1702     LowOpndRegType type = getTypeFromIntSize(size);
1703     LowOpndRegType pType = type;//gp or xmm
1704     OpndSize size2 = size;
1705     Mnemonic m2 = m;
1706     if(m == Mnemonic_MOVSS) {
1707         size = OpndSize_32;
1708         size2 = OpndSize_64;
1709         type = LowOpndRegType_ss;
1710         pType = LowOpndRegType_xmm;
1711         m2 = Mnemonic_MOVQ; //to move from one xmm register to another
1712     }
1713     if(gDvm.executionMode == kExecutionModeNcgO1) {
1714         int tmpValue[2];
1715         int isConst;
1716         isConst = isVirtualRegConstant(vB, type, tmpValue, true/*updateRefCount*/);
1717         if(isConst == 3) {
1718             if(m == Mnemonic_MOVSS) { //load 32 bits from VR
1719                 //VR is not mapped to a register but in memory
1720                 dumpImmToMem(vB, OpndSize_32, tmpValue[0]);
1721                 //temporary reg has "pType" (which is xmm)
1722                 dump_mem_reg(m, ATOM_NORMAL, size, 4*vB, PhysicalReg_FP, true,
1723                     MemoryAccess_VR, vB, reg, isPhysical, pType);
1724                 return;
1725             }
1726             else if(m == Mnemonic_MOVSD || size == OpndSize_64) {
1727                 //VR is not mapped to a register but in memory
1728                 dumpImmToMem(vB, OpndSize_32, tmpValue[0]);
1729                 dumpImmToMem(vB+1, OpndSize_32, tmpValue[1]);
1730                 dump_mem_reg(m, ATOM_NORMAL, size, 4*vB, PhysicalReg_FP, true,
1731                     MemoryAccess_VR, vB, reg, isPhysical, pType);
1732                 return;
1733             }
1734             else if(size != OpndSize_64) {
1735                 //VR is not mapped to a register
1736                 dump_imm_reg(m, ATOM_NORMAL, size, tmpValue[0], reg, isPhysical, pType, false);
1737                 return;
1738             }
1739         }
1740         if(isConst == 1) dumpImmToMem(vB, OpndSize_32, tmpValue[0]);
1741         if(isConst == 2) dumpImmToMem(vB+1, OpndSize_32, tmpValue[1]);
1742         freeReg(true);
1743         int regAll = checkVirtualReg(vB, type, 0);
1744         if(regAll != PhysicalReg_Null) {
1745             startNativeCode(vB, type);
1746             donotSpillReg(regAll);
1747             //check XFER_MEM_TO_XMM
1748             updateVRAtUse(vB, type, regAll);
1749             //temporary reg has "pType"
1750             dump_reg_reg_noalloc_src(m2, ATOM_NORMAL, size2, regAll, true, reg, isPhysical, pType); //register allocator handles assembly move
1751             endNativeCode();
1752             updateRefCount(vB, type);
1753             return;
1754         }
1755         //not allocated to a register yet, no need to check XFER_MEM_TO_XMM
1756         regAll = registerAlloc(LowOpndRegType_virtual | type, vB, false/*dummy*/, false);
1757         if(regAll == PhysicalReg_Null) {
1758             dump_mem_reg_noalloc(m, size, 4*vB, PhysicalReg_FP, true,
1759                 MemoryAccess_VR, vB, reg, isPhysical, pType);
1760             return;
1761         }
1762 
1763         //temporary reg has pType
1764         if(checkTempReg2(reg, pType, isPhysical, regAll)) {
1765             registerAllocMove(reg, pType, isPhysical, regAll);
1766             dump_mem_reg_noalloc(m, size, 4*vB, PhysicalReg_FP, true,
1767                 MemoryAccess_VR, vB, regAll, true, pType);
1768             updateRefCount(vB, type);
1769             return;
1770         }
1771         else {
1772             dump_mem_reg_noalloc(m, size, 4*vB, PhysicalReg_FP, true,
1773                 MemoryAccess_VR, vB, regAll, true, pType);
1774             //xmm with 32 bits
1775             startNativeCode(vB, type);
1776             donotSpillReg(regAll);
1777             dump_reg_reg_noalloc_src(m2, ATOM_NORMAL, size2, regAll, true, reg, isPhysical, pType);
1778             endNativeCode();
1779             updateRefCount(vB, type);
1780             return;
1781         }
1782     }
1783     else {
1784         dump_mem_reg(m, ATOM_NORMAL, size, 4*vB, PhysicalReg_FP, true,
1785             MemoryAccess_VR, vB, reg, isPhysical, pType);
1786     }
1787 }
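//Summary of the O1 path in get_virtual_reg_all: a fully-constant VR is materialized
//directly (as an immediate for GPRs, or via its home slot for SS/SD loads); a VR
//already in a physical register is copied register-to-register; otherwise the VR is
//allocated a register if possible, or read straight from memory as a last resort.
//donotSpillReg pins the source register so the move cannot spill it mid-instruction.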
1788 void get_virtual_reg(u2 vB, OpndSize size, int reg, bool isPhysical) {
1789     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1790     return get_virtual_reg_all(vB, size, reg, isPhysical, m);
1791 }
1792 void get_virtual_reg_noalloc(u2 vB, OpndSize size, int reg, bool isPhysical) {
1793     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1794     dump_mem_reg_noalloc(m, size, 4*vB, PhysicalReg_FP, true,
1795         MemoryAccess_VR, vB, reg, isPhysical, getTypeFromIntSize(size));
1796 }
1797 //3 cases: gp, xmm, ss
1798 //ss: the temporary register is xmm
1799 //!load from a temporary to a VR
1800 
1801 //!
1802 void set_virtual_reg_all(u2 vA, OpndSize size, int reg, bool isPhysical, Mnemonic m) {
1803     LowOpndRegType type = getTypeFromIntSize(size);
1804     LowOpndRegType pType = type;//gp or xmm
1805     OpndSize size2 = size;
1806     Mnemonic m2 = m;
1807     if(m == Mnemonic_MOVSS) {
1808         size = OpndSize_32;
1809         size2 = OpndSize_64;
1810         type = LowOpndRegType_ss;
1811         pType = LowOpndRegType_xmm;
1812         m2 = Mnemonic_MOVQ;
1813     }
1814     if(gDvm.executionMode == kExecutionModeNcgO1) {
1815         //3 cases
1816         //1: virtual register is already allocated to a physical register
1817         //   call dump_reg_reg_noalloc_dst
1818         //2: src reg is already allocated, VR is not yet allocated
1819         //   allocate VR to the same physical register used by src reg
1820         //   [call registerAllocMove]
1821         //3: both not yet allocated
1822         //   allocate a physical register for the VR
1823         //   then call dump_reg_reg_noalloc_dst
1824         //may need to convert from gp to xmm or the other way
1825         freeReg(true);
1826         int regAll = checkVirtualReg(vA, type, 0);
1827         if(regAll != PhysicalReg_Null)  { //case 1
1828             startNativeCode(-1, -1);
1829             donotSpillReg(regAll);
1830             dump_reg_reg_noalloc_dst(m2, size2, reg, isPhysical, regAll, true, pType); //temporary reg is "pType"
1831             endNativeCode();
1832             updateRefCount(vA, type);
1833             updateVirtualReg(vA, type); //will dump VR to memory, should happen afterwards
1834             return;
1835         }
1836         regAll = checkTempReg(reg, pType, isPhysical, vA); //vA is not used inside
1837         if(regAll != PhysicalReg_Null) { //case 2
1838             registerAllocMove(vA, LowOpndRegType_virtual | type, false, regAll);
1839             updateVirtualReg(vA, type); //will dump VR to memory, should happen afterwards
1840             return; //next native instruction starts at op
1841         }
1842         //case 3
1843         regAll = registerAlloc(LowOpndRegType_virtual | type, vA, false/*dummy*/, false);
1844         if(regAll == PhysicalReg_Null) {
1845             dump_reg_mem_noalloc(m, size, reg, isPhysical, 4*vA, PhysicalReg_FP, true,
1846                 MemoryAccess_VR, vA, pType);
1847             return;
1848         }
1849         startNativeCode(-1, -1);
1850         donotSpillReg(regAll);
1851         dump_reg_reg_noalloc_dst(m2, size2, reg, isPhysical, regAll, true, pType);
1852         endNativeCode();
1853         updateRefCount(vA, type);
1854         updateVirtualReg(vA, type);
1855     }
1856     else {
1857         dump_reg_mem(m, ATOM_NORMAL, size, reg, isPhysical, 4*vA, PhysicalReg_FP, true,
1858             MemoryAccess_VR, vA, pType);
1859     }
1860 }
1861 void set_virtual_reg(u2 vA, OpndSize size, int reg, bool isPhysical) {
1862     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1863     return set_virtual_reg_all(vA, size, reg, isPhysical, m);
1864 }
1865 void set_virtual_reg_noalloc(u2 vA, OpndSize size, int reg, bool isPhysical) {
1866     Mnemonic m = (size == OpndSize_64) ? Mnemonic_MOVQ : Mnemonic_MOV;
1867     dump_reg_mem_noalloc(m, size, reg, isPhysical, 4*vA, PhysicalReg_FP, true,
1868         MemoryAccess_VR, vA, getTypeFromIntSize(size));
1869 }
1870 void get_VR_ss(int vB, int reg, bool isPhysical) {
1871     return get_virtual_reg_all(vB, OpndSize_64, reg, isPhysical, Mnemonic_MOVSS);
1872 }
1873 void set_VR_ss(int vA, int reg, bool isPhysical) {
1874     return set_virtual_reg_all(vA, OpndSize_64, reg, isPhysical, Mnemonic_MOVSS);
1875 }
1876 void get_VR_sd(int vB, int reg, bool isPhysical) {
1877     return get_virtual_reg_all(vB, OpndSize_64, reg, isPhysical, Mnemonic_MOVSD);
1878 }
1879 void set_VR_sd(int vA, int reg, bool isPhysical) {
1880     return set_virtual_reg_all(vA, OpndSize_64, reg, isPhysical, Mnemonic_MOVSD);
1881 }
1882 ////////////////////////////////// END: IA32 native instructions //////////////
1883 //! generate native instructions to get current PC in the stack frame
1884 
1885 //!
1886 int get_currentpc(int reg, bool isPhysical) {
1887     move_mem_to_reg(OpndSize_32, -sizeofStackSaveArea+offStackSaveArea_localRefTop, PhysicalReg_FP, true, reg, isPhysical);
1888     return 1;
1889 }
1890 //!generate native code to perform null check
1891 
1892 //!This function does not export PC
1893 int simpleNullCheck(int reg, bool isPhysical, int vr) {
1894     if(isVRNullCheck(vr, OpndSize_32)) {
1895         updateRefCount2(reg, LowOpndRegType_gp, isPhysical);
1896         num_removed_nullCheck++;
1897         return 0;
1898     }
1899     compare_imm_reg(OpndSize_32, 0, reg, isPhysical);
1900     conditional_jump_global_API(Condition_E, "common_errNullObject", false);
1901     setVRNullCheck(vr, OpndSize_32);
1902     return 0;
1903 }
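//simpleNullCheck caches null checks per VR through isVRNullCheck/setVRNullCheck, so
//only the first check on a given object pays for the compare and branch; as noted
//above, this variant jumps to common_errNullObject without exporting the PC first.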
1904 
1905 /* only for O1 code generator */
1906 int boundCheck(int vr_array, int reg_array, bool isPhysical_array,
1907                int vr_index, int reg_index, bool isPhysical_index,
1908                int exceptionNum) {
1909 #ifdef BOUNDCHECK_OPT
1910     if(isVRBoundCheck(vr_array, vr_index)) {
1911         updateRefCount2(reg_array, LowOpndRegType_gp, isPhysical_array);
1912         updateRefCount2(reg_index, LowOpndRegType_gp, isPhysical_index);
1913         return 0;
1914     }
1915 #endif
1916     compare_mem_reg(OpndSize_32, offArrayObject_length,
1917                     reg_array, isPhysical_array,
1918                     reg_index, isPhysical_index);
1919 
1920     char errName[256];
1921     sprintf(errName, "common_errArrayIndex");
1922     handlePotentialException(
1923                                        Condition_NC, Condition_C,
1924                                        exceptionNum, errName);
1925 #ifdef BOUNDCHECK_OPT
1926     setVRBoundCheck(vr_array, vr_index);
1927 #endif
1928     return 0;
1929 }
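//boundCheck compares the index register against the array length stored at
//offArrayObject_length; Condition_C (a borrow from index - length) is the in-bounds
//case, so an index below length passes, while a negative index, read as a large
//unsigned value, falls into common_errArrayIndex along with genuinely out-of-range
//ones.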
1930 
1931 //!generate native code to perform null check
1932 
1933 //!
1934 int nullCheck(int reg, bool isPhysical, int exceptionNum, int vr) {
1935     char label[LABEL_SIZE];
1936 
1937     if(gDvm.executionMode == kExecutionModeNcgO1) {
1938         //nullCheck optimization is available in O1 mode only
1939         if(isVRNullCheck(vr, OpndSize_32)) {
1940             updateRefCount2(reg, LowOpndRegType_gp, isPhysical);
1941             if(exceptionNum <= 1) {
1942                 updateRefCount2(PhysicalReg_EDX, LowOpndRegType_gp, true);
1943                 updateRefCount2(PhysicalReg_EDX, LowOpndRegType_gp, true);
1944             }
1945             num_removed_nullCheck++;
1946             return 0;
1947         }
1948         compare_imm_reg(OpndSize_32, 0, reg, isPhysical);
1949         rememberState(exceptionNum);
1950         snprintf(label, LABEL_SIZE, "after_exception_%d", exceptionNum);
1951         conditional_jump(Condition_NE, label, true);
1952         if(exceptionNum > 1)
1953             nextVersionOfHardReg(PhysicalReg_EDX, 2); //next version has 2 ref count
1954         export_pc(); //use %edx
1955         constVREndOfBB();
1956         beforeCall("exception"); //dump GG, GL VRs
1957         unconditional_jump_global_API("common_errNullObject", false);
1958         insertLabel(label, true);
1959         goToState(exceptionNum);
1960         setVRNullCheck(vr, OpndSize_32);
1961     } else {
1962         compare_imm_reg(OpndSize_32, 0, reg, isPhysical);
1963         snprintf(label, LABEL_SIZE, "after_exception_%d", exceptionNum);
1964         conditional_jump(Condition_NE, label, true);
1965         export_pc(); //use %edx
1966         unconditional_jump_global_API("common_errNullObject", false);
1967         insertLabel(label, true);
1968     }
1969     return 0;
1970 }
1971 //!generate native code to handle potential exception
1972 
1973 //!
1974 int handlePotentialException(
1975                              ConditionCode code_excep, ConditionCode code_okay,
1976                              int exceptionNum, const char* errName) {
1977     char label[LABEL_SIZE];
1978 
1979     if(gDvm.executionMode == kExecutionModeNcgO1) {
1980         rememberState(exceptionNum);
1981         snprintf(label, LABEL_SIZE, "after_exception_%d", exceptionNum);
1982         conditional_jump(code_okay, label, true);
1983         if(exceptionNum > 1)
1984             nextVersionOfHardReg(PhysicalReg_EDX, 2); //next version has 2 ref count
1985         export_pc(); //use %edx
1986         constVREndOfBB();
1987         beforeCall("exception"); //dump GG, GL VRs
1988         if(!strcmp(errName, "common_throw_message")) {
1989             move_imm_to_reg(OpndSize_32, LstrInstantiationErrorPtr, PhysicalReg_ECX, true);
1990         }
1991         unconditional_jump_global_API(errName, false);
1992         insertLabel(label, true);
1993         goToState(exceptionNum);
1994     } else {
1995         snprintf(label, LABEL_SIZE, "after_exception_%d", exceptionNum);
1996         conditional_jump(code_okay, label, true);
1997         export_pc(); //use %edx
1998         if(!strcmp(errName, "common_throw_message")) {
1999             move_imm_to_reg(OpndSize_32, LstrInstantiationErrorPtr, PhysicalReg_ECX, true);
2000         }
2001         unconditional_jump_global_API(errName, false);
2002         insertLabel(label, true);
2003     }
2004     return 0;
2005 }
2006 //!generate native code to get the self pointer from glue
2007 
2008 //!It uses one scratch register
2009 int get_self_pointer(int reg, bool isPhysical) {
2010     move_mem_to_reg(OpndSize_32, offEBP_self, PhysicalReg_EBP, true, reg, isPhysical);
2011     return 0;
2012 }
2013 //!generate native code to get ResStrings from glue
2014 
2015 //!It uses two scratch registers
2016 int get_res_strings(int reg, bool isPhysical) {
2017     //if spill_loc_index > 0 || reg != NULL, use registerAlloc
2018     if(isGlueHandled(PhysicalReg_GLUE_DVMDEX)) {
2019         //if spill_loc_index > 0
2020         //  load from spilled location, update spill_loc_index & physicalReg
2021 #if 0
2022         updateRefCount2(C_SCRATCH_1, LowOpndRegType_gp, isScratchPhysical);
2023         updateRefCount2(C_SCRATCH_1, LowOpndRegType_gp, isScratchPhysical);
2024         updateRefCount2(C_SCRATCH_2, LowOpndRegType_gp, isScratchPhysical);
2025         updateRefCount2(C_SCRATCH_2, LowOpndRegType_gp, isScratchPhysical);
2026 #endif
2027         startNativeCode(-1, -1);
2028         freeReg(true);
2029         int regAll = registerAlloc(LowOpndRegType_gp, PhysicalReg_GLUE_DVMDEX, false, false/*updateRefCount*/);
2030         donotSpillReg(regAll);
2031         dump_mem_reg_noalloc_mem(Mnemonic_MOV, ATOM_NORMAL, OpndSize_32, offDvmDex_pResStrings, regAll, true, MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_gp);
2032         endNativeCode();
2033     }
2034     else
2035         {
2036             get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2037             move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.methodClassDex), C_SCRATCH_1, isScratchPhysical, C_SCRATCH_2, isScratchPhysical);
2038             //glue is not in a physical reg nor in a spilled location
2039             updateGlue(C_SCRATCH_2, isScratchPhysical, PhysicalReg_GLUE_DVMDEX); //spill_loc_index is -1, set physicalReg
2040             move_mem_to_reg(OpndSize_32, offDvmDex_pResStrings, C_SCRATCH_2, isScratchPhysical, reg, isPhysical);
2041         }
2042     return 0;
2043 }
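//get_res_classes, get_res_fields and get_res_methods below repeat this pattern and
//differ only in which DvmDex field they load; the GLUE_DVMDEX pseudo-register lets
//the allocator cache self->interpSave.methodClassDex across such lookups instead of
//reloading it through both scratch registers every time.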
2044 int get_res_classes(int reg, bool isPhysical) {
2045     //if spill_loc_index > 0 || reg != NULL, use registerAlloc
2046     if(isGlueHandled(PhysicalReg_GLUE_DVMDEX)) {
2047         //if spill_loc_index > 0
2048         //  load from spilled location, update spill_loc_index & physicalReg
2049         startNativeCode(-1, -1);
2050         freeReg(true);
2051         int regAll = registerAlloc(LowOpndRegType_gp, PhysicalReg_GLUE_DVMDEX, false, false/*updateRefCount*/);
2052         donotSpillReg(regAll);
2053         dump_mem_reg_noalloc_mem(Mnemonic_MOV, ATOM_NORMAL, OpndSize_32, offDvmDex_pResClasses, regAll, true, MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_gp);
2054         endNativeCode();
2055     }
2056     else
2057         {
2058             get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2059             move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.methodClassDex), C_SCRATCH_1, isScratchPhysical, C_SCRATCH_2, isScratchPhysical);
2060             //glue is not in a physical reg nor in a spilled location
2061             updateGlue(C_SCRATCH_2, isScratchPhysical, PhysicalReg_GLUE_DVMDEX); //spill_loc_index is -1, set physicalReg
2062             move_mem_to_reg(OpndSize_32, offDvmDex_pResClasses, C_SCRATCH_2, isScratchPhysical, reg, isPhysical);
2063         }
2064     return 0;
2065 }
2066 //!generate native code to get ResFields from glue
2067 
2068 //!It uses two scratch registers
2069 int get_res_fields(int reg, bool isPhysical) {
2070     //if spill_loc_index > 0 || reg != NULL, use registerAlloc
2071     if(isGlueHandled(PhysicalReg_GLUE_DVMDEX)) {
2072         //if spill_loc_index > 0
2073         //  load from spilled location, update spill_loc_index & physicalReg
2074         startNativeCode(-1, -1);
2075         freeReg(true);
2076         int regAll = registerAlloc(LowOpndRegType_gp, PhysicalReg_GLUE_DVMDEX, false, false/*updateRefCount*/);
2077         donotSpillReg(regAll);
2078         dump_mem_reg_noalloc_mem(Mnemonic_MOV, ATOM_NORMAL, OpndSize_32, offDvmDex_pResFields, regAll, true, MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_gp);
2079         endNativeCode();
2080     }
2081     else
2082         {
2083             get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2084             move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.methodClassDex), C_SCRATCH_1, isScratchPhysical, C_SCRATCH_2, isScratchPhysical);
2085             //glue is not in a physical reg nor in a spilled location
2086             updateGlue(C_SCRATCH_2, isScratchPhysical, PhysicalReg_GLUE_DVMDEX); //spill_loc_index is -1, set physicalReg
2087             move_mem_to_reg(OpndSize_32, offDvmDex_pResFields, C_SCRATCH_2, isScratchPhysical, reg, isPhysical);
2088         }
2089     return 0;
2090 }
2091 //!generate native code to get ResMethods from glue
2092 
2093 //!It uses two scratch registers
2094 int get_res_methods(int reg, bool isPhysical) {
2095     //if spill_loc_index > 0 || reg != NULL, use registerAlloc
2096     if(isGlueHandled(PhysicalReg_GLUE_DVMDEX)) {
2097         //if spill_loc_index > 0
2098         //  load from spilled location, update spill_loc_index & physicalReg
2099         startNativeCode(-1, -1);
2100         freeReg(true);
2101         int regAll = registerAlloc(LowOpndRegType_gp, PhysicalReg_GLUE_DVMDEX, false, false/*updateRefCount*/);
2102         donotSpillReg(regAll);
2103         dump_mem_reg_noalloc_mem(Mnemonic_MOV, ATOM_NORMAL, OpndSize_32, offDvmDex_pResMethods, regAll, true, MemoryAccess_Unknown, -1, reg, isPhysical, LowOpndRegType_gp);
2104         endNativeCode();
2105     }
2106     else
2107         {
2108             get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2109             move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.methodClassDex), C_SCRATCH_1, isScratchPhysical, C_SCRATCH_2, isScratchPhysical);
2110             //glue is not in a physical reg nor in a spilled location
2111             updateGlue(C_SCRATCH_2, isScratchPhysical, PhysicalReg_GLUE_DVMDEX); //spill_loc_index is -1, set physicalReg
2112             move_mem_to_reg(OpndSize_32, offDvmDex_pResMethods, C_SCRATCH_2, isScratchPhysical, reg, isPhysical);
2113         }
2114     return 0;
2115 }
2116 //!generate native code to get the current class object from glue
2117 
2118 //!It uses two scratch registers
2119 int get_glue_method_class(int reg, bool isPhysical) {
2120     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2121     move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.method), C_SCRATCH_1, isScratchPhysical, C_SCRATCH_2, isScratchPhysical);
2122     move_mem_to_reg(OpndSize_32, offMethod_clazz, C_SCRATCH_2, isScratchPhysical, reg, isPhysical);
2123     return 0;
2124 }
2125 //!generate native code to get the current method from glue
2126 
2127 //!It uses one scratch register
2128 int get_glue_method(int reg, bool isPhysical) {
2129     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2130     move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.method), C_SCRATCH_1, isScratchPhysical, reg, isPhysical);
2131     return 0;
2132 }
2133 //!generate native code to set the current method in glue
2134 
2135 //!It uses one scratch register
2136 int set_glue_method(int reg, bool isPhysical) {
2137     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2138     move_reg_to_mem(OpndSize_32, reg, isPhysical, offsetof(Thread, interpSave.method), C_SCRATCH_1, isScratchPhysical);
2139     return 0;
2140 }
2141 
2142 //!generate native code to get DvmDex from glue
2143 
2144 //!It uses one scratch register
2145 int get_glue_dvmdex(int reg, bool isPhysical) {
2146     //if spill_loc_index > 0 || reg != NULL, use registerAlloc
2147     if(isGlueHandled(PhysicalReg_GLUE_DVMDEX)) {
2148         //if spill_loc_index > 0
2149         //  load from spilled location, update spill_loc_index & physicalReg
2150         startNativeCode(-1, -1);
2151         freeReg(true);
2152         int regAll = registerAlloc(LowOpndRegType_gp, PhysicalReg_GLUE_DVMDEX, false, false/*updateRefCount*/);
2153         donotSpillReg(regAll);
2154         dump_reg_reg_noalloc_src(Mnemonic_MOV, ATOM_NORMAL, OpndSize_32, regAll, true,
2155                                           reg, isPhysical, LowOpndRegType_gp);
2156         endNativeCode();
2157     }
2158     else
2159         {
2160             get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2161             move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.methodClassDex), C_SCRATCH_1, isScratchPhysical, reg, isPhysical);
2162             //glue is not in a physical reg nor in a spilled location
2163             updateGlue(reg, isPhysical, PhysicalReg_GLUE_DVMDEX); //spill_loc_index is -1, set physicalReg
2164         }
2165     return 0;
2166 }
2167 //!generate native code to set DvmDex in glue
2168 
2169 //!It uses one scratch register
2170 int set_glue_dvmdex(int reg, bool isPhysical) {
2171     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2172     move_reg_to_mem(OpndSize_32, reg, isPhysical, offsetof(Thread, interpSave.methodClassDex), C_SCRATCH_1, isScratchPhysical);
2173     return 0;
2174 }
2175 //!generate native code to get SuspendCount from glue
2176 
2177 //!It uses one scratch register
2178 int get_suspendCount(int reg, bool isPhysical) {
2179     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2180     move_mem_to_reg(OpndSize_32, offsetof(Thread, suspendCount), C_SCRATCH_1, isScratchPhysical, reg, isPhysical);
2181     return 0;
2182 }
2183 
2184 //!generate native code to get retval from glue
2185 
2186 //!It uses one scratch register
2187 int get_return_value(OpndSize size, int reg, bool isPhysical) {
2188     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2189     move_mem_to_reg(size, offsetof(Thread, interpSave.retval), C_SCRATCH_1, isScratchPhysical, reg, isPhysical);
2190     return 0;
2191 }
2192 //!generate native code to set retval in glue
2193 
2194 //!It uses one scratch register
2195 int set_return_value(OpndSize size, int reg, bool isPhysical) {
2196     get_self_pointer(C_SCRATCH_1, isScratchPhysical);
2197     move_reg_to_mem(size, reg, isPhysical, offsetof(Thread, interpSave.retval), C_SCRATCH_1, isScratchPhysical);
2198     return 0;
2199 }
2200 //!generate native code to clear exception object in glue
2201 
2202 //!It uses two scratch registers
2203 int clear_exception() {
2204     get_self_pointer(C_SCRATCH_2, isScratchPhysical);
2205     move_imm_to_mem(OpndSize_32, 0, offsetof(Thread, exception), C_SCRATCH_2, isScratchPhysical);
2206     return 0;
2207 }
2208 //!generate native code to get exception object in glue
2209 
2210 //!It uses two scratch registers
get_exception(int reg,bool isPhysical)2211 int get_exception(int reg, bool isPhysical) {
2212     get_self_pointer(C_SCRATCH_2, isScratchPhysical);
2213     move_mem_to_reg(OpndSize_32, offsetof(Thread, exception), C_SCRATCH_2, isScratchPhysical, reg, isPhysical);
2214     return 0;
2215 }
2216 //!generate native code to set exception object in glue
2217 
2218 //!It uses two scratch registers
set_exception(int reg,bool isPhysical)2219 int set_exception(int reg, bool isPhysical) {
2220     get_self_pointer(C_SCRATCH_2, isScratchPhysical);
2221     move_reg_to_mem(OpndSize_32, reg, isPhysical, offsetof(Thread, exception), C_SCRATCH_2, isScratchPhysical);
2222     return 0;
2223 }
//!generate native code to save frame pointer and current PC in stack frame to glue

//!It uses two scratch registers
int save_pc_fp_to_glue() {
    get_self_pointer(C_SCRATCH_1, isScratchPhysical);
    move_reg_to_mem(OpndSize_32, PhysicalReg_FP, true, offsetof(Thread, interpSave.curFrame), C_SCRATCH_1, isScratchPhysical);

    //load currentPc from the stack save area, then store it to glue
    move_mem_to_reg(OpndSize_32, -sizeofStackSaveArea+offStackSaveArea_localRefTop, PhysicalReg_FP, true, C_SCRATCH_2, isScratchPhysical);
    move_reg_to_mem(OpndSize_32, C_SCRATCH_2, isScratchPhysical, offsetof(Thread, interpSave.pc), C_SCRATCH_1, isScratchPhysical);
    return 0;
}
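//! Added note (sketch): in C terms this is
//!     self->interpSave.curFrame = fp;
//!     self->interpSave.pc = /* Dalvik PC saved in the StackSaveArea */;
//! the saved PC shares its save-area slot with the local-ref field, which is
//! why the load above uses offStackSaveArea_localRefTop.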
//! get SaveArea pointer

//!
int savearea_from_fp(int reg, bool isPhysical) {
    load_effective_addr(-sizeofStackSaveArea, PhysicalReg_FP, true, reg, isPhysical);
    return 0;
}
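//! Added note (sketch): equivalent C, matching Dalvik's SAVEAREA_FROM_FP
//! macro, would be
//!     StackSaveArea* saveArea = (StackSaveArea*)((u1*)fp - sizeofStackSaveArea);
//! i.e. the save area sits immediately below the frame pointer.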

#ifdef DEBUG_CALL_STACK3
int call_debug_dumpSwitch() {
    typedef void (*vmHelper)(int);
    vmHelper funcPtr = debug_dumpSwitch;
    callFuncPtr((int)funcPtr, "debug_dumpSwitch");
    return 0;
}
#endif

int call_dvmQuasiAtomicSwap64() {
    typedef int64_t (*vmHelper)(int64_t, volatile int64_t*);
    vmHelper funcPtr = dvmQuasiAtomicSwap64;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmQuasiAtomicSwap64");
        callFuncPtr((int)funcPtr, "dvmQuasiAtomicSwap64");
        afterCall("dvmQuasiAtomicSwap64");
    } else {
        callFuncPtr((int)funcPtr, "dvmQuasiAtomicSwap64");
    }
    return 0;
}

int call_dvmQuasiAtomicRead64() {
    typedef int64_t (*vmHelper)(volatile const int64_t*);
    vmHelper funcPtr = dvmQuasiAtomicRead64;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmQuasiAtomicRead64");
        callFuncPtr((int)funcPtr, "dvmQuasiAtomicRead64");
        afterCall("dvmQuasiAtomicRead64");
        touchEax(); //mark edx:eax live for the 64-bit return value
        touchEdx();
    } else {
        callFuncPtr((int)funcPtr, "dvmQuasiAtomicRead64");
    }
    return 0;
}
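//! Pattern note (added commentary): each call_* helper below brackets the raw
//! callFuncPtr with beforeCall/afterCall only under NcgO1, so the O1 register
//! allocator can spill caller-save state around the call; touchEax/touchEdx
//! above mark edx:eax live because cdecl returns 64-bit values in that pair.
//! A minimal call-site sketch, following the convention used by
//! const_string_resolve further down (P_GPR_1 is a hypothetical register
//! holding the pointer argument):
#if 0
    load_effective_addr(-4, PhysicalReg_ESP, true, PhysicalReg_ESP, true); //reserve arg slot
    move_reg_to_mem(OpndSize_32, P_GPR_1, true, 0, PhysicalReg_ESP, true); //push the address
    call_dvmQuasiAtomicRead64();                                           //result in edx:eax
    load_effective_addr(4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);  //pop arg slot
#endif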

int call_dvmJitToInterpPunt() {
    typedef void (*vmHelper)(int);
    vmHelper funcPtr = dvmJitToInterpPunt;
    callFuncPtr((int)funcPtr, "dvmJitToInterpPunt");
    return 0;
}

int call_dvmJitToInterpNormal() {
    typedef void (*vmHelper)(int);
    vmHelper funcPtr = dvmJitToInterpNormal;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmJitToInterpNormal");
        callFuncPtr((int)funcPtr, "dvmJitToInterpNormal");
        afterCall("dvmJitToInterpNormal");
        touchEbx();
    } else {
        callFuncPtr((int)funcPtr, "dvmJitToInterpNormal");
    }
    return 0;
}

int call_dvmJitToInterpTraceSelectNoChain() {
    typedef void (*vmHelper)(int);
    vmHelper funcPtr = dvmJitToInterpTraceSelectNoChain;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmJitToInterpTraceSelectNoChain");
        callFuncPtr((int)funcPtr, "dvmJitToInterpTraceSelectNoChain");
        afterCall("dvmJitToInterpTraceSelectNoChain");
        touchEbx();
    } else {
        callFuncPtr((int)funcPtr, "dvmJitToInterpTraceSelectNoChain");
    }
    return 0;
}

int call_dvmJitToInterpTraceSelect() {
    typedef void (*vmHelper)(int);
    vmHelper funcPtr = dvmJitToInterpTraceSelect;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmJitToInterpTraceSelect");
        callFuncPtr((int)funcPtr, "dvmJitToInterpTraceSelect");
        afterCall("dvmJitToInterpTraceSelect");
        touchEbx();
    } else {
        callFuncPtr((int)funcPtr, "dvmJitToInterpTraceSelect");
    }
    return 0;
}

int call_dvmJitToPatchPredictedChain() {
    typedef const Method * (*vmHelper)(const Method *method,
                                       Thread *self,
                                       PredictedChainingCell *cell,
                                       const ClassObject *clazz);
    vmHelper funcPtr = dvmJitToPatchPredictedChain;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmJitToPatchPredictedChain");
        callFuncPtr((int)funcPtr, "dvmJitToPatchPredictedChain");
        afterCall("dvmJitToPatchPredictedChain");
    } else {
        callFuncPtr((int)funcPtr, "dvmJitToPatchPredictedChain");
    }
    return 0;
}

//!generate native code to call __moddi3

//!
int call_moddi3() {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("moddi3");
        callFuncPtr((intptr_t)__moddi3, "__moddi3");
        afterCall("moddi3");
    } else {
        callFuncPtr((intptr_t)__moddi3, "__moddi3");
    }
    return 0;
}
//!generate native code to call __divdi3

//!
int call_divdi3() {
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("divdi3");
        callFuncPtr((intptr_t)__divdi3, "__divdi3");
        afterCall("divdi3");
    } else {
        callFuncPtr((intptr_t)__divdi3, "__divdi3");
    }
    return 0;
}
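//! Call-site sketch (illustrative, not from the original source): __moddi3 and
//! __divdi3 take two 64-bit operands, i.e. four 32-bit cdecl stack slots; the
//! vDividend*/vDivisor* names below are hypothetical temporaries holding the
//! low/high halves of the operands.
#if 0
    load_effective_addr(-16, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, vDividendLo, false, 0, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, vDividendHi, false, 4, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, vDivisorLo, false, 8, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, vDivisorHi, false, 12, PhysicalReg_ESP, true);
    call_moddi3();                              //remainder returned in edx:eax
    load_effective_addr(16, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
#endif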

//!generate native code to call fmod

//!
int call_fmod() {
    typedef double (*libHelper)(double, double);
    libHelper funcPtr = fmod;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("fmod");
        callFuncPtr((int)funcPtr, "fmod");
        afterCall("fmod");
    } else {
        callFuncPtr((int)funcPtr, "fmod");
    }
    return 0;
}
//!generate native code to call fmodf

//!
int call_fmodf() {
    typedef float (*libHelper)(float, float);
    libHelper funcPtr = fmodf;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("fmodf");
        callFuncPtr((int)funcPtr, "fmodf");
        afterCall("fmodf");
    } else {
        callFuncPtr((int)funcPtr, "fmodf");
    }
    return 0;
}
//!generate native code to call dvmFindCatchBlock

//!
int call_dvmFindCatchBlock() {
    //int dvmFindCatchBlock(Thread* self, int relPc, Object* exception,
    //                      bool doUnroll, void** newFrame)
    typedef int (*vmHelper)(Thread*, int, Object*, bool, void**);
    vmHelper funcPtr = dvmFindCatchBlock;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmFindCatchBlock");
        callFuncPtr((int)funcPtr, "dvmFindCatchBlock");
        afterCall("dvmFindCatchBlock");
    } else {
        callFuncPtr((int)funcPtr, "dvmFindCatchBlock");
    }
    return 0;
}
//!generate native code to call dvmThrowVerificationError

//!
int call_dvmThrowVerificationError() {
    typedef void (*vmHelper)(const Method*, int, int);
    vmHelper funcPtr = dvmThrowVerificationError;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmThrowVerificationError");
        callFuncPtr((int)funcPtr, "dvmThrowVerificationError");
        afterCall("dvmThrowVerificationError");
    } else {
        callFuncPtr((int)funcPtr, "dvmThrowVerificationError");
    }
    return 0;
}

//!generate native code to call dvmResolveMethod

//!
int call_dvmResolveMethod() {
    //Method* dvmResolveMethod(const ClassObject* referrer, u4 methodIdx, MethodType methodType);
    typedef Method* (*vmHelper)(const ClassObject*, u4, MethodType);
    vmHelper funcPtr = dvmResolveMethod;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmResolveMethod");
        callFuncPtr((int)funcPtr, "dvmResolveMethod");
        afterCall("dvmResolveMethod");
    } else {
        callFuncPtr((int)funcPtr, "dvmResolveMethod");
    }
    return 0;
}
//!generate native code to call dvmResolveClass

//!
int call_dvmResolveClass() {
    //ClassObject* dvmResolveClass(const ClassObject* referrer, u4 classIdx, bool fromUnverifiedConstant)
    typedef ClassObject* (*vmHelper)(const ClassObject*, u4, bool);
    vmHelper funcPtr = dvmResolveClass;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmResolveClass");
        callFuncPtr((int)funcPtr, "dvmResolveClass");
        afterCall("dvmResolveClass");
    } else {
        callFuncPtr((int)funcPtr, "dvmResolveClass");
    }
    return 0;
}

//!generate native code to call dvmInstanceofNonTrivial

//!
int call_dvmInstanceofNonTrivial() {
    typedef int (*vmHelper)(const ClassObject*, const ClassObject*);
    vmHelper funcPtr = dvmInstanceofNonTrivial;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmInstanceofNonTrivial");
        callFuncPtr((int)funcPtr, "dvmInstanceofNonTrivial");
        afterCall("dvmInstanceofNonTrivial");
    } else {
        callFuncPtr((int)funcPtr, "dvmInstanceofNonTrivial");
    }
    return 0;
}
//!generate native code to call dvmThrowException

//!
int call_dvmThrow() {
    typedef void (*vmHelper)(ClassObject* exceptionClass, const char*);
    vmHelper funcPtr = dvmThrowException;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmThrowException");
        callFuncPtr((int)funcPtr, "dvmThrowException");
        afterCall("dvmThrowException");
    } else {
        callFuncPtr((int)funcPtr, "dvmThrowException");
    }
    return 0;
}
//!generate native code to call dvmThrowExceptionWithClassMessage

//!
int call_dvmThrowWithMessage() {
    typedef void (*vmHelper)(ClassObject* exceptionClass, const char*);
    vmHelper funcPtr = dvmThrowExceptionWithClassMessage;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmThrowExceptionWithClassMessage");
        callFuncPtr((int)funcPtr, "dvmThrowExceptionWithClassMessage");
        afterCall("dvmThrowExceptionWithClassMessage");
    } else {
        callFuncPtr((int)funcPtr, "dvmThrowExceptionWithClassMessage");
    }
    return 0;
}
//!generate native code to call dvmCheckSuspendPending

//!
int call_dvmCheckSuspendPending() {
    typedef bool (*vmHelper)(Thread*);
    vmHelper funcPtr = dvmCheckSuspendPending;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmCheckSuspendPending");
        callFuncPtr((int)funcPtr, "dvmCheckSuspendPending");
        afterCall("dvmCheckSuspendPending");
    } else {
        callFuncPtr((int)funcPtr, "dvmCheckSuspendPending");
    }
    return 0;
}
//!generate native code to call dvmLockObject

//!
int call_dvmLockObject() {
    typedef void (*vmHelper)(struct Thread*, struct Object*);
    vmHelper funcPtr = dvmLockObject;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmLockObject");
        callFuncPtr((int)funcPtr, "dvmLockObject");
        afterCall("dvmLockObject");
    } else {
        callFuncPtr((int)funcPtr, "dvmLockObject");
    }
    return 0;
}
//!generate native code to call dvmUnlockObject

//!
int call_dvmUnlockObject() {
    typedef bool (*vmHelper)(Thread*, Object*);
    vmHelper funcPtr = dvmUnlockObject;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmUnlockObject");
        callFuncPtr((int)funcPtr, "dvmUnlockObject");
        afterCall("dvmUnlockObject");
    } else {
        callFuncPtr((int)funcPtr, "dvmUnlockObject");
    }
    return 0;
}
//!generate native code to call dvmInitClass

//!
int call_dvmInitClass() {
    typedef bool (*vmHelper)(ClassObject*);
    vmHelper funcPtr = dvmInitClass;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmInitClass");
        callFuncPtr((int)funcPtr, "dvmInitClass");
        afterCall("dvmInitClass");
    } else {
        callFuncPtr((int)funcPtr, "dvmInitClass");
    }
    return 0;
}
//!generate native code to call dvmAllocObject

//!
int call_dvmAllocObject() {
    typedef Object* (*vmHelper)(ClassObject*, int);
    vmHelper funcPtr = dvmAllocObject;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmAllocObject");
        callFuncPtr((int)funcPtr, "dvmAllocObject");
        afterCall("dvmAllocObject");
    } else {
        callFuncPtr((int)funcPtr, "dvmAllocObject");
    }
    return 0;
}
//!generate native code to call dvmAllocArrayByClass

//!
int call_dvmAllocArrayByClass() {
    typedef ArrayObject* (*vmHelper)(ClassObject*, size_t, int);
    vmHelper funcPtr = dvmAllocArrayByClass;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmAllocArrayByClass");
        callFuncPtr((int)funcPtr, "dvmAllocArrayByClass");
        afterCall("dvmAllocArrayByClass");
    } else {
        callFuncPtr((int)funcPtr, "dvmAllocArrayByClass");
    }
    return 0;
}
//!generate native code to call dvmAllocPrimitiveArray

//!
int call_dvmAllocPrimitiveArray() {
    typedef ArrayObject* (*vmHelper)(char, size_t, int);
    vmHelper funcPtr = dvmAllocPrimitiveArray;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmAllocPrimitiveArray");
        callFuncPtr((int)funcPtr, "dvmAllocPrimitiveArray");
        afterCall("dvmAllocPrimitiveArray");
    } else {
        callFuncPtr((int)funcPtr, "dvmAllocPrimitiveArray");
    }
    return 0;
}
//!generate native code to call dvmInterpHandleFillArrayData

//!
int call_dvmInterpHandleFillArrayData() {
    typedef bool (*vmHelper)(ArrayObject*, const u2*);
    vmHelper funcPtr = dvmInterpHandleFillArrayData;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmInterpHandleFillArrayData"); //before move_imm_to_reg to avoid spilling C_SCRATCH_1
        callFuncPtr((int)funcPtr, "dvmInterpHandleFillArrayData");
        afterCall("dvmInterpHandleFillArrayData");
    } else {
        callFuncPtr((int)funcPtr, "dvmInterpHandleFillArrayData");
    }
    return 0;
}

//!generate native code to call dvmNcgHandlePackedSwitch

//!
int call_dvmNcgHandlePackedSwitch() {
    typedef s4 (*vmHelper)(const s4*, s4, u2, s4);
    vmHelper funcPtr = dvmNcgHandlePackedSwitch;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmNcgHandlePackedSwitch");
        callFuncPtr((int)funcPtr, "dvmNcgHandlePackedSwitch");
        afterCall("dvmNcgHandlePackedSwitch");
    } else {
        callFuncPtr((int)funcPtr, "dvmNcgHandlePackedSwitch");
    }
    return 0;
}

int call_dvmJitHandlePackedSwitch() {
    typedef s4 (*vmHelper)(const s4*, s4, u2, s4);
    vmHelper funcPtr = dvmJitHandlePackedSwitch;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmJitHandlePackedSwitch");
        callFuncPtr((int)funcPtr, "dvmJitHandlePackedSwitch");
        afterCall("dvmJitHandlePackedSwitch");
    } else {
        callFuncPtr((int)funcPtr, "dvmJitHandlePackedSwitch");
    }
    return 0;
}

//!generate native code to call dvmNcgHandleSparseSwitch

//!
int call_dvmNcgHandleSparseSwitch() {
    typedef s4 (*vmHelper)(const s4*, u2, s4);
    vmHelper funcPtr = dvmNcgHandleSparseSwitch;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmNcgHandleSparseSwitch");
        callFuncPtr((int)funcPtr, "dvmNcgHandleSparseSwitch");
        afterCall("dvmNcgHandleSparseSwitch");
    } else {
        callFuncPtr((int)funcPtr, "dvmNcgHandleSparseSwitch");
    }
    return 0;
}

int call_dvmJitHandleSparseSwitch() {
    typedef s4 (*vmHelper)(const s4*, u2, s4);
    vmHelper funcPtr = dvmJitHandleSparseSwitch;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmJitHandleSparseSwitch");
        callFuncPtr((int)funcPtr, "dvmJitHandleSparseSwitch");
        afterCall("dvmJitHandleSparseSwitch");
    } else {
        callFuncPtr((int)funcPtr, "dvmJitHandleSparseSwitch");
    }
    return 0;
}

//!generate native code to call dvmCanPutArrayElement

//!
int call_dvmCanPutArrayElement() {
    typedef bool (*vmHelper)(const ClassObject*, const ClassObject*);
    vmHelper funcPtr = dvmCanPutArrayElement;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmCanPutArrayElement");
        callFuncPtr((int)funcPtr, "dvmCanPutArrayElement");
        afterCall("dvmCanPutArrayElement");
    } else {
        callFuncPtr((int)funcPtr, "dvmCanPutArrayElement");
    }
    return 0;
}

//!generate native code to call dvmFindInterfaceMethodInCache

//!
int call_dvmFindInterfaceMethodInCache() {
    typedef Method* (*vmHelper)(ClassObject*, u4, const Method*, DvmDex*);
    vmHelper funcPtr = dvmFindInterfaceMethodInCache;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmFindInterfaceMethodInCache");
        callFuncPtr((int)funcPtr, "dvmFindInterfaceMethodInCache");
        afterCall("dvmFindInterfaceMethodInCache");
    } else {
        callFuncPtr((int)funcPtr, "dvmFindInterfaceMethodInCache");
    }
    return 0;
}

//!generate native code to call dvmHandleStackOverflow

//!
int call_dvmHandleStackOverflow() {
    typedef void (*vmHelper)(Thread*, const Method*);
    vmHelper funcPtr = dvmHandleStackOverflow;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmHandleStackOverflow");
        callFuncPtr((int)funcPtr, "dvmHandleStackOverflow");
        afterCall("dvmHandleStackOverflow");
    } else {
        callFuncPtr((int)funcPtr, "dvmHandleStackOverflow");
    }
    return 0;
}
//!generate native code to call dvmResolveString

//!
int call_dvmResolveString() {
    //StringObject* dvmResolveString(const ClassObject* referrer, u4 stringIdx)
    typedef StringObject* (*vmHelper)(const ClassObject*, u4);
    vmHelper funcPtr = dvmResolveString;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmResolveString");
        callFuncPtr((int)funcPtr, "dvmResolveString");
        afterCall("dvmResolveString");
    } else {
        callFuncPtr((int)funcPtr, "dvmResolveString");
    }
    return 0;
}
//!generate native code to call dvmResolveInstField

//!
int call_dvmResolveInstField() {
    //InstField* dvmResolveInstField(const ClassObject* referrer, u4 ifieldIdx)
    typedef InstField* (*vmHelper)(const ClassObject*, u4);
    vmHelper funcPtr = dvmResolveInstField;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmResolveInstField");
        callFuncPtr((int)funcPtr, "dvmResolveInstField");
        afterCall("dvmResolveInstField");
    } else {
        callFuncPtr((int)funcPtr, "dvmResolveInstField");
    }
    return 0;
}
//!generate native code to call dvmResolveStaticField

//!
int call_dvmResolveStaticField() {
    //StaticField* dvmResolveStaticField(const ClassObject* referrer, u4 sfieldIdx)
    typedef StaticField* (*vmHelper)(const ClassObject*, u4);
    vmHelper funcPtr = dvmResolveStaticField;
    if(gDvm.executionMode == kExecutionModeNcgO1) {
        beforeCall("dvmResolveStaticField");
        callFuncPtr((int)funcPtr, "dvmResolveStaticField");
        afterCall("dvmResolveStaticField");
    } else {
        callFuncPtr((int)funcPtr, "dvmResolveStaticField");
    }
    return 0;
}

#define P_GPR_2 PhysicalReg_ECX
/*!
\brief This function is used to resolve a string reference

INPUT: const pool index in %eax

OUTPUT: resolved string in %eax

The registers are hard-coded: two physical registers, %esi and %edx, are used as scratch registers.
It calls the C function dvmResolveString;
the only register still live after this function is %ebx.
*/
int const_string_resolve() {
    scratchRegs[0] = PhysicalReg_ESI; scratchRegs[1] = PhysicalReg_EDX;
    scratchRegs[2] = PhysicalReg_Null; scratchRegs[3] = PhysicalReg_Null;
    insertLabel(".const_string_resolve", false);
    //method stored in glue structure as well as on the interpreted stack
    get_glue_method_class(P_GPR_2, true);
    load_effective_addr(-8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, PhysicalReg_EAX, true, 4, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, P_GPR_2, true, 0, PhysicalReg_ESP, true);
    call_dvmResolveString();
    load_effective_addr(8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    compare_imm_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
    conditional_jump(Condition_E, "common_exceptionThrown", false);
    x86_return();
    return 0;
}
#undef P_GPR_2
/*!
\brief This function is used to resolve a class

INPUT: const pool index in argument "indexReg" (%eax)

OUTPUT: resolved class in %eax

The registers are hard-coded: three physical registers (%esi, %edx, startLR:%eax) are used as scratch registers.
It calls the C function dvmResolveClass;
the only register still live after this function is %ebx.
*/
int resolve_class2(
           int startLR/*scratch register*/, bool isPhysical, int indexReg/*const pool index*/,
           bool indexPhysical, int thirdArg) {
    insertLabel(".class_resolve", false);
    scratchRegs[0] = PhysicalReg_ESI; scratchRegs[1] = PhysicalReg_EDX;
    scratchRegs[2] = PhysicalReg_Null; scratchRegs[3] = PhysicalReg_Null;

    //push index to stack first, to free indexReg
    load_effective_addr(-12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, indexReg, indexPhysical, 4, PhysicalReg_ESP, true);
    get_glue_method_class(startLR, isPhysical);
    move_imm_to_mem(OpndSize_32, thirdArg, 8, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, startLR, isPhysical, 0, PhysicalReg_ESP, true);
    call_dvmResolveClass();
    load_effective_addr(12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    compare_imm_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
    conditional_jump(Condition_E, "common_exceptionThrown", false);

    x86_return();
    return 0;
}
/*!
\brief This function is used to resolve a method; it is called with %eax serving as both indexReg and startLR

INPUT: const pool index in argument "indexReg" (%eax)

OUTPUT: resolved method in %eax

The registers are hard-coded: three physical registers (%esi, %edx, startLR:%eax) are used as scratch registers.
It calls the C function dvmResolveMethod;
the only register still live after this function is %ebx.
*/
int resolve_method2(
            int startLR/*logical register index*/, bool isPhysical, int indexReg/*const pool index*/,
            bool indexPhysical,
            int thirdArg/*VIRTUAL*/) {
    if(thirdArg == METHOD_VIRTUAL)
        insertLabel(".virtual_method_resolve", false);
    else if(thirdArg == METHOD_DIRECT)
        insertLabel(".direct_method_resolve", false);
    else if(thirdArg == METHOD_STATIC)
        insertLabel(".static_method_resolve", false);

    load_effective_addr(-12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, indexReg, indexPhysical, 4, PhysicalReg_ESP, true);

    scratchRegs[0] = PhysicalReg_ESI; scratchRegs[1] = PhysicalReg_EDX;
    scratchRegs[2] = PhysicalReg_Null; scratchRegs[3] = PhysicalReg_Null;
    get_glue_method_class(startLR, isPhysical);

    move_imm_to_mem(OpndSize_32, thirdArg, 8, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, startLR, isPhysical, 0, PhysicalReg_ESP, true);
    call_dvmResolveMethod();
    load_effective_addr(12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    compare_imm_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
    conditional_jump(Condition_E, "common_exceptionThrown", false);

    x86_return();
    return 0;
}
/*!
\brief This function is used to resolve an instance field

INPUT: const pool index in argument "indexReg" (%eax)

OUTPUT: resolved field in %eax

The registers are hard-coded: three physical registers (%esi, %edx, startLR:%eax) are used as scratch registers.
It calls the C function dvmResolveInstField;
the only register still live after this function is %ebx.
*/
int resolve_inst_field2(
            int startLR/*logical register index*/, bool isPhysical,
            int indexReg/*const pool index*/, bool indexPhysical) {
    insertLabel(".inst_field_resolve", false);
    scratchRegs[0] = PhysicalReg_ESI; scratchRegs[1] = PhysicalReg_EDX;
    scratchRegs[2] = PhysicalReg_Null; scratchRegs[3] = PhysicalReg_Null;

    load_effective_addr(-8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, indexReg, indexPhysical, 4, PhysicalReg_ESP, true);
    //method stored in glue structure as well as on the interpreted stack
    get_glue_method_class(startLR, isPhysical);
    move_reg_to_mem(OpndSize_32, startLR, isPhysical, 0, PhysicalReg_ESP, true);
    call_dvmResolveInstField();
    load_effective_addr(8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    compare_imm_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
    conditional_jump(Condition_E, "common_exceptionThrown", false);

    x86_return();
    return 0;
}
/*!
\brief This function is used to resolve a static field

INPUT: const pool index in argument "indexReg" (%eax)

OUTPUT: resolved field in %eax

The registers are hard-coded: three physical registers (%esi, %edx, startLR:%eax) are used as scratch registers.
It calls the C function dvmResolveStaticField;
the only register still live after this function is %ebx.
*/
int resolve_static_field2(
              int startLR/*logical register index*/, bool isPhysical, int indexReg/*const pool index*/,
              bool indexPhysical) {
    insertLabel(".static_field_resolve", false);
    scratchRegs[0] = PhysicalReg_ESI; scratchRegs[1] = PhysicalReg_EDX;
    scratchRegs[2] = PhysicalReg_Null; scratchRegs[3] = PhysicalReg_Null;

    load_effective_addr(-8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, indexReg, indexPhysical, 4, PhysicalReg_ESP, true);
    get_glue_method_class(startLR, isPhysical);
    move_reg_to_mem(OpndSize_32, startLR, isPhysical, 0, PhysicalReg_ESP, true);
    call_dvmResolveStaticField();
    load_effective_addr(8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    compare_imm_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
    conditional_jump(Condition_E, "common_exceptionThrown", false);

    x86_return();
    return 0;
}

int pushAllRegs() {
    load_effective_addr(-28, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem_noalloc(OpndSize_32, PhysicalReg_EAX, true, 24, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1);
    move_reg_to_mem_noalloc(OpndSize_32, PhysicalReg_EBX, true, 20, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1);
    move_reg_to_mem_noalloc(OpndSize_32, PhysicalReg_ECX, true, 16, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1);
    move_reg_to_mem_noalloc(OpndSize_32, PhysicalReg_EDX, true, 12, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1);
    move_reg_to_mem_noalloc(OpndSize_32, PhysicalReg_ESI, true, 8, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1);
    move_reg_to_mem_noalloc(OpndSize_32, PhysicalReg_EDI, true, 4, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1);
    move_reg_to_mem_noalloc(OpndSize_32, PhysicalReg_EBP, true, 0, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1);
    return 0;
}
int popAllRegs() {
    move_mem_to_reg_noalloc(OpndSize_32, 24, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1, PhysicalReg_EAX, true);
    move_mem_to_reg_noalloc(OpndSize_32, 20, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1, PhysicalReg_EBX, true);
    move_mem_to_reg_noalloc(OpndSize_32, 16, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1, PhysicalReg_ECX, true);
    move_mem_to_reg_noalloc(OpndSize_32, 12, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1, PhysicalReg_EDX, true);
    move_mem_to_reg_noalloc(OpndSize_32, 8, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1, PhysicalReg_ESI, true);
    move_mem_to_reg_noalloc(OpndSize_32, 4, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1, PhysicalReg_EDI, true);
    move_mem_to_reg_noalloc(OpndSize_32, 0, PhysicalReg_ESP, true, MemoryAccess_Unknown, -1, PhysicalReg_EBP, true);
    load_effective_addr(28, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    return 0;
}
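//! Layout note (added commentary): popAllRegs must read each register back at
//! the same ESP-relative offset pushAllRegs wrote it (EAX at 24 down to EBP at
//! 0), so the two functions have to be kept in lockstep if a slot is ever added.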

void dump_nop(int size) {
    switch(size) {
        case 1:
          *stream = 0x90;
          break;
        case 2:
          *stream = 0x66;
          *(stream + 1) = 0x90;
          break;
        case 3:
          *stream = 0x0f;
          *(stream + 1) = 0x1f;
          *(stream + 2) = 0x00;
          break;
        default:
          //TODO: add more cases.
          break;
    }
    stream += size;
}
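//! Sketch for the TODO above (illustrative, not in the original source): the
//! longer Intel-recommended multi-byte NOP encodings would continue the table,
//! e.g.
#if 0
        case 4: //nopl 0x0(%eax)
          *stream = 0x0f; *(stream + 1) = 0x1f;
          *(stream + 2) = 0x40; *(stream + 3) = 0x00;
          break;
        case 5: //nopl 0x0(%eax,%eax,1)
          *stream = 0x0f; *(stream + 1) = 0x1f; *(stream + 2) = 0x44;
          *(stream + 3) = 0x00; *(stream + 4) = 0x00;
          break;
#endif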