1 /*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17
18 /*! \file LowerJump.cpp
19 \brief This file lowers the following bytecodes: IF_XXX, GOTO
20 */
21 #include <math.h>
22 #include "libdex/DexOpcodes.h"
23 #include "libdex/DexFile.h"
24 #include "Lower.h"
25 #include "NcgAot.h"
26 #include "enc_wrapper.h"
27 #include "interp/InterpDefs.h"
28 #include "NcgHelper.h"
29
30 LabelMap* globalMap;
31 LabelMap* globalShortMap;//make sure for each bytecode, there is no duplicated label
32 LabelMap* globalWorklist = NULL;
33 LabelMap* globalShortWorklist;
34
35 int globalMapNum;
36 int globalWorklistNum;
37 int globalDataWorklistNum;
38 int VMAPIWorklistNum;
39 int globalPCWorklistNum;
40 int chainingWorklistNum;
41
42 LabelMap* globalDataWorklist = NULL;
43 LabelMap* globalPCWorklist = NULL;
44 LabelMap* chainingWorklist = NULL;
45 LabelMap* VMAPIWorklist = NULL;
46
47 char* ncgClassData;
48 char* ncgClassDataPtr;
49 char* ncgMethodData;
50 char* ncgMethodDataPtr;
51 int ncgClassNum;
52 int ncgMethodNum;
53
54 NCGWorklist* globalNCGWorklist;
55 DataWorklist* methodDataWorklist;
56 #ifdef ENABLE_TRACING
57 MapWorklist* methodMapWorklist;
58 #endif
59 /*!
60 \brief search globalShortMap to find the entry for the given label
61
62 */
LabelMap* findItemForShortLabel(const char* label) {
64 LabelMap* ptr = globalShortMap;
65 while(ptr != NULL) {
66 if(!strcmp(label, ptr->label)) {
67 return ptr;
68 }
69 ptr = ptr->nextItem;
70 }
71 return NULL;
72 }
73 //assume size of "jump reg" is 2
74 #define JUMP_REG_SIZE 2
75 #define ADD_REG_REG_SIZE 3
76 /*!
77 \brief update value of the immediate in the given jump instruction
78
79 check whether the immediate is out of range for the pre-set size
80 */
int updateJumpInst(char* jumpInst, OpndSize immSize, int relativeNCG) {
82 #ifdef DEBUG_NCG_JUMP
83 ALOGI("update jump inst @ %p with %d", jumpInst, relativeNCG);
84 #endif
85 if(immSize == OpndSize_8) { //-128 to 127
86 if(relativeNCG >= 128 || relativeNCG < -128) {
87 ALOGE("pre-allocated space for a forward jump is not big enough");
88 dvmAbort();
89 }
90 }
if(immSize == OpndSize_16) { //-32768 to 32767
92 if(relativeNCG >= 32768 || relativeNCG < -32768) {
93 ALOGE("pre-allocated space for a forward jump is not big enough");
94 dvmAbort();
95 }
96 }
97 encoder_update_imm(relativeNCG, jumpInst);
98 return 0;
99 }
100
101 /*!
102 \brief insert a label
103
It takes an argument checkDup. If checkDup is true, an entry is created in globalShortMap and the entries in globalShortWorklist are checked; for each match, the immediate in the corresponding jump instruction is updated and the entry is removed from globalShortWorklist.
Otherwise, an entry is created in globalMap.
106 */
int insertLabel(const char* label, bool checkDup) {
108 LabelMap* item = NULL;
109 if(!checkDup) {
110 item = (LabelMap*)malloc(sizeof(LabelMap));
111 if(item == NULL) {
112 ALOGE("Memory allocation failed");
113 return -1;
114 }
115 snprintf(item->label, LABEL_SIZE, "%s", label);
116 item->codePtr = stream;
117 item->nextItem = globalMap;
118 globalMap = item;
119 #ifdef DEBUG_NCG_CODE_SIZE
120 ALOGI("insert global label %s %p", label, stream);
121 #endif
122 globalMapNum++;
123 return 0;
124 }
125
126 item = (LabelMap*)malloc(sizeof(LabelMap));
127 if(item == NULL) {
128 ALOGE("Memory allocation failed");
129 return -1;
130 }
131 snprintf(item->label, LABEL_SIZE, "%s", label);
132 item->codePtr = stream;
133 item->nextItem = globalShortMap;
134 globalShortMap = item;
135 #ifdef DEBUG_NCG
136 ALOGI("insert short-term label %s %p", label, stream);
137 #endif
138 LabelMap* ptr = globalShortWorklist;
139 LabelMap* ptr_prevItem = NULL;
140 while(ptr != NULL) {
141 if(!strcmp(ptr->label, label)) {
142 //perform work
143 int relativeNCG = stream - ptr->codePtr;
144 unsigned instSize = encoder_get_inst_size(ptr->codePtr);
145 relativeNCG -= instSize; //size of the instruction
146 #ifdef DEBUG_NCG
147 ALOGI("perform work short-term %p for label %s relative %d", ptr->codePtr, label, relativeNCG);
148 #endif
149 updateJumpInst(ptr->codePtr, ptr->size, relativeNCG);
150 //remove work
151 if(ptr_prevItem == NULL) {
152 globalShortWorklist = ptr->nextItem;
153 free(ptr);
154 ptr = globalShortWorklist; //ptr_prevItem is still NULL
155 }
156 else {
157 ptr_prevItem->nextItem = ptr->nextItem;
158 free(ptr);
159 ptr = ptr_prevItem->nextItem;
160 }
161 }
162 else {
163 ptr_prevItem = ptr;
164 ptr = ptr->nextItem;
165 }
166 } //while
167 return 0;
168 }
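/* Illustrative walkthrough (added commentary, not from the original source) of the
 * short-term fixup performed above, assuming a 2-byte "jcc rel8" was emitted as a
 * forward jump inside the current bytecode:
 *
 *   0x100: jcc rel8   <-- globalShortWorklist entry: codePtr = 0x100, size = OpndSize_8
 *   ...                   (7 more bytes of native code for the same bytecode)
 *   0x109: <label>    <-- insertLabel(label, true) called when stream == 0x109
 *
 * relativeNCG = stream - codePtr - instSize = 0x109 - 0x100 - 2 = 7, which
 * updateJumpInst writes into the displacement byte of the jcc.
 */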
169 /*!
170 \brief search globalMap to find the entry for the given label
171
172 */
char* findCodeForLabel(const char* label) {
174 LabelMap* ptr = globalMap;
175 while(ptr != NULL) {
176 if(!strcmp(label, ptr->label)) {
177 return ptr->codePtr;
178 }
179 ptr = ptr->nextItem;
180 }
181 return NULL;
182 }
183 /*!
184 \brief search globalShortMap to find the entry for the given label
185
186 */
char* findCodeForShortLabel(const char* label) {
188 LabelMap* ptr = globalShortMap;
189 while(ptr != NULL) {
190 if(!strcmp(label, ptr->label)) {
191 return ptr->codePtr;
192 }
193 ptr = ptr->nextItem;
194 }
195 return NULL;
196 }
int insertLabelWorklist(const char* label, OpndSize immSize) {
198 LabelMap* item = (LabelMap*)malloc(sizeof(LabelMap));
199 if(item == NULL) {
200 ALOGE("Memory allocation failed");
201 return -1;
202 }
203 snprintf(item->label, LABEL_SIZE, "%s", label);
204 item->codePtr = stream;
205 item->size = immSize;
206 item->nextItem = globalWorklist;
207 globalWorklist = item;
208 #ifdef DEBUG_NCG
209 ALOGI("insert globalWorklist: %s %p", label, stream);
210 #endif
211 return 0;
212 }
213
int insertShortWorklist(const char* label, OpndSize immSize) {
215 LabelMap* item = (LabelMap*)malloc(sizeof(LabelMap));
216 if(item == NULL) {
217 ALOGE("Memory allocation failed");
218 return -1;
219 }
220 snprintf(item->label, LABEL_SIZE, "%s", label);
221 item->codePtr = stream;
222 item->size = immSize;
223 item->nextItem = globalShortWorklist;
224 globalShortWorklist = item;
225 #ifdef DEBUG_NCG
226 ALOGI("insert globalShortWorklist: %s %p", label, stream);
227 #endif
228 return 0;
229 }
230 /*!
231 \brief free memory allocated for globalMap
232
233 */
void freeLabelMap() {
235 LabelMap* ptr = globalMap;
236 while(ptr != NULL) {
237 globalMap = ptr->nextItem;
238 free(ptr);
239 ptr = globalMap;
240 }
241 }
242 /*!
243 \brief free memory allocated for globalShortMap
244
245 */
void freeShortMap() {
247 LabelMap* ptr = globalShortMap;
248 while(ptr != NULL) {
249 globalShortMap = ptr->nextItem;
250 free(ptr);
251 ptr = globalShortMap;
252 }
253 globalShortMap = NULL;
254 }
255
int insertGlobalPCWorklist(char * offset, char * codeStart)
257 {
258 LabelMap* item = (LabelMap*)malloc(sizeof(LabelMap));
259 if(item == NULL) {
260 ALOGE("Memory allocation failed");
261 return -1;
262 }
263 snprintf(item->label, LABEL_SIZE, "%s", "export_pc");
264 item->size = OpndSize_32;
265 item->codePtr = offset; //points to the immediate operand
266 item->addend = codeStart - streamMethodStart; //relative code pointer
267 item->nextItem = globalPCWorklist;
268 globalPCWorklist = item;
269 globalPCWorklistNum ++;
270
271 #ifdef DEBUG_NCG
272 ALOGI("insert globalPCWorklist: %p %p %p %x %p", globalDvmNcg->streamCode, codeStart, streamCode, item->addend, item->codePtr);
273 #endif
274 return 0;
275 }
276
int insertChainingWorklist(int bbId, char * codeStart)
278 {
279 LabelMap* item = (LabelMap*)malloc(sizeof(LabelMap));
280 if(item == NULL) {
281 ALOGE("Memory allocation failed");
282 return -1;
283 }
284 item->size = OpndSize_32;
285 item->codePtr = codeStart; //points to the move instruction
item->addend = bbId; //target basic block id
287 item->nextItem = chainingWorklist;
288 chainingWorklist = item;
289
290 #ifdef DEBUG_NCG
291 ALOGI("insertChainingWorklist: %p basic block %d", codeStart, bbId);
292 #endif
293 return 0;
294 }
295
int insertGlobalDataWorklist(char * offset, const char* label)
297 {
298 LabelMap* item = (LabelMap*)malloc(sizeof(LabelMap));
299 if(item == NULL) {
300 ALOGE("Memory allocation failed");
301 return -1;
302 }
303 snprintf(item->label, LABEL_SIZE, "%s", label);
304 item->codePtr = offset;
305 item->size = OpndSize_32;
306 item->nextItem = globalDataWorklist;
307 globalDataWorklist = item;
308 globalDataWorklistNum ++;
309
310 #ifdef DEBUG_NCG
311 ALOGI("insert globalDataWorklist: %s %p", label, offset);
312 #endif
313
314 return 0;
315 }
316
int insertVMAPIWorklist(char * offset, const char* label)
318 {
319 LabelMap* item = (LabelMap*)malloc(sizeof(LabelMap));
320 if(item == NULL) {
321 ALOGE("Memory allocation failed");
322 return -1;
323 }
324 snprintf(item->label, LABEL_SIZE, "%s", label);
325 item->codePtr = offset;
326 item->size = OpndSize_32;
327
328 item->nextItem = VMAPIWorklist;
329 VMAPIWorklist = item;
330
331 VMAPIWorklistNum ++;
332
333 #ifdef DEBUG_NCG
334 ALOGI("insert VMAPIWorklist: %s %p", label, offset);
335 #endif
336 return 0;
337 }
338 ////////////////////////////////////////////////
339
340
341 int updateImmRMInst(char* moveInst, const char* label, int relativeNCG); //forward declaration
342 //////////////////// performLabelWorklist is defined differently for code cache
void performChainingWorklist() {
344 LabelMap* ptr = chainingWorklist;
345 while(ptr != NULL) {
346 int tmpNCG = traceLabelList[ptr->addend].lop.generic.offset;
347 char* NCGaddr = streamMethodStart + tmpNCG;
348 updateImmRMInst(ptr->codePtr, "", (int)NCGaddr);
349 chainingWorklist = ptr->nextItem;
350 free(ptr);
351 ptr = chainingWorklist;
352 }
353 }
void freeChainingWorklist() {
355 LabelMap* ptr = chainingWorklist;
356 while(ptr != NULL) {
357 chainingWorklist = ptr->nextItem;
358 free(ptr);
359 ptr = chainingWorklist;
360 }
361 }
362
363 //Work only for initNCG
void performLabelWorklist() {
365 LabelMap* ptr = globalWorklist;
366 while(ptr != NULL) {
367 #ifdef DEBUG_NCG
368 ALOGI("perform work global %p for label %s", ptr->codePtr, ptr->label);
369 #endif
370 char* targetCode = findCodeForLabel(ptr->label);
371 assert(targetCode != NULL);
372 int relativeNCG = targetCode - ptr->codePtr;
373 unsigned instSize = encoder_get_inst_size(ptr->codePtr);
374 relativeNCG -= instSize; //size of the instruction
375 updateJumpInst(ptr->codePtr, ptr->size, relativeNCG);
376 globalWorklist = ptr->nextItem;
377 free(ptr);
378 ptr = globalWorklist;
379 }
380 }
void freeLabelWorklist() {
382 LabelMap* ptr = globalWorklist;
383 while(ptr != NULL) {
384 globalWorklist = ptr->nextItem;
385 free(ptr);
386 ptr = globalWorklist;
387 }
388 }
389
390 ///////////////////////////////////////////////////
391 /*!
392 \brief update value of the immediate in the given move instruction
393
394 */
int updateImmRMInst(char* moveInst, const char* label, int relativeNCG) {
396 #ifdef DEBUG_NCG
397 ALOGI("perform work ImmRM inst @ %p for label %s with %d", moveInst, label, relativeNCG);
398 #endif
399 encoder_update_imm_rm(relativeNCG, moveInst);
400 return 0;
401 }
402 //! maximum instruction size for jump,jcc,call: 6 for jcc rel32
403 #define MAX_JCC_SIZE 6
404 //! minimum instruction size for jump,jcc,call: 2
405 #define MIN_JCC_SIZE 2
406 /*!
407 \brief estimate size of the immediate
408
For some reason the 16-bit jump encoding does not work, so this function returns either 8 bit or 32 bit.
EXAMPLE:
  native code at A: ...
  native code at B: jump relOffset (target is A)
  native code at B':
  --> relOffset = A - B' = A - B - size of the jump instruction
Argument "target" is equal to A - B. To determine the size of the immediate, we check the value of "target - size of the jump instruction".
416 */
OpndSize estOpndSizeFromImm(int target) {
418 if(target-MIN_JCC_SIZE < 128 && target-MAX_JCC_SIZE >= -128) return OpndSize_8;
419 #ifdef SUPPORT_IMM_16
420 if(target-MIN_JCC_SIZE < 32768 && target-MAX_JCC_SIZE >= -32768) return OpndSize_16;
421 #endif
422 return OpndSize_32;
423 }
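/* Worked example of the conservative bound above (added commentary): "target" is the
 * distance from the start of the jump instruction to the label (A - B), but the encoded
 * displacement is measured from the end of the instruction, i.e. target - instSize,
 * where instSize ends up somewhere between MIN_JCC_SIZE (2) and MAX_JCC_SIZE (6) and is
 * not known yet. OpndSize_8 is returned only if the displacement fits in [-128, 127]
 * over that whole range:
 *   target = 129: 129 - 2 = 127 and 129 - 6 = 123 both fit        -> OpndSize_8
 *   target = 130: 130 - 2 = 128 does not fit in a signed byte     -> OpndSize_32
 */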
424 /*!
425 \brief return size of a jump or call instruction
426
427 */
unsigned getJmpCallInstSize(OpndSize size, JmpCall_type type) {
429 if(type == JmpCall_uncond) {
430 if(size == OpndSize_8) return 2;
431 if(size == OpndSize_16) return 4;
432 return 5;
433 }
434 if(type == JmpCall_cond) {
435 if(size == OpndSize_8) return 2;
436 if(size == OpndSize_16) return 5;
437 return 6;
438 }
439 if(type == JmpCall_reg) {
440 assert(size == OpndSize_32);
441 return JUMP_REG_SIZE;
442 }
443 if(type == JmpCall_call) {
444 assert(size != OpndSize_8);
445 if(size == OpndSize_16) return 4;
446 return 5;
447 }
448 return 0;
449 }
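/* For reference (added commentary): the byte counts above match the standard IA-32
 * encodings, which is presumably what the encoder emits:
 *   jmp  rel8   EB cb        2 bytes      jcc  rel8   7x cb        2 bytes
 *   jmp  rel16  66 E9 cw     4 bytes      jcc  rel16  66 0F 8x cw  5 bytes
 *   jmp  rel32  E9 cd        5 bytes      jcc  rel32  0F 8x cd     6 bytes
 *   call rel16  66 E8 cw     4 bytes      jmp  reg    FF /4        2 bytes (JUMP_REG_SIZE)
 *   call rel32  E8 cd        5 bytes
 */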
450 /*!
\brief check whether a branch target is already handled; if yes, compute the relative offset; otherwise, call insertShortWorklist or insertLabelWorklist

If the branch target is not handled yet, insertShortWorklist or insertLabelWorklist is called depending on isShortTerm, *unknown is set to true, and *immSize is set to 32 if isShortTerm is false, to 32 if isShortTerm is true and the target is one of a few special labels (such as ".check_cast_null"), and to 8 otherwise.

If the branch target is already handled, estOpndSizeFromImm is called to set *immSize for the jump instruction, and the value of the immediate is returned.
456 */
int getRelativeOffset(const char* target, bool isShortTerm, JmpCall_type type, bool* unknown, OpndSize* immSize) {
458 char* targetPtrInStream = NULL;
459 if(isShortTerm) targetPtrInStream = findCodeForShortLabel(target);
460 else targetPtrInStream = findCodeForLabel(target);
461
462 int relOffset;
463 *unknown = false;
464 if(targetPtrInStream == NULL) {
465 //branch target is not handled yet
466 relOffset = 0;
467 *unknown = true;
468 if(isShortTerm) {
/* for a forward jump, at this point, we don't know how far the target is from this jump;
since the label is only used within a single bytecode, we assume OpndSize_8 is big enough,
but there are special cases where we should use a 32 bit offset
472 */
473 if(!strcmp(target, ".check_cast_null") || !strcmp(target, ".stackOverflow") ||
474 !strcmp(target, ".invokeChain") ||
475 !strcmp(target, ".new_instance_done") ||
476 !strcmp(target, ".new_array_done") ||
477 !strcmp(target, ".fill_array_data_done") ||
478 !strcmp(target, ".inlined_string_compare_done") ||
479 !strncmp(target, "after_exception", 15)) {
480 #ifdef SUPPORT_IMM_16
481 *immSize = OpndSize_16;
482 #else
483 *immSize = OpndSize_32;
484 #endif
485 } else {
486 *immSize = OpndSize_8;
487 }
488 #ifdef DEBUG_NCG_JUMP
489 ALOGI("insert to short worklist %s %d", target, *immSize);
490 #endif
491 insertShortWorklist(target, *immSize);
492 }
493 else {
494 #ifdef SUPPORT_IMM_16
495 *immSize = OpndSize_16;
496 #else
497 *immSize = OpndSize_32;
498 #endif
499 insertLabelWorklist(target, *immSize);
500 }
501 if(type == JmpCall_call) { //call sz16 does not work in gdb
502 *immSize = OpndSize_32;
503 }
504 return 0;
505 }
506 else if (!isShortTerm) {
507 #ifdef SUPPORT_IMM_16
508 *immSize = OpndSize_16;
509 #else
510 *immSize = OpndSize_32;
511 #endif
512 insertLabelWorklist(target, *immSize);
513 }
514
515 #ifdef DEBUG_NCG
516 ALOGI("backward branch @ %p for label %s", stream, target);
517 #endif
518 relOffset = targetPtrInStream - stream;
519 if(type == JmpCall_call) *immSize = OpndSize_32;
520 else
521 *immSize = estOpndSizeFromImm(relOffset);
522
523 relOffset -= getJmpCallInstSize(*immSize, type);
524 return relOffset;
525 }
526
527 /*!
528 \brief generate a single native instruction "jcc imm" to jump to a label
529
530 */
void conditional_jump(ConditionCode cc, const char* target, bool isShortTerm) {
532 if(jumpToException(target) && currentExceptionBlockIdx >= 0) { //jump to the exceptionThrow block
533 condJumpToBasicBlock(stream, cc, currentExceptionBlockIdx);
534 return;
535 }
536 Mnemonic m = (Mnemonic)(Mnemonic_Jcc + cc);
537 bool unknown;
538 OpndSize size;
539 int imm = 0;
540 imm = getRelativeOffset(target, isShortTerm, JmpCall_cond, &unknown, &size);
541 dump_label(m, size, imm, target, isShortTerm);
542 }
543 /*!
544 \brief generate a single native instruction "jmp imm" to jump to ".invokeArgsDone"
545
546 */
void goto_invokeArgsDone() {
548 unconditional_jump_global_API(".invokeArgsDone", false);
549 }
550 /*!
551 \brief generate a single native instruction "jmp imm" to jump to a label
552
553 If the target is ".invokeArgsDone" and mode is NCG O1, extra work is performed to dump content of virtual registers to memory.
554 */
void unconditional_jump(const char* target, bool isShortTerm) {
556 if(jumpToException(target) && currentExceptionBlockIdx >= 0) { //jump to the exceptionThrow block
557 jumpToBasicBlock(stream, currentExceptionBlockIdx);
558 return;
559 }
560 Mnemonic m = Mnemonic_JMP;
561 bool unknown;
562 OpndSize size;
563 if(gDvm.executionMode == kExecutionModeNcgO1) {
//for the other three labels used by the JIT: .invokeArgsDone_formal, _native, _jit
565 if(!strncmp(target, ".invokeArgsDone", 15)) {
566 touchEcx(); //keep ecx live, if ecx was spilled, it is loaded here
567 beforeCall(target); //
568 }
569 if(!strcmp(target, ".invokeArgsDone")) {
570 nextVersionOfHardReg(PhysicalReg_EDX, 1); //edx will be used in a function
571 call("ncgGetEIP"); //must be immediately before JMP
572 }
573 }
574 int imm = 0;
575 imm = getRelativeOffset(target, isShortTerm, JmpCall_uncond, &unknown, &size);
576 dump_label(m, size, imm, target, isShortTerm);
577 if(gDvm.executionMode == kExecutionModeNcgO1) {
578 if(!strncmp(target, ".invokeArgsDone", 15)) {
579 afterCall(target); //un-spill before executing the next bytecode
580 }
581 }
582 }
583 /*!
584 \brief generate a single native instruction "jcc imm"
585
586 */
void conditional_jump_int(ConditionCode cc, int target, OpndSize size) {
588 Mnemonic m = (Mnemonic)(Mnemonic_Jcc + cc);
589 dump_ncg(m, size, target);
590 }
591 /*!
592 \brief generate a single native instruction "jmp imm"
593
594 */
void unconditional_jump_int(int target, OpndSize size) {
596 Mnemonic m = Mnemonic_JMP;
597 dump_ncg(m, size, target);
598 }
599 /*!
600 \brief generate a single native instruction "jmp reg"
601
602 */
void unconditional_jump_reg(int reg, bool isPhysical) {
604 dump_reg(Mnemonic_JMP, ATOM_NORMAL, OpndSize_32, reg, isPhysical, LowOpndRegType_gp);
605 }
606
607 /*!
608 \brief generate a single native instruction to call a function
609
610 If mode is NCG O1, extra work is performed to dump content of virtual registers to memory.
611 */
void call(const char* target) {
613 if(gDvm.executionMode == kExecutionModeNcgO1) {
614 beforeCall(target);
615 }
616 Mnemonic m = Mnemonic_CALL;
617 bool dummy;
618 OpndSize size;
619 int relOffset = 0;
620 relOffset = getRelativeOffset(target, false, JmpCall_call, &dummy, &size);
621 dump_label(m, size, relOffset, target, false);
622 if(gDvm.executionMode == kExecutionModeNcgO1) {
623 afterCall(target);
624 }
625 }
626 /*!
627 \brief generate a single native instruction to call a function
628
629 */
void call_reg(int reg, bool isPhysical) {
631 Mnemonic m = Mnemonic_CALL;
632 dump_reg(m, ATOM_NORMAL, OpndSize_32, reg, isPhysical, LowOpndRegType_gp);
633 }
void call_reg_noalloc(int reg, bool isPhysical) {
635 Mnemonic m = Mnemonic_CALL;
636 dump_reg_noalloc(m, OpndSize_32, reg, isPhysical, LowOpndRegType_gp);
637 }
638
639 /*!
640 \brief generate a single native instruction to call a function
641
642 */
void call_mem(int disp, int reg, bool isPhysical) {
644 Mnemonic m = Mnemonic_CALL;
645 dump_mem(m, ATOM_NORMAL, OpndSize_32, disp, reg, isPhysical);
646 }
647
648 /*!
649 \brief insert an entry to globalNCGWorklist
650
651 */
int insertNCGWorklist(s4 relativePC, OpndSize immSize) {
653 int offsetNCG2 = stream - streamMethodStart;
654 #ifdef DEBUG_NCG
655 ALOGI("insert NCGWorklist (goto forward) @ %p offsetPC %x relativePC %x offsetNCG %x", stream, offsetPC, relativePC, offsetNCG2);
656 #endif
657 NCGWorklist* item = (NCGWorklist*)malloc(sizeof(NCGWorklist));
658 if(item == NULL) {
659 ALOGE("Memory allocation failed");
660 return -1;
661 }
662 item->relativePC = relativePC;
663 item->offsetPC = offsetPC;
664 item->offsetNCG = offsetNCG2;
665 item->codePtr = stream;
666 item->size = immSize;
667 item->nextItem = globalNCGWorklist;
668 globalNCGWorklist = item;
669 return 0;
670 }
671 #ifdef ENABLE_TRACING
int insertMapWorklist(s4 BCOffset, s4 NCGOffset, int isStartOfPC) {
673 return 0;
674 }
675 #endif
676 /*!
677 \brief insert an entry to methodDataWorklist
678
679 This function is used by bytecode FILL_ARRAY_DATA, PACKED_SWITCH, SPARSE_SWITCH
680 */
int insertDataWorklist(s4 relativePC, char* codePtr1) {
682 //insert according to offsetPC+relativePC, smallest at the head
683 DataWorklist* item = (DataWorklist*)malloc(sizeof(DataWorklist));
684 if(item == NULL) {
685 ALOGE("Memory allocation failed");
686 return -1;
687 }
688 item->relativePC = relativePC;
689 item->offsetPC = offsetPC;
690 item->codePtr = codePtr1;
691 item->codePtr2 = stream; //jump_reg for switch
692 DataWorklist* ptr = methodDataWorklist;
693 DataWorklist* prev_ptr = NULL;
694 while(ptr != NULL) {
695 int tmpPC = ptr->offsetPC + ptr->relativePC;
696 int tmpPC2 = relativePC + offsetPC;
697 if(tmpPC2 < tmpPC) {
698 break;
699 }
700 prev_ptr = ptr;
701 ptr = ptr->nextItem;
702 }
703 //insert item before ptr
704 if(prev_ptr != NULL) {
705 prev_ptr->nextItem = item;
706 }
707 else methodDataWorklist = item;
708 item->nextItem = ptr;
709 return 0;
710 }
711
712 /*!
713 \brief work on globalNCGWorklist
714
715 */
int performNCGWorklist() {
717 NCGWorklist* ptr = globalNCGWorklist;
718 while(ptr != NULL) {
719 ALOGV("perform NCG worklist: @ %p target block %d target NCG %x",
720 ptr->codePtr, ptr->relativePC, traceLabelList[ptr->relativePC].lop.generic.offset);
721 int tmpNCG = traceLabelList[ptr->relativePC].lop.generic.offset;
722 assert(tmpNCG >= 0);
723 int relativeNCG = tmpNCG - ptr->offsetNCG;
724 unsigned instSize = encoder_get_inst_size(ptr->codePtr);
725 relativeNCG -= instSize;
726 updateJumpInst(ptr->codePtr, ptr->size, relativeNCG);
727 globalNCGWorklist = ptr->nextItem;
728 free(ptr);
729 ptr = globalNCGWorklist;
730 }
731 return 0;
732 }
void freeNCGWorklist() {
734 NCGWorklist* ptr = globalNCGWorklist;
735 while(ptr != NULL) {
736 globalNCGWorklist = ptr->nextItem;
737 free(ptr);
738 ptr = globalNCGWorklist;
739 }
740 }
741
742 /*!
743 \brief used by bytecode SWITCH
744
745 targetPC points to start of the data section
746 Code sequence for SWITCH
747 call ncgGetEIP
748 @codeInst: add_reg_reg %eax, %edx
749 jump_reg %edx
750 This function returns the offset in native code between add_reg_reg and the data section
751 */
int getRelativeNCGForSwitch(int targetPC, char* codeInst) {
753 int tmpNCG = mapFromBCtoNCG[targetPC];
754 int offsetNCG2 = codeInst - streamMethodStart;
755 int relativeOff = tmpNCG - offsetNCG2;
756 return relativeOff;
757 }
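/* Sketch of how the value computed above is consumed at run time, following the code
 * sequence documented before this function (the exact register roles are an inference,
 * not verified here): ncgGetEIP presumably leaves the address of the add_reg_reg
 * instruction in %eax, %edx holds the table entry relativeOff, add_reg_reg forms
 * %edx = &add_reg_reg + relativeOff, and jump_reg %edx then lands at
 * streamMethodStart + mapFromBCtoNCG[targetPC], because
 * relativeOff = mapFromBCtoNCG[targetPC] - (codeInst - streamMethodStart).
 */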
758 /*!
759 \brief work on methodDataWorklist
760
761 */
int performDataWorklist() {
763 DataWorklist* ptr = methodDataWorklist;
764 if(ptr == NULL) return 0;
765
766 char* codeCacheEnd = ((char *) gDvmJit.codeCache) + gDvmJit.codeCacheSize - CODE_CACHE_PADDING;
767 u2 insnsSize = dvmGetMethodInsnsSize(currentMethod); //bytecode
768 //align stream to multiple of 4
769 int alignBytes = (int)stream & 3;
770 if(alignBytes != 0) alignBytes = 4-alignBytes;
771 stream += alignBytes;
772
773 while(ptr != NULL) {
774 int tmpPC = ptr->offsetPC + ptr->relativePC;
775 int endPC = insnsSize;
776 if(ptr->nextItem != NULL) endPC = ptr->nextItem->offsetPC + ptr->nextItem->relativePC;
777 mapFromBCtoNCG[tmpPC] = stream - streamMethodStart; //offsetNCG in byte
778
779 //handle fill_array_data, packed switch & sparse switch
780 u2 tmpInst = *(currentMethod->insns + ptr->offsetPC);
781 u2* sizePtr;
782 s4* entryPtr_bytecode;
783 u2 tSize, iVer;
784 u4 sz;
785
if (gDvmJit.codeCacheFull == true) {
    // We are out of code cache space. Skip writing data/code to
    // code cache. Simply free the item and move on to the next one,
    // so that the freed entry is not touched again below.
    methodDataWorklist = ptr->nextItem;
    free(ptr);
    ptr = methodDataWorklist;
    continue;
}
793
794 switch (INST_INST(tmpInst)) {
795 case OP_FILL_ARRAY_DATA:
796 sz = (endPC-tmpPC)*sizeof(u2);
797 if ((stream + sz) < codeCacheEnd) {
798 memcpy(stream, (u2*)currentMethod->insns+tmpPC, sz);
799 #ifdef DEBUG_NCG_CODE_SIZE
800 ALOGI("copy data section to stream %p: start at %d, %d bytes", stream, tmpPC, sz);
801 #endif
802 #ifdef DEBUG_NCG
803 ALOGI("update data section at %p with %d", ptr->codePtr, stream-ptr->codePtr);
804 #endif
805 updateImmRMInst(ptr->codePtr, "", stream - ptr->codePtr);
806 stream += sz;
807 } else {
808 gDvmJit.codeCacheFull = true;
809 }
810 break;
811 case OP_PACKED_SWITCH:
812 updateImmRMInst(ptr->codePtr, "", stream-ptr->codePtr);
813 sizePtr = (u2*)currentMethod->insns+tmpPC + 1 /*signature*/;
814 entryPtr_bytecode = (s4*)(sizePtr + 1 /*size*/ + 2 /*firstKey*/);
815 tSize = *(sizePtr);
816 sz = tSize * 4; /* expected size needed in stream */
817 if ((stream + sz) < codeCacheEnd) {
818 for(iVer = 0; iVer < tSize; iVer++) {
819 //update entries
820 s4 relativePC = *entryPtr_bytecode; //relative to ptr->offsetPC
821 //need stream, offsetPC,
822 int relativeNCG = getRelativeNCGForSwitch(relativePC+ptr->offsetPC, ptr->codePtr2);
823 #ifdef DEBUG_NCG_CODE_SIZE
824 ALOGI("convert target from %d to %d", relativePC+ptr->offsetPC, relativeNCG);
825 #endif
826 *((s4*)stream) = relativeNCG;
827 stream += 4;
828 entryPtr_bytecode++;
829 }
830 } else {
831 gDvmJit.codeCacheFull = true;
832 }
833 break;
834 case OP_SPARSE_SWITCH:
835 updateImmRMInst(ptr->codePtr, "", stream-ptr->codePtr);
836 sizePtr = (u2*)currentMethod->insns+tmpPC + 1 /*signature*/;
837 s4* keyPtr_bytecode = (s4*)(sizePtr + 1 /*size*/);
838 tSize = *(sizePtr);
839 entryPtr_bytecode = (s4*)(keyPtr_bytecode + tSize);
840 sz = tSize * (sizeof(s4) + 4); /* expected size needed in stream */
841 if ((stream + sz) < codeCacheEnd) {
842 memcpy(stream, keyPtr_bytecode, tSize*sizeof(s4));
843 stream += tSize*sizeof(s4);
844 for(iVer = 0; iVer < tSize; iVer++) {
845 //update entries
846 s4 relativePC = *entryPtr_bytecode; //relative to ptr->offsetPC
847 //need stream, offsetPC,
848 int relativeNCG = getRelativeNCGForSwitch(relativePC+ptr->offsetPC, ptr->codePtr2);
849 *((s4*)stream) = relativeNCG;
850 stream += 4;
851 entryPtr_bytecode++;
852 }
853 } else {
854 gDvmJit.codeCacheFull = true;
855 }
856 break;
857 }
858
859 //remove the item
860 methodDataWorklist = ptr->nextItem;
861 free(ptr);
862 ptr = methodDataWorklist;
863 }
864 return 0;
865 }
void freeDataWorklist() {
867 DataWorklist* ptr = methodDataWorklist;
868 while(ptr != NULL) {
869 methodDataWorklist = ptr->nextItem;
870 free(ptr);
871 ptr = methodDataWorklist;
872 }
873 }
874
875 //////////////////////////
876 /*!
\brief check whether a branch target (specified as a basic block id) is already handled; if yes, compute the relative offset; otherwise, call insertNCGWorklist

If the branch target is not handled yet, insertNCGWorklist is called, *unknown is set to true, and *immSize is set to 32.

If the branch target is already handled, estOpndSizeFromImm is called to set *immSize for the jump instruction, and the value of the immediate is returned.
882 */
int getRelativeNCG(s4 tmp, JmpCall_type type, bool* unknown, OpndSize* size) { //tmp: target basic block id
884 int tmpNCG = traceLabelList[tmp].lop.generic.offset;
885
886 *unknown = false;
887 if(tmpNCG <0) {
888 *unknown = true;
889 #ifdef SUPPORT_IMM_16
890 *size = OpndSize_16;
891 #else
892 *size = OpndSize_32;
893 #endif
894 insertNCGWorklist(tmp, *size);
895 return 0;
896 }
897 int offsetNCG2 = stream - streamMethodStart;
898 #ifdef DEBUG_NCG
899 ALOGI("goto backward @ %p offsetPC %d relativePC %d offsetNCG %d relativeNCG %d", stream, offsetPC, tmp, offsetNCG2, tmpNCG-offsetNCG2);
900 #endif
901 int relativeOff = tmpNCG - offsetNCG2;
902 *size = estOpndSizeFromImm(relativeOff);
903 return relativeOff - getJmpCallInstSize(*size, type);
904 }
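/* Note (added commentary): this is the trace-JIT counterpart of getRelativeOffset above.
 * Targets are identified by a basic block id (an index into traceLabelList) rather than
 * by a named label, and an unresolved target is queued on globalNCGWorklist, to be
 * patched by performNCGWorklist once the block's native offset is known.
 */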
905 /*!
906 \brief a helper function to handle backward branch
907
908 input: jump target in %eax; at end of the function, jump to %eax
909 */
int common_backwardBranch() {
911 insertLabel("common_backwardBranch", false);
912 spill_reg(PhysicalReg_EAX, true);
913 call("common_periodicChecks_entry");
914 unspill_reg(PhysicalReg_EAX, true);
915 unconditional_jump_reg(PhysicalReg_EAX, true);
916 return 0;
917 }
918 //when this is called from JIT, there is no need to check GC
int common_goto(s4 tmp) { //tmp: target basic block id
920 bool unknown;
921 OpndSize size;
922 constVREndOfBB();
923 globalVREndOfBB(currentMethod);
924
925 int relativeNCG = tmp;
926 relativeNCG = getRelativeNCG(tmp, JmpCall_uncond, &unknown, &size);
927 unconditional_jump_int(relativeNCG, size);
928 return 1;
929 }
int common_if(s4 tmp, ConditionCode cc_next, ConditionCode cc) {
931 bool unknown;
932 OpndSize size;
933 int relativeNCG = traceCurrentBB->taken ? traceCurrentBB->taken->id : 0;
934
935 if(traceCurrentBB->taken)
936 relativeNCG = getRelativeNCG(traceCurrentBB->taken->id, JmpCall_cond, &unknown, &size);
937 conditional_jump_int(cc, relativeNCG, size);
938 relativeNCG = traceCurrentBB->fallThrough ? traceCurrentBB->fallThrough->id : 0;
939 if(traceCurrentBB->fallThrough)
940 relativeNCG = getRelativeNCG(traceCurrentBB->fallThrough->id, JmpCall_uncond, &unknown, &size);
941 unconditional_jump_int(relativeNCG, size);
942 return 2;
943 }
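/* Example of the pattern emitted by common_if (added commentary): for IF_EQ the caller
 * passes cc = Condition_E and cc_next = Condition_NE, so the generated code is
 *   jcc E  -> native code of the taken block
 *   jmp    -> native code of the fall-through block
 * Note that cc_next is not used in the body above; the fall-through edge is always
 * reached via the trailing unconditional jump.
 */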
944
945 /*!
946 \brief helper function to handle null object error
947
948 */
int common_errNullObject() {
950 insertLabel("common_errNullObject", false);
951 move_imm_to_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
952 move_imm_to_reg(OpndSize_32, LstrNullPointerException, PhysicalReg_ECX, true);
953 unconditional_jump("common_throw", false);
954 return 0;
955 }
956 /*!
957 \brief helper function to handle string index error
958
959 */
int common_StringIndexOutOfBounds() {
961 insertLabel("common_StringIndexOutOfBounds", false);
962 move_imm_to_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
963 move_imm_to_reg(OpndSize_32, LstrStringIndexOutOfBoundsException, PhysicalReg_ECX, true);
964 unconditional_jump("common_throw", false);
965 return 0;
966 }
967
968 /*!
969 \brief helper function to handle array index error
970
971 */
int common_errArrayIndex() {
973 insertLabel("common_errArrayIndex", false);
974 move_imm_to_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
975 move_imm_to_reg(OpndSize_32, LstrArrayIndexException, PhysicalReg_ECX, true);
976 unconditional_jump("common_throw", false);
977 return 0;
978 }
979 /*!
980 \brief helper function to handle array store error
981
982 */
int common_errArrayStore() {
984 insertLabel("common_errArrayStore", false);
985 move_imm_to_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
986 move_imm_to_reg(OpndSize_32, LstrArrayStoreException, PhysicalReg_ECX, true);
987 unconditional_jump("common_throw", false);
988 return 0;
989 }
990 /*!
991 \brief helper function to handle negative array size error
992
993 */
int common_errNegArraySize() {
995 insertLabel("common_errNegArraySize", false);
996 move_imm_to_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
997 move_imm_to_reg(OpndSize_32, LstrNegativeArraySizeException, PhysicalReg_ECX, true);
998 unconditional_jump("common_throw", false);
999 return 0;
1000 }
1001 /*!
1002 \brief helper function to handle divide-by-zero error
1003
1004 */
int common_errDivideByZero() {
1006 insertLabel("common_errDivideByZero", false);
1007 move_imm_to_reg(OpndSize_32, LstrDivideByZero, PhysicalReg_EAX, true);
1008 move_imm_to_reg(OpndSize_32, LstrArithmeticException, PhysicalReg_ECX, true);
1009 unconditional_jump("common_throw", false);
1010 return 0;
1011 }
1012 /*!
1013 \brief helper function to handle no such method error
1014
1015 */
int common_errNoSuchMethod() {
1017 insertLabel("common_errNoSuchMethod", false);
1018 move_imm_to_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
1019 move_imm_to_reg(OpndSize_32, LstrNoSuchMethodError, PhysicalReg_ECX, true);
1020 unconditional_jump("common_throw", false);
1021 return 0;
1022 }
1023 int call_dvmFindCatchBlock();
1024
1025 #define P_GPR_1 PhysicalReg_ESI //self callee-saved
1026 #define P_GPR_2 PhysicalReg_EBX //exception callee-saved
1027 #define P_GPR_3 PhysicalReg_EAX //method that caused exception
1028 /*!
1029 \brief helper function common_exceptionThrown
1030
1031 */
int common_exceptionThrown() {
1033 insertLabel("common_exceptionThrown", false);
1034 typedef void (*vmHelper)(int);
1035 vmHelper funcPtr = dvmJitToExceptionThrown;
1036 move_imm_to_reg(OpndSize_32, (int)funcPtr, C_SCRATCH_1, isScratchPhysical);
1037 unconditional_jump_reg(C_SCRATCH_1, isScratchPhysical);
1038 return 0;
1039 }
1040 #undef P_GPR_1
1041 #undef P_GPR_2
1042 #undef P_GPR_3
1043
1044 /*!
1045 \brief helper function to throw an exception with message
1046
1047 INPUT: obj_reg(%eax), exceptionPtrReg(%ecx)
1048 SCRATCH: C_SCRATCH_1(%esi) & C_SCRATCH_2(%edx)
1049 OUTPUT: no
1050 */
int throw_exception_message(int exceptionPtrReg, int obj_reg, bool isPhysical,
1052 int startLR/*logical register index*/, bool startPhysical) {
1053 insertLabel("common_throw_message", false);
1054 scratchRegs[0] = PhysicalReg_ESI; scratchRegs[1] = PhysicalReg_EDX;
1055 scratchRegs[2] = PhysicalReg_Null; scratchRegs[3] = PhysicalReg_Null;
1056
1057 move_mem_to_reg(OpndSize_32, offObject_clazz, obj_reg, isPhysical, C_SCRATCH_1, isScratchPhysical);
1058 move_mem_to_reg(OpndSize_32, offClassObject_descriptor, C_SCRATCH_1, isScratchPhysical, C_SCRATCH_2, isScratchPhysical);
1059 load_effective_addr(-8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
1060 move_reg_to_mem(OpndSize_32, C_SCRATCH_2, isScratchPhysical, 4, PhysicalReg_ESP, true);
1061 move_reg_to_mem(OpndSize_32, exceptionPtrReg, true, 0, PhysicalReg_ESP, true);
1062 call_dvmThrowWithMessage();
1063 load_effective_addr(8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
1064 unconditional_jump("common_exceptionThrown", false);
1065 return 0;
1066 }
1067 /*!
1068 \brief helper function to throw an exception
1069
1070 scratch: C_SCRATCH_1(%edx)
1071 */
int throw_exception(int exceptionPtrReg, int immReg,
1073 int startLR/*logical register index*/, bool startPhysical) {
1074 insertLabel("common_throw", false);
1075 scratchRegs[0] = PhysicalReg_EDX; scratchRegs[1] = PhysicalReg_Null;
1076 scratchRegs[2] = PhysicalReg_Null; scratchRegs[3] = PhysicalReg_Null;
1077
1078 load_effective_addr(-8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
1079 move_reg_to_mem(OpndSize_32, immReg, true, 4, PhysicalReg_ESP, true);
1080 move_reg_to_mem(OpndSize_32, exceptionPtrReg, true, 0, PhysicalReg_ESP, true);
1081 call_dvmThrow();
1082 load_effective_addr(8, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
1083 unconditional_jump("common_exceptionThrown", false);
1084 return 0;
1085 }
1086
1087 //! lower bytecode GOTO
1088
1089 //!
int op_goto() {
1091 s2 tmp = traceCurrentBB->taken->id;
1092 int retval = common_goto(tmp);
1093 rPC += 1;
1094 return retval;
1095 }
1096 //! lower bytecode GOTO_16
1097
1098 //!
int op_goto_16() {
1100 s2 tmp = traceCurrentBB->taken->id;
1101 int retval = common_goto(tmp);
1102 rPC += 2;
1103 return retval;
1104 }
1105 //! lower bytecode GOTO_32
1106
1107 //!
int op_goto_32() {
1109 s2 tmp = traceCurrentBB->taken->id;
1110 int retval = common_goto((s4)tmp);
1111 rPC += 3;
1112 return retval;
1113 }
1114 #define P_GPR_1 PhysicalReg_EBX
1115 //! lower bytecode PACKED_SWITCH
1116
1117 //!
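/* For reference (added commentary), the Dex packed-switch payload parsed below is
 * laid out as:
 *   ushort ident          == kPackedSwitchSignature (0x0100)
 *   ushort size           number of entries
 *   int    first_key      lowest case value
 *   int    targets[size]  branch offsets in code units, relative to the switch opcode
 * which matches the switchData++ sequence in the function body.
 */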
int op_packed_switch() {
1119 u4 tmp = (u4)FETCH(1);
1120 tmp |= (u4)FETCH(2) << 16;
1121 u2 vA = INST_AA(inst);
1122
1123 #ifdef DEBUG_EACH_BYTECODE
1124 u2 tSize = 0;
1125 s4 firstKey = 0;
1126 s4* entries = NULL;
1127 #else
1128 u2* switchData = rPC + (s4)tmp;
1129 if (*switchData++ != kPackedSwitchSignature) {
1130 /* should have been caught by verifier */
dvmThrowInternalError("bad packed switch magic");
1133 return 0; //no_op
1134 }
1135 u2 tSize = *switchData++;
1136 assert(tSize > 0);
1137 s4 firstKey = *switchData++;
1138 firstKey |= (*switchData++) << 16;
1139 s4* entries = (s4*) switchData;
1140 assert(((u4)entries & 0x3) == 0);
1141 #endif
1142
1143 get_virtual_reg(vA, OpndSize_32, 1, false);
1144 //dvmNcgHandlePackedSwitch: testVal, size, first_key, targets
1145 load_effective_addr(-16, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
1146 move_imm_to_mem(OpndSize_32, tSize, 8, PhysicalReg_ESP, true);
1147 move_imm_to_mem(OpndSize_32, firstKey, 4, PhysicalReg_ESP, true);
1148
1149 /* "entries" is constant for JIT
1150 it is the 1st argument to dvmJitHandlePackedSwitch */
1151 move_imm_to_mem(OpndSize_32, (int)entries, 0, PhysicalReg_ESP, true);
1152 move_reg_to_mem(OpndSize_32, 1, false, 12, PhysicalReg_ESP, true);
1153
1154 //if value out of range, fall through (no_op)
1155 //return targets[testVal - first_key]
1156 scratchRegs[0] = PhysicalReg_SCRATCH_1;
1157 call_dvmJitHandlePackedSwitch();
1158 load_effective_addr(16, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
1159 //TODO: eax should be absolute address, call globalVREndOfBB, constVREndOfBB
1160 //conditional_jump_global_API(Condition_LE, "common_backwardBranch", false);
1161 constVREndOfBB();
1162 globalVREndOfBB(currentMethod); //update GG VRs
1163 //get rPC, %eax has the relative PC offset
1164 alu_binary_imm_reg(OpndSize_32, add_opc, (int)rPC, PhysicalReg_EAX, true);
1165 scratchRegs[0] = PhysicalReg_SCRATCH_2;
1166 #if defined(WITH_JIT_TUNING)
1167 /* Fall back to interpreter after resolving address of switch target.
* Indicate a kSwitchOverflow. Note: this is not a real "overflow", but it helps
* count the number of times we return from a switch.
1170 */
1171 move_imm_to_mem(OpndSize_32, kSwitchOverflow, 0, PhysicalReg_ESP, true);
1172 #endif
1173 jumpToInterpNoChain();
1174 rPC += 3;
1175 return 0;
1176 }
1177 #undef P_GPR_1
1178
1179 #define P_GPR_1 PhysicalReg_EBX
1180 //! lower bytecode SPARSE_SWITCH
1181
1182 //!
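/* For reference (added commentary), the Dex sparse-switch payload parsed below is
 * laid out as:
 *   ushort ident          == kSparseSwitchSignature (0x0200)
 *   ushort size           number of entries
 *   int    keys[size]     case values, sorted in ascending order
 *   int    targets[size]  branch offsets in code units, relative to the switch opcode
 * which matches the keys/entries pointers computed in the function body.
 */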
int op_sparse_switch() {
1184 u4 tmp = (u4)FETCH(1);
1185 tmp |= (u4)FETCH(2) << 16;
1186 u2 vA = INST_AA(inst);
1187 #ifdef DEBUG_EACH_BYTECODE
1188 u2 tSize = 0;
1189 const s4* keys = NULL;
1190 s4* entries = NULL;
1191 #else
1192 u2* switchData = rPC + (s4)tmp;
1193
1194 if (*switchData++ != kSparseSwitchSignature) {
1195 /* should have been caught by verifier */
dvmThrowInternalError("bad sparse switch magic");
1198 return 0; //no_op
1199 }
1200 u2 tSize = *switchData++;
1201 assert(tSize > 0);
1202 const s4* keys = (const s4*) switchData;
1203 assert(((u4)keys & 0x3) == 0);
1204 s4* entries = (s4*)switchData + tSize;
1205 assert(((u4)entries & 0x3) == 0);
1206 #endif
1207
1208 get_virtual_reg(vA, OpndSize_32, 1, false);
1209 //dvmNcgHandleSparseSwitch: keys, size, testVal
1210 load_effective_addr(-12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
1211 move_imm_to_mem(OpndSize_32, tSize, 4, PhysicalReg_ESP, true);
1212
1213 /* "keys" is constant for JIT
1214 it is the 1st argument to dvmJitHandleSparseSwitch */
1215 move_imm_to_mem(OpndSize_32, (int)keys, 0, PhysicalReg_ESP, true);
1216 move_reg_to_mem(OpndSize_32, 1, false, 8, PhysicalReg_ESP, true);
1217
1218 scratchRegs[0] = PhysicalReg_SCRATCH_1;
1219 //if testVal is in keys, return the corresponding target
1220 //otherwise, fall through (no_op)
1221 call_dvmJitHandleSparseSwitch();
1222 load_effective_addr(12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
1223 //TODO: eax should be absolute address, call globalVREndOfBB constVREndOfBB
1224 //conditional_jump_global_API(Condition_LE, "common_backwardBranch", false);
1225 constVREndOfBB();
1226 globalVREndOfBB(currentMethod);
1227 //get rPC, %eax has the relative PC offset
1228 alu_binary_imm_reg(OpndSize_32, add_opc, (int)rPC, PhysicalReg_EAX, true);
1229 scratchRegs[0] = PhysicalReg_SCRATCH_2;
1230 #if defined(WITH_JIT_TUNING)
1231 /* Fall back to interpreter after resolving address of switch target.
* Indicate a kSwitchOverflow. Note: this is not a real "overflow", but it helps
* count the number of times we return from a switch.
1234 */
1235 move_imm_to_mem(OpndSize_32, kSwitchOverflow, 0, PhysicalReg_ESP, true);
1236 #endif
1237 jumpToInterpNoChain();
1238 rPC += 3;
1239 return 0;
1240 }
1241
1242 #undef P_GPR_1
1243
1244 #define P_GPR_1 PhysicalReg_EBX
1245 //! lower bytecode IF_EQ
1246
1247 //!
int op_if_eq() {
1249 u2 vA = INST_A(inst);
1250 u2 vB = INST_B(inst);
1251 s2 tmp = (s2)FETCH(1);
1252 get_virtual_reg(vA, OpndSize_32, 1, false);
1253 compare_VR_reg(OpndSize_32, vB, 1, false);
1254 constVREndOfBB();
1255 globalVREndOfBB(currentMethod);
1256 common_if(tmp, Condition_NE, Condition_E);
1257 rPC += 2;
1258 return 0;
1259 }
1260 //! lower bytecode IF_NE
1261
1262 //!
int op_if_ne() {
1264 u2 vA = INST_A(inst);
1265 u2 vB = INST_B(inst);
1266 s2 tmp = (s2)FETCH(1);
1267 get_virtual_reg(vA, OpndSize_32, 1, false);
1268 compare_VR_reg(OpndSize_32, vB, 1, false);
1269 constVREndOfBB();
1270 globalVREndOfBB(currentMethod);
1271 common_if(tmp, Condition_E, Condition_NE);
1272 rPC += 2;
1273 return 0;
1274 }
1275 //! lower bytecode IF_LT
1276
1277 //!
int op_if_lt() {
1279 u2 vA = INST_A(inst);
1280 u2 vB = INST_B(inst);
1281 s2 tmp = (s2)FETCH(1);
1282 get_virtual_reg(vA, OpndSize_32, 1, false);
1283 compare_VR_reg(OpndSize_32, vB, 1, false);
1284 constVREndOfBB();
1285 globalVREndOfBB(currentMethod);
1286 common_if(tmp, Condition_GE, Condition_L);
1287 rPC += 2;
1288 return 0;
1289 }
1290 //! lower bytecode IF_GE
1291
1292 //!
int op_if_ge() {
1294 u2 vA = INST_A(inst);
1295 u2 vB = INST_B(inst);
1296 s2 tmp = (s2)FETCH(1);
1297 get_virtual_reg(vA, OpndSize_32, 1, false);
1298 compare_VR_reg(OpndSize_32, vB, 1, false);
1299 constVREndOfBB();
1300 globalVREndOfBB(currentMethod);
1301 common_if(tmp, Condition_L, Condition_GE);
1302 rPC += 2;
1303 return 0;
1304 }
1305 //! lower bytecode IF_GT
1306
1307 //!
int op_if_gt() {
1309 u2 vA = INST_A(inst);
1310 u2 vB = INST_B(inst);
1311 s2 tmp = (s2)FETCH(1);
1312 get_virtual_reg(vA, OpndSize_32, 1, false);
1313 compare_VR_reg(OpndSize_32, vB, 1, false);
1314 constVREndOfBB();
1315 globalVREndOfBB(currentMethod);
1316 common_if(tmp, Condition_LE, Condition_G);
1317 rPC += 2;
1318 return 0;
1319 }
1320 //! lower bytecode IF_LE
1321
1322 //!
int op_if_le() {
1324 u2 vA = INST_A(inst);
1325 u2 vB = INST_B(inst);
1326 s2 tmp = (s2)FETCH(1);
1327 get_virtual_reg(vA, OpndSize_32, 1, false);
1328 compare_VR_reg(OpndSize_32, vB, 1, false);
1329 constVREndOfBB();
1330 globalVREndOfBB(currentMethod);
1331 common_if(tmp, Condition_G, Condition_LE);
1332 rPC += 2;
1333 return 0;
1334 }
1335 #undef P_GPR_1
1336 //! lower bytecode IF_EQZ
1337
1338 //!
int op_if_eqz() {
1340 u2 vA = INST_AA(inst);
1341 s2 tmp = (s2)FETCH(1);
compare_imm_VR(OpndSize_32, 0, vA);
1344 constVREndOfBB();
1345 globalVREndOfBB(currentMethod);
1346 common_if(tmp, Condition_NE, Condition_E);
1347 rPC += 2;
1348 return 0;
1349 }
1350 //! lower bytecode IF_NEZ
1351
1352 //!
int op_if_nez() {
1354 u2 vA = INST_AA(inst);
1355 s2 tmp = (s2)FETCH(1);
compare_imm_VR(OpndSize_32, 0, vA);
1358 constVREndOfBB();
1359 globalVREndOfBB(currentMethod);
1360 common_if(tmp, Condition_E, Condition_NE);
1361 rPC += 2;
1362 return 0;
1363 }
1364 //! lower bytecode IF_LTZ
1365
1366 //!
int op_if_ltz() {
1368 u2 vA = INST_AA(inst);
1369 s2 tmp = (s2)FETCH(1);
compare_imm_VR(OpndSize_32, 0, vA);
1372 constVREndOfBB();
1373 globalVREndOfBB(currentMethod);
1374 common_if(tmp, Condition_GE, Condition_L);
1375 rPC += 2;
1376 return 0;
1377 }
1378 //! lower bytecode IF_GEZ
1379
1380 //!
int op_if_gez() {
1382 u2 vA = INST_AA(inst);
1383 s2 tmp = (s2)FETCH(1);
compare_imm_VR(OpndSize_32, 0, vA);
1386 constVREndOfBB();
1387 globalVREndOfBB(currentMethod);
1388 common_if(tmp, Condition_L, Condition_GE);
1389 rPC += 2;
1390 return 0;
1391 }
1392 //! lower bytecode IF_GTZ
1393
1394 //!
int op_if_gtz() {
1396 u2 vA = INST_AA(inst);
1397 s2 tmp = (s2)FETCH(1);
compare_imm_VR(OpndSize_32, 0, vA);
1400 constVREndOfBB();
1401 globalVREndOfBB(currentMethod);
1402 common_if(tmp, Condition_LE, Condition_G);
1403 rPC += 2;
1404 return 0;
1405 }
1406 //! lower bytecode IF_LEZ
1407
1408 //!
int op_if_lez() {
1410 u2 vA = INST_AA(inst);
1411 s2 tmp = (s2)FETCH(1);
compare_imm_VR(OpndSize_32, 0, vA);
1414 constVREndOfBB();
1415 globalVREndOfBB(currentMethod);
1416 common_if(tmp, Condition_G, Condition_LE);
1417 rPC += 2;
1418 return 0;
1419 }
1420
1421 #define P_GPR_1 PhysicalReg_ECX
1422 #define P_GPR_2 PhysicalReg_EBX
1423 /*!
1424 \brief helper function common_periodicChecks4 to check GC request
1425 BCOffset in %edx
1426 */
int common_periodicChecks4() {
1428 insertLabel("common_periodicChecks4", false);
1429 #if (!defined(ENABLE_TRACING))
1430 get_self_pointer(PhysicalReg_ECX, true);
1431 move_mem_to_reg(OpndSize_32, offsetof(Thread, suspendCount), PhysicalReg_ECX, true, PhysicalReg_EAX, true);
1432 compare_imm_reg(OpndSize_32, 0, PhysicalReg_EAX, true); //suspendCount
1433 conditional_jump(Condition_NE, "common_handleSuspend4", true); //called once
1434 x86_return();
1435
1436 insertLabel("common_handleSuspend4", true);
1437 push_reg_to_stack(OpndSize_32, PhysicalReg_ECX, true);
1438 call_dvmCheckSuspendPending();
1439 load_effective_addr(4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
1440 x86_return();
1441
1442 #else
1443 ///////////////////
1444 //get debuggerActive: 3 memory accesses, and $7
1445 move_mem_to_reg(OpndSize_32, offGlue_pSelfSuspendCount, PhysicalReg_Glue, true, P_GPR_1, true);
1446 move_mem_to_reg(OpndSize_32, offGlue_pIntoDebugger, PhysicalReg_Glue, true, P_GPR_2, true);
1447
1448 compare_imm_mem(OpndSize_32, 0, 0, P_GPR_1, true); //suspendCount
1449 conditional_jump(Condition_NE, "common_handleSuspend4_1", true); //called once
1450
1451 compare_imm_mem(OpndSize_32, 0, 0, P_GPR_2, true); //debugger active
1452
1453 conditional_jump(Condition_NE, "common_debuggerActive4", true);
1454
1455 //recover registers and return
1456 x86_return();
1457
1458 insertLabel("common_handleSuspend4_1", true);
1459 push_mem_to_stack(OpndSize_32, offGlue_self, PhysicalReg_Glue, true);
1460 call_dvmCheckSuspendPending();
1461 load_effective_addr(4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
1462 x86_return();
1463
1464 insertLabel("common_debuggerActive4", true);
1465 //%edx: offsetBC (at run time, get method->insns_bytecode, then calculate BCPointer)
1466 move_mem_to_reg(OpndSize_32, offGlue_method, PhysicalReg_Glue, true, P_GPR_1, true);
1467 move_mem_to_reg(OpndSize_32, offMethod_insns_bytecode, P_GPR_1, true, P_GPR_2, true);
1468 alu_binary_reg_reg(OpndSize_32, add_opc, P_GPR_2, true, PhysicalReg_EDX, true);
1469 move_imm_to_mem(OpndSize_32, 0, offGlue_entryPoint, PhysicalReg_Glue, true);
1470 unconditional_jump("common_gotoBail", false); //update glue->rPC with edx
1471 #endif
1472 return 0;
1473 }
1474 //input: %edx PC adjustment
1475 //CHECK: should %edx be saved before calling dvmCheckSuspendPending?
1476 /*!
1477 \brief helper function common_periodicChecks_entry to check GC request
1478
1479 */
int common_periodicChecks_entry() {
1481 insertLabel("common_periodicChecks_entry", false);
1482 scratchRegs[0] = PhysicalReg_ESI; scratchRegs[1] = PhysicalReg_EAX;
1483 scratchRegs[2] = PhysicalReg_Null; scratchRegs[3] = PhysicalReg_Null;
1484 get_suspendCount(P_GPR_1, true);
1485
1486 //get debuggerActive: 3 memory accesses, and $7
1487 #if 0 //defined(WITH_DEBUGGER)
1488 get_debuggerActive(P_GPR_2, true);
1489 #endif
1490
1491 compare_imm_reg(OpndSize_32, 0, P_GPR_1, true); //suspendCount
1492 conditional_jump(Condition_NE, "common_handleSuspend", true); //called once
1493
1494 #if 0 //defined(WITH_DEBUGGER)
1495 #ifdef NCG_DEBUG
1496 compare_imm_reg(OpndSize_32, 0, P_GPR_2, true); //debugger active
1497 conditional_jump(Condition_NE, "common_debuggerActive", true);
1498 #endif
1499 #endif
1500
1501 //recover registers and return
1502 x86_return();
1503 insertLabel("common_handleSuspend", true);
1504 get_self_pointer(P_GPR_1, true);
1505 load_effective_addr(-4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
1506 move_reg_to_mem(OpndSize_32, P_GPR_1, true, 0, PhysicalReg_ESP, true);
1507 call_dvmCheckSuspendPending();
1508 load_effective_addr(4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
1509 x86_return();
1510 #ifdef NCG_DEBUG
1511 insertLabel("common_debuggerActive", true);
1512 //adjust PC!!! use 0(%esp) TODO
1513 set_glue_entryPoint_imm(0); //kInterpEntryInstr);
1514 unconditional_jump("common_gotoBail", false);
1515 #endif
1516 return 0;
1517 }
1518 #undef P_GPR_1
1519 #undef P_GPR_2
1520 /*!
1521 \brief helper function common_gotoBail
1522 input: %edx: BCPointer %esi: Glue
1523 set %eax to 1 (switch interpreter = true), recover the callee-saved registers and return
1524 */
int common_gotoBail() {
1526 insertLabel("common_gotoBail", false);
1527 //scratchRegs[0] = PhysicalReg_EDX; scratchRegs[1] = PhysicalReg_ESI;
1528 //scratchRegs[2] = PhysicalReg_Null; scratchRegs[3] = PhysicalReg_Null;
1529 //save_pc_fp_to_glue();
1530 get_self_pointer(PhysicalReg_EAX, true);
1531 move_reg_to_mem(OpndSize_32, PhysicalReg_FP, true, offsetof(Thread, interpSave.curFrame), PhysicalReg_EAX, true);
1532 move_reg_to_mem(OpndSize_32, PhysicalReg_EDX, true, offsetof(Thread, interpSave.pc), PhysicalReg_EAX, true);
1533
1534 move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.bailPtr), PhysicalReg_EAX, true, PhysicalReg_ESP, true);
1535 move_reg_to_reg(OpndSize_32, PhysicalReg_ESP, true, PhysicalReg_EBP, true);
1536 load_effective_addr(FRAME_SIZE-4, PhysicalReg_EBP, true, PhysicalReg_EBP, true);
1537 move_imm_to_reg(OpndSize_32, 1, PhysicalReg_EAX, true); //return value
1538 move_mem_to_reg(OpndSize_32, -4, PhysicalReg_EBP, true, PhysicalReg_EDI, true);
1539 move_mem_to_reg(OpndSize_32, -8, PhysicalReg_EBP, true, PhysicalReg_ESI, true);
1540 move_mem_to_reg(OpndSize_32, -12, PhysicalReg_EBP, true, PhysicalReg_EBX, true);
1541 move_reg_to_reg(OpndSize_32, PhysicalReg_EBP, true, PhysicalReg_ESP, true);
1542 move_mem_to_reg(OpndSize_32, 0, PhysicalReg_ESP, true, PhysicalReg_EBP, true);
1543 load_effective_addr(4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
1544 x86_return();
1545 return 0;
1546 }
1547 /*!
1548 \brief helper function common_gotoBail_0
1549
1550 set %eax to 0, recover the callee-saved registers and return
1551 */
int common_gotoBail_0() {
1553 insertLabel("common_gotoBail_0", false);
1554
1555 get_self_pointer(PhysicalReg_EAX, true);
1556 move_reg_to_mem(OpndSize_32, PhysicalReg_FP, true, offsetof(Thread, interpSave.curFrame), PhysicalReg_EAX, true);
1557 move_reg_to_mem(OpndSize_32, PhysicalReg_EDX, true, offsetof(Thread, interpSave.pc), PhysicalReg_EAX, true);
1558
1559 /*
1560 movl offThread_bailPtr(%ecx),%esp # Restore "setjmp" esp
1561 movl %esp,%ebp
1562 addl $(FRAME_SIZE-4), %ebp # Restore %ebp at point of setjmp
1563 movl EDI_SPILL(%ebp),%edi
1564 movl ESI_SPILL(%ebp),%esi
1565 movl EBX_SPILL(%ebp),%ebx
1566 movl %ebp, %esp # strip frame
1567 pop %ebp # restore caller's ebp
1568 ret # return to dvmMterpStdRun's caller
1569 */
1570 move_mem_to_reg(OpndSize_32, offsetof(Thread, interpSave.bailPtr), PhysicalReg_EAX, true, PhysicalReg_ESP, true);
1571 move_reg_to_reg(OpndSize_32, PhysicalReg_ESP, true, PhysicalReg_EBP, true);
1572 load_effective_addr(FRAME_SIZE-4, PhysicalReg_EBP, true, PhysicalReg_EBP, true);
1573 move_imm_to_reg(OpndSize_32, 0, PhysicalReg_EAX, true); //return value
1574 move_mem_to_reg(OpndSize_32, -4, PhysicalReg_EBP, true, PhysicalReg_EDI, true);
1575 move_mem_to_reg(OpndSize_32, -8, PhysicalReg_EBP, true, PhysicalReg_ESI, true);
1576 move_mem_to_reg(OpndSize_32, -12, PhysicalReg_EBP, true, PhysicalReg_EBX, true);
1577 move_reg_to_reg(OpndSize_32, PhysicalReg_EBP, true, PhysicalReg_ESP, true);
1578 move_mem_to_reg(OpndSize_32, 0, PhysicalReg_ESP, true, PhysicalReg_EBP, true);
1579 load_effective_addr(4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
1580 x86_return();
1581 return 0;
1582 }
1583