1 /*
2 * This file was generated automatically by gen-mterp.py for 'x86-atom'.
3 *
4 * --> DO NOT EDIT <--
5 */
6
7 /* File: c/header.cpp */
8 /*
9 * Copyright (C) 2008 The Android Open Source Project
10 *
11 * Licensed under the Apache License, Version 2.0 (the "License");
12 * you may not use this file except in compliance with the License.
13 * You may obtain a copy of the License at
14 *
15 * http://www.apache.org/licenses/LICENSE-2.0
16 *
17 * Unless required by applicable law or agreed to in writing, software
18 * distributed under the License is distributed on an "AS IS" BASIS,
19 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20 * See the License for the specific language governing permissions and
21 * limitations under the License.
22 */
23
24 /* common includes */
25 #include "Dalvik.h"
26 #include "interp/InterpDefs.h"
27 #include "mterp/Mterp.h"
28 #include <math.h> // needed for fmod, fmodf
29 #include "mterp/common/FindInterface.h"
30
31 /*
32 * Configuration defines. These affect the C implementations, i.e. the
33 * portable interpreter(s) and C stubs.
34 *
35 * Some defines are controlled by the Makefile, e.g.:
36 * WITH_INSTR_CHECKS
37 * WITH_TRACKREF_CHECKS
38 * EASY_GDB
39 * NDEBUG
40 */
41
42 #ifdef WITH_INSTR_CHECKS /* instruction-level paranoia (slow!) */
43 # define CHECK_BRANCH_OFFSETS
44 # define CHECK_REGISTER_INDICES
45 #endif
46
47 /*
48 * Some architectures require 64-bit alignment for access to 64-bit data
49 * types. We can't just use pointers to copy 64-bit values out of our
50 * interpreted register set, because gcc may assume the pointer target is
51 * aligned and generate invalid code.
52 *
53 * There are two common approaches:
54 * (1) Use a union that defines a 32-bit pair and a 64-bit value.
55 * (2) Call memcpy().
56 *
57 * Depending upon what compiler you're using and what options are specified,
58 * one may be faster than the other. For example, the compiler might
59 * convert a memcpy() of 8 bytes into a series of instructions and omit
60 * the call. The union version could cause some strange side-effects,
61 * e.g. for a while ARM gcc thought it needed separate storage for each
62 * inlined instance, and generated instructions to zero out ~700 bytes of
63 * stack space at the top of the interpreter.
64 *
65 * The default is to use memcpy(). The current gcc for ARM seems to do
66 * better with the union.
67 */
68 #if defined(__ARM_EABI__)
69 # define NO_UNALIGN_64__UNION
70 #endif
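/*
 * Illustrative sketch only (not part of the generated interpreter): the
 * "unsafe" variant the comment above warns about would read a wide value as
 *
 *     s8 val = *(const s8*) &ptr[idx];   // assumes 8-byte alignment
 *
 * which gcc may compile into a doubleword load that traps or loads garbage
 * when the register array is only 4-byte aligned.  getLongFromArray() and
 * putLongToArray() below use the union or memcpy() form instead.
 */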
71
72
73 //#define LOG_INSTR /* verbose debugging */
74 /* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
75
76 /*
77 * Export another copy of the PC on every instruction; this is largely
78 * redundant with EXPORT_PC and the debugger code. This value can be
79 * compared against what we have stored on the stack with EXPORT_PC to
80 * help ensure that we aren't missing any export calls.
81 */
82 #if WITH_EXTRA_GC_CHECKS > 1
83 # define EXPORT_EXTRA_PC() (self->currentPc2 = pc)
84 #else
85 # define EXPORT_EXTRA_PC()
86 #endif
87
88 /*
89 * Adjust the program counter. "_offset" is a signed int, in 16-bit units.
90 *
91 * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
92 *
93 * We don't advance the program counter until we finish an instruction or
94 * branch, because we don't want to have to unroll the PC if there's an
95 * exception.
96 */
97 #ifdef CHECK_BRANCH_OFFSETS
98 # define ADJUST_PC(_offset) do { \
99 int myoff = _offset; /* deref only once */ \
100 if (pc + myoff < curMethod->insns || \
101 pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
102 { \
103 char* desc; \
104 desc = dexProtoCopyMethodDescriptor(&curMethod->prototype); \
105 ALOGE("Invalid branch %d at 0x%04x in %s.%s %s", \
106 myoff, (int) (pc - curMethod->insns), \
107 curMethod->clazz->descriptor, curMethod->name, desc); \
108 free(desc); \
109 dvmAbort(); \
110 } \
111 pc += myoff; \
112 EXPORT_EXTRA_PC(); \
113 } while (false)
114 #else
115 # define ADJUST_PC(_offset) do { \
116 pc += _offset; \
117 EXPORT_EXTRA_PC(); \
118 } while (false)
119 #endif
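/*
 * Usage note (illustrative): offsets are in 16-bit code units, so a handler
 * for a two-unit instruction ends with ADJUST_PC(2) (via FINISH(2)), while a
 * taken conditional branch passes the sign-extended branch offset, e.g.
 *
 *     ADJUST_PC((s2) FETCH(1));    // jump by +BBBB code units
 */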
120
121 /*
122 * If enabled, log instructions as we execute them.
123 */
124 #ifdef LOG_INSTR
125 # define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
126 # define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
127 # define ILOG(_level, ...) do { \
128 char debugStrBuf[128]; \
129 snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__); \
130 if (curMethod != NULL) \
131 ALOG(_level, LOG_TAG"i", "%-2d|%04x%s", \
132 self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
133 else \
134 ALOG(_level, LOG_TAG"i", "%-2d|####%s", \
135 self->threadId, debugStrBuf); \
136 } while(false)
137 void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
138 # define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
139 static const char kSpacing[] = " ";
140 #else
141 # define ILOGD(...) ((void)0)
142 # define ILOGV(...) ((void)0)
143 # define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
144 #endif
145
146 /* get a long from an array of u4 */
147 static inline s8 getLongFromArray(const u4* ptr, int idx)
148 {
149 #if defined(NO_UNALIGN_64__UNION)
150 union { s8 ll; u4 parts[2]; } conv;
151
152 ptr += idx;
153 conv.parts[0] = ptr[0];
154 conv.parts[1] = ptr[1];
155 return conv.ll;
156 #else
157 s8 val;
158 memcpy(&val, &ptr[idx], 8);
159 return val;
160 #endif
161 }
162
163 /* store a long into an array of u4 */
164 static inline void putLongToArray(u4* ptr, int idx, s8 val)
165 {
166 #if defined(NO_UNALIGN_64__UNION)
167 union { s8 ll; u4 parts[2]; } conv;
168
169 ptr += idx;
170 conv.ll = val;
171 ptr[0] = conv.parts[0];
172 ptr[1] = conv.parts[1];
173 #else
174 memcpy(&ptr[idx], &val, 8);
175 #endif
176 }
177
178 /* get a double from an array of u4 */
179 static inline double getDoubleFromArray(const u4* ptr, int idx)
180 {
181 #if defined(NO_UNALIGN_64__UNION)
182 union { double d; u4 parts[2]; } conv;
183
184 ptr += idx;
185 conv.parts[0] = ptr[0];
186 conv.parts[1] = ptr[1];
187 return conv.d;
188 #else
189 double dval;
190 memcpy(&dval, &ptr[idx], 8);
191 return dval;
192 #endif
193 }
194
195 /* store a double into an array of u4 */
196 static inline void putDoubleToArray(u4* ptr, int idx, double dval)
197 {
198 #if defined(NO_UNALIGN_64__UNION)
199 union { double d; u4 parts[2]; } conv;
200
201 ptr += idx;
202 conv.d = dval;
203 ptr[0] = conv.parts[0];
204 ptr[1] = conv.parts[1];
205 #else
206 memcpy(&ptr[idx], &dval, 8);
207 #endif
208 }
209
210 /*
211 * If enabled, validate the register number on every access. Otherwise,
212 * just do an array access.
213 *
214 * Assumes the existence of "u4* fp".
215 *
216 * "_idx" may be referenced more than once.
217 */
218 #ifdef CHECK_REGISTER_INDICES
219 # define GET_REGISTER(_idx) \
220 ( (_idx) < curMethod->registersSize ? \
221 (fp[(_idx)]) : (assert(!"bad reg"),1969) )
222 # define SET_REGISTER(_idx, _val) \
223 ( (_idx) < curMethod->registersSize ? \
224 (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
225 # define GET_REGISTER_AS_OBJECT(_idx) ((Object *)GET_REGISTER(_idx))
226 # define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
227 # define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
228 # define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
229 # define GET_REGISTER_WIDE(_idx) \
230 ( (_idx) < curMethod->registersSize-1 ? \
231 getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
232 # define SET_REGISTER_WIDE(_idx, _val) \
233 ( (_idx) < curMethod->registersSize-1 ? \
234 (void)putLongToArray(fp, (_idx), (_val)) : assert(!"bad reg") )
235 # define GET_REGISTER_FLOAT(_idx) \
236 ( (_idx) < curMethod->registersSize ? \
237 (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
238 # define SET_REGISTER_FLOAT(_idx, _val) \
239 ( (_idx) < curMethod->registersSize ? \
240 (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
241 # define GET_REGISTER_DOUBLE(_idx) \
242 ( (_idx) < curMethod->registersSize-1 ? \
243 getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
244 # define SET_REGISTER_DOUBLE(_idx, _val) \
245 ( (_idx) < curMethod->registersSize-1 ? \
246 (void)putDoubleToArray(fp, (_idx), (_val)) : assert(!"bad reg") )
247 #else
248 # define GET_REGISTER(_idx) (fp[(_idx)])
249 # define SET_REGISTER(_idx, _val) (fp[(_idx)] = (_val))
250 # define GET_REGISTER_AS_OBJECT(_idx) ((Object*) fp[(_idx)])
251 # define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
252 # define GET_REGISTER_INT(_idx) ((s4)GET_REGISTER(_idx))
253 # define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
254 # define GET_REGISTER_WIDE(_idx) getLongFromArray(fp, (_idx))
255 # define SET_REGISTER_WIDE(_idx, _val) putLongToArray(fp, (_idx), (_val))
256 # define GET_REGISTER_FLOAT(_idx) (*((float*) &fp[(_idx)]))
257 # define SET_REGISTER_FLOAT(_idx, _val) (*((float*) &fp[(_idx)]) = (_val))
258 # define GET_REGISTER_DOUBLE(_idx) getDoubleFromArray(fp, (_idx))
259 # define SET_REGISTER_DOUBLE(_idx, _val) putDoubleToArray(fp, (_idx), (_val))
260 #endif
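/*
 * Quick sketch of what the non-checking forms expand to (illustrative only;
 * the macros above are authoritative):
 *
 *     GET_REGISTER(2)           ->  fp[2]                       (u4)
 *     GET_REGISTER_WIDE(2)      ->  getLongFromArray(fp, 2)     (fp[2]/fp[3])
 *     SET_REGISTER_FLOAT(5, f)  ->  *((float*) &fp[5]) = f
 *
 * Wide accessors occupy a register pair, which is why the checked versions
 * compare against registersSize-1 rather than registersSize.
 */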
261
262 /*
263 * Get 16 bits from the specified offset of the program counter. We always
264 * want to load 16 bits at a time from the instruction stream -- it's more
265 * efficient than 8 and won't have the alignment problems that 32 might.
266 *
267 * Assumes existence of "const u2* pc".
268 */
269 #define FETCH(_offset) (pc[(_offset)])
270
271 /*
272 * Extract instruction byte from 16-bit fetch (_inst is a u2).
273 */
274 #define INST_INST(_inst) ((_inst) & 0xff)
275
276 /*
277 * Replace the opcode (used when handling breakpoints). _opcode is a u1.
278 */
279 #define INST_REPLACE_OP(_inst, _opcode) (((_inst) & 0xff00) | _opcode)
280
281 /*
282 * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
283 */
284 #define INST_A(_inst) (((_inst) >> 8) & 0x0f)
285 #define INST_B(_inst) ((_inst) >> 12)
286
287 /*
288 * Get the 8-bit "vAA" register index from the instruction word.
289 * (_inst is u2)
290 */
291 #define INST_AA(_inst) ((_inst) >> 8)
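/*
 * Worked example (illustrative, using standard Dalvik encodings): for the
 * 12x-format word 0x32b0 ("add-int/2addr v2, v3"),
 *
 *     INST_INST(0x32b0) == 0xb0    // opcode
 *     INST_A(0x32b0)    == 0x2     // vA
 *     INST_B(0x32b0)    == 0x3     // vB
 *
 * and for a 22x-format word such as 0x1202 ("move/from16 v18, vBBBB"),
 * INST_AA(0x1202) == 18 and the source register index comes from FETCH(1).
 */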
292
293 /*
294 * The current PC must be available to Throwable constructors, e.g.
295 * those created by the various exception throw routines, so that the
296 * exception stack trace can be generated correctly. If we don't do this,
297 * the offset within the current method won't be shown correctly. See the
298 * notes in Exception.c.
299 *
300 * This is also used to determine the address for precise GC.
301 *
302 * Assumes existence of "u4* fp" and "const u2* pc".
303 */
304 #define EXPORT_PC() (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)
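/*
 * Typical use (illustrative): a handler that can allocate or throw exports
 * the pc before calling out, as the array-access handlers below do:
 *
 *     EXPORT_PC();
 *     if (!checkForNull(obj))
 *         GOTO_exceptionThrown();
 */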
305
306 /*
307 * Check to see if "obj" is NULL. If so, throw an exception. Assumes the
308 * pc has already been exported to the stack.
309 *
310 * Perform additional checks on debug builds.
311 *
312 * Use this to check for NULL when the instruction handler calls into
313 * something that could throw an exception (so we have already called
314 * EXPORT_PC at the top).
315 */
316 static inline bool checkForNull(Object* obj)
317 {
318 if (obj == NULL) {
319 dvmThrowNullPointerException(NULL);
320 return false;
321 }
322 #ifdef WITH_EXTRA_OBJECT_VALIDATION
323 if (!dvmIsHeapAddress(obj)) {
324 ALOGE("Invalid object %p", obj);
325 dvmAbort();
326 }
327 #endif
328 #ifndef NDEBUG
329 if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
330 /* probable heap corruption */
331 ALOGE("Invalid object class %p (in %p)", obj->clazz, obj);
332 dvmAbort();
333 }
334 #endif
335 return true;
336 }
337
338 /*
339 * Check to see if "obj" is NULL. If so, export the PC into the stack
340 * frame and throw an exception.
341 *
342 * Perform additional checks on debug builds.
343 *
344 * Use this to check for NULL when the instruction handler doesn't do
345 * anything else that can throw an exception.
346 */
347 static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
348 {
349 if (obj == NULL) {
350 EXPORT_PC();
351 dvmThrowNullPointerException(NULL);
352 return false;
353 }
354 #ifdef WITH_EXTRA_OBJECT_VALIDATION
355 if (!dvmIsHeapAddress(obj)) {
356 ALOGE("Invalid object %p", obj);
357 dvmAbort();
358 }
359 #endif
360 #ifndef NDEBUG
361 if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
362 /* probable heap corruption */
363 ALOGE("Invalid object class %p (in %p)", obj->clazz, obj);
364 dvmAbort();
365 }
366 #endif
367 return true;
368 }
369
370 /* File: cstubs/stubdefs.cpp */
371 /*
372 * In the C mterp stubs, "goto" is a function call followed immediately
373 * by a return.
374 */
375
376 #define GOTO_TARGET_DECL(_target, ...) \
377 extern "C" void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);
378
379 /* (void)xxx to quiet unused variable compiler warnings. */
380 #define GOTO_TARGET(_target, ...) \
381 void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) { \
382 u2 ref, vsrc1, vsrc2, vdst; \
383 u2 inst = FETCH(0); \
384 const Method* methodToCall; \
385 StackSaveArea* debugSaveArea; \
386 (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
387 (void)methodToCall; (void)debugSaveArea;
388
389 #define GOTO_TARGET_END }
390
391 /*
392 * Redefine what used to be local variable accesses into Thread struct
393 * references. (These are undefined down in "footer.cpp".)
394 */
395 #define retval self->interpSave.retval
396 #define pc self->interpSave.pc
397 #define fp self->interpSave.curFrame
398 #define curMethod self->interpSave.method
399 #define methodClassDex self->interpSave.methodClassDex
400 #define debugTrackedRefStart self->interpSave.debugTrackedRefStart
401
402 /* ugh */
403 #define STUB_HACK(x) x
404 #if defined(WITH_JIT)
405 #define JIT_STUB_HACK(x) x
406 #else
407 #define JIT_STUB_HACK(x)
408 #endif
409
410 /*
411 * InterpSave's pc and fp must be valid when breaking out to a
412 * "Reportxxx" routine. Because the portable interpreter uses local
413 * variables for these, we must flush prior. Stubs, however, use
414 * the interpSave vars directly, so this is a nop for stubs.
415 */
416 #define PC_FP_TO_SELF()
417 #define PC_TO_SELF()
418
419 /*
420 * Opcode handler framing macros. Here, each opcode is a separate function
421 * that takes a "self" argument and returns void. We can't declare
422 * these "static" because they may be called from an assembly stub.
423 * (void)xxx to quiet unused variable compiler warnings.
424 */
425 #define HANDLE_OPCODE(_op) \
426 extern "C" void dvmMterp_##_op(Thread* self); \
427 void dvmMterp_##_op(Thread* self) { \
428 u4 ref; \
429 u2 vsrc1, vsrc2, vdst; \
430 u2 inst = FETCH(0); \
431 (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
432
433 #define OP_END }
434
435 /*
436 * Like the "portable" FINISH, but don't reload "inst", and return to caller
437 * when done. Further, debugger/profiler checks are handled
438 * before handler execution in mterp, so we don't do them here either.
439 */
440 #if defined(WITH_JIT)
441 #define FINISH(_offset) { \
442 ADJUST_PC(_offset); \
443 if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) { \
444 dvmCheckJit(pc, self); \
445 } \
446 return; \
447 }
448 #else
449 #define FINISH(_offset) { \
450 ADJUST_PC(_offset); \
451 return; \
452 }
453 #endif
454
455
456 /*
457 * The "goto label" statements turn into function calls followed by
458 * return statements. Some of the functions take arguments, which in the
459 * portable interpreter are handled by assigning values to globals.
460 */
461
462 #define GOTO_exceptionThrown() \
463 do { \
464 dvmMterp_exceptionThrown(self); \
465 return; \
466 } while(false)
467
468 #define GOTO_returnFromMethod() \
469 do { \
470 dvmMterp_returnFromMethod(self); \
471 return; \
472 } while(false)
473
474 #define GOTO_invoke(_target, _methodCallRange) \
475 do { \
476 dvmMterp_##_target(self, _methodCallRange); \
477 return; \
478 } while(false)
479
480 #define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst) \
481 do { \
482 dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall, \
483 _vsrc1, _vdst); \
484 return; \
485 } while(false)
486
487 /*
488 * As a special case, "goto bail" turns into a longjmp.
489 */
490 #define GOTO_bail() \
491 dvmMterpStdBail(self, false);
492
493 /*
494 * Periodically check for thread suspension.
495 *
496 * While we're at it, see if a debugger has attached or the profiler has
497 * started.
498 */
499 #define PERIODIC_CHECKS(_pcadj) { \
500 if (dvmCheckSuspendQuick(self)) { \
501 EXPORT_PC(); /* need for precise GC */ \
502 dvmCheckSuspendPending(self); \
503 } \
504 }
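/*
 * Note (illustrative): the conditional-branch macros below only invoke
 * PERIODIC_CHECKS when the branch offset is negative, i.e.
 *
 *     if (branchOffset < 0)
 *         PERIODIC_CHECKS(branchOffset);
 *
 * so suspension is polled on backward branches (loops) rather than on every
 * instruction.
 */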
505
506 /* File: c/opcommon.cpp */
507 /* forward declarations of goto targets */
508 GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
509 GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
510 GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
511 GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
512 GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
513 GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
514 GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
515 GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
516 GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
517 u2 count, u2 regs);
518 GOTO_TARGET_DECL(returnFromMethod);
519 GOTO_TARGET_DECL(exceptionThrown);
520
521 /*
522 * ===========================================================================
523 *
524 * What follows are opcode definitions shared between multiple opcodes with
525 * minor substitutions handled by the C pre-processor. These should probably
526 * use the mterp substitution mechanism instead, with the code here moved
527 * into common fragment files (like the asm "binop.S"), although it's hard
528 * to give up the C preprocessor in favor of the much simpler text subst.
529 *
530 * ===========================================================================
531 */
532
533 #define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \
534 HANDLE_OPCODE(_opcode /*vA, vB*/) \
535 vdst = INST_A(inst); \
536 vsrc1 = INST_B(inst); \
537 ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
538 SET_REGISTER##_totype(vdst, \
539 GET_REGISTER##_fromtype(vsrc1)); \
540 FINISH(1);
541
542 #define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \
543 _tovtype, _tortype) \
544 HANDLE_OPCODE(_opcode /*vA, vB*/) \
545 { \
546 /* spec defines specific handling for +/- inf and NaN values */ \
547 _fromvtype val; \
548 _tovtype intMin, intMax, result; \
549 vdst = INST_A(inst); \
550 vsrc1 = INST_B(inst); \
551 ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
552 val = GET_REGISTER##_fromrtype(vsrc1); \
553 intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 -1); \
554 intMax = ~intMin; \
555 result = (_tovtype) val; \
556 if (val >= intMax) /* +inf */ \
557 result = intMax; \
558 else if (val <= intMin) /* -inf */ \
559 result = intMin; \
560 else if (val != val) /* NaN */ \
561 result = 0; \
562 else \
563 result = (_tovtype) val; \
564 SET_REGISTER##_tortype(vdst, result); \
565 } \
566 FINISH(1);
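/*
 * Worked example (illustrative): for float-to-int, _tovtype is s4, so
 * intMin == (s4)0x80000000 and intMax == 0x7fffffff.  The explicit range and
 * NaN tests give the Dalvik-specified results:
 *
 *     1e10f      -> 0x7fffffff   (clamped to intMax)
 *     -INFINITY  -> 0x80000000   (clamped to intMin)
 *     NAN        -> 0            (NaN converts to zero)
 */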
567
568 #define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \
569 HANDLE_OPCODE(_opcode /*vA, vB*/) \
570 vdst = INST_A(inst); \
571 vsrc1 = INST_B(inst); \
572 ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \
573 SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \
574 FINISH(1);
575
576 /* NOTE: the comparison result is always a signed 4-byte integer */
577 #define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \
578 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
579 { \
580 int result; \
581 u2 regs; \
582 _varType val1, val2; \
583 vdst = INST_AA(inst); \
584 regs = FETCH(1); \
585 vsrc1 = regs & 0xff; \
586 vsrc2 = regs >> 8; \
587 ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
588 val1 = GET_REGISTER##_type(vsrc1); \
589 val2 = GET_REGISTER##_type(vsrc2); \
590 if (val1 == val2) \
591 result = 0; \
592 else if (val1 < val2) \
593 result = -1; \
594 else if (val1 > val2) \
595 result = 1; \
596 else \
597 result = (_nanVal); \
598 ILOGV("+ result=%d", result); \
599 SET_REGISTER(vdst, result); \
600 } \
601 FINISH(2);
602
603 #define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \
604 HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \
605 vsrc1 = INST_A(inst); \
606 vsrc2 = INST_B(inst); \
607 if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \
608 int branchOffset = (s2)FETCH(1); /* sign-extended */ \
609 ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \
610 branchOffset); \
611 ILOGV("> branch taken"); \
612 if (branchOffset < 0) \
613 PERIODIC_CHECKS(branchOffset); \
614 FINISH(branchOffset); \
615 } else { \
616 ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \
617 FINISH(2); \
618 }
619
620 #define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \
621 HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \
622 vsrc1 = INST_AA(inst); \
623 if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \
624 int branchOffset = (s2)FETCH(1); /* sign-extended */ \
625 ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \
626 ILOGV("> branch taken"); \
627 if (branchOffset < 0) \
628 PERIODIC_CHECKS(branchOffset); \
629 FINISH(branchOffset); \
630 } else { \
631 ILOGV("|if-%s v%d,-", (_opname), vsrc1); \
632 FINISH(2); \
633 }
634
635 #define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \
636 HANDLE_OPCODE(_opcode /*vA, vB*/) \
637 vdst = INST_A(inst); \
638 vsrc1 = INST_B(inst); \
639 ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
640 SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \
641 FINISH(1);
642
643 #define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \
644 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
645 { \
646 u2 srcRegs; \
647 vdst = INST_AA(inst); \
648 srcRegs = FETCH(1); \
649 vsrc1 = srcRegs & 0xff; \
650 vsrc2 = srcRegs >> 8; \
651 ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
652 if (_chkdiv != 0) { \
653 s4 firstVal, secondVal, result; \
654 firstVal = GET_REGISTER(vsrc1); \
655 secondVal = GET_REGISTER(vsrc2); \
656 if (secondVal == 0) { \
657 EXPORT_PC(); \
658 dvmThrowArithmeticException("divide by zero"); \
659 GOTO_exceptionThrown(); \
660 } \
661 if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
662 if (_chkdiv == 1) \
663 result = firstVal; /* division */ \
664 else \
665 result = 0; /* remainder */ \
666 } else { \
667 result = firstVal _op secondVal; \
668 } \
669 SET_REGISTER(vdst, result); \
670 } else { \
671 /* non-div/rem case */ \
672 SET_REGISTER(vdst, \
673 (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \
674 } \
675 } \
676 FINISH(2);
677
678 #define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \
679 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
680 { \
681 u2 srcRegs; \
682 vdst = INST_AA(inst); \
683 srcRegs = FETCH(1); \
684 vsrc1 = srcRegs & 0xff; \
685 vsrc2 = srcRegs >> 8; \
686 ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
687 SET_REGISTER(vdst, \
688 _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \
689 } \
690 FINISH(2);
691
692 #define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \
693 HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \
694 vdst = INST_A(inst); \
695 vsrc1 = INST_B(inst); \
696 vsrc2 = FETCH(1); \
697 ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \
698 (_opname), vdst, vsrc1, vsrc2); \
699 if (_chkdiv != 0) { \
700 s4 firstVal, result; \
701 firstVal = GET_REGISTER(vsrc1); \
702 if ((s2) vsrc2 == 0) { \
703 EXPORT_PC(); \
704 dvmThrowArithmeticException("divide by zero"); \
705 GOTO_exceptionThrown(); \
706 } \
707 if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \
708 /* won't generate /lit16 instr for this; check anyway */ \
709 if (_chkdiv == 1) \
710 result = firstVal; /* division */ \
711 else \
712 result = 0; /* remainder */ \
713 } else { \
714 result = firstVal _op (s2) vsrc2; \
715 } \
716 SET_REGISTER(vdst, result); \
717 } else { \
718 /* non-div/rem case */ \
719 SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \
720 } \
721 FINISH(2);
722
723 #define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \
724 HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
725 { \
726 u2 litInfo; \
727 vdst = INST_AA(inst); \
728 litInfo = FETCH(1); \
729 vsrc1 = litInfo & 0xff; \
730 vsrc2 = litInfo >> 8; /* constant */ \
731 ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
732 (_opname), vdst, vsrc1, vsrc2); \
733 if (_chkdiv != 0) { \
734 s4 firstVal, result; \
735 firstVal = GET_REGISTER(vsrc1); \
736 if ((s1) vsrc2 == 0) { \
737 EXPORT_PC(); \
738 dvmThrowArithmeticException("divide by zero"); \
739 GOTO_exceptionThrown(); \
740 } \
741 if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \
742 if (_chkdiv == 1) \
743 result = firstVal; /* division */ \
744 else \
745 result = 0; /* remainder */ \
746 } else { \
747 result = firstVal _op ((s1) vsrc2); \
748 } \
749 SET_REGISTER(vdst, result); \
750 } else { \
751 SET_REGISTER(vdst, \
752 (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \
753 } \
754 } \
755 FINISH(2);
756
757 #define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \
758 HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
759 { \
760 u2 litInfo; \
761 vdst = INST_AA(inst); \
762 litInfo = FETCH(1); \
763 vsrc1 = litInfo & 0xff; \
764 vsrc2 = litInfo >> 8; /* constant */ \
765 ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
766 (_opname), vdst, vsrc1, vsrc2); \
767 SET_REGISTER(vdst, \
768 _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \
769 } \
770 FINISH(2);
771
772 #define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \
773 HANDLE_OPCODE(_opcode /*vA, vB*/) \
774 vdst = INST_A(inst); \
775 vsrc1 = INST_B(inst); \
776 ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
777 if (_chkdiv != 0) { \
778 s4 firstVal, secondVal, result; \
779 firstVal = GET_REGISTER(vdst); \
780 secondVal = GET_REGISTER(vsrc1); \
781 if (secondVal == 0) { \
782 EXPORT_PC(); \
783 dvmThrowArithmeticException("divide by zero"); \
784 GOTO_exceptionThrown(); \
785 } \
786 if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
787 if (_chkdiv == 1) \
788 result = firstVal; /* division */ \
789 else \
790 result = 0; /* remainder */ \
791 } else { \
792 result = firstVal _op secondVal; \
793 } \
794 SET_REGISTER(vdst, result); \
795 } else { \
796 SET_REGISTER(vdst, \
797 (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \
798 } \
799 FINISH(1);
800
801 #define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \
802 HANDLE_OPCODE(_opcode /*vA, vB*/) \
803 vdst = INST_A(inst); \
804 vsrc1 = INST_B(inst); \
805 ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
806 SET_REGISTER(vdst, \
807 _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \
808 FINISH(1);
809
810 #define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \
811 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
812 { \
813 u2 srcRegs; \
814 vdst = INST_AA(inst); \
815 srcRegs = FETCH(1); \
816 vsrc1 = srcRegs & 0xff; \
817 vsrc2 = srcRegs >> 8; \
818 ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
819 if (_chkdiv != 0) { \
820 s8 firstVal, secondVal, result; \
821 firstVal = GET_REGISTER_WIDE(vsrc1); \
822 secondVal = GET_REGISTER_WIDE(vsrc2); \
823 if (secondVal == 0LL) { \
824 EXPORT_PC(); \
825 dvmThrowArithmeticException("divide by zero"); \
826 GOTO_exceptionThrown(); \
827 } \
828 if ((u8)firstVal == 0x8000000000000000ULL && \
829 secondVal == -1LL) \
830 { \
831 if (_chkdiv == 1) \
832 result = firstVal; /* division */ \
833 else \
834 result = 0; /* remainder */ \
835 } else { \
836 result = firstVal _op secondVal; \
837 } \
838 SET_REGISTER_WIDE(vdst, result); \
839 } else { \
840 SET_REGISTER_WIDE(vdst, \
841 (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
842 } \
843 } \
844 FINISH(2);
845
846 #define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \
847 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
848 { \
849 u2 srcRegs; \
850 vdst = INST_AA(inst); \
851 srcRegs = FETCH(1); \
852 vsrc1 = srcRegs & 0xff; \
853 vsrc2 = srcRegs >> 8; \
854 ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
855 SET_REGISTER_WIDE(vdst, \
856 _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
857 } \
858 FINISH(2);
859
860 #define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \
861 HANDLE_OPCODE(_opcode /*vA, vB*/) \
862 vdst = INST_A(inst); \
863 vsrc1 = INST_B(inst); \
864 ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
865 if (_chkdiv != 0) { \
866 s8 firstVal, secondVal, result; \
867 firstVal = GET_REGISTER_WIDE(vdst); \
868 secondVal = GET_REGISTER_WIDE(vsrc1); \
869 if (secondVal == 0LL) { \
870 EXPORT_PC(); \
871 dvmThrowArithmeticException("divide by zero"); \
872 GOTO_exceptionThrown(); \
873 } \
874 if ((u8)firstVal == 0x8000000000000000ULL && \
875 secondVal == -1LL) \
876 { \
877 if (_chkdiv == 1) \
878 result = firstVal; /* division */ \
879 else \
880 result = 0; /* remainder */ \
881 } else { \
882 result = firstVal _op secondVal; \
883 } \
884 SET_REGISTER_WIDE(vdst, result); \
885 } else { \
886 SET_REGISTER_WIDE(vdst, \
887 (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\
888 } \
889 FINISH(1);
890
891 #define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \
892 HANDLE_OPCODE(_opcode /*vA, vB*/) \
893 vdst = INST_A(inst); \
894 vsrc1 = INST_B(inst); \
895 ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
896 SET_REGISTER_WIDE(vdst, \
897 _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
898 FINISH(1);
899
900 #define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \
901 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
902 { \
903 u2 srcRegs; \
904 vdst = INST_AA(inst); \
905 srcRegs = FETCH(1); \
906 vsrc1 = srcRegs & 0xff; \
907 vsrc2 = srcRegs >> 8; \
908 ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
909 SET_REGISTER_FLOAT(vdst, \
910 GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \
911 } \
912 FINISH(2);
913
914 #define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \
915 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
916 { \
917 u2 srcRegs; \
918 vdst = INST_AA(inst); \
919 srcRegs = FETCH(1); \
920 vsrc1 = srcRegs & 0xff; \
921 vsrc2 = srcRegs >> 8; \
922 ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
923 SET_REGISTER_DOUBLE(vdst, \
924 GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \
925 } \
926 FINISH(2);
927
928 #define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \
929 HANDLE_OPCODE(_opcode /*vA, vB*/) \
930 vdst = INST_A(inst); \
931 vsrc1 = INST_B(inst); \
932 ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1); \
933 SET_REGISTER_FLOAT(vdst, \
934 GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \
935 FINISH(1);
936
937 #define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \
938 HANDLE_OPCODE(_opcode /*vA, vB*/) \
939 vdst = INST_A(inst); \
940 vsrc1 = INST_B(inst); \
941 ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \
942 SET_REGISTER_DOUBLE(vdst, \
943 GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \
944 FINISH(1);
945
946 #define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \
947 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
948 { \
949 ArrayObject* arrayObj; \
950 u2 arrayInfo; \
951 EXPORT_PC(); \
952 vdst = INST_AA(inst); \
953 arrayInfo = FETCH(1); \
954 vsrc1 = arrayInfo & 0xff; /* array ptr */ \
955 vsrc2 = arrayInfo >> 8; /* index */ \
956 ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
957 arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
958 if (!checkForNull((Object*) arrayObj)) \
959 GOTO_exceptionThrown(); \
960 if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
961 dvmThrowArrayIndexOutOfBoundsException( \
962 arrayObj->length, GET_REGISTER(vsrc2)); \
963 GOTO_exceptionThrown(); \
964 } \
965 SET_REGISTER##_regsize(vdst, \
966 ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)]); \
967 ILOGV("+ AGET[%d]=%#x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \
968 } \
969 FINISH(2);
970
971 #define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \
972 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
973 { \
974 ArrayObject* arrayObj; \
975 u2 arrayInfo; \
976 EXPORT_PC(); \
977 vdst = INST_AA(inst); /* AA: source value */ \
978 arrayInfo = FETCH(1); \
979 vsrc1 = arrayInfo & 0xff; /* BB: array ptr */ \
980 vsrc2 = arrayInfo >> 8; /* CC: index */ \
981 ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
982 arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
983 if (!checkForNull((Object*) arrayObj)) \
984 GOTO_exceptionThrown(); \
985 if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
986 dvmThrowArrayIndexOutOfBoundsException( \
987 arrayObj->length, GET_REGISTER(vsrc2)); \
988 GOTO_exceptionThrown(); \
989 } \
990 ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
991 ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)] = \
992 GET_REGISTER##_regsize(vdst); \
993 } \
994 FINISH(2);
995
996 /*
997 * It's possible to get a bad value out of a field with sub-32-bit stores
998 * because the -quick versions always operate on 32 bits. Consider:
999 * short foo = -1 (sets a 32-bit register to 0xffffffff)
1000 * iput-quick foo (writes all 32 bits to the field)
1001 * short bar = 1 (sets a 32-bit register to 0x00000001)
1002 * iput-short (writes the low 16 bits to the field)
1003 * iget-quick foo (reads all 32 bits from the field, yielding 0xffff0001)
1004 * This can only happen when optimized and non-optimized code has interleaved
1005 * access to the same field. This is unlikely but possible.
1006 *
1007 * The easiest way to fix this is to always read/write 32 bits at a time. On
1008 * a device with a 16-bit data bus this is sub-optimal. (The alternative
1009 * approach is to have sub-int versions of iget-quick, but now we're wasting
1010 * Dalvik instruction space and making it less likely that handler code will
1011 * already be in the CPU i-cache.)
1012 */
1013 #define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \
1014 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
1015 { \
1016 InstField* ifield; \
1017 Object* obj; \
1018 EXPORT_PC(); \
1019 vdst = INST_A(inst); \
1020 vsrc1 = INST_B(inst); /* object ptr */ \
1021 ref = FETCH(1); /* field ref */ \
1022 ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
1023 obj = (Object*) GET_REGISTER(vsrc1); \
1024 if (!checkForNull(obj)) \
1025 GOTO_exceptionThrown(); \
1026 ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
1027 if (ifield == NULL) { \
1028 ifield = dvmResolveInstField(curMethod->clazz, ref); \
1029 if (ifield == NULL) \
1030 GOTO_exceptionThrown(); \
1031 } \
1032 SET_REGISTER##_regsize(vdst, \
1033 dvmGetField##_ftype(obj, ifield->byteOffset)); \
1034 ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \
1035 (u8) GET_REGISTER##_regsize(vdst)); \
1036 } \
1037 FINISH(2);
1038
1039 #define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \
1040 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
1041 { \
1042 Object* obj; \
1043 vdst = INST_A(inst); \
1044 vsrc1 = INST_B(inst); /* object ptr */ \
1045 ref = FETCH(1); /* field offset */ \
1046 ILOGV("|iget%s-quick v%d,v%d,field@+%u", \
1047 (_opname), vdst, vsrc1, ref); \
1048 obj = (Object*) GET_REGISTER(vsrc1); \
1049 if (!checkForNullExportPC(obj, fp, pc)) \
1050 GOTO_exceptionThrown(); \
1051 SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \
1052 ILOGV("+ IGETQ %d=0x%08llx", ref, \
1053 (u8) GET_REGISTER##_regsize(vdst)); \
1054 } \
1055 FINISH(2);
1056
1057 #define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \
1058 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
1059 { \
1060 InstField* ifield; \
1061 Object* obj; \
1062 EXPORT_PC(); \
1063 vdst = INST_A(inst); \
1064 vsrc1 = INST_B(inst); /* object ptr */ \
1065 ref = FETCH(1); /* field ref */ \
1066 ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
1067 obj = (Object*) GET_REGISTER(vsrc1); \
1068 if (!checkForNull(obj)) \
1069 GOTO_exceptionThrown(); \
1070 ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
1071 if (ifield == NULL) { \
1072 ifield = dvmResolveInstField(curMethod->clazz, ref); \
1073 if (ifield == NULL) \
1074 GOTO_exceptionThrown(); \
1075 } \
1076 dvmSetField##_ftype(obj, ifield->byteOffset, \
1077 GET_REGISTER##_regsize(vdst)); \
1078 ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \
1079 (u8) GET_REGISTER##_regsize(vdst)); \
1080 } \
1081 FINISH(2);
1082
1083 #define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \
1084 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
1085 { \
1086 Object* obj; \
1087 vdst = INST_A(inst); \
1088 vsrc1 = INST_B(inst); /* object ptr */ \
1089 ref = FETCH(1); /* field offset */ \
1090 ILOGV("|iput%s-quick v%d,v%d,field@0x%04x", \
1091 (_opname), vdst, vsrc1, ref); \
1092 obj = (Object*) GET_REGISTER(vsrc1); \
1093 if (!checkForNullExportPC(obj, fp, pc)) \
1094 GOTO_exceptionThrown(); \
1095 dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \
1096 ILOGV("+ IPUTQ %d=0x%08llx", ref, \
1097 (u8) GET_REGISTER##_regsize(vdst)); \
1098 } \
1099 FINISH(2);
1100
1101 /*
1102 * The JIT needs dvmDexGetResolvedField() to return non-null.
1103 * Because the portable interpreter is not involved with the JIT
1104 * and trace building, we only need the extra check here when this
1105 * code is massaged into a stub called from an assembly interpreter.
1106 * This is controlled by the JIT_STUB_HACK macro.
1107 */
1108
1109 #define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \
1110 HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
1111 { \
1112 StaticField* sfield; \
1113 vdst = INST_AA(inst); \
1114 ref = FETCH(1); /* field ref */ \
1115 ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
1116 sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
1117 if (sfield == NULL) { \
1118 EXPORT_PC(); \
1119 sfield = dvmResolveStaticField(curMethod->clazz, ref); \
1120 if (sfield == NULL) \
1121 GOTO_exceptionThrown(); \
1122 if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
1123 JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
1124 } \
1125 } \
1126 SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
1127 ILOGV("+ SGET '%s'=0x%08llx", \
1128 sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
1129 } \
1130 FINISH(2);
1131
1132 #define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \
1133 HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
1134 { \
1135 StaticField* sfield; \
1136 vdst = INST_AA(inst); \
1137 ref = FETCH(1); /* field ref */ \
1138 ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
1139 sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
1140 if (sfield == NULL) { \
1141 EXPORT_PC(); \
1142 sfield = dvmResolveStaticField(curMethod->clazz, ref); \
1143 if (sfield == NULL) \
1144 GOTO_exceptionThrown(); \
1145 if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
1146 JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
1147 } \
1148 } \
1149 dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
1150 ILOGV("+ SPUT '%s'=0x%08llx", \
1151 sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
1152 } \
1153 FINISH(2);
1154
1155 /* File: c/OP_IGET_VOLATILE.cpp */
1156 HANDLE_IGET_X(OP_IGET_VOLATILE, "-volatile", IntVolatile, )
1157 OP_END
1158
1159 /* File: c/OP_IPUT_VOLATILE.cpp */
1160 HANDLE_IPUT_X(OP_IPUT_VOLATILE, "-volatile", IntVolatile, )
1161 OP_END
1162
1163 /* File: c/OP_SGET_VOLATILE.cpp */
1164 HANDLE_SGET_X(OP_SGET_VOLATILE, "-volatile", IntVolatile, )
1165 OP_END
1166
1167 /* File: c/OP_SPUT_VOLATILE.cpp */
1168 HANDLE_SPUT_X(OP_SPUT_VOLATILE, "-volatile", IntVolatile, )
1169 OP_END
1170
1171 /* File: c/OP_IGET_OBJECT_VOLATILE.cpp */
1172 HANDLE_IGET_X(OP_IGET_OBJECT_VOLATILE, "-object-volatile", ObjectVolatile, _AS_OBJECT)
1173 OP_END
1174
1175 /* File: c/OP_IGET_WIDE_VOLATILE.cpp */
1176 HANDLE_IGET_X(OP_IGET_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE)
1177 OP_END
1178
1179 /* File: c/OP_IPUT_WIDE_VOLATILE.cpp */
1180 HANDLE_IPUT_X(OP_IPUT_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE)
1181 OP_END
1182
1183 /* File: c/OP_SGET_WIDE_VOLATILE.cpp */
1184 HANDLE_SGET_X(OP_SGET_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE)
1185 OP_END
1186
1187 /* File: c/OP_SPUT_WIDE_VOLATILE.cpp */
1188 HANDLE_SPUT_X(OP_SPUT_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE)
1189 OP_END
1190
1191 /* File: c/OP_BREAKPOINT.cpp */
1192 HANDLE_OPCODE(OP_BREAKPOINT)
1193 {
1194 /*
1195 * Restart this instruction with the original opcode. We do
1196 * this by simply jumping to the handler.
1197 *
1198 * It's probably not necessary to update "inst", but we do it
1199 * for the sake of anything that needs to do disambiguation in a
1200 * common handler with INST_INST.
1201 *
1202 * The breakpoint itself is handled over in updateDebugger(),
1203 * because we need to detect other events (method entry, single
1204 * step) and report them in the same event packet, and we're not
1205 * yet handling those through breakpoint instructions. By the
1206 * time we get here, the breakpoint has already been handled and
1207 * the thread resumed.
1208 */
1209 u1 originalOpcode = dvmGetOriginalOpcode(pc);
1210 ALOGV("+++ break 0x%02x (0x%04x -> 0x%04x)", originalOpcode, inst,
1211 INST_REPLACE_OP(inst, originalOpcode));
1212 inst = INST_REPLACE_OP(inst, originalOpcode);
1213 FINISH_BKPT(originalOpcode);
1214 }
1215 OP_END
1216
1217 /* File: c/OP_EXECUTE_INLINE_RANGE.cpp */
1218 HANDLE_OPCODE(OP_EXECUTE_INLINE_RANGE /*{vCCCC..v(CCCC+AA-1)}, inline@BBBB*/)
1219 {
1220 u4 arg0, arg1, arg2, arg3;
1221 arg0 = arg1 = arg2 = arg3 = 0; /* placate gcc */
1222
1223 EXPORT_PC();
1224
1225 vsrc1 = INST_AA(inst); /* #of args */
1226 ref = FETCH(1); /* inline call "ref" */
1227 vdst = FETCH(2); /* range base */
1228 ILOGV("|execute-inline-range args=%d @%d {regs=v%d-v%d}",
1229 vsrc1, ref, vdst, vdst+vsrc1-1);
1230
1231 assert((vdst >> 16) == 0); // 16-bit type -or- high 16 bits clear
1232 assert(vsrc1 <= 4);
1233
1234 switch (vsrc1) {
1235 case 4:
1236 arg3 = GET_REGISTER(vdst+3);
1237 /* fall through */
1238 case 3:
1239 arg2 = GET_REGISTER(vdst+2);
1240 /* fall through */
1241 case 2:
1242 arg1 = GET_REGISTER(vdst+1);
1243 /* fall through */
1244 case 1:
1245 arg0 = GET_REGISTER(vdst+0);
1246 /* fall through */
1247 default: // case 0
1248 ;
1249 }
1250
1251 if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
1252 if (!dvmPerformInlineOp4Dbg(arg0, arg1, arg2, arg3, &retval, ref))
1253 GOTO_exceptionThrown();
1254 } else {
1255 if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
1256 GOTO_exceptionThrown();
1257 }
1258 }
1259 FINISH(3);
1260 OP_END
1261
1262 /* File: c/OP_INVOKE_OBJECT_INIT_RANGE.cpp */
1263 HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
1264 {
1265 Object* obj;
1266
1267 vsrc1 = FETCH(2); /* reg number of "this" pointer */
1268 obj = GET_REGISTER_AS_OBJECT(vsrc1);
1269
1270 if (!checkForNullExportPC(obj, fp, pc))
1271 GOTO_exceptionThrown();
1272
1273 /*
1274 * The object should be marked "finalizable" when Object.<init>
1275 * completes normally. We're going to assume it does complete
1276 * (by virtue of being nothing but a return-void) and set it now.
1277 */
1278 if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
1279 EXPORT_PC();
1280 dvmSetFinalizable(obj);
1281 if (dvmGetException(self))
1282 GOTO_exceptionThrown();
1283 }
1284
1285 if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
1286 /* behave like OP_INVOKE_DIRECT_RANGE */
1287 GOTO_invoke(invokeDirect, true);
1288 }
1289 FINISH(3);
1290 }
1291 OP_END
1292
1293 /* File: c/OP_RETURN_VOID_BARRIER.cpp */
1294 HANDLE_OPCODE(OP_RETURN_VOID_BARRIER /**/)
1295 ILOGV("|return-void");
1296 #ifndef NDEBUG
1297 retval.j = 0xababababULL; /* placate valgrind */
1298 #endif
1299 ANDROID_MEMBAR_STORE();
1300 GOTO_returnFromMethod();
1301 OP_END
1302
1303 /* File: c/OP_IPUT_OBJECT_VOLATILE.cpp */
1304 HANDLE_IPUT_X(OP_IPUT_OBJECT_VOLATILE, "-object-volatile", ObjectVolatile, _AS_OBJECT)
1305 OP_END
1306
1307 /* File: c/OP_SGET_OBJECT_VOLATILE.cpp */
1308 HANDLE_SGET_X(OP_SGET_OBJECT_VOLATILE, "-object-volatile", ObjectVolatile, _AS_OBJECT)
1309 OP_END
1310
1311 /* File: c/OP_SPUT_OBJECT_VOLATILE.cpp */
1312 HANDLE_SPUT_X(OP_SPUT_OBJECT_VOLATILE, "-object-volatile", ObjectVolatile, _AS_OBJECT)
1313 OP_END
1314
1315 /* File: c/gotoTargets.cpp */
1316 /*
1317 * C footer. This has some common code shared by the various targets.
1318 */
1319
1320 /*
1321 * Everything from here on is a "goto target". In the basic interpreter
1322 * we jump into these targets and then jump directly to the handler for
1323 * next instruction. Here, these are subroutines that return to the caller.
1324 */
1325
1326 GOTO_TARGET(filledNewArray, bool methodCallRange, bool)
1327 {
1328 ClassObject* arrayClass;
1329 ArrayObject* newArray;
1330 u4* contents;
1331 char typeCh;
1332 int i;
1333 u4 arg5;
1334
1335 EXPORT_PC();
1336
1337 ref = FETCH(1); /* class ref */
1338 vdst = FETCH(2); /* first 4 regs -or- range base */
1339
1340 if (methodCallRange) {
1341 vsrc1 = INST_AA(inst); /* #of elements */
1342 arg5 = -1; /* silence compiler warning */
1343 ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
1344 vsrc1, ref, vdst, vdst+vsrc1-1);
1345 } else {
1346 arg5 = INST_A(inst);
1347 vsrc1 = INST_B(inst); /* #of elements */
1348 ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
1349 vsrc1, ref, vdst, arg5);
1350 }
1351
1352 /*
1353 * Resolve the array class.
1354 */
1355 arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
1356 if (arrayClass == NULL) {
1357 arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
1358 if (arrayClass == NULL)
1359 GOTO_exceptionThrown();
1360 }
1361 /*
1362 if (!dvmIsArrayClass(arrayClass)) {
1363 dvmThrowRuntimeException(
1364 "filled-new-array needs array class");
1365 GOTO_exceptionThrown();
1366 }
1367 */
1368 /* verifier guarantees this is an array class */
1369 assert(dvmIsArrayClass(arrayClass));
1370 assert(dvmIsClassInitialized(arrayClass));
1371
1372 /*
1373 * Create an array of the specified type.
1374 */
1375 LOGVV("+++ filled-new-array type is '%s'", arrayClass->descriptor);
1376 typeCh = arrayClass->descriptor[1];
1377 if (typeCh == 'D' || typeCh == 'J') {
1378 /* category 2 primitives not allowed */
1379 dvmThrowRuntimeException("bad filled array req");
1380 GOTO_exceptionThrown();
1381 } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
1382 /* TODO: requires multiple "fill in" loops with different widths */
1383 ALOGE("non-int primitives not implemented");
1384 dvmThrowInternalError(
1385 "filled-new-array not implemented for anything but 'int'");
1386 GOTO_exceptionThrown();
1387 }
1388
1389 newArray = dvmAllocArrayByClass(arrayClass, vsrc1, ALLOC_DONT_TRACK);
1390 if (newArray == NULL)
1391 GOTO_exceptionThrown();
1392
1393 /*
1394 * Fill in the elements. It's legal for vsrc1 to be zero.
1395 */
1396 contents = (u4*)(void*)newArray->contents;
1397 if (methodCallRange) {
1398 for (i = 0; i < vsrc1; i++)
1399 contents[i] = GET_REGISTER(vdst+i);
1400 } else {
1401 assert(vsrc1 <= 5);
1402 if (vsrc1 == 5) {
1403 contents[4] = GET_REGISTER(arg5);
1404 vsrc1--;
1405 }
1406 for (i = 0; i < vsrc1; i++) {
1407 contents[i] = GET_REGISTER(vdst & 0x0f);
1408 vdst >>= 4;
1409 }
1410 }
1411 if (typeCh == 'L' || typeCh == '[') {
1412 dvmWriteBarrierArray(newArray, 0, newArray->length);
1413 }
1414
1415 retval.l = (Object*)newArray;
1416 }
1417 FINISH(3);
1418 GOTO_TARGET_END
1419
1420
1421 GOTO_TARGET(invokeVirtual, bool methodCallRange, bool)
1422 {
1423 Method* baseMethod;
1424 Object* thisPtr;
1425
1426 EXPORT_PC();
1427
1428 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
1429 ref = FETCH(1); /* method ref */
1430 vdst = FETCH(2); /* 4 regs -or- first reg */
1431
1432 /*
1433 * The object against which we are executing a method is always
1434 * in the first argument.
1435 */
1436 if (methodCallRange) {
1437 assert(vsrc1 > 0);
1438 ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
1439 vsrc1, ref, vdst, vdst+vsrc1-1);
1440 thisPtr = (Object*) GET_REGISTER(vdst);
1441 } else {
1442 assert((vsrc1>>4) > 0);
1443 ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
1444 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
1445 thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
1446 }
1447
1448 if (!checkForNull(thisPtr))
1449 GOTO_exceptionThrown();
1450
1451 /*
1452 * Resolve the method. This is the correct method for the static
1453 * type of the object. We also verify access permissions here.
1454 */
1455 baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
1456 if (baseMethod == NULL) {
1457 baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
1458 if (baseMethod == NULL) {
1459 ILOGV("+ unknown method or access denied");
1460 GOTO_exceptionThrown();
1461 }
1462 }
1463
1464 /*
1465 * Combine the object we found with the vtable offset in the
1466 * method.
1467 */
1468 assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount);
1469 methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];
1470
1471 #if defined(WITH_JIT) && defined(MTERP_STUB)
1472 self->methodToCall = methodToCall;
1473 self->callsiteClass = thisPtr->clazz;
1474 #endif
1475
1476 #if 0
1477 if (dvmIsAbstractMethod(methodToCall)) {
1478 /*
1479 * This can happen if you create two classes, Base and Sub, where
1480 * Sub is a sub-class of Base. Declare a protected abstract
1481 * method foo() in Base, and invoke foo() from a method in Base.
1482 * Base is an "abstract base class" and is never instantiated
1483 * directly. Now, override foo() in Sub, and use Sub. This
1484 * works fine unless Sub stops providing an implementation of
1485 * the method.
1486 */
1487 dvmThrowAbstractMethodError("abstract method not implemented");
1488 GOTO_exceptionThrown();
1489 }
1490 #else
1491 assert(!dvmIsAbstractMethod(methodToCall) ||
1492 methodToCall->nativeFunc != NULL);
1493 #endif
1494
1495 LOGVV("+++ base=%s.%s virtual[%d]=%s.%s",
1496 baseMethod->clazz->descriptor, baseMethod->name,
1497 (u4) baseMethod->methodIndex,
1498 methodToCall->clazz->descriptor, methodToCall->name);
1499 assert(methodToCall != NULL);
1500
1501 #if 0
1502 if (vsrc1 != methodToCall->insSize) {
1503 ALOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s",
1504 baseMethod->clazz->descriptor, baseMethod->name,
1505 (u4) baseMethod->methodIndex,
1506 methodToCall->clazz->descriptor, methodToCall->name);
1507 //dvmDumpClass(baseMethod->clazz);
1508 //dvmDumpClass(methodToCall->clazz);
1509 dvmDumpAllClasses(0);
1510 }
1511 #endif
1512
1513 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
1514 }
1515 GOTO_TARGET_END
1516
1517 GOTO_TARGET(invokeSuper, bool methodCallRange)
1518 {
1519 Method* baseMethod;
1520 u2 thisReg;
1521
1522 EXPORT_PC();
1523
1524 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
1525 ref = FETCH(1); /* method ref */
1526 vdst = FETCH(2); /* 4 regs -or- first reg */
1527
1528 if (methodCallRange) {
1529 ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
1530 vsrc1, ref, vdst, vdst+vsrc1-1);
1531 thisReg = vdst;
1532 } else {
1533 ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
1534 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
1535 thisReg = vdst & 0x0f;
1536 }
1537
1538 /* impossible in well-formed code, but we must check nevertheless */
1539 if (!checkForNull((Object*) GET_REGISTER(thisReg)))
1540 GOTO_exceptionThrown();
1541
1542 /*
1543 * Resolve the method. This is the correct method for the static
1544 * type of the object. We also verify access permissions here.
1545 * The first arg to dvmResolveMethod() is just the referring class
1546 * (used for class loaders and such), so we don't want to pass
1547 * the superclass into the resolution call.
1548 */
1549 baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
1550 if (baseMethod == NULL) {
1551 baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
1552 if (baseMethod == NULL) {
1553 ILOGV("+ unknown method or access denied");
1554 GOTO_exceptionThrown();
1555 }
1556 }
1557
1558 /*
1559 * Combine the object we found with the vtable offset in the
1560 * method's class.
1561 *
1562 * We're using the current method's class' superclass, not the
1563 * superclass of "this". This is because we might be executing
1564 * in a method inherited from a superclass, and we want to run
1565 * in that class' superclass.
1566 */
1567 if (baseMethod->methodIndex >= curMethod->clazz->super->vtableCount) {
1568 /*
1569 * Method does not exist in the superclass. Could happen if
1570 * superclass gets updated.
1571 */
1572 dvmThrowNoSuchMethodError(baseMethod->name);
1573 GOTO_exceptionThrown();
1574 }
1575 methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
1576
1577 #if 0
1578 if (dvmIsAbstractMethod(methodToCall)) {
1579 dvmThrowAbstractMethodError("abstract method not implemented");
1580 GOTO_exceptionThrown();
1581 }
1582 #else
1583 assert(!dvmIsAbstractMethod(methodToCall) ||
1584 methodToCall->nativeFunc != NULL);
1585 #endif
1586 LOGVV("+++ base=%s.%s super-virtual=%s.%s",
1587 baseMethod->clazz->descriptor, baseMethod->name,
1588 methodToCall->clazz->descriptor, methodToCall->name);
1589 assert(methodToCall != NULL);
1590
1591 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
1592 }
1593 GOTO_TARGET_END
1594
1595 GOTO_TARGET(invokeInterface, bool methodCallRange)
1596 {
1597 Object* thisPtr;
1598 ClassObject* thisClass;
1599
1600 EXPORT_PC();
1601
1602 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
1603 ref = FETCH(1); /* method ref */
1604 vdst = FETCH(2); /* 4 regs -or- first reg */
1605
1606 /*
1607 * The object against which we are executing a method is always
1608 * in the first argument.
1609 */
1610 if (methodCallRange) {
1611 assert(vsrc1 > 0);
1612 ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
1613 vsrc1, ref, vdst, vdst+vsrc1-1);
1614 thisPtr = (Object*) GET_REGISTER(vdst);
1615 } else {
1616 assert((vsrc1>>4) > 0);
1617 ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
1618 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
1619 thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
1620 }
1621
1622 if (!checkForNull(thisPtr))
1623 GOTO_exceptionThrown();
1624
1625 thisClass = thisPtr->clazz;
1626
1627 /*
1628 * Given a class and a method index, find the Method* with the
1629 * actual code we want to execute.
1630 */
1631 methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, curMethod,
1632 methodClassDex);
1633 #if defined(WITH_JIT) && defined(MTERP_STUB)
1634 self->callsiteClass = thisClass;
1635 self->methodToCall = methodToCall;
1636 #endif
1637 if (methodToCall == NULL) {
1638 assert(dvmCheckException(self));
1639 GOTO_exceptionThrown();
1640 }
1641
1642 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
1643 }
1644 GOTO_TARGET_END
1645
1646 GOTO_TARGET(invokeDirect, bool methodCallRange)
1647 {
1648 u2 thisReg;
1649
1650 EXPORT_PC();
1651
1652 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
1653 ref = FETCH(1); /* method ref */
1654 vdst = FETCH(2); /* 4 regs -or- first reg */
1655
1656 if (methodCallRange) {
1657 ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
1658 vsrc1, ref, vdst, vdst+vsrc1-1);
1659 thisReg = vdst;
1660 } else {
1661 ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
1662 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
1663 thisReg = vdst & 0x0f;
1664 }
1665
1666 if (!checkForNull((Object*) GET_REGISTER(thisReg)))
1667 GOTO_exceptionThrown();
1668
1669 methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
1670 if (methodToCall == NULL) {
1671 methodToCall = dvmResolveMethod(curMethod->clazz, ref,
1672 METHOD_DIRECT);
1673 if (methodToCall == NULL) {
1674 ILOGV("+ unknown direct method"); // should be impossible
1675 GOTO_exceptionThrown();
1676 }
1677 }
1678 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
1679 }
1680 GOTO_TARGET_END
1681
1682 GOTO_TARGET(invokeStatic, bool methodCallRange)
1683 EXPORT_PC();
1684
1685 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
1686 ref = FETCH(1); /* method ref */
1687 vdst = FETCH(2); /* 4 regs -or- first reg */
1688
1689 if (methodCallRange)
1690 ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
1691 vsrc1, ref, vdst, vdst+vsrc1-1);
1692 else
1693 ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
1694 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
1695
1696 methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
1697 if (methodToCall == NULL) {
1698 methodToCall = dvmResolveMethod(curMethod->clazz, ref, METHOD_STATIC);
1699 if (methodToCall == NULL) {
1700 ILOGV("+ unknown method");
1701 GOTO_exceptionThrown();
1702 }
1703
1704 #if defined(WITH_JIT) && defined(MTERP_STUB)
1705 /*
1706 * The JIT needs dvmDexGetResolvedMethod() to return non-null.
1707 * Include the check if this code is being used as a stub
1708 * called from the assembly interpreter.
1709 */
1710 if ((self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) &&
1711 (dvmDexGetResolvedMethod(methodClassDex, ref) == NULL)) {
1712 /* Class initialization is still ongoing */
1713 dvmJitEndTraceSelect(self,pc);
1714 }
1715 #endif
1716 }
1717 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
1718 GOTO_TARGET_END
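
/*
 * Annotation (not part of the generated interpreter): a minimal sketch of
 * the resolve-or-use-cache pattern the direct/static handlers above follow.
 * The helper name resolveMethodCached is hypothetical; the calls mirror the
 * ones made above.
 */
#if 0
static Method* resolveMethodCached(DvmDex* pDvmDex, const Method* caller,
    u4 ref, MethodType methodType)
{
    Method* method = dvmDexGetResolvedMethod(pDvmDex, ref);
    if (method == NULL) {
        /* slow path: resolve by dex index; leaves an exception on failure */
        method = dvmResolveMethod(caller->clazz, ref, methodType);
    }
    return method;
}
#endif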
1719
1720 GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
1721 {
1722 Object* thisPtr;
1723
1724 EXPORT_PC();
1725
1726 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
1727 ref = FETCH(1); /* vtable index */
1728 vdst = FETCH(2); /* 4 regs -or- first reg */
1729
1730 /*
1731 * The object against which we are executing a method is always
1732 * in the first argument.
1733 */
1734 if (methodCallRange) {
1735 assert(vsrc1 > 0);
1736 ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
1737 vsrc1, ref, vdst, vdst+vsrc1-1);
1738 thisPtr = (Object*) GET_REGISTER(vdst);
1739 } else {
1740 assert((vsrc1>>4) > 0);
1741 ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
1742 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
1743 thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
1744 }
1745
1746 if (!checkForNull(thisPtr))
1747 GOTO_exceptionThrown();
1748
1749
1750 /*
1751 * Combine the object we found with the vtable offset in the
1752 * method.
1753 */
1754 assert(ref < (unsigned int) thisPtr->clazz->vtableCount);
1755 methodToCall = thisPtr->clazz->vtable[ref];
1756 #if defined(WITH_JIT) && defined(MTERP_STUB)
1757 self->callsiteClass = thisPtr->clazz;
1758 self->methodToCall = methodToCall;
1759 #endif
1760
1761 #if 0
1762 if (dvmIsAbstractMethod(methodToCall)) {
1763 dvmThrowAbstractMethodError("abstract method not implemented");
1764 GOTO_exceptionThrown();
1765 }
1766 #else
1767 assert(!dvmIsAbstractMethod(methodToCall) ||
1768 methodToCall->nativeFunc != NULL);
1769 #endif
1770
1771 LOGVV("+++ virtual[%d]=%s.%s",
1772 ref, methodToCall->clazz->descriptor, methodToCall->name);
1773 assert(methodToCall != NULL);
1774
1775 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
1776 }
1777 GOTO_TARGET_END
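
/*
 * Annotation (not part of the generated interpreter): the "quick" dispatch
 * above reduces to a bounds-checked index into the receiver's vtable, with
 * no name or signature resolution. A sketch only; the helper name
 * quickVirtualLookup is hypothetical.
 */
#if 0
static Method* quickVirtualLookup(Object* receiver, u4 vtableIndex)
{
    assert(vtableIndex < (unsigned int) receiver->clazz->vtableCount);
    return receiver->clazz->vtable[vtableIndex];
}
#endif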
1778
1779 GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
1780 {
1781 u2 thisReg;
1782
1783 EXPORT_PC();
1784
1785 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
1786 ref = FETCH(1); /* vtable index */
1787 vdst = FETCH(2); /* 4 regs -or- first reg */
1788
1789 if (methodCallRange) {
1790 ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
1791 vsrc1, ref, vdst, vdst+vsrc1-1);
1792 thisReg = vdst;
1793 } else {
1794 ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
1795 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
1796 thisReg = vdst & 0x0f;
1797 }
1798 /* impossible in well-formed code, but we must check nevertheless */
1799 if (!checkForNull((Object*) GET_REGISTER(thisReg)))
1800 GOTO_exceptionThrown();
1801
1802 #if 0 /* impossible in optimized + verified code */
1803 if (ref >= curMethod->clazz->super->vtableCount) {
1804 dvmThrowNoSuchMethodError(NULL);
1805 GOTO_exceptionThrown();
1806 }
1807 #else
1808 assert(ref < (unsigned int) curMethod->clazz->super->vtableCount);
1809 #endif
1810
1811 /*
1812 * Combine the object we found with the vtable offset in the
1813 * method's class.
1814 *
1815 * We're using the current method's class' superclass, not the
1816 * superclass of "this". This is because we might be executing
1817 * in a method inherited from a superclass, and we want to run
1818 * in the method's class' superclass.
1819 */
1820 methodToCall = curMethod->clazz->super->vtable[ref];
1821
1822 #if 0
1823 if (dvmIsAbstractMethod(methodToCall)) {
1824 dvmThrowAbstractMethodError("abstract method not implemented");
1825 GOTO_exceptionThrown();
1826 }
1827 #else
1828 assert(!dvmIsAbstractMethod(methodToCall) ||
1829 methodToCall->nativeFunc != NULL);
1830 #endif
1831 LOGVV("+++ super-virtual[%d]=%s.%s",
1832 ref, methodToCall->clazz->descriptor, methodToCall->name);
1833 assert(methodToCall != NULL);
1834 GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
1835 }
1836 GOTO_TARGET_END
1837
1838
1839 /*
1840 * General handling for return-void, return, and return-wide. Put the
1841 * return value in "retval" before jumping here.
1842 */
1843 GOTO_TARGET(returnFromMethod)
1844 {
1845 StackSaveArea* saveArea;
1846
1847 /*
1848 * We must do this BEFORE we pop the previous stack frame off, so
1849 * that the GC can see the return value (if any) in the local vars.
1850 *
1851 * Since this is now an interpreter switch point, we must do it before
1852 * we do anything at all.
1853 */
1854 PERIODIC_CHECKS(0);
1855
1856 ILOGV("> retval=0x%llx (leaving %s.%s %s)",
1857 retval.j, curMethod->clazz->descriptor, curMethod->name,
1858 curMethod->shorty);
1859 //DUMP_REGS(curMethod, fp);
1860
1861 saveArea = SAVEAREA_FROM_FP(fp);
1862
1863 #ifdef EASY_GDB
1864 debugSaveArea = saveArea;
1865 #endif
1866
1867 /* back up to previous frame and see if we hit a break */
1868 fp = (u4*)saveArea->prevFrame;
1869 assert(fp != NULL);
1870
1871 /* Handle any special subMode requirements */
1872 if (self->interpBreak.ctl.subMode != 0) {
1873 PC_FP_TO_SELF();
1874 dvmReportReturn(self);
1875 }
1876
1877 if (dvmIsBreakFrame(fp)) {
1878 /* bail without popping the method frame from stack */
1879 LOGVV("+++ returned into break frame");
1880 GOTO_bail();
1881 }
1882
1883 /* update thread FP, and reset local variables */
1884 self->interpSave.curFrame = fp;
1885 curMethod = SAVEAREA_FROM_FP(fp)->method;
1886 self->interpSave.method = curMethod;
1887 //methodClass = curMethod->clazz;
1888 methodClassDex = curMethod->clazz->pDvmDex;
1889 pc = saveArea->savedPc;
1890 ILOGD("> (return to %s.%s %s)", curMethod->clazz->descriptor,
1891 curMethod->name, curMethod->shorty);
1892
1893 /* use FINISH on the caller's invoke instruction */
1894 //u2 invokeInstr = INST_INST(FETCH(0));
1895 if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
1896 invokeInstr <= OP_INVOKE_INTERFACE*/)
1897 {
1898 FINISH(3);
1899 } else {
1900 //ALOGE("Unknown invoke instr %02x at %d",
1901 // invokeInstr, (int) (pc - curMethod->insns));
1902 assert(false);
1903 }
1904 }
1905 GOTO_TARGET_END
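
/*
 * Annotation (not part of the generated interpreter): a minimal sketch of
 * the frame unwind performed above, in terms of the StackSaveArea fields it
 * reads. The helper name popInterpFrame is hypothetical.
 */
#if 0
static const u2* popInterpFrame(u4** pFp, const Method** pMethod)
{
    StackSaveArea* saveArea = SAVEAREA_FROM_FP(*pFp);
    *pFp = (u4*) saveArea->prevFrame;           /* caller's frame pointer */
    *pMethod = SAVEAREA_FROM_FP(*pFp)->method;  /* caller's method */
    return saveArea->savedPc;                   /* caller's invoke PC */
}
#endif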
1906
1907
1908 /*
1909 * Jump here when the code throws an exception.
1910 *
1911 * By the time we get here, the Throwable has been created and the stack
1912 * trace has been saved off.
1913 */
1914 GOTO_TARGET(exceptionThrown)
1915 {
1916 Object* exception;
1917 int catchRelPc;
1918
1919 PERIODIC_CHECKS(0);
1920
1921 /*
1922 * We save off the exception and clear the exception status. While
1923 * processing the exception we might need to load some Throwable
1924 * classes, and we don't want class loader exceptions to get
1925 * confused with this one.
1926 */
1927 assert(dvmCheckException(self));
1928 exception = dvmGetException(self);
1929 dvmAddTrackedAlloc(exception, self);
1930 dvmClearException(self);
1931
1932 ALOGV("Handling exception %s at %s:%d",
1933 exception->clazz->descriptor, curMethod->name,
1934 dvmLineNumFromPC(curMethod, pc - curMethod->insns));
1935
1936 /*
1937 * Report the exception throw to any "subMode" watchers.
1938 *
1939 * TODO: if the exception was thrown by interpreted code, control
1940 * fell through native, and then back to us, we will report the
1941 * exception at the point of the throw and again here. We can avoid
1942 * this by not reporting exceptions when we jump here directly from
1943 * the native call code above, but then we won't report exceptions
1944 * that were thrown *from* the JNI code (as opposed to *through* it).
1945 *
1946 * The correct solution is probably to ignore from-native exceptions
1947 * here, and have the JNI exception code do the reporting to the
1948 * debugger.
1949 */
1950 if (self->interpBreak.ctl.subMode != 0) {
1951 PC_FP_TO_SELF();
1952 dvmReportExceptionThrow(self, exception);
1953 }
1954
1955 /*
1956 * We need to unroll to the catch block or the nearest "break"
1957 * frame.
1958 *
1959 * A break frame could indicate that we have reached an intermediate
1960 * native call, or have gone off the top of the stack and the thread
1961 * needs to exit. Either way, we return from here, leaving the
1962 * exception raised.
1963 *
1964 * If we do find a catch block, we want to transfer execution to
1965 * that point.
1966 *
1967 * Note this can cause an exception while resolving classes in
1968 * the "catch" blocks.
1969 */
1970 catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
1971 exception, false, (void**)(void*)&fp);
1972
1973 /*
1974 * Restore the stack bounds after an overflow. This isn't going to
1975 * be correct in all circumstances, e.g. if JNI code devours the
1976 * exception this won't happen until some other exception gets
1977 * thrown. If the code keeps pushing the stack bounds we'll end
1978 * up aborting the VM.
1979 *
1980 * Note we want to do this *after* the call to dvmFindCatchBlock,
1981 * because that may need extra stack space to resolve exception
1982 * classes (e.g. through a class loader).
1983 *
1984 * It's possible for the stack overflow handling to cause an
1985 * exception (specifically, class resolution in a "catch" block
1986 * during the call above), so we could see the thread's overflow
1987 * flag raised but actually be running in a "nested" interpreter
1988 * frame. We don't allow doubled-up StackOverflowErrors, so
1989 * we can check for this by just looking at the exception type
1990 * in the cleanup function. Also, we won't unroll past the SOE
1991 * point because the more-recent exception will hit a break frame
1992 * as it unrolls to here.
1993 */
1994 if (self->stackOverflowed)
1995 dvmCleanupStackOverflow(self, exception);
1996
1997 if (catchRelPc < 0) {
1998 /* falling through to JNI code or off the bottom of the stack */
1999 #if DVM_SHOW_EXCEPTION >= 2
2000 ALOGD("Exception %s from %s:%d not caught locally",
2001 exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
2002 dvmLineNumFromPC(curMethod, pc - curMethod->insns));
2003 #endif
2004 dvmSetException(self, exception);
2005 dvmReleaseTrackedAlloc(exception, self);
2006 GOTO_bail();
2007 }
2008
2009 #if DVM_SHOW_EXCEPTION >= 3
2010 {
2011 const Method* catchMethod = SAVEAREA_FROM_FP(fp)->method;
2012 ALOGD("Exception %s thrown from %s:%d to %s:%d",
2013 exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
2014 dvmLineNumFromPC(curMethod, pc - curMethod->insns),
2015 dvmGetMethodSourceFile(catchMethod),
2016 dvmLineNumFromPC(catchMethod, catchRelPc));
2017 }
2018 #endif
2019
2020 /*
2021 * Adjust local variables to match self->interpSave.curFrame and the
2022 * updated PC.
2023 */
2024 //fp = (u4*) self->interpSave.curFrame;
2025 curMethod = SAVEAREA_FROM_FP(fp)->method;
2026 self->interpSave.method = curMethod;
2027 //methodClass = curMethod->clazz;
2028 methodClassDex = curMethod->clazz->pDvmDex;
2029 pc = curMethod->insns + catchRelPc;
2030 ILOGV("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
2031 curMethod->name, curMethod->shorty);
2032 DUMP_REGS(curMethod, fp, false); // show all regs
2033
2034 /*
2035 * Restore the exception if the handler wants it.
2036 *
2037 * The Dalvik spec mandates that, if an exception handler wants to
2038 * do something with the exception, the first instruction executed
2039 * must be "move-exception". We can pass the exception along
2040 * through the thread struct, and let the move-exception instruction
2041 * clear it for us.
2042 *
2043 * If the handler doesn't call move-exception, we don't want to
2044 * finish here with an exception still pending.
2045 */
2046 if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
2047 dvmSetException(self, exception);
2048
2049 dvmReleaseTrackedAlloc(exception, self);
2050 FINISH(0);
2051 }
2052 GOTO_TARGET_END
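
/*
 * Annotation (not part of the generated interpreter): a minimal sketch of
 * the two outcomes of the catch search above. A negative handler offset
 * means no catch block in this interpreter activation, so the exception is
 * re-set and we bail; otherwise the PC moves to the handler. The helper
 * name dispatchCaughtException is hypothetical.
 */
#if 0
static bool dispatchCaughtException(Thread* self, Object* exception,
    int catchRelPc, const Method* handlerMethod, const u2** pPc)
{
    if (catchRelPc < 0) {
        dvmSetException(self, exception);       /* leave it pending */
        return false;                           /* caller should bail out */
    }
    *pPc = handlerMethod->insns + catchRelPc;   /* resume at the handler */
    return true;
}
#endif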
2053
2054
2055
2056 /*
2057 * General handling for invoke-{virtual,super,direct,static,interface},
2058 * including "quick" variants.
2059 *
2060 * Set "methodToCall" to the Method we're calling, and set
2061 * "methodCallRange" depending on whether this is a "/range" instruction.
2062 *
2063 * For a range call:
2064 * "vsrc1" holds the argument count (8 bits)
2065 * "vdst" holds the first argument in the range
2066 * For a non-range call:
2067 * "vsrc1" holds the argument count (4 bits) and the 5th argument index
2068 * "vdst" holds four 4-bit register indices
2069 *
2070 * The caller must EXPORT_PC before jumping here, because any method
2071 * call can throw a stack overflow exception.
2072 */
2073 GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
2074 u2 count, u2 regs)
2075 {
2076 STUB_HACK(vsrc1 = count; vdst = regs; methodToCall = _methodToCall;);
2077
2078 //printf("range=%d call=%p count=%d regs=0x%04x\n",
2079 // methodCallRange, methodToCall, count, regs);
2080 //printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
2081 // methodToCall->name, methodToCall->shorty);
2082
2083 u4* outs;
2084 int i;
2085
2086 /*
2087 * Copy args. This may corrupt vsrc1/vdst.
2088 */
2089 if (methodCallRange) {
2090 // could use memcpy or a "Duff's device"; most functions have
2091 // so few args it won't matter much
2092 assert(vsrc1 <= curMethod->outsSize);
2093 assert(vsrc1 == methodToCall->insSize);
2094 outs = OUTS_FROM_FP(fp, vsrc1);
2095 for (i = 0; i < vsrc1; i++)
2096 outs[i] = GET_REGISTER(vdst+i);
2097 } else {
2098 u4 count = vsrc1 >> 4;
2099
2100 assert(count <= curMethod->outsSize);
2101 assert(count == methodToCall->insSize);
2102 assert(count <= 5);
2103
2104 outs = OUTS_FROM_FP(fp, count);
2105 #if 0
2106 if (count == 5) {
2107 outs[4] = GET_REGISTER(vsrc1 & 0x0f);
2108 count--;
2109 }
2110 for (i = 0; i < (int) count; i++) {
2111 outs[i] = GET_REGISTER(vdst & 0x0f);
2112 vdst >>= 4;
2113 }
2114 #else
2115 // This version executes fewer instructions but is larger
2116 // overall. Seems to be a teensy bit faster.
2117 assert((vdst >> 16) == 0); // 16 bits -or- high 16 bits clear
2118 switch (count) {
2119 case 5:
2120 outs[4] = GET_REGISTER(vsrc1 & 0x0f);
2121 case 4:
2122 outs[3] = GET_REGISTER(vdst >> 12);
2123 case 3:
2124 outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
2125 case 2:
2126 outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
2127 case 1:
2128 outs[0] = GET_REGISTER(vdst & 0x0f);
2129 default:
2130 ;
2131 }
2132 #endif
2133 }
2134 }
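
    /*
     * Worked example (annotation, not generated code): for a non-range call
     * with (vsrc1 >> 4) == 3 and vdst == 0x0321, the switch above falls
     * through from case 3 and stores outs[2] = v3, outs[1] = v2,
     * outs[0] = v1.
     */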
2135
2136 /*
2137 * (This was originally a "goto" target; I've kept it separate from the
2138 * stuff above in case we want to refactor things again.)
2139 *
2140 * At this point, we have the arguments stored in the "outs" area of
2141 * the current method's stack frame, and the method to call in
2142 * "methodToCall". Push a new stack frame.
2143 */
2144 {
2145 StackSaveArea* newSaveArea;
2146 u4* newFp;
2147
2148 ILOGV("> %s%s.%s %s",
2149 dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
2150 methodToCall->clazz->descriptor, methodToCall->name,
2151 methodToCall->shorty);
2152
2153 newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
2154 newSaveArea = SAVEAREA_FROM_FP(newFp);
2155
2156 /* verify that we have enough space */
2157 if (true) {
2158 u1* bottom;
2159 bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
2160 if (bottom < self->interpStackEnd) {
2161 /* stack overflow */
2162 ALOGV("Stack overflow on method call (start=%p end=%p newBot=%p(%d) size=%d '%s')",
2163 self->interpStackStart, self->interpStackEnd, bottom,
2164 (u1*) fp - bottom, self->interpStackSize,
2165 methodToCall->name);
2166 dvmHandleStackOverflow(self, methodToCall);
2167 assert(dvmCheckException(self));
2168 GOTO_exceptionThrown();
2169 }
2170 //ALOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p",
2171 // fp, newFp, newSaveArea, bottom);
2172 }
2173
2174 #ifdef LOG_INSTR
2175 if (methodToCall->registersSize > methodToCall->insSize) {
2176 /*
2177 * This makes valgrind quiet when we print registers that
2178 * haven't been initialized. Turn it off when the debug
2179 * messages are disabled -- we want valgrind to report any
2180 * used-before-initialized issues.
2181 */
2182 memset(newFp, 0xcc,
2183 (methodToCall->registersSize - methodToCall->insSize) * 4);
2184 }
2185 #endif
2186
2187 #ifdef EASY_GDB
2188 newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
2189 #endif
2190 newSaveArea->prevFrame = fp;
2191 newSaveArea->savedPc = pc;
2192 #if defined(WITH_JIT) && defined(MTERP_STUB)
2193 newSaveArea->returnAddr = 0;
2194 #endif
2195 newSaveArea->method = methodToCall;
2196
2197 if (self->interpBreak.ctl.subMode != 0) {
2198 /*
2199 * We mark ENTER here for both native and non-native
2200 * calls. For native calls, we'll mark EXIT on return.
2201 * For non-native calls, EXIT is marked in the RETURN op.
2202 */
2203 PC_TO_SELF();
2204 dvmReportInvoke(self, methodToCall);
2205 }
2206
2207 if (!dvmIsNativeMethod(methodToCall)) {
2208 /*
2209 * "Call" interpreted code. Reposition the PC, update the
2210 * frame pointer and other local state, and continue.
2211 */
2212 curMethod = methodToCall;
2213 self->interpSave.method = curMethod;
2214 methodClassDex = curMethod->clazz->pDvmDex;
2215 pc = methodToCall->insns;
2216 fp = newFp;
2217 self->interpSave.curFrame = fp;
2218 #ifdef EASY_GDB
2219 debugSaveArea = SAVEAREA_FROM_FP(newFp);
2220 #endif
2221 self->debugIsMethodEntry = true; // profiling, debugging
2222 ILOGD("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
2223 curMethod->name, curMethod->shorty);
2224 DUMP_REGS(curMethod, fp, true); // show input args
2225 FINISH(0); // jump to method start
2226 } else {
2227 /* set this up for JNI locals, even if not a JNI native */
2228 newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
2229
2230 self->interpSave.curFrame = newFp;
2231
2232 DUMP_REGS(methodToCall, newFp, true); // show input args
2233
2234 if (self->interpBreak.ctl.subMode != 0) {
2235 dvmReportPreNativeInvoke(methodToCall, self, fp);
2236 }
2237
2238 ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
2239 methodToCall->name, methodToCall->shorty);
2240
2241 /*
2242 * Jump through native call bridge. Because we leave no
2243 * space for locals on native calls, "newFp" points directly
2244 * to the method arguments.
2245 */
2246 (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
2247
2248 if (self->interpBreak.ctl.subMode != 0) {
2249 dvmReportPostNativeInvoke(methodToCall, self, fp);
2250 }
2251
2252 /* pop frame off */
2253 dvmPopJniLocals(self, newSaveArea);
2254 self->interpSave.curFrame = fp;
2255
2256 /*
2257 * If the native code threw an exception, or interpreted code
2258 * invoked by the native call threw one and nobody has cleared
2259 * it, jump to our local exception handling.
2260 */
2261 if (dvmCheckException(self)) {
2262 ALOGV("Exception thrown by/below native code");
2263 GOTO_exceptionThrown();
2264 }
2265
2266 ILOGD("> retval=0x%llx (leaving native)", retval.j);
2267 ILOGD("> (return from native %s.%s to %s.%s %s)",
2268 methodToCall->clazz->descriptor, methodToCall->name,
2269 curMethod->clazz->descriptor, curMethod->name,
2270 curMethod->shorty);
2271
2272 //u2 invokeInstr = INST_INST(FETCH(0));
2273 if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
2274 invokeInstr <= OP_INVOKE_INTERFACE*/)
2275 {
2276 FINISH(3);
2277 } else {
2278 //ALOGE("Unknown invoke instr %02x at %d",
2279 // invokeInstr, (int) (pc - curMethod->insns));
2280 assert(false);
2281 }
2282 }
2283 }
2284 assert(false); // should not get here
2285 GOTO_TARGET_END
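
/*
 * Annotation (not part of the generated interpreter): the frame layout
 * implied by the pointer arithmetic above (the stack grows toward lower
 * addresses). Only relationships visible in this handler are shown; the
 * overlap of the caller's outs with the callee's ins follows from
 * OUTS_FROM_FP() and the assert that the arg count equals insSize.
 *
 *   higher addresses
 *     caller registers (v0..)              <- fp
 *     caller StackSaveArea                 <- SAVEAREA_FROM_FP(fp)
 *     callee registers; the top insSize
 *       slots are the caller's outs        <- newFp
 *     callee StackSaveArea                 <- newSaveArea
 *     space for the callee's own outs      <- "bottom", checked against
 *                                             self->interpStackEnd
 *   lower addresses
 */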
2286
2287 /* File: cstubs/enddefs.cpp */
2288
2289 /* undefine "magic" name remapping */
2290 #undef retval
2291 #undef pc
2292 #undef fp
2293 #undef curMethod
2294 #undef methodClassDex
2295 #undef self
2296 #undef debugTrackedRefStart
2297
2298