1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 #ifdef WITH_JIT
17 
18 /*
19  * Target independent portion of Android's Jit
20  */
21 
22 #include "Dalvik.h"
23 #include "Jit.h"
24 
25 #include "libdex/DexOpcodes.h"
26 #include <unistd.h>
27 #include <pthread.h>
28 #include <sys/time.h>
29 #include <signal.h>
30 #include "compiler/Compiler.h"
31 #include "compiler/CompilerUtility.h"
32 #include "compiler/CompilerIR.h"
33 #include <errno.h>
34 
35 #if defined(WITH_SELF_VERIFICATION)
36 /* Allocate space for per-thread ShadowSpace data structures */
37 void* dvmSelfVerificationShadowSpaceAlloc(Thread* self)
38 {
39     self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace));
40     if (self->shadowSpace == NULL)
41         return NULL;
42 
43     self->shadowSpace->registerSpaceSize = REG_SPACE;
44     self->shadowSpace->registerSpace =
45         (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int));
46 
47     return self->shadowSpace->registerSpace;
48 }
49 
50 /* Free per-thread ShadowSpace data structures */
51 void dvmSelfVerificationShadowSpaceFree(Thread* self)
52 {
53     free(self->shadowSpace->registerSpace);
54     free(self->shadowSpace);
55 }
56 
57 /*
58  * Save out PC, FP, thread state, and registers to shadow space.
59  * Return a pointer to the shadow space for JIT to use.
60  *
61  * The set of saved state from the Thread structure is:
62  *     pc  (Dalvik PC)
63  *     fp  (Dalvik FP)
64  *     retval
65  *     method
66  *     methodClassDex
67  *     interpStackEnd
68  */
69 void* dvmSelfVerificationSaveState(const u2* pc, u4* fp,
70                                    Thread* self, int targetTrace)
71 {
72     ShadowSpace *shadowSpace = self->shadowSpace;
73     unsigned preBytes = self->interpSave.method->outsSize*4 +
74         sizeof(StackSaveArea);
75     unsigned postBytes = self->interpSave.method->registersSize*4;
76 
77     //ALOGD("### selfVerificationSaveState(%d) pc: %#x fp: %#x",
78     //    self->threadId, (int)pc, (int)fp);
79 
80     if (shadowSpace->selfVerificationState != kSVSIdle) {
81         ALOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
82             self->threadId, shadowSpace->selfVerificationState);
83         ALOGD("********** SHADOW STATE DUMP **********");
84         ALOGD("PC: %#x FP: %#x", (int)pc, (int)fp);
85     }
86     shadowSpace->selfVerificationState = kSVSStart;
87 
88     // Dynamically grow shadow register space if necessary
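    // Note: the old contents need not be preserved; the live frame is copied
    // into the shadow space again below.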
89     if (preBytes + postBytes > shadowSpace->registerSpaceSize * sizeof(u4)) {
90         free(shadowSpace->registerSpace);
91         shadowSpace->registerSpaceSize = (preBytes + postBytes) / sizeof(u4);
92         shadowSpace->registerSpace =
93             (int*) calloc(shadowSpace->registerSpaceSize, sizeof(u4));
94     }
95 
96     // Remember original state
97     shadowSpace->startPC = pc;
98     shadowSpace->fp = fp;
99     shadowSpace->retval = self->interpSave.retval;
100     shadowSpace->interpStackEnd = self->interpStackEnd;
101 
102     /*
103      * Store the original method here in case the trace ends with a
104      * return/invoke; the final method may differ from the one the trace started in.
105      */
106     shadowSpace->method = self->interpSave.method;
107     shadowSpace->methodClassDex = self->interpSave.methodClassDex;
108 
109     shadowSpace->shadowFP = shadowSpace->registerSpace +
110                             shadowSpace->registerSpaceSize - postBytes/4;
111 
112     self->interpSave.curFrame = (u4*)shadowSpace->shadowFP;
113     self->interpStackEnd = (u1*)shadowSpace->registerSpace;
114 
115     // Create a copy of the stack
116     memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
117         preBytes+postBytes);
118 
119     // Setup the shadowed heap space
120     shadowSpace->heapSpaceTail = shadowSpace->heapSpace;
121 
122     // Reset trace length
123     shadowSpace->traceLength = 0;
124 
125     return shadowSpace;
126 }
127 
128 /*
129  * Save ending PC, FP and compiled code exit point to shadow space.
130  * Return a pointer to the shadow space for JIT to restore state.
131  */
132 void* dvmSelfVerificationRestoreState(const u2* pc, u4* fp,
133                                       SelfVerificationState exitState,
134                                       Thread* self)
135 {
136     ShadowSpace *shadowSpace = self->shadowSpace;
137     shadowSpace->endPC = pc;
138     shadowSpace->endShadowFP = fp;
139     shadowSpace->jitExitState = exitState;
140 
141     //ALOGD("### selfVerificationRestoreState(%d) pc: %#x fp: %#x endPC: %#x",
142     //    self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
143     //    (int)pc);
144 
145     if (shadowSpace->selfVerificationState != kSVSStart) {
146         ALOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
147             self->threadId, shadowSpace->selfVerificationState);
148         ALOGD("********** SHADOW STATE DUMP **********");
149         ALOGD("Dalvik PC: %#x endPC: %#x", (int)shadowSpace->startPC,
150             (int)shadowSpace->endPC);
151         ALOGD("Interp FP: %#x", (int)shadowSpace->fp);
152         ALOGD("Shadow FP: %#x endFP: %#x", (int)shadowSpace->shadowFP,
153             (int)shadowSpace->endShadowFP);
154     }
155 
156     // Special case when punting after a single instruction
157     if (exitState == kSVSPunt && pc == shadowSpace->startPC) {
158         shadowSpace->selfVerificationState = kSVSIdle;
159     } else {
160         shadowSpace->selfVerificationState = exitState;
161     }
162 
163     /* Restore state before returning */
164     self->interpSave.pc = shadowSpace->startPC;
165     self->interpSave.curFrame = shadowSpace->fp;
166     self->interpSave.method = shadowSpace->method;
167     self->interpSave.methodClassDex = shadowSpace->methodClassDex;
168     self->interpSave.retval = shadowSpace->retval;
169     self->interpStackEnd = shadowSpace->interpStackEnd;
170 
171     return shadowSpace;
172 }
173 
174 /* Print contents of virtual registers */
175 static void selfVerificationPrintRegisters(int* addr, int* addrRef,
176                                            int numWords)
177 {
178     int i;
179     for (i = 0; i < numWords; i++) {
180         ALOGD("(v%d) 0x%8x%s", i, addr[i], addr[i] != addrRef[i] ? " X" : "");
181     }
182 }
183 
184 /* Print values maintained in shadowSpace */
185 static void selfVerificationDumpState(const u2* pc, Thread* self)
186 {
187     ShadowSpace* shadowSpace = self->shadowSpace;
188     StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->interpSave.curFrame);
189     int frameBytes = (int) shadowSpace->registerSpace +
190                      shadowSpace->registerSpaceSize*4 -
191                      (int) shadowSpace->shadowFP;
192     int localRegs = 0;
193     int frameBytes2 = 0;
194     if ((uintptr_t)self->interpSave.curFrame < (uintptr_t)shadowSpace->fp) {
195         localRegs = (stackSave->method->registersSize -
196                      stackSave->method->insSize)*4;
197         frameBytes2 = (int) shadowSpace->fp -
198                       (int)self->interpSave.curFrame - localRegs;
199     }
200     ALOGD("********** SHADOW STATE DUMP **********");
201     ALOGD("CurrentPC: %#x, Offset: 0x%04x", (int)pc,
202         (int)(pc - stackSave->method->insns));
203     ALOGD("Class: %s", shadowSpace->method->clazz->descriptor);
204     ALOGD("Method: %s", shadowSpace->method->name);
205     ALOGD("Dalvik PC: %#x endPC: %#x", (int)shadowSpace->startPC,
206         (int)shadowSpace->endPC);
207     ALOGD("Interp FP: %#x endFP: %#x", (int)shadowSpace->fp,
208         (int)self->interpSave.curFrame);
209     ALOGD("Shadow FP: %#x endFP: %#x", (int)shadowSpace->shadowFP,
210         (int)shadowSpace->endShadowFP);
211     ALOGD("Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes,
212         localRegs, frameBytes2);
213     ALOGD("Trace length: %d State: %d", shadowSpace->traceLength,
214         shadowSpace->selfVerificationState);
215 }
216 
217 /* Print decoded instructions in the current trace */
218 static void selfVerificationDumpTrace(const u2* pc, Thread* self)
219 {
220     ShadowSpace* shadowSpace = self->shadowSpace;
221     StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->interpSave.curFrame);
222     int i, addr, offset;
223     DecodedInstruction *decInsn;
224 
225     ALOGD("********** SHADOW TRACE DUMP **********");
226     for (i = 0; i < shadowSpace->traceLength; i++) {
227         addr = shadowSpace->trace[i].addr;
228         offset =  (int)((u2*)addr - stackSave->method->insns);
229         decInsn = &(shadowSpace->trace[i].decInsn);
230         /* Instruction may not be fully decoded here; some registers may be garbage */
231         ALOGD("%#x: (0x%04x) %s",
232             addr, offset, dexGetOpcodeName(decInsn->opcode));
233     }
234 }
235 
236 /* Code is forced into this spin loop when a divergence is detected */
237 static void selfVerificationSpinLoop(ShadowSpace *shadowSpace)
238 {
239     const u2 *startPC = shadowSpace->startPC;
240     JitTraceDescription* desc = dvmCopyTraceDescriptor(startPC, NULL);
241     if (desc) {
242         dvmCompilerWorkEnqueue(startPC, kWorkOrderTraceDebug, desc);
243         /*
244          * This function effectively terminates the VM right here, so not
245          * freeing the desc pointer when the enqueuing fails is acceptable.
246          */
247     }
248     gDvmJit.selfVerificationSpin = true;
249     while(gDvmJit.selfVerificationSpin) sleep(10);
250 }
251 
252 /*
253  * If here, we're re-interpreting an instruction that was included
254  * in a trace that was just executed.  This routine is called for
255  * each instruction in the original trace, and compares state
256  * when it reaches the end point.
257  *
258  * TUNING: the interpretation mechanism now supports a counted
259  * single-step mechanism.  If we were to associate an instruction
260  * count with each trace exit, we could just single-step the right
261  * number of cycles and then compare.  This would improve detection
262  * of control divergences, as well as (slightly) simplify this code.
263  */
264 void dvmCheckSelfVerification(const u2* pc, Thread* self)
265 {
266     ShadowSpace *shadowSpace = self->shadowSpace;
267     SelfVerificationState state = shadowSpace->selfVerificationState;
268 
269     DecodedInstruction decInsn;
270     dexDecodeInstruction(pc, &decInsn);
271 
272     //ALOGD("### DbgIntp(%d): PC: %#x endPC: %#x state: %d len: %d %s",
273     //    self->threadId, (int)pc, (int)shadowSpace->endPC, state,
274     //    shadowSpace->traceLength, dexGetOpcodeName(decInsn.opcode));
275 
276     if (state == kSVSIdle || state == kSVSStart) {
277         ALOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
278             self->threadId, state);
279         selfVerificationDumpState(pc, self);
280         selfVerificationDumpTrace(pc, self);
281     }
282 
283     /*
284      * Generalize the self verification state to kSVSDebugInterp unless the
285      * entry reason is kSVSBackwardBranch or kSVSSingleStep.
286      */
287     if (state != kSVSBackwardBranch && state != kSVSSingleStep) {
288         shadowSpace->selfVerificationState = kSVSDebugInterp;
289     }
290 
291     /*
292      * Check if the current pc matches the endPC. Only check for non-zero
293      * trace length when backward branches are involved.
294      */
295     if (pc == shadowSpace->endPC &&
296         (state == kSVSDebugInterp || state == kSVSSingleStep ||
297          (state == kSVSBackwardBranch && shadowSpace->traceLength != 0))) {
298 
299         shadowSpace->selfVerificationState = kSVSIdle;
300 
301         /* Check register space */
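        /* frameBytes spans the shadowed frame: from shadowFP to the end of the
         * shadow register space */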
302         int frameBytes = (int) shadowSpace->registerSpace +
303                          shadowSpace->registerSpaceSize*4 -
304                          (int) shadowSpace->shadowFP;
305         if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
306             if (state == kSVSBackwardBranch) {
307                 /* State mismatch on backward branch - try one more iteration */
308                 shadowSpace->selfVerificationState = kSVSDebugInterp;
309                 goto log_and_continue;
310             }
311             ALOGD("~~~ DbgIntp(%d): REGISTERS DIVERGENCE!", self->threadId);
312             selfVerificationDumpState(pc, self);
313             selfVerificationDumpTrace(pc, self);
314             ALOGD("*** Interp Registers: addr: %#x bytes: %d",
315                 (int)shadowSpace->fp, frameBytes);
316             selfVerificationPrintRegisters((int*)shadowSpace->fp,
317                                            (int*)shadowSpace->shadowFP,
318                                            frameBytes/4);
319             ALOGD("*** Shadow Registers: addr: %#x bytes: %d",
320                 (int)shadowSpace->shadowFP, frameBytes);
321             selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
322                                            (int*)shadowSpace->fp,
323                                            frameBytes/4);
324             selfVerificationSpinLoop(shadowSpace);
325         }
326         /* Check new frame if it exists (invokes only) */
327         if ((uintptr_t)self->interpSave.curFrame < (uintptr_t)shadowSpace->fp) {
328             StackSaveArea* stackSave =
329                 SAVEAREA_FROM_FP(self->interpSave.curFrame);
330             int localRegs = (stackSave->method->registersSize -
331                              stackSave->method->insSize)*4;
332             int frameBytes2 = (int) shadowSpace->fp -
333                               (int) self->interpSave.curFrame - localRegs;
334             if (memcmp(((char*)self->interpSave.curFrame)+localRegs,
335                 ((char*)shadowSpace->endShadowFP)+localRegs, frameBytes2)) {
336                 if (state == kSVSBackwardBranch) {
337                     /*
338                      * State mismatch on backward branch - try one more
339                      * iteration.
340                      */
341                     shadowSpace->selfVerificationState = kSVSDebugInterp;
342                     goto log_and_continue;
343                 }
344                 ALOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) DIVERGENCE!",
345                     self->threadId);
346                 selfVerificationDumpState(pc, self);
347                 selfVerificationDumpTrace(pc, self);
348                 ALOGD("*** Interp Registers: addr: %#x l: %d bytes: %d",
349                     (int)self->interpSave.curFrame, localRegs, frameBytes2);
350                 selfVerificationPrintRegisters((int*)self->interpSave.curFrame,
351                                                (int*)shadowSpace->endShadowFP,
352                                                (frameBytes2+localRegs)/4);
353                 ALOGD("*** Shadow Registers: addr: %#x l: %d bytes: %d",
354                     (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
355                 selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
356                                                (int*)self->interpSave.curFrame,
357                                                (frameBytes2+localRegs)/4);
358                 selfVerificationSpinLoop(shadowSpace);
359             }
360         }
361 
362         /* Check memory space */
363         bool memDiff = false;
364         ShadowHeap* heapSpacePtr;
365         for (heapSpacePtr = shadowSpace->heapSpace;
366              heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
367             int memData = *((unsigned int*) heapSpacePtr->addr);
368             if (heapSpacePtr->data != memData) {
369                 if (state == kSVSBackwardBranch) {
370                     /*
371                      * State mismatch on backward branch - try one more
372                      * iteration.
373                      */
374                     shadowSpace->selfVerificationState = kSVSDebugInterp;
375                     goto log_and_continue;
376                 }
377                 ALOGD("~~~ DbgIntp(%d): MEMORY DIVERGENCE!", self->threadId);
378                 ALOGD("Addr: %#x Intrp Data: %#x Jit Data: %#x",
379                     heapSpacePtr->addr, memData, heapSpacePtr->data);
380                 selfVerificationDumpState(pc, self);
381                 selfVerificationDumpTrace(pc, self);
382                 memDiff = true;
383             }
384         }
385         if (memDiff) selfVerificationSpinLoop(shadowSpace);
386 
387 
388         /*
389          * Success.  If this shadowed trace included a single-stepped
390          * instruction, we need to stay in the interpreter for one
391          * more interpretation before resuming.
392          */
393         if (state == kSVSSingleStep) {
394             assert(self->jitResumeNPC != NULL);
395             assert(self->singleStepCount == 0);
396             self->singleStepCount = 1;
397             dvmEnableSubMode(self, kSubModeCountedStep);
398         }
399 
400         /*
401          * Switch off shadow replay mode.  The next shadowed trace
402          * execution will turn it back on.
403          */
404         dvmDisableSubMode(self, kSubModeJitSV);
405 
406         self->jitState = kJitDone;
407         return;
408     }
409 log_and_continue:
410     /* If the end has not been reached, make sure the max length is not exceeded */
411     if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
412         ALOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
413         ALOGD("startPC: %#x endPC: %#x currPC: %#x",
414             (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
415         selfVerificationDumpState(pc, self);
416         selfVerificationDumpTrace(pc, self);
417         selfVerificationSpinLoop(shadowSpace);
418         return;
419     }
420     /* Log the instruction address and decoded instruction for debug */
421     shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
422     shadowSpace->trace[shadowSpace->traceLength].decInsn = decInsn;
423     shadowSpace->traceLength++;
424 }
425 #endif
426 
427 /*
428  * If one of our fixed tables or the translation buffer fills up,
429  * call this routine to avoid wasting cycles on future translation requests.
430  */
431 void dvmJitStopTranslationRequests()
432 {
433     /*
434      * Note 1: This won't necessarily stop all translation requests, and
435      * operates on a delayed mechanism.  Running threads look to the copy
436      * of this value in their private thread structures and won't see
437      * this change until it is refreshed (which happens on interpreter
438      * entry).
439      * Note 2: This is a one-shot memory leak on this table. Because this is a
440      * permanent off switch for Jit profiling, it is a one-time leak of 1K
441      * bytes, and no further attempt will be made to re-allocate it.  Can't
442      * free it because some thread may be holding a reference.
443      */
444     gDvmJit.pProfTable = NULL;
445     dvmJitUpdateThreadStateAll();
446 }
447 
448 #if defined(WITH_JIT_TUNING)
449 /* Convenience function to increment counter from assembly code */
450 void dvmBumpNoChain(int from)
451 {
452     gDvmJit.noChainExit[from]++;
453 }
454 
455 /* Convenience function to increment counter from assembly code */
456 void dvmBumpNormal()
457 {
458     gDvmJit.normalExit++;
459 }
460 
461 /* Convenience function to increment counter from assembly code */
462 void dvmBumpPunt(int from)
463 {
464     gDvmJit.puntExit++;
465 }
466 #endif
467 
468 /* Dumps debugging & tuning stats to the log */
469 void dvmJitStats()
470 {
471     int i;
472     int hit;
473     int not_hit;
474     int chains;
475     int stubs;
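    /* hit/not_hit count used vs. empty table slots; stubs counts entries still
     * pointing at the interpret-only template; chains counts slots linked into
     * a hash chain. */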
476     if (gDvmJit.pJitEntryTable) {
477         for (i=0, stubs=chains=hit=not_hit=0;
478              i < (int) gDvmJit.jitTableSize;
479              i++) {
480             if (gDvmJit.pJitEntryTable[i].dPC != 0) {
481                 hit++;
482                 if (gDvmJit.pJitEntryTable[i].codeAddress ==
483                       dvmCompilerGetInterpretTemplate())
484                     stubs++;
485             } else
486                 not_hit++;
487             if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
488                 chains++;
489         }
490         ALOGD("JIT: table size is %d, entries used is %d",
491              gDvmJit.jitTableSize,  gDvmJit.jitTableEntriesUsed);
492         ALOGD("JIT: %d traces, %d slots, %d chains, %d thresh, %s",
493              hit, not_hit + hit, chains, gDvmJit.threshold,
494              gDvmJit.blockingMode ? "Blocking" : "Non-blocking");
495 
496 #if defined(WITH_JIT_TUNING)
497         ALOGD("JIT: Code cache patches: %d", gDvmJit.codeCachePatches);
498 
499         ALOGD("JIT: Lookups: %d hits, %d misses; %d normal, %d punt",
500              gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
501              gDvmJit.normalExit, gDvmJit.puntExit);
502 
503         ALOGD("JIT: ICHits: %d", gDvmICHitCount);
504 
505         ALOGD("JIT: noChainExit: %d IC miss, %d interp callsite, "
506              "%d switch overflow",
507              gDvmJit.noChainExit[kInlineCacheMiss],
508              gDvmJit.noChainExit[kCallsiteInterpreted],
509              gDvmJit.noChainExit[kSwitchOverflow]);
510 
511         ALOGD("JIT: ICPatch: %d init, %d rejected, %d lock-free, %d queued, "
512              "%d dropped",
513              gDvmJit.icPatchInit, gDvmJit.icPatchRejected,
514              gDvmJit.icPatchLockFree, gDvmJit.icPatchQueued,
515              gDvmJit.icPatchDropped);
516 
517         ALOGD("JIT: Invoke: %d mono, %d poly, %d native, %d return",
518              gDvmJit.invokeMonomorphic, gDvmJit.invokePolymorphic,
519              gDvmJit.invokeNative, gDvmJit.returnOp);
520         ALOGD("JIT: Inline: %d mgetter, %d msetter, %d pgetter, %d psetter",
521              gDvmJit.invokeMonoGetterInlined, gDvmJit.invokeMonoSetterInlined,
522              gDvmJit.invokePolyGetterInlined, gDvmJit.invokePolySetterInlined);
523         ALOGD("JIT: Total compilation time: %llu ms", gDvmJit.jitTime / 1000);
524         ALOGD("JIT: Avg unit compilation time: %llu us",
525              gDvmJit.numCompilations == 0 ? 0 :
526              gDvmJit.jitTime / gDvmJit.numCompilations);
527         ALOGD("JIT: Potential GC blocked by compiler: max %llu us / "
528              "avg %llu us (%d)",
529              gDvmJit.maxCompilerThreadBlockGCTime,
530              gDvmJit.numCompilerThreadBlockGC == 0 ?
531                  0 : gDvmJit.compilerThreadBlockGCTime /
532                      gDvmJit.numCompilerThreadBlockGC,
533              gDvmJit.numCompilerThreadBlockGC);
534 #endif
535 
536         ALOGD("JIT: %d Translation chains, %d interp stubs",
537              gDvmJit.translationChains, stubs);
538         if (gDvmJit.profileMode == kTraceProfilingContinuous) {
539             dvmCompilerSortAndPrintTraceProfiles();
540         }
541     }
542 }
543 
544 
545 /* End current trace now & don't include current instruction */
546 void dvmJitEndTraceSelect(Thread* self, const u2* dPC)
547 {
548     if (self->jitState == kJitTSelect) {
549         self->jitState = kJitTSelectEnd;
550     }
551     if (self->jitState == kJitTSelectEnd) {
552         // Clean up and finish now.
553         dvmCheckJit(dPC, self);
554     }
555 }
556 
557 /*
558  * Find an entry in the JitTable, creating if necessary.
559  * Returns null if table is full.
560  */
561 static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked,
562                               bool isMethodEntry)
563 {
564     u4 chainEndMarker = gDvmJit.jitTableSize;
565     u4 idx = dvmJitHash(dPC);
566 
567     /*
568      * Walk the bucket chain to find an exact match for our PC and trace/method
569      * type
570      */
571     while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
572            ((gDvmJit.pJitEntryTable[idx].dPC != dPC) ||
573             (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry !=
574              isMethodEntry))) {
575         idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
576     }
577 
578     if (gDvmJit.pJitEntryTable[idx].dPC != dPC ||
579         gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry != isMethodEntry) {
580         /*
581          * No match.  Acquire jitTableLock and find the last
582          * slot in the chain. Possibly continue the chain walk in case
583          * some other thread allocated the slot we were looking
584          * at previously (perhaps even the dPC we're trying to enter).
585          */
586         if (!callerLocked)
587             dvmLockMutex(&gDvmJit.tableLock);
588         /*
589          * At this point, if .dPC is NULL, then the slot we're
590          * looking at is the target slot from the primary hash
591          * (the simple, and common case).  Otherwise we're going
592          * to have to find a free slot and chain it.
593          */
594         ANDROID_MEMBAR_FULL(); /* Make sure we reload [].dPC after lock */
595         if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
596             u4 prev;
597             while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
598                 if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
599                     gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
600                         isMethodEntry) {
601                     /* Another thread got there first for this dPC */
602                     if (!callerLocked)
603                         dvmUnlockMutex(&gDvmJit.tableLock);
604                     return &gDvmJit.pJitEntryTable[idx];
605                 }
606                 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
607             }
608             /* Here, idx should be pointing to the last cell of an
609              * active chain whose last member contains a valid dPC */
610             assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
611             /* Linear walk to find a free cell and add it to the end */
612             prev = idx;
613             while (true) {
614                 idx++;
615                 if (idx == chainEndMarker)
616                     idx = 0;  /* Wraparound */
617                 if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
618                     (idx == prev))
619                     break;
620             }
621             if (idx != prev) {
622                 JitEntryInfoUnion oldValue;
623                 JitEntryInfoUnion newValue;
624                 /*
625                  * Although we hold the lock so that no one else will
626                  * be trying to update a chain field, the other fields
627                  * packed into the word may be in use by other threads.
628                  */
629                 do {
630                     oldValue = gDvmJit.pJitEntryTable[prev].u;
631                     newValue = oldValue;
632                     newValue.info.chain = idx;
633                 } while (android_atomic_release_cas(oldValue.infoWord,
634                         newValue.infoWord,
635                         &gDvmJit.pJitEntryTable[prev].u.infoWord) != 0);
636             }
637         }
638         if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
639             gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry = isMethodEntry;
640             /*
641              * Initialize codeAddress and allocate the slot.  Must
642              * happen in this order (once dPC is set, the entry is live).
643              */
644             android_atomic_release_store((int32_t)dPC,
645                  (volatile int32_t *)(void *)&gDvmJit.pJitEntryTable[idx].dPC);
646             /* for simulator mode, we need to initialize codeAddress to null */
647             gDvmJit.pJitEntryTable[idx].codeAddress = NULL;
648             gDvmJit.pJitEntryTable[idx].dPC = dPC;
649             gDvmJit.jitTableEntriesUsed++;
650         } else {
651             /* Table is full */
652             idx = chainEndMarker;
653         }
654         if (!callerLocked)
655             dvmUnlockMutex(&gDvmJit.tableLock);
656     }
657     return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
658 }
659 
660 /* Dump a trace description */
661 void dvmJitDumpTraceDesc(JitTraceDescription *trace)
662 {
663     int i;
664     bool done = false;
665     const u2* dpc;
666     const u2* dpcBase;
667     int curFrag = 0;
668     ALOGD("===========================================");
669     ALOGD("Trace dump %#x, Method %s off %#x",(int)trace,
670          trace->method->name,trace->trace[curFrag].info.frag.startOffset);
671     dpcBase = trace->method->insns;
672     while (!done) {
673         DecodedInstruction decInsn;
674         if (trace->trace[curFrag].isCode) {
675             ALOGD("Frag[%d]- Insts: %d, start: %#x, hint: %#x, end: %d",
676                  curFrag, trace->trace[curFrag].info.frag.numInsts,
677                  trace->trace[curFrag].info.frag.startOffset,
678                  trace->trace[curFrag].info.frag.hint,
679                  trace->trace[curFrag].info.frag.runEnd);
680             dpc = dpcBase + trace->trace[curFrag].info.frag.startOffset;
681             for (i=0; i<trace->trace[curFrag].info.frag.numInsts; i++) {
682                 dexDecodeInstruction(dpc, &decInsn);
683                 ALOGD("    0x%04x - %s %#x",(dpc-dpcBase),
684                      dexGetOpcodeName(decInsn.opcode),(int)dpc);
685                 dpc += dexGetWidthFromOpcode(decInsn.opcode);
686             }
687             if (trace->trace[curFrag].info.frag.runEnd) {
688                 done = true;
689             }
690         } else {
691             ALOGD("Frag[%d]- META info: 0x%08x", curFrag,
692                  (int)trace->trace[curFrag].info.meta);
693         }
694         curFrag++;
695     }
696     ALOGD("-------------------------------------------");
697 }
698 
699 /*
700  * Append the class ptr of "this" and the current method ptr to the current
701  * trace. That is, the trace runs will contain the following components:
702  *  + trace run that ends with an invoke (existing entry)
703  *  + thisClass descriptor and classLoader (new)
704  *  + calleeMethod (new)
705  */
706 static void insertClassMethodInfo(Thread* self,
707                                   const ClassObject* thisClass,
708                                   const Method* calleeMethod,
709                                   const DecodedInstruction* insn)
710 {
711     int currTraceRun = ++self->currTraceRun;
712     self->trace[currTraceRun].info.meta = thisClass ?
713                                     (void *) thisClass->descriptor : NULL;
714     self->trace[currTraceRun].isCode = false;
715 
716     currTraceRun = ++self->currTraceRun;
717     self->trace[currTraceRun].info.meta = thisClass ?
718                                     (void *) thisClass->classLoader : NULL;
719     self->trace[currTraceRun].isCode = false;
720 
721     currTraceRun = ++self->currTraceRun;
722     self->trace[currTraceRun].info.meta = (void *) calleeMethod;
723     self->trace[currTraceRun].isCode = false;
724 }
725 
726 /*
727  * Check if the next instruction following the invoke is a move-result and if
728  * so add it to the trace. That is, this will add the trace run that includes
729  * the move-result to the trace list.
730  *
731  *  + trace run that ends with an invoke (existing entry)
732  *  + thisClass descriptor and classLoader (existing entries)
733  *  + calleeMethod (existing entry)
734  *  + move result (new)
735  *
736  * lastPC, len, offset are all from the preceding invoke instruction
737  */
738 static void insertMoveResult(const u2 *lastPC, int len, int offset,
739                              Thread *self)
740 {
741     DecodedInstruction nextDecInsn;
742     const u2 *moveResultPC = lastPC + len;
743 
744     dexDecodeInstruction(moveResultPC, &nextDecInsn);
745     if ((nextDecInsn.opcode != OP_MOVE_RESULT) &&
746         (nextDecInsn.opcode != OP_MOVE_RESULT_WIDE) &&
747         (nextDecInsn.opcode != OP_MOVE_RESULT_OBJECT))
748         return;
749 
750     /* We need to start a new trace run */
751     int currTraceRun = ++self->currTraceRun;
752     self->currRunHead = moveResultPC;
753     self->trace[currTraceRun].info.frag.startOffset = offset + len;
754     self->trace[currTraceRun].info.frag.numInsts = 1;
755     self->trace[currTraceRun].info.frag.runEnd = false;
756     self->trace[currTraceRun].info.frag.hint = kJitHintNone;
757     self->trace[currTraceRun].isCode = true;
758     self->totalTraceLen++;
759 
760     self->currRunLen = dexGetWidthFromInstruction(moveResultPC);
761 }
762 
763 /*
764  * Adds to the current trace request one instruction at a time, just
765  * before that instruction is interpreted.  This is the primary trace
766  * selection function.  NOTE: return instructions are handled a little
767  * differently.  In general, instructions are "proposed" to be added
768  * to the current trace prior to interpretation.  If the interpreter
769  * then successfully completes the instruction, it will be considered
770  * part of the request.  This allows us to examine machine state prior
771  * to interpretation, and also abort the trace request if the instruction
772  * throws or does something unexpected.  However, return instructions
773  * will cause an immediate end to the translation request - which will
774  * be passed to the compiler before the return completes.  This is done
775  * in response to special handling of returns by the interpreter (and
776  * because returns cannot throw in a way that causes problems for the
777  * translated code).
778  */
779 void dvmCheckJit(const u2* pc, Thread* self)
780 {
781     const ClassObject *thisClass = self->callsiteClass;
782     const Method* curMethod = self->methodToCall;
783     int flags, len;
784     int allDone = false;
785     /* Stay in break/single-step mode for the next instruction */
786     bool stayOneMoreInst = false;
787 
788     /* Prepare to handle last PC and stage the current PC & method */
789     const u2 *lastPC = self->lastPC;
790 
791     self->lastPC = pc;
792 
793     switch (self->jitState) {
794         int offset;
795         DecodedInstruction decInsn;
796         case kJitTSelect:
797             /* First instruction - just remember the PC and exit */
798             if (lastPC == NULL) break;
799             /* Grow the trace around the last PC if jitState is kJitTSelect */
800             dexDecodeInstruction(lastPC, &decInsn);
801 #if TRACE_OPCODE_FILTER
802             /* Only add opcodes supported by the JIT to the trace. End the trace if
803              * this opcode is not supported.
804              */
805             if (!dvmIsOpcodeSupportedByJit(decInsn.opcode)) {
806                 self->jitState = kJitTSelectEnd;
807                 break;
808             }
809 #endif
810             /*
811              * Treat {PACKED,SPARSE}_SWITCH as trace-ending instructions due
812              * to the amount of space they take to generate the chaining
813              * cells.
814              */
815             if (self->totalTraceLen != 0 &&
816                 (decInsn.opcode == OP_PACKED_SWITCH ||
817                  decInsn.opcode == OP_SPARSE_SWITCH)) {
818                 self->jitState = kJitTSelectEnd;
819                 break;
820             }
821 
822 #if defined(SHOW_TRACE)
823             ALOGD("TraceGen: adding %s. lpc:%#x, pc:%#x",
824                  dexGetOpcodeName(decInsn.opcode), (int)lastPC, (int)pc);
825 #endif
826             flags = dexGetFlagsFromOpcode(decInsn.opcode);
827             len = dexGetWidthFromInstruction(lastPC);
828             offset = lastPC - self->traceMethod->insns;
829             assert((unsigned) offset <
830                    dvmGetMethodInsnsSize(self->traceMethod));
831             if (lastPC != self->currRunHead + self->currRunLen) {
832                 int currTraceRun;
833                 /* We need to start a new trace run */
834                 currTraceRun = ++self->currTraceRun;
835                 self->currRunLen = 0;
836                 self->currRunHead = (u2*)lastPC;
837                 self->trace[currTraceRun].info.frag.startOffset = offset;
838                 self->trace[currTraceRun].info.frag.numInsts = 0;
839                 self->trace[currTraceRun].info.frag.runEnd = false;
840                 self->trace[currTraceRun].info.frag.hint = kJitHintNone;
841                 self->trace[currTraceRun].isCode = true;
842             }
843             self->trace[self->currTraceRun].info.frag.numInsts++;
844             self->totalTraceLen++;
845             self->currRunLen += len;
846 
847             /*
848              * If the last instruction is an invoke, we will try to sneak in
849              * the move-result* (if present) into a separate trace run.
850              */
851             {
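              /* An invoke appends extra meta and move-result runs below, so
               * reserve headroom before reaching MAX_JIT_RUN_LEN. */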
852               int needReservedRun = (flags & kInstrInvoke) ? 1 : 0;
853 
854               /* Will probably never hit this with the current trace builder */
855               if (self->currTraceRun ==
856                    (MAX_JIT_RUN_LEN - 1 - needReservedRun)) {
857                 self->jitState = kJitTSelectEnd;
858               }
859             }
860 
861             if (!dexIsGoto(flags) &&
862                   ((flags & (kInstrCanBranch |
863                              kInstrCanSwitch |
864                              kInstrCanReturn |
865                              kInstrInvoke)) != 0)) {
866                     self->jitState = kJitTSelectEnd;
867 #if defined(SHOW_TRACE)
868                 ALOGD("TraceGen: ending on %s, basic block end",
869                      dexGetOpcodeName(decInsn.opcode));
870 #endif
871 
872                 /*
873                  * If the current invoke is a {virtual,interface}, get the
874                  * current class/method pair into the trace as well.
875                  * If the next instruction is a variant of move-result, insert
876                  * it to the trace too.
877                  */
878                 if (flags & kInstrInvoke) {
879                     insertClassMethodInfo(self, thisClass, curMethod,
880                                           &decInsn);
881                     insertMoveResult(lastPC, len, offset, self);
882                 }
883             }
884             /* Break on throw or self-loop */
885             if ((decInsn.opcode == OP_THROW) || (lastPC == pc)){
886                 self->jitState = kJitTSelectEnd;
887             }
888             if (self->totalTraceLen >= JIT_MAX_TRACE_LEN) {
889                 self->jitState = kJitTSelectEnd;
890             }
891             if ((flags & kInstrCanReturn) != kInstrCanReturn) {
892                 break;
893             }
894             else {
895                 /*
896                  * Last instruction is a return - stay in the dbg interpreter
897                  * for one more instruction if it is a non-void return, since
898                  * we don't want to start a trace with move-result as the first
899                  * instruction (which is already included in the trace
900                  * containing the invoke).
901                  */
902                 if (decInsn.opcode != OP_RETURN_VOID) {
903                     stayOneMoreInst = true;
904                 }
905             }
906             /* NOTE: intentional fallthrough for returns */
907         case kJitTSelectEnd:
908             {
909                 /* Empty trace - set to bail to interpreter */
910                 if (self->totalTraceLen == 0) {
911                     dvmJitSetCodeAddr(self->currTraceHead,
912                                       dvmCompilerGetInterpretTemplate(),
913                                       dvmCompilerGetInterpretTemplateSet(),
914                                       false /* Not method entry */, 0);
915                     self->jitState = kJitDone;
916                     allDone = true;
917                     break;
918                 }
919 
920                 int lastTraceDesc = self->currTraceRun;
921 
922                 /* Append a new empty code run if the last slot is meta info */
923                 if (!self->trace[lastTraceDesc].isCode) {
924                     lastTraceDesc = ++self->currTraceRun;
925                     self->trace[lastTraceDesc].info.frag.startOffset = 0;
926                     self->trace[lastTraceDesc].info.frag.numInsts = 0;
927                     self->trace[lastTraceDesc].info.frag.hint = kJitHintNone;
928                     self->trace[lastTraceDesc].isCode = true;
929                 }
930 
931                 /* Mark the end of the trace runs */
932                 self->trace[lastTraceDesc].info.frag.runEnd = true;
933 
934                 JitTraceDescription* desc =
935                    (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
936                      sizeof(JitTraceRun) * (self->currTraceRun+1));
937 
938                 if (desc == NULL) {
939                     ALOGE("Out of memory in trace selection");
940                     dvmJitStopTranslationRequests();
941                     self->jitState = kJitDone;
942                     allDone = true;
943                     break;
944                 }
945 
946                 desc->method = self->traceMethod;
947                 memcpy((char*)&(desc->trace[0]),
948                     (char*)&(self->trace[0]),
949                     sizeof(JitTraceRun) * (self->currTraceRun+1));
950 #if defined(SHOW_TRACE)
951                 ALOGD("TraceGen:  trace done, adding to queue");
952                 dvmJitDumpTraceDesc(desc);
953 #endif
954                 if (dvmCompilerWorkEnqueue(
955                        self->currTraceHead,kWorkOrderTrace,desc)) {
956                     /* Work order successfully enqueued */
957                     if (gDvmJit.blockingMode) {
958                         dvmCompilerDrainQueue();
959                     }
960                 } else {
961                     /*
962                      * Make sure the descriptor for the abandoned work order is
963                      * freed.
964                      */
965                     free(desc);
966                 }
967                 self->jitState = kJitDone;
968                 allDone = true;
969             }
970             break;
971         case kJitDone:
972             allDone = true;
973             break;
974         case kJitNot:
975             allDone = true;
976             break;
977         default:
978             ALOGE("Unexpected JIT state: %d", self->jitState);
979             dvmAbort();
980             break;
981     }
982 
983     /*
984      * If we're done with trace selection, switch off the control flags.
985      */
986      if (allDone) {
987          dvmDisableSubMode(self, kSubModeJitTraceBuild);
988          if (stayOneMoreInst) {
989              // Clear jitResumeNPC explicitly since we know we don't need it
990              // here.
991              self->jitResumeNPC = NULL;
992              // Keep going in single-step mode for at least one more inst
993              if (self->singleStepCount == 0)
994                  self->singleStepCount = 1;
995              dvmEnableSubMode(self, kSubModeCountedStep);
996          }
997      }
998      return;
999 }
1000 
1001 JitEntry *dvmJitFindEntry(const u2* pc, bool isMethodEntry)
1002 {
1003     int idx = dvmJitHash(pc);
1004 
1005     /* Expect a high hit rate on 1st shot */
1006     if ((gDvmJit.pJitEntryTable[idx].dPC == pc) &&
1007         (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == isMethodEntry))
1008         return &gDvmJit.pJitEntryTable[idx];
1009     else {
1010         int chainEndMarker = gDvmJit.jitTableSize;
1011         while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
1012             idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
1013             if ((gDvmJit.pJitEntryTable[idx].dPC == pc) &&
1014                 (gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
1015                 isMethodEntry))
1016                 return &gDvmJit.pJitEntryTable[idx];
1017         }
1018     }
1019     return NULL;
1020 }
1021 
1022 /*
1023  * Walk through the JIT profile table and find the corresponding JIT code, in
1024  * the specified format (ie trace vs method). This routine needs to be fast.
1025  */
1026 void* getCodeAddrCommon(const u2* dPC, bool methodEntry)
1027 {
1028     int idx = dvmJitHash(dPC);
1029     const u2* pc = gDvmJit.pJitEntryTable[idx].dPC;
1030     if (pc != NULL) {
1031         bool hideTranslation = dvmJitHideTranslation();
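        /* If translations are currently hidden, the lookups below return NULL
         * so execution stays in the interpreter */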
1032         if (pc == dPC &&
1033             gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry == methodEntry) {
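            /* Enter at the profiling prefix only under continuous trace
             * profiling; otherwise skip past it */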
1034             int offset = (gDvmJit.profileMode >= kTraceProfilingContinuous) ?
1035                  0 : gDvmJit.pJitEntryTable[idx].u.info.profileOffset;
1036             intptr_t codeAddress =
1037                 (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress;
1038 #if defined(WITH_JIT_TUNING)
1039             gDvmJit.addrLookupsFound++;
1040 #endif
1041             return hideTranslation || !codeAddress ?  NULL :
1042                   (void *)(codeAddress + offset);
1043         } else {
1044             int chainEndMarker = gDvmJit.jitTableSize;
1045             while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
1046                 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
1047                 if (gDvmJit.pJitEntryTable[idx].dPC == dPC &&
1048                     gDvmJit.pJitEntryTable[idx].u.info.isMethodEntry ==
1049                         methodEntry) {
1050                     int offset = (gDvmJit.profileMode >=
1051                         kTraceProfilingContinuous) ? 0 :
1052                         gDvmJit.pJitEntryTable[idx].u.info.profileOffset;
1053                     intptr_t codeAddress =
1054                         (intptr_t)gDvmJit.pJitEntryTable[idx].codeAddress;
1055 #if defined(WITH_JIT_TUNING)
1056                     gDvmJit.addrLookupsFound++;
1057 #endif
1058                     return hideTranslation || !codeAddress ? NULL :
1059                         (void *)(codeAddress + offset);
1060                 }
1061             }
1062         }
1063     }
1064 #if defined(WITH_JIT_TUNING)
1065     gDvmJit.addrLookupsNotFound++;
1066 #endif
1067     return NULL;
1068 }
1069 
1070 /*
1071  * If a translated code address, in trace format, exists for the Dalvik byte
1072  * code pointer, return it.
1073  */
1074 void* dvmJitGetTraceAddr(const u2* dPC)
1075 {
1076     return getCodeAddrCommon(dPC, false /* method entry */);
1077 }
1078 
1079 /*
1080  * If a translated code address, in whole-method format, exists for the Dalvik
1081  * byte code pointer, return it.
1082  */
1083 void* dvmJitGetMethodAddr(const u2* dPC)
1084 {
1085     return getCodeAddrCommon(dPC, true /* method entry */);
1086 }
1087 
1088 /*
1089  * Similar to dvmJitGetTraceAddr, but returns null if the calling
1090  * thread is in a single-step mode.
1091  */
1092 void* dvmJitGetTraceAddrThread(const u2* dPC, Thread* self)
1093 {
1094     return (self->interpBreak.ctl.breakFlags != 0) ? NULL :
1095             getCodeAddrCommon(dPC, false /* method entry */);
1096 }
1097 
1098 /*
1099  * Similar to dvmJitGetMethodAddr, but returns null if the calling
1100  * thread is in a single-step mode.
1101  */
1102 void* dvmJitGetMethodAddrThread(const u2* dPC, Thread* self)
1103 {
1104     return (self->interpBreak.ctl.breakFlags != 0) ? NULL :
1105             getCodeAddrCommon(dPC, true /* method entry */);
1106 }
1107 
1108 /*
1109  * Register the translated code pointer into the JitTable.
1110  * NOTE: Once a codeAddress field transitions from initial state to
1111  * JIT'd code, it must not be altered without first halting all
1112  * threads.  We defer the setting of the profile prefix size until
1113  * after the new code address is set to ensure that the prefix offset
1114  * is never applied to the initial interpret-only translation.  All
1115  * translations with non-zero profile prefixes will still be correct
1116  * if entered as if the profile offset is 0, but the interpret-only
1117  * template cannot handle a non-zero prefix.
1118  * NOTE: JitTable must not be in danger of reset while this
1119  * code is executing. see Issue 4271784 for details.
1120  */
1121 void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set,
1122                        bool isMethodEntry, int profilePrefixSize)
1123 {
1124     JitEntryInfoUnion oldValue;
1125     JitEntryInfoUnion newValue;
1126     /*
1127      * Get the JitTable slot for this dPC (or create one if JitTable
1128      * has been reset between the time the trace was requested and
1129      * now.
1130      */
1131     JitEntry *jitEntry = isMethodEntry ?
1132         lookupAndAdd(dPC, false /* caller holds tableLock */, isMethodEntry) :
1133                      dvmJitFindEntry(dPC, isMethodEntry);
1134     assert(jitEntry);
1135     /* Note: order of update is important */
1136     do {
1137         oldValue = jitEntry->u;
1138         newValue = oldValue;
1139         newValue.info.isMethodEntry = isMethodEntry;
1140         newValue.info.instructionSet = set;
1141         newValue.info.profileOffset = profilePrefixSize;
1142     } while (android_atomic_release_cas(
1143              oldValue.infoWord, newValue.infoWord,
1144              &jitEntry->u.infoWord) != 0);
1145     jitEntry->codeAddress = nPC;
1146 }
1147 
1148 /*
1149  * Determine if a valid trace-building request is active.  If so, set
1150  * the proper flags in interpBreak and return.  Trace selection will
1151  * then begin normally via dvmCheckBefore.
1152  */
1153 void dvmJitCheckTraceRequest(Thread* self)
1154 {
1155     int i;
1156     /*
1157      * A note on trace "hotness" filtering:
1158      *
1159      * Our first level trigger is intentionally loose - we need it to
1160      * fire easily not just to identify potential traces to compile, but
1161      * also to allow re-entry into the code cache.
1162      *
1163      * The 2nd level filter (done here) exists to be selective about
1164      * what we actually compile.  It works by requiring the same
1165      * trace head "key" (defined as filterKey below) to appear twice in
1166      * a relatively short period of time.   The difficulty is defining the
1167      * shape of the filterKey.  Unfortunately, there is no "one size fits
1168      * all" approach.
1169      *
1170      * For spiky execution profiles dominated by a smallish
1171      * number of very hot loops, we would want the second-level filter
1172      * to be very selective.  A good selective filter is requiring an
1173      * exact match of the Dalvik PC.  In other words, defining filterKey as:
1174      *     intptr_t filterKey = (intptr_t)self->interpSave.pc
1175      *
1176      * However, for flat execution profiles we do best when aggressively
1177      * translating.  A heuristically decent proxy for this is to use
1178      * the value of the method pointer containing the trace as the filterKey.
1179      * Intuitively, this is saying that once any trace in a method appears hot,
1180      * immediately translate any other trace from that same method that
1181      * survives the first-level filter.  Here, filterKey would be defined as:
1182      *     intptr_t filterKey = (intptr_t)self->interpSave.method
1183      *
1184      * The problem is that we can't easily detect whether we're dealing
1185      * with a spiky or flat profile.  If we go with the "pc" match approach,
1186      * flat profiles perform poorly.  If we go with the loose "method" match,
1187      * we end up generating a lot of useless translations.  Probably the
1188      * best approach in the future will be to retain profile information
1189      * across runs of each application in order to determine its profile,
1190      * and then choose once we have enough history.
1191      *
1192      * However, for now we've decided to choose a compromise filter scheme that
1193      * includes elements of both.  The high order bits of the filter key
1194      * are drawn from the enclosing method, and are combined with a slice
1195      * of the low-order bits of the Dalvik pc of the trace head.  The
1196      * looseness of the filter can be adjusted by changing the width of
1197      * the Dalvik pc slice (JIT_TRACE_THRESH_FILTER_PC_BITS).  The wider
1198      * the slice, the tighter the filter.
1199      *
1200      * Note: the fixed shifts in the function below reflect assumed word
1201      * alignment for method pointers, and half-word alignment of the Dalvik pc.
1203      */
1204     u4 methodKey = (u4)self->interpSave.method <<
1205                    (JIT_TRACE_THRESH_FILTER_PC_BITS - 2);
1206     u4 pcKey = ((u4)self->interpSave.pc >> 1) &
1207                ((1 << JIT_TRACE_THRESH_FILTER_PC_BITS) - 1);
1208     intptr_t filterKey = (intptr_t)(methodKey | pcKey);
1209 
1210     // Shouldn't be here if already building a trace.
1211     assert((self->interpBreak.ctl.subMode & kSubModeJitTraceBuild)==0);
1212 
1213     /* Check if the JIT request can be handled now */
1214     if ((gDvmJit.pJitEntryTable != NULL) &&
1215         ((self->interpBreak.ctl.breakFlags & kInterpSingleStep) == 0)){
1216         /* Bypass the filter for hot trace requests or during stress mode */
1217         if (self->jitState == kJitTSelectRequest &&
1218             gDvmJit.threshold > 6) {
1219             /* Two-level filtering scheme */
1220             for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
1221                 if (filterKey == self->threshFilter[i]) {
1222                     self->threshFilter[i] = 0; // Reset filter entry
1223                     break;
1224                 }
1225             }
1226             if (i == JIT_TRACE_THRESH_FILTER_SIZE) {
1227                 /*
1228                  * Use random replacement policy - otherwise we could miss a
1229                  * large loop that contains more traces than the size of our
1230                  * filter array.
1231                  */
1232                 i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
1233                 self->threshFilter[i] = filterKey;
1234                 self->jitState = kJitDone;
1235             }
1236         }
1237 
1238         /* If the compiler is backlogged, cancel any JIT actions */
1239         if (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) {
1240             self->jitState = kJitDone;
1241         }
1242 
1243         /*
1244          * Check for additional reasons that might force the trace select
1245          * request to be dropped
1246          */
1247         if (self->jitState == kJitTSelectRequest ||
1248             self->jitState == kJitTSelectRequestHot) {
1249             if (dvmJitFindEntry(self->interpSave.pc, false)) {
1250                 /* In progress - nothing to do */
1251                 self->jitState = kJitDone;
1252             } else {
1253                 JitEntry *slot = lookupAndAdd(self->interpSave.pc,
1254                                               false /* lock */,
1255                                               false /* method entry */);
1256                 if (slot == NULL) {
1257                     /*
1258                      * Table is full.  This should have been
1259                      * detected by the compiler thread and the table
1260                      * resized before we run into it here.  Assume bad things
1261                      * are afoot and disable profiling.
1262                      */
1263                     self->jitState = kJitDone;
1264                     ALOGD("JIT: JitTable full, disabling profiling");
1265                     dvmJitStopTranslationRequests();
1266                 }
1267             }
1268         }
1269 
1270         switch (self->jitState) {
1271             case kJitTSelectRequest:
1272             case kJitTSelectRequestHot:
1273                 self->jitState = kJitTSelect;
1274                 self->traceMethod = self->interpSave.method;
1275                 self->currTraceHead = self->interpSave.pc;
1276                 self->currTraceRun = 0;
1277                 self->totalTraceLen = 0;
1278                 self->currRunHead = self->interpSave.pc;
1279                 self->currRunLen = 0;
1280                 self->trace[0].info.frag.startOffset =
1281                      self->interpSave.pc - self->interpSave.method->insns;
1282                 self->trace[0].info.frag.numInsts = 0;
1283                 self->trace[0].info.frag.runEnd = false;
1284                 self->trace[0].info.frag.hint = kJitHintNone;
1285                 self->trace[0].isCode = true;
1286                 self->lastPC = 0;
1287                 /* Turn on trace selection mode */
1288                 dvmEnableSubMode(self, kSubModeJitTraceBuild);
1289 #if defined(SHOW_TRACE)
1290                 ALOGD("Starting trace for %s at %#x",
1291                      self->interpSave.method->name, (int)self->interpSave.pc);
1292 #endif
1293                 break;
1294             case kJitDone:
1295                 break;
1296             default:
1297                 ALOGE("Unexpected JIT state: %d", self->jitState);
1298                 dvmAbort();
1299         }
1300     } else {
1301         /* Cannot build trace this time */
1302         self->jitState = kJitDone;
1303     }
1304 }
1305 
1306 /*
1307  * Resizes the JitTable.  Size must be a power of 2; returns true on failure.
1308  * Stops all threads, and thus is a heavyweight operation. May only be called
1309  * by the compiler thread.
1310  */
1311 bool dvmJitResizeJitTable( unsigned int size )
1312 {
1313     JitEntry *pNewTable;
1314     JitEntry *pOldTable;
1315     JitEntry tempEntry;
1316     unsigned int oldSize;
1317     unsigned int i;
1318 
1319     assert(gDvmJit.pJitEntryTable != NULL);
1320     assert(size && !(size & (size - 1)));   /* Is power of 2? */
1321 
1322     ALOGI("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);
1323 
1324     if (size <= gDvmJit.jitTableSize) {
1325         return true;
1326     }
1327 
1328     /* Make sure requested size is compatible with chain field width */
1329     tempEntry.u.info.chain = size;
1330     if (tempEntry.u.info.chain != size) {
1331         ALOGD("Jit: JitTable request of %d too big", size);
1332         return true;
1333     }
1334 
1335     pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable));
1336     if (pNewTable == NULL) {
1337         return true;
1338     }
1339     for (i=0; i< size; i++) {
1340         pNewTable[i].u.info.chain = size;  /* Initialize chain termination */
1341     }
1342 
1343     /* Stop all other interpreting/JIT'ing threads */
1344     dvmSuspendAllThreads(SUSPEND_FOR_TBL_RESIZE);
1345 
1346     pOldTable = gDvmJit.pJitEntryTable;
1347     oldSize = gDvmJit.jitTableSize;
1348 
1349     dvmLockMutex(&gDvmJit.tableLock);
1350     gDvmJit.pJitEntryTable = pNewTable;
1351     gDvmJit.jitTableSize = size;
1352     gDvmJit.jitTableMask = size - 1;
1353     gDvmJit.jitTableEntriesUsed = 0;
1354 
1355     for (i=0; i < oldSize; i++) {
1356         if (pOldTable[i].dPC) {
1357             JitEntry *p;
1358             u2 chain;
1359             p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/,
1360                              pOldTable[i].u.info.isMethodEntry);
1361             p->codeAddress = pOldTable[i].codeAddress;
1362             /* We need to preserve the new chain field, but copy the rest */
1363             chain = p->u.info.chain;
1364             p->u = pOldTable[i].u;
1365             p->u.info.chain = chain;
1366         }
1367     }
1368 
1369     dvmUnlockMutex(&gDvmJit.tableLock);
1370 
1371     free(pOldTable);
1372 
1373     /* Restart the world */
1374     dvmResumeAllThreads(SUSPEND_FOR_TBL_RESIZE);
1375 
1376     return false;
1377 }
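
/*
 * Illustrative caller sketch (hypothetical - the actual trigger lives in the
 * compiler thread's work loop, not here): since the current size is already a
 * power of two, doubling it satisfies the preconditions asserted above.
 *
 *     if (dvmJitResizeJitTable(gDvmJit.jitTableSize * 2)) {
 *         ALOGE("Jit: table resize failed, keeping the existing table");
 *     }
 */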
1378 
1379 /*
1380  * Reset the JitTable to the initial clean state.
1381  */
1382 void dvmJitResetTable()
1383 {
1384     JitEntry *jitEntry = gDvmJit.pJitEntryTable;
1385     unsigned int size = gDvmJit.jitTableSize;
1386     unsigned int i;
1387 
1388     dvmLockMutex(&gDvmJit.tableLock);
1389 
1390     /* Note: if there is a need to preserve any existing counts, do so here. */
1391     if (gDvmJit.pJitTraceProfCounters) {
1392         for (i=0; i < JIT_PROF_BLOCK_BUCKETS; i++) {
1393             if (gDvmJit.pJitTraceProfCounters->buckets[i])
1394                 memset((void *) gDvmJit.pJitTraceProfCounters->buckets[i],
1395                        0, sizeof(JitTraceCounter_t) * JIT_PROF_BLOCK_ENTRIES);
1396         }
1397         gDvmJit.pJitTraceProfCounters->next = 0;
1398     }
1399 
1400     memset((void *) jitEntry, 0, sizeof(JitEntry) * size);
1401     for (i=0; i< size; i++) {
1402         jitEntry[i].u.info.chain = size;  /* Initialize chain termination */
1403     }
1404     gDvmJit.jitTableEntriesUsed = 0;
1405     dvmUnlockMutex(&gDvmJit.tableLock);
1406 }
1407 
1408 /*
1409  * Return the address of the next trace profile counter.  This address
1410  * will be embedded in the generated code for the trace, and thus cannot
1411  * change while the trace exists.
1412  */
1413 JitTraceCounter_t *dvmJitNextTraceCounter()
1414 {
1415     int idx = gDvmJit.pJitTraceProfCounters->next / JIT_PROF_BLOCK_ENTRIES;
1416     int elem = gDvmJit.pJitTraceProfCounters->next % JIT_PROF_BLOCK_ENTRIES;
1417     JitTraceCounter_t *res;
1418     /* Lazily allocate blocks of counters */
1419     if (!gDvmJit.pJitTraceProfCounters->buckets[idx]) {
1420         JitTraceCounter_t *p =
1421               (JitTraceCounter_t*) calloc(JIT_PROF_BLOCK_ENTRIES, sizeof(*p));
1422         if (!p) {
1423             ALOGE("Failed to allocate block of trace profile counters");
1424             dvmAbort();
1425         }
1426         gDvmJit.pJitTraceProfCounters->buckets[idx] = p;
1427     }
1428     res = &gDvmJit.pJitTraceProfCounters->buckets[idx][elem];
1429     gDvmJit.pJitTraceProfCounters->next++;
1430     return res;
1431 }
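
/*
 * Usage sketch (illustration only): the trace compiler embeds the returned
 * address in the code it generates, so entering a trace with profiling
 * enabled effectively performs:
 *
 *     JitTraceCounter_t *counter = dvmJitNextTraceCounter();
 *     ...
 *     (*counter)++;   // emitted as a load/increment/store in target code
 */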
1432 
1433 /*
1434  * Float/double to long conversion requires clamping to the min and max of the
1435  * integer form.  If the target doesn't do this natively, use these helpers.
1436  */
1437 s8 dvmJitd2l(double d)
1438 {
1439     static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL;
1440     static const double kMinLong = (double)(s8)0x8000000000000000ULL;
1441     if (d >= kMaxLong)
1442         return (s8)0x7fffffffffffffffULL;
1443     else if (d <= kMinLong)
1444         return (s8)0x8000000000000000ULL;
1445     else if (d != d) // NaN case
1446         return 0;
1447     else
1448         return (s8)d;
1449 }
1450 
1451 s8 dvmJitf2l(float f)
1452 {
1453     static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL;
1454     static const float kMinLong = (float)(s8)0x8000000000000000ULL;
1455     if (f >= kMaxLong)
1456         return (s8)0x7fffffffffffffffULL;
1457     else if (f <= kMinLong)
1458         return (s8)0x8000000000000000ULL;
1459     else if (f != f) // NaN case
1460         return 0;
1461     else
1462         return (s8)f;
1463 }
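
/*
 * Behavior sketch for the two helpers above (input values chosen purely for
 * illustration):
 *
 *     dvmJitd2l(1e300)    == 0x7fffffffffffffffLL   // clamped to max long
 *     dvmJitd2l(-1e300)   == 0x8000000000000000LL   // clamped to min long
 *     dvmJitd2l(0.0/0.0)  == 0                      // NaN maps to zero
 *     dvmJitf2l(42.0f)    == 42                     // in-range values convert
 */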
1464 
1465 /* Should only be called by the compiler thread */
1466 void dvmJitChangeProfileMode(TraceProfilingModes newState)
1467 {
1468     if (gDvmJit.profileMode != newState) {
1469         gDvmJit.profileMode = newState;
1470         dvmJitUnchainAll();
1471     }
1472 }
1473 
1474 void dvmJitTraceProfilingOn()
1475 {
1476     if (gDvmJit.profileMode == kTraceProfilingPeriodicOff)
1477         dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
1478                                     (void*) kTraceProfilingPeriodicOn);
1479     else if (gDvmJit.profileMode == kTraceProfilingDisabled)
1480         dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
1481                                     (void*) kTraceProfilingContinuous);
1482 }
1483 
1484 void dvmJitTraceProfilingOff()
1485 {
1486     if (gDvmJit.profileMode == kTraceProfilingPeriodicOn)
1487         dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
1488                                     (void*) kTraceProfilingPeriodicOff);
1489     else if (gDvmJit.profileMode == kTraceProfilingContinuous)
1490         dvmCompilerForceWorkEnqueue(NULL, kWorkOrderProfileMode,
1491                                     (void*) kTraceProfilingDisabled);
1492 }
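
/*
 * Profile-mode transitions requested by the two helpers above (sketch):
 *
 *     kTraceProfilingPeriodicOff --on-->  kTraceProfilingPeriodicOn
 *     kTraceProfilingPeriodicOn  --off--> kTraceProfilingPeriodicOff
 *     kTraceProfilingDisabled    --on-->  kTraceProfilingContinuous
 *     kTraceProfilingContinuous  --off--> kTraceProfilingDisabled
 *
 * Each request is queued as a kWorkOrderProfileMode work order and applied by
 * the compiler thread through dvmJitChangeProfileMode() above.
 */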
1493 
1494 /*
1495  * Update JIT-specific info in Thread structure for a single thread
1496  */
1497 void dvmJitUpdateThreadStateSingle(Thread* thread)
1498 {
1499     thread->pJitProfTable = gDvmJit.pProfTable;
1500     thread->jitThreshold = gDvmJit.threshold;
1501 }
1502 
1503 /*
1504  * Walk through the thread list and refresh all local copies of
1505  * JIT global state (which was placed there for fast access).
1506  */
1507 void dvmJitUpdateThreadStateAll()
1508 {
1509     Thread* self = dvmThreadSelf();
1510     Thread* thread;
1511 
1512     dvmLockThreadList(self);
1513     for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
1514         dvmJitUpdateThreadStateSingle(thread);
1515     }
1516     dvmUnlockThreadList();
1517 
1518 }
1519 #endif /* WITH_JIT */
1520