/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/mman.h>
#include <errno.h>
#include <cutils/ashmem.h>

#include "Dalvik.h"
#include "interp/Jit.h"
#include "CompilerInternals.h"

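/* Boundaries of the pre-compiled handler templates that are copied into the
 * beginning of the code cache (see dvmCompilerSetupCodeCache below). */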
extern "C" void dvmCompilerTemplateStart(void);
extern "C" void dmvCompilerTemplateEnd(void);

static inline bool workQueueLength(void)
{
    return gDvmJit.compilerQueueLength;
}

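/*
 * Pop the next work order off the circular work queue.  The vacated slot is
 * marked kWorkOrderInvalid so it cannot be consumed twice, and the dequeue
 * index wraps around at COMPILER_WORK_QUEUE_SIZE.
 */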
static CompilerWorkOrder workDequeue(void)
{
    assert(gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex].kind
           != kWorkOrderInvalid);
    CompilerWorkOrder work =
        gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex];
    gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex++].kind =
        kWorkOrderInvalid;
    if (gDvmJit.compilerWorkDequeueIndex == COMPILER_WORK_QUEUE_SIZE) {
        gDvmJit.compilerWorkDequeueIndex = 0;
    }
    gDvmJit.compilerQueueLength--;
    if (gDvmJit.compilerQueueLength == 0) {
        dvmSignalCond(&gDvmJit.compilerQueueEmpty);
    }

    /* Remember the high water mark of the queue length */
    if (gDvmJit.compilerQueueLength > gDvmJit.compilerMaxQueued)
        gDvmJit.compilerMaxQueued = gDvmJit.compilerQueueLength;

    return work;
}

/*
 * Enqueue a work order - retrying until successful.  If the attempt to enqueue
 * is repeatedly unsuccessful, assume the JIT is in a bad state and force a
 * code cache reset.
 */
#define ENQUEUE_MAX_RETRIES 20
void dvmCompilerForceWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    bool success;
    int retries = 0;
    do {
        success = dvmCompilerWorkEnqueue(pc, kind, info);
        if (!success) {
            retries++;
            if (retries > ENQUEUE_MAX_RETRIES) {
                LOGE("JIT: compiler queue wedged - forcing reset");
                gDvmJit.codeCacheFull = true;  // Force reset
                success = true;  // Because we'll drop the order now anyway
            } else {
                dvmLockMutex(&gDvmJit.compilerLock);
                pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                                  &gDvmJit.compilerLock);
                dvmUnlockMutex(&gDvmJit.compilerLock);
            }
        }
    } while (!success);
}

/*
 * Attempt to enqueue a work order, returning true if successful.
 *
 * NOTE: Make sure that the caller frees the info pointer if the return value
 * is false.
 */
bool dvmCompilerWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    int cc;
    int i;
    int numWork;
    bool result = true;

    dvmLockMutex(&gDvmJit.compilerLock);

    /*
     * Return if queue or code cache is full.
     */
    if (gDvmJit.compilerQueueLength == COMPILER_WORK_QUEUE_SIZE ||
        gDvmJit.codeCacheFull == true) {
        dvmUnlockMutex(&gDvmJit.compilerLock);
        return false;
    }

    for (numWork = gDvmJit.compilerQueueLength,
           i = gDvmJit.compilerWorkDequeueIndex;
         numWork > 0;
         numWork--) {
        /* Already enqueued */
        if (gDvmJit.compilerWorkQueue[i++].pc == pc) {
            dvmUnlockMutex(&gDvmJit.compilerLock);
            return true;
        }
        /* Wrap around */
        if (i == COMPILER_WORK_QUEUE_SIZE)
            i = 0;
    }

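    /* Claim the slot at the enqueue index and fill in the new work order */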
    CompilerWorkOrder *newOrder =
        &gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkEnqueueIndex];
    newOrder->pc = pc;
    newOrder->kind = kind;
    newOrder->info = info;
    newOrder->result.methodCompilationAborted = NULL;
    newOrder->result.codeAddress = NULL;
    newOrder->result.discardResult =
        (kind == kWorkOrderTraceDebug) ? true : false;
    newOrder->result.cacheVersion = gDvmJit.cacheVersion;
    newOrder->result.requestingThread = dvmThreadSelf();

    gDvmJit.compilerWorkEnqueueIndex++;
    if (gDvmJit.compilerWorkEnqueueIndex == COMPILER_WORK_QUEUE_SIZE)
        gDvmJit.compilerWorkEnqueueIndex = 0;
    gDvmJit.compilerQueueLength++;
    cc = pthread_cond_signal(&gDvmJit.compilerQueueActivity);
    assert(cc == 0);

    dvmUnlockMutex(&gDvmJit.compilerLock);
    return result;
}

/* Block until the queue length is 0, or there is a pending suspend request */
void dvmCompilerDrainQueue(void)
{
    Thread *self = dvmThreadSelf();

    dvmLockMutex(&gDvmJit.compilerLock);
    while (workQueueLength() != 0 && !gDvmJit.haltCompilerThread &&
           self->suspendCount == 0) {
        /*
         * Use a timed wait here - more than one mutator thread may be blocked
         * but the compiler thread will only signal once when the queue is
         * emptied. Furthermore, the compiler thread may have been shut down
         * so the blocked thread may never get the wakeup signal.
         */
        dvmRelativeCondWait(&gDvmJit.compilerQueueEmpty, &gDvmJit.compilerLock,
                            1000, 0);
    }
    dvmUnlockMutex(&gDvmJit.compilerLock);
}

bool dvmCompilerSetupCodeCache(void)
{
    int fd;

    /* Allocate the code cache */
    fd = ashmem_create_region("dalvik-jit-code-cache", gDvmJit.codeCacheSize);
    if (fd < 0) {
        LOGE("Could not create %u-byte ashmem region for the JIT code cache",
             gDvmJit.codeCacheSize);
        return false;
    }
    gDvmJit.codeCache = mmap(NULL, gDvmJit.codeCacheSize,
                             PROT_READ | PROT_WRITE | PROT_EXEC,
                             MAP_PRIVATE, fd, 0);
    close(fd);
    if (gDvmJit.codeCache == MAP_FAILED) {
        LOGE("Failed to mmap the JIT code cache: %s", strerror(errno));
        return false;
    }

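    /* getpagesize() is a power of two, so size - 1 masks the page-offset bits */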
    gDvmJit.pageSizeMask = getpagesize() - 1;

    /* This can be found through "dalvik-jit-code-cache" in /proc/<pid>/maps */
    // LOGD("Code cache starts at %p", gDvmJit.codeCache);

    /* Copy the template code into the beginning of the code cache */
    int templateSize = (intptr_t) dmvCompilerTemplateEnd -
                       (intptr_t) dvmCompilerTemplateStart;
    memcpy((void *) gDvmJit.codeCache,
           (void *) dvmCompilerTemplateStart,
           templateSize);

    /*
     * Work around a CPU bug by keeping the 32-bit ARM handler code in its own
     * page.
     */
    if (dvmCompilerInstructionSet() == DALVIK_JIT_THUMB2) {
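        /* Round templateSize up to the next 4 KB boundary so translations
         * start on a fresh page */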
        templateSize = (templateSize + 4095) & ~4095;
    }

    gDvmJit.templateSize = templateSize;
    gDvmJit.codeCacheByteUsed = templateSize;

    /* Only flush the part in the code cache that is being used now */
    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                          (intptr_t) gDvmJit.codeCache + templateSize, 0);

    int result = mprotect(gDvmJit.codeCache, gDvmJit.codeCacheSize,
                          PROTECT_CODE_CACHE_ATTRS);

    if (result == -1) {
        LOGE("Failed to remove the write permission for the code cache");
        dvmAbort();
    }

    return true;
}

static void crawlDalvikStack(Thread *thread, bool print)
{
    void *fp = thread->interpSave.curFrame;
    StackSaveArea* saveArea = NULL;
    int stackLevel = 0;

    if (print) {
        LOGD("Crawling tid %d (%s / %p %s)", thread->systemTid,
             dvmGetThreadStatusStr(thread->status),
             thread->inJitCodeCache,
             thread->inJitCodeCache ? "jit" : "interp");
    }
    /* Crawl the Dalvik stack frames to clear the returnAddr field */
    while (fp != NULL) {
        saveArea = SAVEAREA_FROM_FP(fp);

        if (print) {
            if (dvmIsBreakFrame((u4*)fp)) {
                LOGD("  #%d: break frame (%p)",
                     stackLevel, saveArea->returnAddr);
            }
            else {
                LOGD("  #%d: %s.%s%s (%p)",
                     stackLevel,
                     saveArea->method->clazz->descriptor,
                     saveArea->method->name,
                     dvmIsNativeMethod(saveArea->method) ?
                         " (native)" : "",
                     saveArea->returnAddr);
            }
        }
        stackLevel++;
        saveArea->returnAddr = NULL;
        assert(fp != saveArea->prevFrame);
        fp = saveArea->prevFrame;
    }
    /* Make sure the stack is fully unwound to the bottom */
    assert(saveArea == NULL ||
           (u1 *) (saveArea+1) == thread->interpStackStart);
}

static void resetCodeCache(void)
{
    Thread* thread;
    u8 startTime = dvmGetRelativeTimeUsec();
    int inJit = 0;
    int byteUsed = gDvmJit.codeCacheByteUsed;

    /* If any thread is found stuck in the JIT state, don't reset the cache */
    dvmLockThreadList(NULL);
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        /*
         * Crawl the stack to wipe out the returnAddr field so that
         * 1) the soon-to-be-deleted code in the JIT cache won't be used
         * 2) or the thread stuck in the JIT land will soon return
         *    to the interpreter land
         */
        crawlDalvikStack(thread, false);
        if (thread->inJitCodeCache) {
            inJit++;
        }
        /* Cancel any ongoing trace selection */
        dvmDisableSubMode(thread, kSubModeJitTraceBuild);
    }
    dvmUnlockThreadList();

    if (inJit) {
        LOGD("JIT code cache reset delayed (%d bytes %d/%d)",
             gDvmJit.codeCacheByteUsed, gDvmJit.numCodeCacheReset,
             ++gDvmJit.numCodeCacheResetDelayed);
        return;
    }

    /* Lock the mutex to clean up the work queue */
    dvmLockMutex(&gDvmJit.compilerLock);

    /* Update the translation cache version */
    gDvmJit.cacheVersion++;

    /* Drain the work queue to free the work orders */
    while (workQueueLength()) {
        CompilerWorkOrder work = workDequeue();
        free(work.info);
    }

    /* Reset the JitEntry table contents to the initial unpopulated state */
    dvmJitResetTable();

    UNPROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
    /*
     * Wipe out the code cache content to force immediate crashes if
     * stale JIT'ed code is invoked.
     */
    memset((char *) gDvmJit.codeCache + gDvmJit.templateSize,
           0,
           gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);
    dvmCompilerCacheFlush((intptr_t) gDvmJit.codeCache,
                          (intptr_t) gDvmJit.codeCache +
                          gDvmJit.codeCacheByteUsed, 0);

    PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);

    /* Reset the current mark of used bytes to the end of template code */
    gDvmJit.codeCacheByteUsed = gDvmJit.templateSize;
    gDvmJit.numCompilations = 0;

    /* Reset the work queue */
    memset(gDvmJit.compilerWorkQueue, 0,
           sizeof(CompilerWorkOrder) * COMPILER_WORK_QUEUE_SIZE);
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;

    /* Reset the IC patch work queue */
    dvmLockMutex(&gDvmJit.compilerICPatchLock);
    gDvmJit.compilerICPatchIndex = 0;
    dvmUnlockMutex(&gDvmJit.compilerICPatchLock);

    /*
     * Reset the inflight compilation address (can only be done in safe points
     * or by the compiler thread when its thread state is RUNNING).
     */
    gDvmJit.inflightBaseAddr = NULL;

    /* All clear now */
    gDvmJit.codeCacheFull = false;

    dvmUnlockMutex(&gDvmJit.compilerLock);

    LOGD("JIT code cache reset in %lld ms (%d bytes %d/%d)",
         (dvmGetRelativeTimeUsec() - startTime) / 1000,
         byteUsed, ++gDvmJit.numCodeCacheReset,
         gDvmJit.numCodeCacheResetDelayed);
}

/*
 * Perform actions that are only safe when all threads are suspended. Currently
 * we do:
 * 1) Check if the code cache is full. If so reset it and restart populating it
 *    from scratch.
 * 2) Patch predicted chaining cells by consuming recorded work orders.
 */
void dvmCompilerPerformSafePointChecks(void)
{
    if (gDvmJit.codeCacheFull) {
        resetCodeCache();
    }
    dvmCompilerPatchInlineCache();
}

static bool compilerThreadStartup(void)
{
    JitEntry *pJitTable = NULL;
    unsigned char *pJitProfTable = NULL;
    JitTraceProfCounters *pJitTraceProfCounters = NULL;
    unsigned int i;

    if (!dvmCompilerArchInit())
        goto fail;

    /*
     * Setup the code cache if we have not inherited a valid code cache
     * from the zygote.
     */
    if (gDvmJit.codeCache == NULL) {
        if (!dvmCompilerSetupCodeCache())
            goto fail;
    }

    /* Allocate the initial arena block */
    if (dvmCompilerHeapInit() == false) {
        goto fail;
    }

    /* Cache the thread pointer */
    gDvmJit.compilerThread = dvmThreadSelf();

    dvmLockMutex(&gDvmJit.compilerLock);

    /* Track method-level compilation statistics */
    gDvmJit.methodStatsTable = dvmHashTableCreate(32, NULL);

#if defined(WITH_JIT_TUNING)
    gDvm.verboseShutdown = true;
#endif

    dvmUnlockMutex(&gDvmJit.compilerLock);

    /* Set up the JitTable */

    /* Power of 2? */
    assert(gDvmJit.jitTableSize &&
           !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1)));

    dvmInitMutex(&gDvmJit.tableLock);
    dvmLockMutex(&gDvmJit.tableLock);
    pJitTable = (JitEntry*)
                calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
    if (!pJitTable) {
        LOGE("jit table allocation failed");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    /*
     * NOTE: the profile table must only be allocated once, globally.
     * Profiling is turned on and off by nulling out gDvmJit.pProfTable
     * and then restoring its original value.  However, this action
     * is not synchronized for speed so threads may continue to hold
     * and update the profile table after profiling has been turned
     * off by nulling the global pointer.  Be aware.
     */
    pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
    if (!pJitProfTable) {
        LOGE("jit prof table allocation failed");
        free(pJitTable);
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
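    /* Seed every profile counter with the JIT threshold */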
    memset(pJitProfTable, gDvmJit.threshold, JIT_PROF_SIZE);
    for (i=0; i < gDvmJit.jitTableSize; i++) {
       pJitTable[i].u.info.chain = gDvmJit.jitTableSize;
    }
    /* Is chain field wide enough for termination pattern? */
    assert(pJitTable[0].u.info.chain == gDvmJit.jitTableSize);

    /* Allocate the trace profiling structure */
    pJitTraceProfCounters = (JitTraceProfCounters*)
                             calloc(1, sizeof(*pJitTraceProfCounters));
    if (!pJitTraceProfCounters) {
        LOGE("jit trace prof counters allocation failed");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }

    gDvmJit.pJitEntryTable = pJitTable;
    gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
    gDvmJit.jitTableEntriesUsed = 0;
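    /* High-water mark at three quarters of the work queue capacity */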
    gDvmJit.compilerHighWater =
        COMPILER_WORK_QUEUE_SIZE - (COMPILER_WORK_QUEUE_SIZE/4);
    /*
     * If the VM is launched with wait-on-the-debugger, we will need to hide
     * the profile table here
     */
    gDvmJit.pProfTable = dvmDebuggerOrProfilerActive() ? NULL : pJitProfTable;
    gDvmJit.pProfTableCopy = pJitProfTable;
    gDvmJit.pJitTraceProfCounters = pJitTraceProfCounters;
    dvmJitUpdateThreadStateAll();
    dvmUnlockMutex(&gDvmJit.tableLock);

    /* Signal running threads to refresh their cached pJitTable pointers */
    dvmSuspendAllThreads(SUSPEND_FOR_REFRESH);
    dvmResumeAllThreads(SUSPEND_FOR_REFRESH);

    /* Enable signature breakpoints by customizing the following code */
#if defined(SIGNATURE_BREAKPOINT)
    /*
     * Suppose one sees the following native crash in the bugreport:
     * I/DEBUG   ( 1638): Build fingerprint: 'unknown'
     * I/DEBUG   ( 1638): pid: 2468, tid: 2507  >>> com.google.android.gallery3d
     * I/DEBUG   ( 1638): signal 11 (SIGSEGV), fault addr 00001400
     * I/DEBUG   ( 1638):  r0 44ea7190  r1 44e4f7b8  r2 44ebc710  r3 00000000
     * I/DEBUG   ( 1638):  r4 00000a00  r5 41862dec  r6 4710dc10  r7 00000280
     * I/DEBUG   ( 1638):  r8 ad010f40  r9 46a37a12  10 001116b0  fp 42a78208
     * I/DEBUG   ( 1638):  ip 00000090  sp 4710dbc8  lr ad060e67  pc 46b90682
     * cpsr 00000030
     * I/DEBUG   ( 1638):  #00  pc 46b90682 /dev/ashmem/dalvik-jit-code-cache
     * I/DEBUG   ( 1638):  #01  pc 00060e62  /system/lib/libdvm.so
     *
     * I/DEBUG   ( 1638): code around pc:
     * I/DEBUG   ( 1638): 46b90660 6888d01c 34091dcc d2174287 4a186b68
     * I/DEBUG   ( 1638): 46b90670 d0052800 68006809 28004790 6b68d00e
     * I/DEBUG   ( 1638): 46b90680 512000bc 37016eaf 6ea866af 6f696028
     * I/DEBUG   ( 1638): 46b90690 682a6069 429a686b e003da08 6df1480b
     * I/DEBUG   ( 1638): 46b906a0 1c2d4788 47806d70 46a378fa 47806d70
     *
     * Clearly it is a JIT bug. To find out which translation contains the
     * offending code, the content of the memory dump around the faulting PC
     * can be pasted into the gDvmJit.signatureBreakpoint[] array; the next time
     * a similar compilation is created, the JIT compiler will replay the trace
     * in verbose mode so the emitted instruction sequence can be inspected in
     * detail.
     *
     * The length of the signature may take some experimentation to determine.
     * The rule of thumb is don't include PC-relative instructions in the
     * signature since they may be affected by the alignment of the compiled
     * code. However, a signature that's too short might increase the chance of
     * false positive matches. Using gdbjithelper to disassemble the memory
     * content first might be a good companion approach.
     *
     * For example, if the next 4 words starting from 46b90680 are pasted into
     * the data structure:
     */

    gDvmJit.signatureBreakpointSize = 4;
    gDvmJit.signatureBreakpoint =
        malloc(sizeof(u4) * gDvmJit.signatureBreakpointSize);
    gDvmJit.signatureBreakpoint[0] = 0x512000bc;
    gDvmJit.signatureBreakpoint[1] = 0x37016eaf;
    gDvmJit.signatureBreakpoint[2] = 0x6ea866af;
    gDvmJit.signatureBreakpoint[3] = 0x6f696028;

    /*
     * The following log will be printed when a match is found in subsequent
     * test runs:
     *
     * D/dalvikvm( 2468): Signature match starting from offset 0x34 (4 words)
     * D/dalvikvm( 2468): --------
     * D/dalvikvm( 2468): Compiler: Building trace for computeVisibleItems,
     * offset 0x1f7
     * D/dalvikvm( 2468): 0x46a37a12: 0x0090 add-int v42, v5, v26
     * D/dalvikvm( 2468): 0x46a37a16: 0x004d aput-object v13, v14, v42
     * D/dalvikvm( 2468): 0x46a37a1a: 0x0028 goto, (#0), (#0)
     * D/dalvikvm( 2468): 0x46a3794e: 0x00d8 add-int/lit8 v26, v26, (#1)
     * D/dalvikvm( 2468): 0x46a37952: 0x0028 goto, (#0), (#0)
     * D/dalvikvm( 2468): 0x46a378ee: 0x0002 move/from16 v0, v26, (#0)
     * D/dalvikvm( 2468): 0x46a378f2: 0x0002 move/from16 v1, v29, (#0)
     * D/dalvikvm( 2468): 0x46a378f6: 0x0035 if-ge v0, v1, (#10)
     * D/dalvikvm( 2468): TRACEINFO (554): 0x46a37624
     * Lcom/cooliris/media/GridLayer;computeVisibleItems 0x1f7 14 of 934, 8
     * blocks
     *     :
     *     :
     * D/dalvikvm( 2468): 0x20 (0020): ldr     r0, [r5, #52]
     * D/dalvikvm( 2468): 0x22 (0022): ldr     r2, [pc, #96]
     * D/dalvikvm( 2468): 0x24 (0024): cmp     r0, #0
     * D/dalvikvm( 2468): 0x26 (0026): beq     0x00000034
     * D/dalvikvm( 2468): 0x28 (0028): ldr     r1, [r1, #0]
     * D/dalvikvm( 2468): 0x2a (002a): ldr     r0, [r0, #0]
     * D/dalvikvm( 2468): 0x2c (002c): blx     r2
     * D/dalvikvm( 2468): 0x2e (002e): cmp     r0, #0
     * D/dalvikvm( 2468): 0x30 (0030): beq     0x00000050
     * D/dalvikvm( 2468): 0x32 (0032): ldr     r0, [r5, #52]
     * D/dalvikvm( 2468): 0x34 (0034): lsls    r4, r7, #2
     * D/dalvikvm( 2468): 0x36 (0036): str     r0, [r4, r4]
     * D/dalvikvm( 2468): -------- dalvik offset: 0x01fb @ goto, (#0), (#0)
     * D/dalvikvm( 2468): L0x0195:
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0195 @ add-int/lit8 v26,
     * v26, (#1)
     * D/dalvikvm( 2468): 0x38 (0038): ldr     r7, [r5, #104]
     * D/dalvikvm( 2468): 0x3a (003a): adds    r7, r7, #1
     * D/dalvikvm( 2468): 0x3c (003c): str     r7, [r5, #104]
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0197 @ goto, (#0), (#0)
     * D/dalvikvm( 2468): L0x0165:
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0165 @ move/from16 v0, v26,
     * (#0)
     * D/dalvikvm( 2468): 0x3e (003e): ldr     r0, [r5, #104]
     * D/dalvikvm( 2468): 0x40 (0040): str     r0, [r5, #0]
     *
     * The "str r0, [r4, r4]" is indeed the culprit of the native crash.
     */
#endif

    return true;

fail:
    return false;

}

static void *compilerThreadStart(void *arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);

    /*
     * If we're not running stand-alone, wait a little before
     * receiving translation requests on the assumption that process start
     * up code isn't worth compiling.  We'll resume when the framework
     * signals us that the first screen draw has happened, or the timer
     * below expires (to catch daemons).
     *
     * There is a theoretical race between the callback to
     * VMRuntime.startJitCompilation and when the compiler thread reaches this
     * point. In case the callback happens earlier, in order not to permanently
     * hold the system_server (which is not using the timed wait) in
     * interpreter-only mode we bypass the delay here.
     */
    if (gDvmJit.runningInAndroidFramework &&
        !gDvmJit.alreadyEnabledViaFramework) {
        /*
         * If the current VM instance is the system server (detected by having
         * 0 in gDvm.systemServerPid), we will use the indefinite wait on the
         * condition variable to determine whether to start the JIT or not.
         * If the system server detects that the whole system is booted in
         * safe mode, the condition variable will never be signaled and the
         * system server will remain in the interpreter-only mode. All
         * subsequent apps will be started with the --enable-safemode flag
         * explicitly appended.
         */
        if (gDvm.systemServerPid == 0) {
            dvmLockMutex(&gDvmJit.compilerLock);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            dvmUnlockMutex(&gDvmJit.compilerLock);
            LOGD("JIT started for system_server");
        } else {
            dvmLockMutex(&gDvmJit.compilerLock);
            /*
             * TUNING: experiment with the delay & perhaps make it
             * target-specific
             */
            dvmRelativeCondWait(&gDvmJit.compilerQueueActivity,
                                &gDvmJit.compilerLock, 3000, 0);
            dvmUnlockMutex(&gDvmJit.compilerLock);
        }
        if (gDvmJit.haltCompilerThread) {
            return NULL;
        }
    }

    compilerThreadStartup();

    dvmLockMutex(&gDvmJit.compilerLock);
    /*
     * Since the compiler thread will not touch any objects on the heap once
     * being created, we just fake its state as VMWAIT so that it can be a
     * bit late when there is a suspend request pending.
     */
    while (!gDvmJit.haltCompilerThread) {
        if (workQueueLength() == 0) {
            int cc;
            cc = pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
            assert(cc == 0);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            continue;
        } else {
            do {
                CompilerWorkOrder work = workDequeue();
                dvmUnlockMutex(&gDvmJit.compilerLock);
#if defined(WITH_JIT_TUNING)
                /*
                 * This is live across setjmp().  Mark it volatile to suppress
                 * a gcc warning.  We should not need this since it is assigned
                 * only once but gcc is not smart enough.
                 */
                volatile u8 startTime = dvmGetRelativeTimeUsec();
#endif
                /*
                 * Check whether there is a suspend request on me.  This
                 * is necessary to allow a clean shutdown.
                 *
                 * However, in the blocking stress testing mode, let the
                 * compiler thread continue doing compilations to unblock
                 * other requesting threads. This may occasionally prevent
                 * shutdown from proceeding cleanly in the standalone invocation
                 * of the vm but this should be acceptable.
                 */
                if (!gDvmJit.blockingMode)
                    dvmCheckSuspendPending(dvmThreadSelf());
                /* Is JitTable filling up? */
                if (gDvmJit.jitTableEntriesUsed >
                    (gDvmJit.jitTableSize - gDvmJit.jitTableSize/4)) {
                    bool resizeFail =
                        dvmJitResizeJitTable(gDvmJit.jitTableSize * 2);
                    /*
                     * If the jit table is full, consider it time to reset
                     * the code cache too.
                     */
                    gDvmJit.codeCacheFull |= resizeFail;
                }
                if (gDvmJit.haltCompilerThread) {
                    LOGD("Compiler shutdown in progress - discarding request");
                } else if (!gDvmJit.codeCacheFull) {
                    jmp_buf jmpBuf;
                    work.bailPtr = &jmpBuf;
                    bool aborted = setjmp(jmpBuf);
                    if (!aborted) {
                        bool codeCompiled = dvmCompilerDoWork(&work);
                        /*
                         * Make sure we are still operating with the
                         * same translation cache version.  See
                         * Issue 4271784 for details.
                         */
                        dvmLockMutex(&gDvmJit.compilerLock);
                        if ((work.result.cacheVersion ==
                             gDvmJit.cacheVersion) &&
                             codeCompiled &&
                             !work.result.discardResult &&
                             work.result.codeAddress) {
                            dvmJitSetCodeAddr(work.pc, work.result.codeAddress,
                                              work.result.instructionSet,
                                              false, /* not method entry */
                                              work.result.profileCodeSize);
                        }
                        dvmUnlockMutex(&gDvmJit.compilerLock);
                    }
                    dvmCompilerArenaReset();
                }
                free(work.info);
#if defined(WITH_JIT_TUNING)
                gDvmJit.jitTime += dvmGetRelativeTimeUsec() - startTime;
#endif
                dvmLockMutex(&gDvmJit.compilerLock);
            } while (workQueueLength() != 0);
        }
    }
    pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * As part of detaching the thread we need to call into Java code to update
     * the ThreadGroup, and we should not be in VMWAIT state while executing
     * interpreted code.
     */
    dvmChangeStatus(NULL, THREAD_RUNNING);

    if (gDvm.verboseShutdown)
        LOGD("Compiler thread shutting down");
    return NULL;
}

bool dvmCompilerStartup(void)
{
    dvmInitMutex(&gDvmJit.compilerLock);
    dvmInitMutex(&gDvmJit.compilerICPatchLock);
    dvmInitMutex(&gDvmJit.codeCacheProtectionLock);
    dvmLockMutex(&gDvmJit.compilerLock);
    pthread_cond_init(&gDvmJit.compilerQueueActivity, NULL);
    pthread_cond_init(&gDvmJit.compilerQueueEmpty, NULL);

    /* Reset the work queue */
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * Defer the rest of initialization until we're sure JIT'ing makes sense.
     * Launch the compiler thread, which will do the real initialization if and
     * when it is signalled to do so.
     */
    return dvmCreateInternalThread(&gDvmJit.compilerHandle, "Compiler",
                                   compilerThreadStart, NULL);
}

void dvmCompilerShutdown(void)
{
    void *threadReturn;

    /* Disable new translation requests */
    gDvmJit.pProfTable = NULL;
    gDvmJit.pProfTableCopy = NULL;
    dvmJitUpdateThreadStateAll();

    if (gDvm.verboseShutdown ||
            gDvmJit.profileMode == kTraceProfilingContinuous) {
        dvmCompilerDumpStats();
        while (gDvmJit.compilerQueueLength)
            sleep(5);
    }

    if (gDvmJit.compilerHandle) {

        gDvmJit.haltCompilerThread = true;

        dvmLockMutex(&gDvmJit.compilerLock);
        pthread_cond_signal(&gDvmJit.compilerQueueActivity);
        dvmUnlockMutex(&gDvmJit.compilerLock);

        if (pthread_join(gDvmJit.compilerHandle, &threadReturn) != 0)
            LOGW("Compiler thread join failed");
        else if (gDvm.verboseShutdown)
            LOGD("Compiler thread has shut down");
    }

    /* Break loops within the translation cache */
    dvmJitUnchainAll();

    /*
     * NOTE: our current implementation doesn't allow for the compiler
     * thread to be restarted after it exits here.  We aren't freeing
     * the JitTable or the ProfTable because threads which still may be
     * running or in the process of shutting down may hold references to
     * them.
     */
}

void dvmCompilerUpdateGlobalState()
{
    bool jitActive;
    bool jitActivate;
    bool needUnchain = false;

    /*
     * The tableLock might not be initialized yet by the compiler thread if
     * a debugger is attached from the very beginning of the VM launch. If
     * pProfTableCopy is NULL, the lock is not initialized yet and we don't
     * need to refresh anything either.
     */
    if (gDvmJit.pProfTableCopy == NULL) {
        return;
    }

    /*
     * On the first enabling of method tracing, switch the compiler
     * into a mode that includes trace support for invokes and returns.
     * If there are any existing translations, flush them.  NOTE: we
     * can't blindly flush the translation cache because this code
     * may be executed before the compiler thread has finished
     * initialization.
     */
    if ((gDvm.activeProfilers != 0) &&
        !gDvmJit.methodTraceSupport) {
        bool resetRequired;
        /*
         * compilerLock will prevent new compilations from being
         * installed while we are working.
         */
        dvmLockMutex(&gDvmJit.compilerLock);
        gDvmJit.cacheVersion++; // invalidate compilations in flight
        gDvmJit.methodTraceSupport = true;
        resetRequired = (gDvmJit.numCompilations != 0);
        dvmUnlockMutex(&gDvmJit.compilerLock);
        if (resetRequired) {
            dvmSuspendAllThreads(SUSPEND_FOR_CC_RESET);
            resetCodeCache();
            dvmResumeAllThreads(SUSPEND_FOR_CC_RESET);
        }
    }

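    /* Swap pProfTable in or out to enable or disable new translation requests */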
    dvmLockMutex(&gDvmJit.tableLock);
    jitActive = gDvmJit.pProfTable != NULL;
    jitActivate = !dvmDebuggerOrProfilerActive();

    if (jitActivate && !jitActive) {
        gDvmJit.pProfTable = gDvmJit.pProfTableCopy;
    } else if (!jitActivate && jitActive) {
        gDvmJit.pProfTable = NULL;
        needUnchain = true;
    }
    dvmUnlockMutex(&gDvmJit.tableLock);
    if (needUnchain)
        dvmJitUnchainAll();
    // Make sure all threads have current values
    dvmJitUpdateThreadStateAll();
}