/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/mman.h>
#include <errno.h>
#include <cutils/ashmem.h>

#include "Dalvik.h"
#include "interp/Jit.h"
#include "CompilerInternals.h"

static inline int workQueueLength(void)
{
    return gDvmJit.compilerQueueLength;
}

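/*
 * Remove the work order at the head of the circular work queue and mark its
 * slot invalid. The caller is expected to hold gDvmJit.compilerLock.
 */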
static CompilerWorkOrder workDequeue(void)
{
    assert(gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex].kind
           != kWorkOrderInvalid);
    CompilerWorkOrder work =
        gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex];
    gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkDequeueIndex++].kind =
        kWorkOrderInvalid;
    if (gDvmJit.compilerWorkDequeueIndex == COMPILER_WORK_QUEUE_SIZE) {
        gDvmJit.compilerWorkDequeueIndex = 0;
    }
    gDvmJit.compilerQueueLength--;
    if (gDvmJit.compilerQueueLength == 0) {
        dvmSignalCond(&gDvmJit.compilerQueueEmpty);
    }

    /* Remember the high water mark of the queue length */
    if (gDvmJit.compilerQueueLength > gDvmJit.compilerMaxQueued)
        gDvmJit.compilerMaxQueued = gDvmJit.compilerQueueLength;

    return work;
}

/*
 * Attempt to enqueue a work order, returning true if successful.
 * This routine will not block; it simply returns false if it couldn't
 * acquire the lock or if the queue is full.
 *
 * NOTE: the caller must free the info pointer if the return value
 * is false.
 */
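/*
 * Illustrative call sequence for a hypothetical caller (allocateWorkInfo is
 * an assumed helper, not part of this file):
 *
 *     void *info = allocateWorkInfo();
 *     if (!dvmCompilerWorkEnqueue(pc, kWorkOrderTrace, info))
 *         free(info);    // not enqueued - ownership stays with the caller
 */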
bool dvmCompilerWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
{
    int cc;
    int i;
    int numWork;
    bool result = true;

    if (dvmTryLockMutex(&gDvmJit.compilerLock)) {
        return false;  // Couldn't acquire the lock
    }

    /*
     * Return if queue or code cache is full.
     */
    if (gDvmJit.compilerQueueLength == COMPILER_WORK_QUEUE_SIZE ||
        gDvmJit.codeCacheFull) {
        result = false;
        goto unlockAndExit;
    }

    for (numWork = gDvmJit.compilerQueueLength,
         i = gDvmJit.compilerWorkDequeueIndex;
         numWork > 0;
         numWork--) {
        /* Already enqueued */
        if (gDvmJit.compilerWorkQueue[i++].pc == pc)
            goto unlockAndExit;
        /* Wrap around */
        if (i == COMPILER_WORK_QUEUE_SIZE)
            i = 0;
    }

    CompilerWorkOrder *newOrder =
        &gDvmJit.compilerWorkQueue[gDvmJit.compilerWorkEnqueueIndex];
    newOrder->pc = pc;
    newOrder->kind = kind;
    newOrder->info = info;
    newOrder->result.methodCompilationAborted = NULL;
    newOrder->result.codeAddress = NULL;
    newOrder->result.discardResult = (kind == kWorkOrderTraceDebug);
    newOrder->result.requestingThread = dvmThreadSelf();

    gDvmJit.compilerWorkEnqueueIndex++;
    if (gDvmJit.compilerWorkEnqueueIndex == COMPILER_WORK_QUEUE_SIZE)
        gDvmJit.compilerWorkEnqueueIndex = 0;
    gDvmJit.compilerQueueLength++;
    cc = pthread_cond_signal(&gDvmJit.compilerQueueActivity);
    assert(cc == 0);

unlockAndExit:
    dvmUnlockMutex(&gDvmJit.compilerLock);
    return result;
}


/* Block until the queue length is 0, or there is a pending suspend request */
void dvmCompilerDrainQueue(void)
{
    Thread *self = dvmThreadSelf();

    dvmLockMutex(&gDvmJit.compilerLock);
    while (workQueueLength() != 0 && !gDvmJit.haltCompilerThread &&
           self->suspendCount == 0) {
        /*
         * Use a timed wait here - more than one mutator thread may be blocked
         * but the compiler thread will only signal once when the queue is
         * emptied. Furthermore, the compiler thread may have been shut down,
         * in which case a blocked thread would never receive the wakeup
         * signal.
         */
        dvmRelativeCondWait(&gDvmJit.compilerQueueEmpty,
                            &gDvmJit.compilerLock, 1000, 0);
    }
    dvmUnlockMutex(&gDvmJit.compilerLock);
}

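/*
 * Create the JIT code cache (an ashmem region mapped readable, writable, and
 * executable), copy the compiler templates into its base, and then drop the
 * write permission until a compilation needs it.
 */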
bool dvmCompilerSetupCodeCache(void)
{
    extern void dvmCompilerTemplateStart(void);
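    /* Note: the "dmv" spelling below is not a typo to fix locally - it must
     * match the name of the symbol as it is exported elsewhere. */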
    extern void dmvCompilerTemplateEnd(void);
    int fd;

    /* Allocate the code cache */
    fd = ashmem_create_region("dalvik-jit-code-cache", gDvmJit.codeCacheSize);
    if (fd < 0) {
        LOGE("Could not create %u-byte ashmem region for the JIT code cache",
             gDvmJit.codeCacheSize);
        return false;
    }
    gDvmJit.codeCache = mmap(NULL, gDvmJit.codeCacheSize,
                             PROT_READ | PROT_WRITE | PROT_EXEC,
                             MAP_PRIVATE, fd, 0);
    close(fd);
    if (gDvmJit.codeCache == MAP_FAILED) {
        LOGE("Failed to mmap the JIT code cache: %s\n", strerror(errno));
        return false;
    }

    gDvmJit.pageSizeMask = getpagesize() - 1;

    /* This can be found through "dalvik-jit-code-cache" in /proc/<pid>/maps */
    // LOGD("Code cache starts at %p", gDvmJit.codeCache);

    /* Copy the template code into the beginning of the code cache */
    int templateSize = (intptr_t) dmvCompilerTemplateEnd -
                       (intptr_t) dvmCompilerTemplateStart;
    memcpy((void *) gDvmJit.codeCache,
           (void *) dvmCompilerTemplateStart,
           templateSize);

    /*
     * Work around a CPU bug by keeping the 32-bit ARM handler code in its own
     * page.
     */
    if (dvmCompilerInstructionSet() == DALVIK_JIT_THUMB2) {
        templateSize = (templateSize + 4095) & ~4095;
    }

    gDvmJit.templateSize = templateSize;
    gDvmJit.codeCacheByteUsed = templateSize;

    /* Only flush the part of the code cache that is being used now */
    cacheflush((intptr_t) gDvmJit.codeCache,
               (intptr_t) gDvmJit.codeCache + templateSize, 0);

    int result = mprotect(gDvmJit.codeCache, gDvmJit.codeCacheSize,
                          PROTECT_CODE_CACHE_ATTRS);

    if (result == -1) {
        LOGE("Failed to remove the write permission for the code cache");
        dvmAbort();
    }

    return true;
}

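/*
 * Walk a thread's Dalvik stack from the current frame to the bottom,
 * optionally logging each frame, and null out every saved returnAddr so the
 * thread re-enters the interpreter rather than returning into JIT'ed code.
 */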
static void crawlDalvikStack(Thread *thread, bool print)
{
    void *fp = thread->curFrame;
    StackSaveArea* saveArea = NULL;
    int stackLevel = 0;

    if (print) {
        LOGD("Crawling tid %d (%s / %p %s)", thread->systemTid,
             dvmGetThreadStatusStr(thread->status),
             thread->inJitCodeCache,
             thread->inJitCodeCache ? "jit" : "interp");
    }
    /* Crawl the Dalvik stack frames to clear the returnAddr field */
    while (fp != NULL) {
        saveArea = SAVEAREA_FROM_FP(fp);

        if (print) {
            if (dvmIsBreakFrame(fp)) {
                LOGD("  #%d: break frame (%p)",
                     stackLevel, saveArea->returnAddr);
            }
            else {
                LOGD("  #%d: %s.%s%s (%p)",
                     stackLevel,
                     saveArea->method->clazz->descriptor,
                     saveArea->method->name,
                     dvmIsNativeMethod(saveArea->method) ?
                         " (native)" : "",
                     saveArea->returnAddr);
            }
        }
        stackLevel++;
        saveArea->returnAddr = NULL;
        assert(fp != saveArea->prevFrame);
        fp = saveArea->prevFrame;
    }
    /* Make sure the stack is fully unwound to the bottom */
    assert(saveArea == NULL ||
           (u1 *) (saveArea+1) == thread->interpStackStart);
}

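/*
 * Wipe all translations and reset the code cache to contain only the
 * templates. Expected to run at a safe point with all threads suspended;
 * the reset is deferred if any thread is still executing in the code cache.
 */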
static void resetCodeCache(void)
{
    Thread* thread;
    u8 startTime = dvmGetRelativeTimeUsec();
    int inJit = 0;
    int byteUsed = gDvmJit.codeCacheByteUsed;

    /* If any thread is found stuck in the JIT state, don't reset the cache */
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        /*
         * Crawl the stack to wipe out the returnAddr field so that
         * 1) the soon-to-be-deleted code in the JIT cache won't be used, and
         * 2) any thread stuck in JIT'ed code will soon return
         *    to the interpreter
         */
        crawlDalvikStack(thread, false);
        if (thread->inJitCodeCache) {
            inJit++;
        }
    }

    if (inJit) {
        LOGD("JIT code cache reset delayed (%d bytes %d/%d)",
             gDvmJit.codeCacheByteUsed, gDvmJit.numCodeCacheReset,
             ++gDvmJit.numCodeCacheResetDelayed);
        return;
    }

    /* Lock the mutex to clean up the work queue */
    dvmLockMutex(&gDvmJit.compilerLock);

    /* Drain the work queue to free the work orders */
    while (workQueueLength()) {
        CompilerWorkOrder work = workDequeue();
        free(work.info);
    }

    /* Reset the JitEntry table contents to the initial unpopulated state */
    dvmJitResetTable();

    UNPROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);
    /*
     * Wipe out the code cache content to force immediate crashes if
     * stale JIT'ed code is invoked.
     */
    memset((char *) gDvmJit.codeCache + gDvmJit.templateSize,
           0,
           gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);
    cacheflush((intptr_t) gDvmJit.codeCache,
               (intptr_t) gDvmJit.codeCache + gDvmJit.codeCacheByteUsed, 0);

    PROTECT_CODE_CACHE(gDvmJit.codeCache, gDvmJit.codeCacheByteUsed);

    /* Reset the current mark of used bytes to the end of template code */
    gDvmJit.codeCacheByteUsed = gDvmJit.templateSize;
    gDvmJit.numCompilations = 0;

    /* Reset the work queue */
    memset(gDvmJit.compilerWorkQueue, 0,
           sizeof(CompilerWorkOrder) * COMPILER_WORK_QUEUE_SIZE);
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;

    /* Reset the IC patch work queue */
    dvmLockMutex(&gDvmJit.compilerICPatchLock);
    gDvmJit.compilerICPatchIndex = 0;
    dvmUnlockMutex(&gDvmJit.compilerICPatchLock);

    /* All clear now */
    gDvmJit.codeCacheFull = false;

    dvmUnlockMutex(&gDvmJit.compilerLock);

    LOGD("JIT code cache reset in %lld ms (%d bytes %d/%d)",
         (dvmGetRelativeTimeUsec() - startTime) / 1000,
         byteUsed, ++gDvmJit.numCodeCacheReset,
         gDvmJit.numCodeCacheResetDelayed);
}

/*
 * Perform actions that are only safe when all threads are suspended. Currently
 * we do:
 * 1) Check if the code cache is full. If so, reset it and start repopulating
 *    it from scratch.
 * 2) Patch predicted chaining cells by consuming recorded work orders.
 */
void dvmCompilerPerformSafePointChecks(void)
{
    if (gDvmJit.codeCacheFull) {
        resetCodeCache();
    }
    dvmCompilerPatchInlineCache();
}

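/*
 * Lazy one-time initialization run on the compiler thread: set up the code
 * cache, the compiler arena, the method-stats hash table, the JitTable, and
 * the profiling table, then kick running threads to refresh their cached
 * pointers.
 */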
bool compilerThreadStartup(void)
{
    JitEntry *pJitTable = NULL;
    unsigned char *pJitProfTable = NULL;
    unsigned int i;

    if (!dvmCompilerArchInit())
        goto fail;

    /*
     * Set up the code cache if we have not inherited a valid code cache
     * from the zygote.
     */
    if (gDvmJit.codeCache == NULL) {
        if (!dvmCompilerSetupCodeCache())
            goto fail;
    }

    /* Allocate the initial arena block */
    if (dvmCompilerHeapInit() == false) {
        goto fail;
    }

    dvmLockMutex(&gDvmJit.compilerLock);

    /* Track method-level compilation statistics */
    gDvmJit.methodStatsTable = dvmHashTableCreate(32, NULL);

#if defined(WITH_JIT_TUNING)
    gDvm.verboseShutdown = true;
#endif

    dvmUnlockMutex(&gDvmJit.compilerLock);

    /* Set up the JitTable */

    /* Power of 2? */
    assert(gDvmJit.jitTableSize &&
           !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1)));

    dvmInitMutex(&gDvmJit.tableLock);
    dvmLockMutex(&gDvmJit.tableLock);
    pJitTable = (JitEntry*)
                calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
    if (!pJitTable) {
        LOGE("jit table allocation failed\n");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    /*
     * NOTE: the profile table must only be allocated once, globally.
     * Profiling is turned on and off by nulling out gDvmJit.pProfTable
     * and then restoring its original value. However, this action
     * is not synchronized for speed, so threads may continue to hold
     * and update the profile table after profiling has been turned
     * off by nulling the global pointer. Be aware.
     */
    pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
    if (!pJitProfTable) {
        LOGE("jit prof table allocation failed\n");
        dvmUnlockMutex(&gDvmJit.tableLock);
        goto fail;
    }
    memset(pJitProfTable, gDvmJit.threshold, JIT_PROF_SIZE);
    for (i = 0; i < gDvmJit.jitTableSize; i++) {
        pJitTable[i].u.info.chain = gDvmJit.jitTableSize;
    }
    /* Is the chain field wide enough for the termination pattern? */
    assert(pJitTable[0].u.info.chain == gDvmJit.jitTableSize);

    gDvmJit.pJitEntryTable = pJitTable;
    gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
    gDvmJit.jitTableEntriesUsed = 0;
    gDvmJit.compilerHighWater =
        COMPILER_WORK_QUEUE_SIZE - (COMPILER_WORK_QUEUE_SIZE/4);
    /*
     * If the VM is launched with wait-on-the-debugger, we will need to hide
     * the profile table here
     */
    gDvmJit.pProfTable = dvmDebuggerOrProfilerActive() ? NULL : pJitProfTable;
    gDvmJit.pProfTableCopy = pJitProfTable;
    dvmUnlockMutex(&gDvmJit.tableLock);

    /* Signal running threads to refresh their cached pJitTable pointers */
    dvmSuspendAllThreads(SUSPEND_FOR_REFRESH);
    dvmResumeAllThreads(SUSPEND_FOR_REFRESH);

    /* Enable signature breakpoints by customizing the following code */
#if defined(SIGNATURE_BREAKPOINT)
    /*
     * Suppose one sees the following native crash in the bugreport:
     * I/DEBUG ( 1638): Build fingerprint: 'unknown'
     * I/DEBUG ( 1638): pid: 2468, tid: 2507 >>> com.google.android.gallery3d
     * I/DEBUG ( 1638): signal 11 (SIGSEGV), fault addr 00001400
     * I/DEBUG ( 1638): r0 44ea7190 r1 44e4f7b8 r2 44ebc710 r3 00000000
     * I/DEBUG ( 1638): r4 00000a00 r5 41862dec r6 4710dc10 r7 00000280
     * I/DEBUG ( 1638): r8 ad010f40 r9 46a37a12 10 001116b0 fp 42a78208
     * I/DEBUG ( 1638): ip 00000090 sp 4710dbc8 lr ad060e67 pc 46b90682
     * cpsr 00000030
     * I/DEBUG ( 1638): #00 pc 46b90682 /dev/ashmem/dalvik-jit-code-cache
     * I/DEBUG ( 1638): #01 pc 00060e62 /system/lib/libdvm.so
     *
     * I/DEBUG ( 1638): code around pc:
     * I/DEBUG ( 1638): 46b90660 6888d01c 34091dcc d2174287 4a186b68
     * I/DEBUG ( 1638): 46b90670 d0052800 68006809 28004790 6b68d00e
     * I/DEBUG ( 1638): 46b90680 512000bc 37016eaf 6ea866af 6f696028
     * I/DEBUG ( 1638): 46b90690 682a6069 429a686b e003da08 6df1480b
     * I/DEBUG ( 1638): 46b906a0 1c2d4788 47806d70 46a378fa 47806d70
     *
     * Clearly it is a JIT bug. To find out which translation contains the
     * offending code, the content of the memory dump around the faulting PC
     * can be pasted into the gDvmJit.signatureBreakpoint[] array; the next
     * time a similar compilation is created, the JIT compiler will replay the
     * trace in verbose mode so that the instruction sequence can be
     * investigated in detail.
     *
     * Some experimentation may be needed to determine a good signature
     * length. The rule of thumb is: don't include PC-relative instructions in
     * the signature, since they may be affected by the alignment of the
     * compiled code. On the other hand, a signature that's too short
     * increases the chance of false positive matches. Using gdbjithelper to
     * disassemble the memory content first may be a good companion approach.
     *
     * For example, if the next 4 words starting from 46b90680 are pasted into
     * the data structure:
     */

    gDvmJit.signatureBreakpointSize = 4;
    gDvmJit.signatureBreakpoint =
        malloc(sizeof(u4) * gDvmJit.signatureBreakpointSize);
    gDvmJit.signatureBreakpoint[0] = 0x512000bc;
    gDvmJit.signatureBreakpoint[1] = 0x37016eaf;
    gDvmJit.signatureBreakpoint[2] = 0x6ea866af;
    gDvmJit.signatureBreakpoint[3] = 0x6f696028;

    /*
     * The following log will be printed when a match is found in subsequent
     * test runs:
     *
     * D/dalvikvm( 2468): Signature match starting from offset 0x34 (4 words)
     * D/dalvikvm( 2468): --------
     * D/dalvikvm( 2468): Compiler: Building trace for computeVisibleItems,
     * offset 0x1f7
     * D/dalvikvm( 2468): 0x46a37a12: 0x0090 add-int v42, v5, v26
     * D/dalvikvm( 2468): 0x46a37a16: 0x004d aput-object v13, v14, v42
     * D/dalvikvm( 2468): 0x46a37a1a: 0x0028 goto, (#0), (#0)
     * D/dalvikvm( 2468): 0x46a3794e: 0x00d8 add-int/lit8 v26, v26, (#1)
     * D/dalvikvm( 2468): 0x46a37952: 0x0028 goto, (#0), (#0)
     * D/dalvikvm( 2468): 0x46a378ee: 0x0002 move/from16 v0, v26, (#0)
     * D/dalvikvm( 2468): 0x46a378f2: 0x0002 move/from16 v1, v29, (#0)
     * D/dalvikvm( 2468): 0x46a378f6: 0x0035 if-ge v0, v1, (#10)
     * D/dalvikvm( 2468): TRACEINFO (554): 0x46a37624
     * Lcom/cooliris/media/GridLayer;computeVisibleItems 0x1f7 14 of 934, 8
     * blocks
     *     :
     *     :
     * D/dalvikvm( 2468): 0x20 (0020): ldr r0, [r5, #52]
     * D/dalvikvm( 2468): 0x22 (0022): ldr r2, [pc, #96]
     * D/dalvikvm( 2468): 0x24 (0024): cmp r0, #0
     * D/dalvikvm( 2468): 0x26 (0026): beq 0x00000034
     * D/dalvikvm( 2468): 0x28 (0028): ldr r1, [r1, #0]
     * D/dalvikvm( 2468): 0x2a (002a): ldr r0, [r0, #0]
     * D/dalvikvm( 2468): 0x2c (002c): blx r2
     * D/dalvikvm( 2468): 0x2e (002e): cmp r0, #0
     * D/dalvikvm( 2468): 0x30 (0030): beq 0x00000050
     * D/dalvikvm( 2468): 0x32 (0032): ldr r0, [r5, #52]
     * D/dalvikvm( 2468): 0x34 (0034): lsls r4, r7, #2
     * D/dalvikvm( 2468): 0x36 (0036): str r0, [r4, r4]
     * D/dalvikvm( 2468): -------- dalvik offset: 0x01fb @ goto, (#0), (#0)
     * D/dalvikvm( 2468): L0x0195:
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0195 @ add-int/lit8 v26,
     * v26, (#1)
     * D/dalvikvm( 2468): 0x38 (0038): ldr r7, [r5, #104]
     * D/dalvikvm( 2468): 0x3a (003a): adds r7, r7, #1
     * D/dalvikvm( 2468): 0x3c (003c): str r7, [r5, #104]
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0197 @ goto, (#0), (#0)
     * D/dalvikvm( 2468): L0x0165:
     * D/dalvikvm( 2468): -------- dalvik offset: 0x0165 @ move/from16 v0, v26,
     * (#0)
     * D/dalvikvm( 2468): 0x3e (003e): ldr r0, [r5, #104]
     * D/dalvikvm( 2468): 0x40 (0040): str r0, [r5, #0]
     *
     * The "str r0, [r4, r4]" is indeed the culprit of the native crash.
     */
#endif

    return true;

fail:
    return false;
}

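/*
 * Entry point of the compiler thread: optionally delay the JIT startup when
 * running in the Android framework, perform lazy initialization, then loop
 * servicing the work queue until asked to halt.
 */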
static void *compilerThreadStart(void *arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);

    /*
     * If we're not running stand-alone, wait a little before
     * receiving translation requests on the assumption that process start
     * up code isn't worth compiling. We'll resume when the framework
     * signals us that the first screen draw has happened, or the timer
     * below expires (to catch daemons).
     *
     * There is a theoretical race between the callback to
     * VMRuntime.startJitCompilation and when the compiler thread reaches this
     * point. In case the callback happens earlier, we bypass the delay here
     * in order not to permanently hold the system_server (which does not use
     * the timed wait) in interpreter-only mode.
     */
    if (gDvmJit.runningInAndroidFramework &&
        !gDvmJit.alreadyEnabledViaFramework) {
        /*
         * If the current VM instance is the system server (detected by having
         * 0 in gDvm.systemServerPid), we will use the indefinite wait on the
         * condition variable to determine whether to start the JIT or not.
         * If the system server detects that the whole system is booted in
         * safe mode, the condition variable will never be signaled and the
         * system server will remain in the interpreter-only mode. All
         * subsequent apps will be started with the --enable-safemode flag
         * explicitly appended.
         */
        if (gDvm.systemServerPid == 0) {
            dvmLockMutex(&gDvmJit.compilerLock);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            dvmUnlockMutex(&gDvmJit.compilerLock);
            LOGD("JIT started for system_server");
        } else {
            dvmLockMutex(&gDvmJit.compilerLock);
            /*
             * TUNING: experiment with the delay & perhaps make it
             * target-specific
             */
            dvmRelativeCondWait(&gDvmJit.compilerQueueActivity,
                                &gDvmJit.compilerLock, 3000, 0);
            dvmUnlockMutex(&gDvmJit.compilerLock);
        }
        if (gDvmJit.haltCompilerThread) {
            return NULL;
        }
    }

    compilerThreadStartup();

    dvmLockMutex(&gDvmJit.compilerLock);
    /*
     * Since the compiler thread will not touch any objects on the heap once
     * it is created, we just fake its state as VMWAIT so that it can be a
     * bit late in responding to a pending suspend request.
     */
    while (!gDvmJit.haltCompilerThread) {
        if (workQueueLength() == 0) {
            int cc;
            cc = pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
            assert(cc == 0);
            pthread_cond_wait(&gDvmJit.compilerQueueActivity,
                              &gDvmJit.compilerLock);
            continue;
        } else {
            do {
                CompilerWorkOrder work = workDequeue();
                dvmUnlockMutex(&gDvmJit.compilerLock);
#if defined(WITH_JIT_TUNING)
                u8 startTime = dvmGetRelativeTimeUsec();
#endif
                /*
                 * Check whether there is a suspend request on me. This
                 * is necessary to allow a clean shutdown.
                 *
                 * However, in the blocking stress testing mode, let the
                 * compiler thread continue doing compilations to unblock
                 * other requesting threads. This may occasionally prevent
                 * shutdown from proceeding cleanly in the standalone
                 * invocation of the VM, but that should be acceptable.
                 */
                if (!gDvmJit.blockingMode)
                    dvmCheckSuspendPending(dvmThreadSelf());
                /* Is the JitTable filling up? */
                if (gDvmJit.jitTableEntriesUsed >
                    (gDvmJit.jitTableSize - gDvmJit.jitTableSize/4)) {
                    bool resizeFail =
                        dvmJitResizeJitTable(gDvmJit.jitTableSize * 2);
                    /*
                     * If the jit table is full, consider it time to reset
                     * the code cache too.
                     */
                    gDvmJit.codeCacheFull |= resizeFail;
                }
                if (gDvmJit.haltCompilerThread) {
                    LOGD("Compiler shutdown in progress - discarding request");
                } else if (!gDvmJit.codeCacheFull) {
                    bool compileOK = false;
                    jmp_buf jmpBuf;
                    work.bailPtr = &jmpBuf;
                    bool aborted = setjmp(jmpBuf);
                    if (!aborted) {
                        compileOK = dvmCompilerDoWork(&work);
                    }
                    if (aborted || !compileOK) {
                        dvmCompilerArenaReset();
                    } else if (!work.result.discardResult &&
                               work.result.codeAddress) {
                        /* Make sure that a proper code addr is installed */
                        assert(work.result.codeAddress != NULL);
                        dvmJitSetCodeAddr(work.pc, work.result.codeAddress,
                                          work.result.instructionSet);
                    }
                }
                free(work.info);
#if defined(WITH_JIT_TUNING)
                gDvmJit.jitTime += dvmGetRelativeTimeUsec() - startTime;
#endif
                dvmLockMutex(&gDvmJit.compilerLock);
            } while (workQueueLength() != 0);
        }
    }
    pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * As part of detaching the thread we need to call into Java code to update
     * the ThreadGroup, and we should not be in VMWAIT state while executing
     * interpreted code.
     */
    dvmChangeStatus(NULL, THREAD_RUNNING);

    if (gDvm.verboseShutdown)
        LOGD("Compiler thread shutting down\n");
    return NULL;
}

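/*
 * Initialize the compiler's locks, condition variables, and work queue, then
 * launch the compiler thread; the remaining setup is deferred to the thread
 * itself.
 */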
bool dvmCompilerStartup(void)
{
    dvmInitMutex(&gDvmJit.compilerLock);
    dvmInitMutex(&gDvmJit.compilerICPatchLock);
    dvmInitMutex(&gDvmJit.codeCacheProtectionLock);
    dvmLockMutex(&gDvmJit.compilerLock);
    pthread_cond_init(&gDvmJit.compilerQueueActivity, NULL);
    pthread_cond_init(&gDvmJit.compilerQueueEmpty, NULL);

    /* Reset the work queue */
    gDvmJit.compilerWorkEnqueueIndex = gDvmJit.compilerWorkDequeueIndex = 0;
    gDvmJit.compilerQueueLength = 0;
    dvmUnlockMutex(&gDvmJit.compilerLock);

    /*
     * Defer the rest of initialization until we're sure JIT'ing makes sense.
     * Launch the compiler thread, which will do the real initialization if
     * and when it is signalled to do so.
     */
    return dvmCreateInternalThread(&gDvmJit.compilerHandle, "Compiler",
                                   compilerThreadStart, NULL);
}

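/*
 * Shut the compiler down: stop accepting new translation requests, halt the
 * compiler thread, and break the chains within the translation cache.
 */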
void dvmCompilerShutdown(void)
{
    void *threadReturn;

    /* Disable new translation requests */
    gDvmJit.pProfTable = NULL;
    gDvmJit.pProfTableCopy = NULL;

    if (gDvm.verboseShutdown) {
        dvmCompilerDumpStats();
        while (gDvmJit.compilerQueueLength)
            sleep(5);
    }

    if (gDvmJit.compilerHandle) {
        gDvmJit.haltCompilerThread = true;

        dvmLockMutex(&gDvmJit.compilerLock);
        pthread_cond_signal(&gDvmJit.compilerQueueActivity);
        dvmUnlockMutex(&gDvmJit.compilerLock);

        if (pthread_join(gDvmJit.compilerHandle, &threadReturn) != 0)
            LOGW("Compiler thread join failed\n");
        else if (gDvm.verboseShutdown)
            LOGD("Compiler thread has shut down\n");
    }

    /* Break loops within the translation cache */
    dvmJitUnchainAll();

    /*
     * NOTE: our current implementation doesn't allow for the compiler
     * thread to be restarted after it exits here. We aren't freeing
     * the JitTable or the ProfTable because threads which still may be
     * running or in the process of shutting down may hold references to
     * them.
     */
}

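/*
 * Enable or disable JIT profiling in response to a change in debugger or
 * profiler state, unchaining all translations when the JIT is turned off.
 */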
void dvmCompilerStateRefresh()
{
    bool jitActive;
    bool jitActivate;
    bool needUnchain = false;

    /*
     * The tableLock might not be initialized yet by the compiler thread if a
     * debugger is attached from the very beginning of the VM launch. If
     * pProfTableCopy is NULL, the lock is not initialized yet and we don't
     * need to refresh anything either.
     */
    if (gDvmJit.pProfTableCopy == NULL) {
        return;
    }

    dvmLockMutex(&gDvmJit.tableLock);
    jitActive = gDvmJit.pProfTable != NULL;
    jitActivate = !(gDvm.debuggerActive || (gDvm.activeProfilers > 0));

    if (jitActivate && !jitActive) {
        gDvmJit.pProfTable = gDvmJit.pProfTableCopy;
    } else if (!jitActivate && jitActive) {
        gDvmJit.pProfTable = NULL;
        needUnchain = true;
    }
    dvmUnlockMutex(&gDvmJit.tableLock);
    if (needUnchain)
        dvmJitUnchainAll();
}