/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifdef WITH_JIT

/*
 * Target-independent portion of Android's JIT
 */

#include "Dalvik.h"
#include "Jit.h"


#include "dexdump/OpCodeNames.h"
#include <unistd.h>
#include <pthread.h>
#include <sys/time.h>
#include <signal.h>
#include "compiler/Compiler.h"
#include "compiler/CompilerUtility.h"
#include "compiler/CompilerIR.h"
#include <errno.h>

int dvmJitStartup(void)
{
    unsigned int i;
    bool res = true;  /* Assume success */

    /* Create the compiler thread and set up miscellaneous chores */
    res &= dvmCompilerStartup();

    dvmInitMutex(&gDvmJit.tableLock);
    if (res && gDvm.executionMode == kExecutionModeJit) {
        JitEntry *pJitTable = NULL;
        unsigned char *pJitProfTable = NULL;
        assert(gDvmJit.jitTableSize &&
            !(gDvmJit.jitTableSize & (gDvmJit.jitTableSize - 1))); // Power of 2?
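        /*
         * Keeping the table size a power of 2 is what allows a Dalvik PC
         * to be reduced to a table index with a single mask against
         * jitTableMask (set to size - 1 below) rather than a divide.
         */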
        dvmLockMutex(&gDvmJit.tableLock);
        pJitTable = (JitEntry*)
                    calloc(gDvmJit.jitTableSize, sizeof(*pJitTable));
        if (!pJitTable) {
            LOGE("jit table allocation failed\n");
            res = false;
            goto done;
        }
        /*
         * NOTE: the profile table must only be allocated once, globally.
         * Profiling is turned on and off by nulling out gDvmJit.pProfTable
         * and then restoring its original value. However, this action
         * is not synchronized for speed, so threads may continue to hold
         * and update the profile table after profiling has been turned
         * off by nulling the global pointer. Be aware.
         */
        pJitProfTable = (unsigned char *)malloc(JIT_PROF_SIZE);
        if (!pJitProfTable) {
            LOGE("jit prof table allocation failed\n");
            res = false;
            goto done;
        }
        memset(pJitProfTable, 0, JIT_PROF_SIZE);
        for (i = 0; i < gDvmJit.jitTableSize; i++) {
            pJitTable[i].u.info.chain = gDvmJit.jitTableSize;
        }
        /* Is chain field wide enough for termination pattern? */
        assert(pJitTable[0].u.info.chain == gDvmJit.jitTableSize);

done:
        gDvmJit.pJitEntryTable = pJitTable;
        gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
        gDvmJit.jitTableEntriesUsed = 0;
        gDvmJit.pProfTableCopy = gDvmJit.pProfTable = pJitProfTable;
        dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return res;
}

/*
 * If one of our fixed tables or the translation buffer fills up,
 * call this routine to avoid wasting cycles on future translation requests.
 */
void dvmJitStopTranslationRequests()
{
    /*
     * Note 1: This won't necessarily stop all translation requests, and
     * operates on a delayed mechanism. Running threads look to the copy
     * of this value in their private InterpState structures and won't see
     * this change until it is refreshed (which happens on interpreter
     * entry).
     * Note 2: This is a one-shot memory leak on this table. Because this is a
     * permanent off switch for Jit profiling, it is a one-time leak of 1K
     * bytes, and no further attempt will be made to re-allocate it. Can't
     * free it because some thread may be holding a reference.
     */
    gDvmJit.pProfTable = gDvmJit.pProfTableCopy = NULL;
}

#if defined(EXIT_STATS)
/* Convenience function to increment counter from assembly code */
void dvmBumpNoChain()
{
    gDvmJit.noChainExit++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpNormal()
{
    gDvmJit.normalExit++;
}

/* Convenience function to increment counter from assembly code */
void dvmBumpPunt(int from)
{
    gDvmJit.puntExit++;
}
#endif

/* Dumps debugging & tuning stats to the log */
void dvmJitStats()
{
    int i;
    int hit;
    int not_hit;
    int chains;
    if (gDvmJit.pJitEntryTable) {
        for (i = 0, chains = hit = not_hit = 0;
             i < (int) gDvmJit.jitTableSize;
             i++) {
            if (gDvmJit.pJitEntryTable[i].dPC != 0)
                hit++;
            else
                not_hit++;
            if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
                chains++;
        }
        LOGD(
         "JIT: %d traces, %d slots, %d chains, %d maxQ, %d thresh, %s",
         hit, not_hit + hit, chains, gDvmJit.compilerMaxQueued,
         gDvmJit.threshold, gDvmJit.blockingMode ? "Blocking" : "Non-blocking");
#if defined(EXIT_STATS)
        LOGD(
         "JIT: Lookups: %d hits, %d misses; %d NoChain, %d normal, %d punt",
         gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
         gDvmJit.noChainExit, gDvmJit.normalExit, gDvmJit.puntExit);
#endif
        LOGD("JIT: %d Translation chains", gDvmJit.translationChains);
#if defined(INVOKE_STATS)
        LOGD("JIT: Invoke: %d chainable, %d pred. chain, %d native, "
             "%d return",
             gDvmJit.invokeChain, gDvmJit.invokePredictedChain,
             gDvmJit.invokeNative, gDvmJit.returnOp);
#endif
        if (gDvmJit.profile) {
            dvmCompilerSortAndPrintTraceProfiles();
        }
    }
}


/*
 * Final JIT shutdown. Only do this once, and do not attempt to restart
 * the JIT later.
 */
void dvmJitShutdown(void)
{
    /* Shutdown the compiler thread */
    dvmCompilerShutdown();

    dvmCompilerDumpStats();

    dvmDestroyMutex(&gDvmJit.tableLock);

    if (gDvmJit.pJitEntryTable) {
        free(gDvmJit.pJitEntryTable);
        gDvmJit.pJitEntryTable = NULL;
    }

    if (gDvmJit.pProfTable) {
        free(gDvmJit.pProfTable);
        gDvmJit.pProfTable = NULL;
    }
}

/*
 * Adds to the current trace request one instruction at a time, just
 * before that instruction is interpreted. This is the primary trace
 * selection function. NOTE: return instructions are handled a little
 * differently. In general, instructions are "proposed" to be added
 * to the current trace prior to interpretation. If the interpreter
 * then successfully completes the instruction, it will be considered
 * part of the request. This allows us to examine machine state prior
 * to interpretation, and also abort the trace request if the instruction
 * throws or does something unexpected. However, return instructions
 * will cause an immediate end to the translation request - which will
 * be passed to the compiler before the return completes. This is done
 * in response to special handling of returns by the interpreter (and
 * because returns cannot throw in a way that causes problems for the
 * translated code).
 */
int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
{
    int flags, i, len;
    int switchInterp = false;
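    /*
     * Trace selection can't proceed while a debugger is attached, a
     * profiler is active, or this thread has a pending suspend request;
     * the flag computed here gates the abort paths in the state machine
     * below.
     */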
    int debugOrProfile = (gDvm.debuggerActive || self->suspendCount
#if defined(WITH_PROFILER)
                          || gDvm.activeProfilers
#endif
                         );

    switch (interpState->jitState) {
        char* nopStr;
        int target;
        int offset;
        DecodedInstruction decInsn;
        case kJitTSelect:
            dexDecodeInstruction(gDvm.instrFormat, pc, &decInsn);
#if defined(SHOW_TRACE)
            LOGD("TraceGen: adding %s", getOpcodeName(decInsn.opCode));
#endif
            flags = dexGetInstrFlags(gDvm.instrFlags, decInsn.opCode);
            len = dexGetInstrOrTableWidthAbs(gDvm.instrWidth, pc);
            offset = pc - interpState->method->insns;
            if (pc != interpState->currRunHead + interpState->currRunLen) {
                int currTraceRun;
                /* We need to start a new trace run */
                currTraceRun = ++interpState->currTraceRun;
                interpState->currRunLen = 0;
                interpState->currRunHead = (u2*)pc;
                interpState->trace[currTraceRun].frag.startOffset = offset;
                interpState->trace[currTraceRun].frag.numInsts = 0;
                interpState->trace[currTraceRun].frag.runEnd = false;
                interpState->trace[currTraceRun].frag.hint = kJitHintNone;
            }
            interpState->trace[interpState->currTraceRun].frag.numInsts++;
            interpState->totalTraceLen++;
            interpState->currRunLen += len;
            if (((flags & kInstrUnconditional) == 0) &&
                /* don't end trace on INVOKE_DIRECT_EMPTY */
                (decInsn.opCode != OP_INVOKE_DIRECT_EMPTY) &&
                ((flags & (kInstrCanBranch |
                           kInstrCanSwitch |
                           kInstrCanReturn |
                           kInstrInvoke)) != 0)) {
                interpState->jitState = kJitTSelectEnd;
#if defined(SHOW_TRACE)
                LOGD("TraceGen: ending on %s, basic block end",
                     getOpcodeName(decInsn.opCode));
#endif
            }
            if (decInsn.opCode == OP_THROW) {
                interpState->jitState = kJitTSelectEnd;
            }
            if (interpState->totalTraceLen >= JIT_MAX_TRACE_LEN) {
                interpState->jitState = kJitTSelectEnd;
            }
            if (debugOrProfile) {
                interpState->jitState = kJitTSelectAbort;
                switchInterp = !debugOrProfile;
                break;
            }
            if ((flags & kInstrCanReturn) != kInstrCanReturn) {
                break;
            }
            /* NOTE: intentional fallthrough for returns */
        case kJitTSelectEnd:
            {
                if (interpState->totalTraceLen == 0) {
                    switchInterp = !debugOrProfile;
                    break;
                }
                JitTraceDescription* desc =
                   (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
                     sizeof(JitTraceRun) * (interpState->currTraceRun+1));
                if (desc == NULL) {
                    LOGE("Out of memory in trace selection");
                    dvmJitStopTranslationRequests();
                    interpState->jitState = kJitTSelectAbort;
                    switchInterp = !debugOrProfile;
                    break;
                }
                interpState->trace[interpState->currTraceRun].frag.runEnd =
                     true;
                interpState->jitState = kJitNormal;
                desc->method = interpState->method;
                memcpy((char*)&(desc->trace[0]),
                    (char*)&(interpState->trace[0]),
                    sizeof(JitTraceRun) * (interpState->currTraceRun+1));
#if defined(SHOW_TRACE)
                LOGD("TraceGen: trace done, adding to queue");
#endif
                dvmCompilerWorkEnqueue(
                    interpState->currTraceHead, kWorkOrderTrace, desc);
                if (gDvmJit.blockingMode) {
                    dvmCompilerDrainQueue();
                }
                switchInterp = !debugOrProfile;
            }
            break;
        case kJitSingleStep:
            interpState->jitState = kJitSingleStepEnd;
            break;
        case kJitSingleStepEnd:
            interpState->entryPoint = kInterpEntryResume;
            switchInterp = !debugOrProfile;
            break;
        case kJitTSelectAbort:
#if defined(SHOW_TRACE)
            LOGD("TraceGen: trace abort");
#endif
            interpState->jitState = kJitNormal;
            switchInterp = !debugOrProfile;
            break;
        case kJitNormal:
            switchInterp = !debugOrProfile;
            break;
        default:
            dvmAbort();
    }
    return switchInterp;
}

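/*
 * Look up the JitEntry for a given Dalvik PC, or return NULL if none
 * exists. Buckets that collide in the primary hash are linked through
 * the u.info.chain index, with jitTableSize serving as the end-of-chain
 * marker; the walk here is lock-free because entries are only ever
 * appended (see dvmJitLookupAndAdd).
 */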
static inline JitEntry *findJitEntry(const u2* pc)
{
    int idx = dvmJitHash(pc);

    /* Expect a high hit rate on 1st shot */
    if (gDvmJit.pJitEntryTable[idx].dPC == pc)
        return &gDvmJit.pJitEntryTable[idx];
    else {
        int chainEndMarker = gDvmJit.jitTableSize;
        while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
            idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            if (gDvmJit.pJitEntryTable[idx].dPC == pc)
                return &gDvmJit.pJitEntryTable[idx];
        }
    }
    return NULL;
}

JitEntry *dvmFindJitEntry(const u2* pc)
{
    return findJitEntry(pc);
}

/*
 * If a translated code address exists for the Dalvik bytecode
 * pointer, return it. This routine needs to be fast.
 */
void* dvmJitGetCodeAddr(const u2* dPC)
{
    int idx = dvmJitHash(dPC);

    /* If anything is suspended, don't re-enter the code cache */
    if (gDvm.sumThreadSuspendCount > 0) {
        return NULL;
    }

    /* Expect a high hit rate on 1st shot */
    if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
#if defined(EXIT_STATS)
        gDvmJit.addrLookupsFound++;
#endif
        return gDvmJit.pJitEntryTable[idx].codeAddress;
    } else {
        int chainEndMarker = gDvmJit.jitTableSize;
        while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
            idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
#if defined(EXIT_STATS)
                gDvmJit.addrLookupsFound++;
#endif
                return gDvmJit.pJitEntryTable[idx].codeAddress;
            }
        }
    }
#if defined(EXIT_STATS)
    gDvmJit.addrLookupsNotFound++;
#endif
    return NULL;
}

/*
 * Find an entry in the JitTable, creating if necessary.
 * Returns null if table is full.
 */
JitEntry *dvmJitLookupAndAdd(const u2* dPC)
{
    u4 chainEndMarker = gDvmJit.jitTableSize;
    u4 idx = dvmJitHash(dPC);

    /* Walk the bucket chain to find an exact match for our PC */
    while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
           (gDvmJit.pJitEntryTable[idx].dPC != dPC)) {
        idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    }

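    /*
     * The walk above runs without the table lock; it may miss an entry
     * that another thread is inserting concurrently, but it never sees
     * a broken chain because a chain field only ever changes from the
     * end-of-chain marker to a valid index. A miss falls into the
     * locked path below, which re-checks under gDvmJit.tableLock.
     */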
    if (gDvmJit.pJitEntryTable[idx].dPC != dPC) {
        /*
         * No match. Acquire jitTableLock and find the last
         * slot in the chain. Possibly continue the chain walk in case
         * some other thread allocated the slot we were looking
         * at previously (perhaps even the dPC we're trying to enter).
         */
        dvmLockMutex(&gDvmJit.tableLock);
        /*
         * At this point, if .dPC is NULL, then the slot we're
         * looking at is the target slot from the primary hash
         * (the simple, and common case). Otherwise we're going
         * to have to find a free slot and chain it.
         */
        MEM_BARRIER(); /* Make sure we reload [].dPC after lock */
        if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
            u4 prev;
            while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
                if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
                    /* Another thread got there first for this dPC */
                    dvmUnlockMutex(&gDvmJit.tableLock);
                    return &gDvmJit.pJitEntryTable[idx];
                }
                idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
            }
            /* Here, idx should be pointing to the last cell of an
             * active chain whose last member contains a valid dPC */
            assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
            /* Linear walk to find a free cell and add it to the end */
            prev = idx;
            while (true) {
                idx++;
                if (idx == chainEndMarker)
                    idx = 0; /* Wraparound */
                if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
                    (idx == prev))
                    break;
            }
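            /*
             * idx == prev here means the linear walk wrapped all the
             * way around without finding a free cell, i.e. the table
             * is full; in that case the chain splice below is skipped
             * and the caller gets NULL.
             */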
            if (idx != prev) {
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                /*
                 * Although we hold the lock so that no one else will
                 * be trying to update a chain field, the other fields
                 * packed into the word may be in use by other threads.
                 */
                do {
                    oldValue = gDvmJit.pJitEntryTable[prev].u;
                    newValue = oldValue;
                    newValue.info.chain = idx;
                } while (!ATOMIC_CMP_SWAP(
                         &gDvmJit.pJitEntryTable[prev].u.infoWord,
                         oldValue.infoWord, newValue.infoWord));
            }
        }
        if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
            /* Allocate the slot */
            gDvmJit.pJitEntryTable[idx].dPC = dPC;
            gDvmJit.jitTableEntriesUsed++;
        } else {
            /* Table is full */
            idx = chainEndMarker;
        }
        dvmUnlockMutex(&gDvmJit.tableLock);
    }
    return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
}

/*
 * Register the translated code pointer into the JitTable.
 * NOTE: Once a codeAddress field transitions from NULL to
 * JIT'd code, it must not be altered without first halting all
 * threads. This routine should only be called by the compiler
 * thread.
 */
void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set) {
    JitEntryInfoUnion oldValue;
    JitEntryInfoUnion newValue;
    JitEntry *jitEntry = dvmJitLookupAndAdd(dPC);
    assert(jitEntry);
    /*
     * Note: order of update is important. The instruction set must be
     * in place before codeAddress is set, because a non-NULL codeAddress
     * is what tells other threads the translation is ready to use.
     */
    do {
        oldValue = jitEntry->u;
        newValue = oldValue;
        newValue.info.instructionSet = set;
    } while (!ATOMIC_CMP_SWAP(
             &jitEntry->u.infoWord,
             oldValue.infoWord, newValue.infoWord));
    jitEntry->codeAddress = nPC;
}

/*
 * Determine if a valid trace-building request is active. Return true
 * if we need to abort and switch back to the fast interpreter, false
 * otherwise. NOTE: may be called even when trace selection is not being
 * requested.
 */

bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
{
    bool res = false;         /* Assume success */
    int i;
    if (gDvmJit.pJitEntryTable != NULL) {
        /* Two-level filtering scheme */
        for (i = 0; i < JIT_TRACE_THRESH_FILTER_SIZE; i++) {
            if (interpState->pc == interpState->threshFilter[i]) {
                break;
            }
        }
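        /*
         * Missing the filter (i == JIT_TRACE_THRESH_FILTER_SIZE) means
         * this PC has not triggered recently: record it and set res so
         * that this first trigger is declined below. Only a PC that is
         * already in the filter - one that triggers again while still
         * recent - is allowed to start trace selection.
         */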
        if (i == JIT_TRACE_THRESH_FILTER_SIZE) {
            /*
             * Use random replacement policy - otherwise we could miss a large
             * loop that contains more traces than the size of our filter array.
             */
            i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
            interpState->threshFilter[i] = interpState->pc;
            res = true;
        }
        /*
         * If the compiler is backlogged, or if a debugger or profiler is
         * active, cancel any JIT actions
         */
        if (res || (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) ||
            gDvm.debuggerActive || self->suspendCount
#if defined(WITH_PROFILER)
            || gDvm.activeProfilers
#endif
           ) {
            if (interpState->jitState != kJitOff) {
                interpState->jitState = kJitNormal;
            }
        } else if (interpState->jitState == kJitTSelectRequest) {
            JitEntry *slot = dvmJitLookupAndAdd(interpState->pc);
            if (slot == NULL) {
                /*
                 * Table is full. This should have been
                 * detected by the compiler thread and the table
                 * resized before we run into it here. Assume bad things
                 * are afoot and disable profiling.
                 */
                interpState->jitState = kJitTSelectAbort;
                LOGD("JIT: JitTable full, disabling profiling");
                dvmJitStopTranslationRequests();
            } else if (slot->u.info.traceRequested) {
                /* Trace already requested - revert to interpreter */
                interpState->jitState = kJitTSelectAbort;
            } else {
                /* Mark request */
                JitEntryInfoUnion oldValue;
                JitEntryInfoUnion newValue;
                do {
                    oldValue = slot->u;
                    newValue = oldValue;
                    newValue.info.traceRequested = true;
                } while (!ATOMIC_CMP_SWAP(&slot->u.infoWord,
                         oldValue.infoWord, newValue.infoWord));
            }
        }
        switch (interpState->jitState) {
            case kJitTSelectRequest:
                interpState->jitState = kJitTSelect;
                interpState->currTraceHead = interpState->pc;
                interpState->currTraceRun = 0;
                interpState->totalTraceLen = 0;
                interpState->currRunHead = interpState->pc;
                interpState->currRunLen = 0;
                interpState->trace[0].frag.startOffset =
                    interpState->pc - interpState->method->insns;
                interpState->trace[0].frag.numInsts = 0;
                interpState->trace[0].frag.runEnd = false;
                interpState->trace[0].frag.hint = kJitHintNone;
                break;
            case kJitTSelect:
            case kJitTSelectAbort:
                res = true;
                /* Intentional fallthrough */
            case kJitSingleStep:
            case kJitSingleStepEnd:
            case kJitOff:
            case kJitNormal:
                break;
            default:
                dvmAbort();
        }
    }
    return res;
}

/*
 * Resizes the JitTable. The new size must be a power of 2, and larger
 * than the current size; returns true on failure. Stops all threads,
 * and thus is a heavyweight operation.
 */
bool dvmJitResizeJitTable(unsigned int size)
{
    JitEntry *pNewTable;
    JitEntry *pOldTable;
    u4 newMask;
    unsigned int oldSize;
    unsigned int i;

    assert(gDvmJit.pJitEntryTable != NULL);
    assert(size && !(size & (size - 1)));   /* Is power of 2? */

    LOGD("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);

    newMask = size - 1;

    if (size <= gDvmJit.jitTableSize) {
        return true;
    }

    pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable));
    if (pNewTable == NULL) {
        return true;
    }
    for (i = 0; i < size; i++) {
        pNewTable[i].u.info.chain = size;  /* Initialize chain termination */
    }

    /* Stop all other interpreting/jit'ng threads */
    dvmSuspendAllThreads(SUSPEND_FOR_JIT);

    pOldTable = gDvmJit.pJitEntryTable;
    oldSize = gDvmJit.jitTableSize;

    dvmLockMutex(&gDvmJit.tableLock);
    gDvmJit.pJitEntryTable = pNewTable;
    gDvmJit.jitTableSize = size;
    gDvmJit.jitTableMask = size - 1;
    gDvmJit.jitTableEntriesUsed = 0;
    dvmUnlockMutex(&gDvmJit.tableLock);

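    /*
     * Rehash the old entries into the new table. dvmJitLookupAndAdd
     * operates on gDvmJit.pJitEntryTable, which now points at the new
     * table, so it both allocates each new slot and rebuilds any
     * collision chains.
     */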
    for (i = 0; i < oldSize; i++) {
        if (pOldTable[i].dPC) {
            JitEntry *p;
            u2 chain;
            p = dvmJitLookupAndAdd(pOldTable[i].dPC);
            p->dPC = pOldTable[i].dPC;
            /*
             * Compiler thread may have just updated the new entry's
             * code address field, so don't blindly copy null.
             */
            if (pOldTable[i].codeAddress != NULL) {
                p->codeAddress = pOldTable[i].codeAddress;
            }
            /* We need to preserve the new chain field, but copy the rest */
            dvmLockMutex(&gDvmJit.tableLock);
            chain = p->u.info.chain;
            p->u = pOldTable[i].u;
            p->u.info.chain = chain;
            dvmUnlockMutex(&gDvmJit.tableLock);
        }
    }

    free(pOldTable);

    /* Restart the world */
    dvmResumeAllThreads(SUSPEND_FOR_JIT);

    return false;
}

/*
 * Float/double-to-long conversion requires clamping to the min and max
 * of the integer form. If the target doesn't do this natively, use
 * these helpers.
 */
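/*
 * For example, (s8)1e19 is undefined behavior in C because 1e19
 * exceeds the largest s8 (about 9.22e18), so inputs at or above the
 * max are pinned to 0x7fffffffffffffff, inputs at or below the min are
 * pinned to 0x8000000000000000, and NaN (the only value for which
 * d != d) maps to 0 - matching the Java-language conversion rules.
 */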
s8 dvmJitd2l(double d)
{
    static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL;
    static const double kMinLong = (double)(s8)0x8000000000000000ULL;
    if (d >= kMaxLong)
        return (s8)0x7fffffffffffffffULL;
    else if (d <= kMinLong)
        return (s8)0x8000000000000000ULL;
    else if (d != d) // NaN case
        return 0;
    else
        return (s8)d;
}

s8 dvmJitf2l(float f)
{
    static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL;
    static const float kMinLong = (float)(s8)0x8000000000000000ULL;
    if (f >= kMaxLong)
        return (s8)0x7fffffffffffffffULL;
    else if (f <= kMinLong)
        return (s8)0x8000000000000000ULL;
    else if (f != f) // NaN case
        return 0;
    else
        return (s8)f;
}


#endif /* WITH_JIT */