/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Android's method call profiling goodies.
 */
#include "Dalvik.h"

#ifdef WITH_PROFILER        // -- include rest of file

#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <sys/mman.h>
#include <sched.h>
#include <errno.h>
#include <fcntl.h>

#ifdef HAVE_ANDROID_OS
# define UPDATE_MAGIC_PAGE      1
# ifndef PAGESIZE
#  define PAGESIZE              4096
# endif
#endif

/*
 * File format:
 *  header
 *  record 0
 *  record 1
 *  ...
 *
 * Header format:
 *  u4  magic ('SLOW')
 *  u2  version
 *  u2  offset to data
 *  u8  start date/time in usec
 *
 * Record format:
 *  u1  thread ID
 *  u4  method ID | method action
 *  u4  time delta since start, in usec
 *
 * 32 bits of microseconds covers roughly 70 minutes.
 *
 * All values are stored in little-endian order.
 */
#define TRACE_REC_SIZE      9
#define TRACE_MAGIC         0x574f4c53
#define TRACE_HEADER_LEN    32


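/*
 * Purely illustrative sketch (not part of the VM, never called): decoding
 * one 9-byte record back out of a buffer, following the little-endian
 * layout described above.  The helper name and local names are made up.
 * Note that TRACE_MAGIC, once stored little-endian, appears as the ASCII
 * bytes 'S' 'L' 'O' 'W' at the start of the file.
 */
static inline void exampleDecodeRecord(const u1* rec)
{
    u1 threadId = rec[0];                                   /* u1 thread ID */
    u4 methodWord = rec[1] | (rec[2] << 8) | (rec[3] << 16)
                  | ((u4) rec[4] << 24);                    /* method ID | action */
    u4 deltaUsec = rec[5] | (rec[6] << 8) | (rec[7] << 16)
                 | ((u4) rec[8] << 24);                     /* usec since start */

    LOGV("tid=%d method+action=0x%08x delta=%u usec\n",
        threadId, methodWord, deltaUsec);
}
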
/*
 * Get the wall-clock date/time, in usec.
 */
static inline u8 getTimeInUsec()
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000LL + tv.tv_usec;
}

/*
 * Get the current time, in microseconds.
 *
 * This can mean one of two things.  In "global clock" mode, we get the
 * same time across all threads.  If we use CLOCK_THREAD_CPUTIME_ID, we
 * get a per-thread CPU usage timer.  The latter is better, but a bit
 * more complicated to implement.
 */
static inline u8 getClock()
{
#if defined(HAVE_POSIX_CLOCKS)
    struct timespec tm;

    clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tm);
    //assert(tm.tv_nsec >= 0 && tm.tv_nsec < 1*1000*1000*1000);
    if (!(tm.tv_nsec >= 0 && tm.tv_nsec < 1*1000*1000*1000)) {
        LOGE("bad nsec: %ld\n", tm.tv_nsec);
        dvmAbort();
    }

    return tm.tv_sec * 1000000LL + tm.tv_nsec / 1000;
#else
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000LL + tv.tv_usec;
#endif
}

/*
 * Write little-endian data.
 */
static inline void storeShortLE(u1* buf, u2 val)
{
    *buf++ = (u1) val;
    *buf++ = (u1) (val >> 8);
}
static inline void storeIntLE(u1* buf, u4 val)
{
    *buf++ = (u1) val;
    *buf++ = (u1) (val >> 8);
    *buf++ = (u1) (val >> 16);
    *buf++ = (u1) (val >> 24);
}
static inline void storeLongLE(u1* buf, u8 val)
{
    *buf++ = (u1) val;
    *buf++ = (u1) (val >> 8);
    *buf++ = (u1) (val >> 16);
    *buf++ = (u1) (val >> 24);
    *buf++ = (u1) (val >> 32);
    *buf++ = (u1) (val >> 40);
    *buf++ = (u1) (val >> 48);
    *buf++ = (u1) (val >> 56);
}

/*
 * Boot-time init.
 */
bool dvmProfilingStartup(void)
{
    /*
     * Initialize "dmtrace" method profiling.
     */
    memset(&gDvm.methodTrace, 0, sizeof(gDvm.methodTrace));
    dvmInitMutex(&gDvm.methodTrace.startStopLock);
    pthread_cond_init(&gDvm.methodTrace.threadExitCond, NULL);

    ClassObject* clazz =
        dvmFindClassNoInit("Ldalvik/system/VMDebug;", NULL);
    assert(clazz != NULL);
    gDvm.methodTrace.gcMethod =
        dvmFindDirectMethodByDescriptor(clazz, "startGC", "()V");
    gDvm.methodTrace.classPrepMethod =
        dvmFindDirectMethodByDescriptor(clazz, "startClassPrep", "()V");
    if (gDvm.methodTrace.gcMethod == NULL ||
        gDvm.methodTrace.classPrepMethod == NULL)
    {
        LOGE("Unable to find startGC or startClassPrep\n");
        return false;
    }

    assert(!dvmCheckException(dvmThreadSelf()));

    /*
     * Allocate storage for instruction counters.
     */
    gDvm.executedInstrCounts = (int*) malloc(kNumDalvikInstructions * sizeof(int));
    if (gDvm.executedInstrCounts == NULL)
        return false;
    memset(gDvm.executedInstrCounts, 0, kNumDalvikInstructions * sizeof(int));

#ifdef UPDATE_MAGIC_PAGE
    /*
     * If we're running on the emulator, there's a magic page into which
     * we can put interpreted method information.  This allows interpreted
     * methods to show up in the emulator's code traces.
     *
     * We could key this off of the "ro.kernel.qemu" property, but there's
     * no real harm in doing this on a real device.
     */
    int fd = open("/dev/qemu_trace", O_RDWR);
    if (fd < 0) {
        LOGV("Unable to open /dev/qemu_trace\n");
    } else {
        gDvm.emulatorTracePage = mmap(0, PAGESIZE, PROT_READ|PROT_WRITE,
                                      MAP_SHARED, fd, 0);
        close(fd);
        if (gDvm.emulatorTracePage == MAP_FAILED) {
            LOGE("Unable to mmap /dev/qemu_trace\n");
            gDvm.emulatorTracePage = NULL;
        } else {
            *(u4*) gDvm.emulatorTracePage = 0;
        }
    }
#else
    assert(gDvm.emulatorTracePage == NULL);
#endif

    return true;
}

/*
 * Free up profiling resources.
 */
void dvmProfilingShutdown(void)
{
#ifdef UPDATE_MAGIC_PAGE
    if (gDvm.emulatorTracePage != NULL)
        munmap(gDvm.emulatorTracePage, PAGESIZE);
#endif
    free(gDvm.executedInstrCounts);
}

/*
 * Update the "active profilers" count.
 *
 * "count" should be +1 or -1.
 */
static void updateActiveProfilers(int count)
{
    int oldValue, newValue;

    do {
        oldValue = gDvm.activeProfilers;
        newValue = oldValue + count;
        if (newValue < 0) {
            LOGE("Can't have %d active profilers\n", newValue);
            dvmAbort();
        }
    } while (!ATOMIC_CMP_SWAP(&gDvm.activeProfilers, oldValue, newValue));

    LOGD("+++ active profiler count now %d\n", newValue);
}


/*
 * Reset the "cpuClockBase" field in all threads.
 */
static void resetCpuClockBase(void)
{
    Thread* thread;

    dvmLockThreadList(NULL);
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        thread->cpuClockBaseSet = false;
        thread->cpuClockBase = 0;
    }
    dvmUnlockThreadList();
}

/*
 * Dump the thread list to the specified file.
 */
static void dumpThreadList(FILE* fp)
{
    Thread* thread;

    dvmLockThreadList(NULL);
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        char* name = dvmGetThreadName(thread);

        fprintf(fp, "%d\t%s\n", thread->threadId, name);
        free(name);
    }
    dvmUnlockThreadList();
}

/*
 * This is a dvmHashForeach callback.
 */
static int dumpMarkedMethods(void* vclazz, void* vfp)
{
    DexStringCache stringCache;
    ClassObject* clazz = (ClassObject*) vclazz;
    FILE* fp = (FILE*) vfp;
    Method* meth;
    char* name;
    int i, lineNum;

    dexStringCacheInit(&stringCache);

    for (i = 0; i < clazz->virtualMethodCount; i++) {
        meth = &clazz->virtualMethods[i];
        if (meth->inProfile) {
            name = dvmDescriptorToName(meth->clazz->descriptor);
            fprintf(fp, "0x%08x\t%s\t%s\t%s\t%s\t%d\n", (int) meth,
                name, meth->name,
                dexProtoGetMethodDescriptor(&meth->prototype, &stringCache),
                dvmGetMethodSourceFile(meth), dvmLineNumFromPC(meth, 0));
            meth->inProfile = false;
            free(name);
        }
    }

    for (i = 0; i < clazz->directMethodCount; i++) {
        meth = &clazz->directMethods[i];
        if (meth->inProfile) {
            name = dvmDescriptorToName(meth->clazz->descriptor);
            fprintf(fp, "0x%08x\t%s\t%s\t%s\t%s\t%d\n", (int) meth,
                name, meth->name,
                dexProtoGetMethodDescriptor(&meth->prototype, &stringCache),
                dvmGetMethodSourceFile(meth), dvmLineNumFromPC(meth, 0));
            meth->inProfile = false;
            free(name);
        }
    }

    dexStringCacheRelease(&stringCache);

    return 0;
}

/*
 * Dump the list of "marked" methods to the specified file.
 */
static void dumpMethodList(FILE* fp)
{
    dvmHashTableLock(gDvm.loadedClasses);
    dvmHashForeach(gDvm.loadedClasses, dumpMarkedMethods, (void*) fp);
    dvmHashTableUnlock(gDvm.loadedClasses);
}

/*
 * Start method tracing.  This opens the file (if an already open fd has not
 * been supplied) and allocates the buffer.
 * If any of these fail, we throw an exception and return.
 *
 * Method tracing is global to the VM.
 */
void dvmMethodTraceStart(const char* traceFileName, int traceFd, int bufferSize,
        int flags)
{
    MethodTraceState* state = &gDvm.methodTrace;

    assert(bufferSize > 0);

    if (state->traceEnabled != 0) {
        LOGI("TRACE start requested, but already in progress; stopping\n");
        dvmMethodTraceStop();
    }
    updateActiveProfilers(1);
    LOGI("TRACE STARTED: '%s' %dKB\n",
        traceFileName, bufferSize / 1024);
    dvmLockMutex(&state->startStopLock);

    /*
     * Allocate storage and open files.
     *
     * We don't need to initialize the buffer, but doing so might remove
     * some fault overhead if the pages aren't mapped until touched.
     */
    state->buf = (u1*) malloc(bufferSize);
    if (state->buf == NULL) {
        dvmThrowException("Ljava/lang/InternalError;", "buffer alloc failed");
        goto fail;
    }
    if (traceFd < 0) {
        state->traceFile = fopen(traceFileName, "w");
    } else {
        state->traceFile = fdopen(traceFd, "w");
    }
    if (state->traceFile == NULL) {
        LOGE("Unable to open trace file '%s': %s\n",
            traceFileName, strerror(errno));
        dvmThrowException("Ljava/lang/RuntimeException;", "file open failed");
        goto fail;
    }
    memset(state->buf, 0xee, bufferSize);

    state->bufferSize = bufferSize;
    state->overflow = false;

    /*
     * Enable alloc counts if we've been requested to do so.
     */
    state->flags = flags;
    if ((flags & TRACE_ALLOC_COUNTS) != 0)
        dvmStartAllocCounting();

    /* reset our notion of the start time for all CPU threads */
    resetCpuClockBase();

    state->startWhen = getTimeInUsec();

    /*
     * Output the header.
     */
    memset(state->buf, 0, TRACE_HEADER_LEN);
    storeIntLE(state->buf + 0, TRACE_MAGIC);
    storeShortLE(state->buf + 4, TRACE_VERSION);
    storeShortLE(state->buf + 6, TRACE_HEADER_LEN);
    storeLongLE(state->buf + 8, state->startWhen);
    state->curOffset = TRACE_HEADER_LEN;

    MEM_BARRIER();

    /*
     * Set the "enabled" flag.  Once we do this, threads will wait to be
     * signaled before exiting, so we have to make sure we wake them up.
     */
    state->traceEnabled = true;
    dvmUnlockMutex(&state->startStopLock);
    return;

fail:
    updateActiveProfilers(-1);
    if (state->traceFile != NULL) {
        fclose(state->traceFile);
        state->traceFile = NULL;
    }
    if (state->buf != NULL) {
        free(state->buf);
        state->buf = NULL;
    }
    dvmUnlockMutex(&state->startStopLock);
}

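/*
 * Illustrative usage only (the path, buffer size, and flags below are
 * made-up values, not anything the VM prescribes):
 *
 *   dvmMethodTraceStart("/sdcard/example.trace", -1, 8 * 1024 * 1024, 0);
 *   ... run the code to be profiled ...
 *   dvmMethodTraceStop();
 *
 * Passing a non-negative traceFd hands an already-open descriptor to the
 * tracer (it is adopted via fdopen and closed on stop) instead of opening
 * traceFileName.
 */
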
/*
 * Run through the data buffer and pull out the methods that were visited.
 * Set a mark so that we know which ones to output.
 */
static void markTouchedMethods(void)
{
    u1* ptr = gDvm.methodTrace.buf + TRACE_HEADER_LEN;
    u1* end = gDvm.methodTrace.buf + gDvm.methodTrace.curOffset;
    unsigned int methodVal;
    Method* method;

    while (ptr < end) {
        methodVal = *(ptr+1) | (*(ptr+2) << 8) | (*(ptr+3) << 16)
                    | (*(ptr+4) << 24);
        method = (Method*) METHOD_ID(methodVal);

        method->inProfile = true;
        ptr += TRACE_REC_SIZE;
    }
}

/*
 * Compute the amount of overhead in a clock call, in nsec.
 *
 * This value is going to vary depending on what else is going on in the
 * system.  When examined across several runs a pattern should emerge.
 */
static u4 getClockOverhead(void)
{
    u8 calStart, calElapsed;
    int i;

    calStart = getClock();
    for (i = 1000 * 4; i > 0; i--) {
        getClock();
        getClock();
        getClock();
        getClock();
        getClock();
        getClock();
        getClock();
        getClock();
    }

    calElapsed = getClock() - calStart;
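    /*
     * The loop above makes 8 calls per iteration for 4*1000 iterations,
     * i.e. 32,000 timed calls.  "calElapsed" is in usec, so dividing by
     * 8*4 gives usec per 1000 calls, which equals nsec per call.
     */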
    return (int) (calElapsed / (8*4));
}

/*
 * Returns "true" if method tracing is currently active.
 */
bool dvmIsMethodTraceActive(void)
{
    const MethodTraceState* state = &gDvm.methodTrace;
    return state->traceEnabled;
}

/*
 * Stop method tracing.  We write the buffer to disk and generate a key
 * file so we can interpret it.
 */
void dvmMethodTraceStop(void)
{
    MethodTraceState* state = &gDvm.methodTrace;
    u8 elapsed;

    /*
     * We need this to prevent somebody from starting a new trace while
     * we're in the process of stopping the old.
     */
    dvmLockMutex(&state->startStopLock);

    if (!state->traceEnabled) {
        /* somebody already stopped it, or it was never started */
        LOGD("TRACE stop requested, but not running\n");
        dvmUnlockMutex(&state->startStopLock);
        return;
    } else {
        updateActiveProfilers(-1);
    }

    /* compute elapsed time */
    elapsed = getTimeInUsec() - state->startWhen;

    /*
     * Globally disable it, and allow other threads to notice.  We want
     * to stall here for at least as long as dvmMethodTraceAdd needs
     * to finish.  There's no real risk though -- it will take a while to
     * write the data to disk, and we don't clear the buffer pointer until
     * after that completes.
     */
    state->traceEnabled = false;
    MEM_BARRIER();
    sched_yield();

    if ((state->flags & TRACE_ALLOC_COUNTS) != 0)
        dvmStopAllocCounting();

    LOGI("TRACE STOPPED%s: writing %d records\n",
        state->overflow ? " (NOTE: overflowed buffer)" : "",
        (state->curOffset - TRACE_HEADER_LEN) / TRACE_REC_SIZE);
    if (gDvm.debuggerActive) {
        LOGW("WARNING: a debugger is active; method-tracing results "
             "will be skewed\n");
    }

    /*
     * Do a quick calibration test to see how expensive our clock call is.
     */
    u4 clockNsec = getClockOverhead();

    markTouchedMethods();

    fprintf(state->traceFile, "%cversion\n", TOKEN_CHAR);
    fprintf(state->traceFile, "%d\n", TRACE_VERSION);
    fprintf(state->traceFile, "data-file-overflow=%s\n",
        state->overflow ? "true" : "false");
#if defined(HAVE_POSIX_CLOCKS)
    fprintf(state->traceFile, "clock=thread-cpu\n");
#else
    fprintf(state->traceFile, "clock=global\n");
#endif
    fprintf(state->traceFile, "elapsed-time-usec=%llu\n", elapsed);
    fprintf(state->traceFile, "num-method-calls=%d\n",
        (state->curOffset - TRACE_HEADER_LEN) / TRACE_REC_SIZE);
    fprintf(state->traceFile, "clock-call-overhead-nsec=%d\n", clockNsec);
    fprintf(state->traceFile, "vm=dalvik\n");
    if ((state->flags & TRACE_ALLOC_COUNTS) != 0) {
        fprintf(state->traceFile, "alloc-count=%d\n",
            gDvm.allocProf.allocCount);
        fprintf(state->traceFile, "alloc-size=%d\n",
            gDvm.allocProf.allocSize);
        fprintf(state->traceFile, "gc-count=%d\n",
            gDvm.allocProf.gcCount);
    }
    fprintf(state->traceFile, "%cthreads\n", TOKEN_CHAR);
    dumpThreadList(state->traceFile);
    fprintf(state->traceFile, "%cmethods\n", TOKEN_CHAR);
    dumpMethodList(state->traceFile);
    fprintf(state->traceFile, "%cend\n", TOKEN_CHAR);

    if (fwrite(state->buf, state->curOffset, 1, state->traceFile) != 1) {
        LOGE("trace fwrite(%d) failed, errno=%d\n", state->curOffset, errno);
        dvmThrowException("Ljava/lang/RuntimeException;", "data write failed");
        goto bail;
    }

bail:
    free(state->buf);
    state->buf = NULL;
    fclose(state->traceFile);
    state->traceFile = NULL;

    int cc = pthread_cond_broadcast(&state->threadExitCond);
    assert(cc == 0);
    dvmUnlockMutex(&state->startStopLock);
}


/*
 * We just did something with a method.  Emit a record.
 *
 * Multiple threads may be banging on this all at once.  We use atomic ops
 * rather than mutexes for speed.
 */
void dvmMethodTraceAdd(Thread* self, const Method* method, int action)
{
    MethodTraceState* state = &gDvm.methodTrace;
    u4 clockDiff, methodVal;
    int oldOffset, newOffset;
    u1* ptr;

    /*
     * We can only access the per-thread CPU clock from within the
     * thread, so we have to initialize the base time on the first use.
     * (Looks like pthread_getcpuclockid(thread, &id) will do what we
     * want, but it doesn't appear to be defined on the device.)
     */
    if (!self->cpuClockBaseSet) {
        self->cpuClockBase = getClock();
        self->cpuClockBaseSet = true;
        //LOGI("thread base id=%d 0x%llx\n",
        //    self->threadId, self->cpuClockBase);
    }

    /*
     * Advance "curOffset" atomically.
     */
    do {
        oldOffset = state->curOffset;
        newOffset = oldOffset + TRACE_REC_SIZE;
        if (newOffset > state->bufferSize) {
            state->overflow = true;
            return;
        }
    } while (!ATOMIC_CMP_SWAP(&state->curOffset, oldOffset, newOffset));
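    /*
     * At this point the TRACE_REC_SIZE bytes at "oldOffset" belong to this
     * thread; any concurrent callers that lost the compare-and-swap retried
     * and reserved different slots, so the record can be written lock-free.
     */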

    //assert(METHOD_ACTION((u4) method) == 0);

    u8 now = getClock();
    clockDiff = (u4) (now - self->cpuClockBase);

    methodVal = METHOD_COMBINE((u4) method, action);

    /*
     * Write data into "oldOffset".
     */
    ptr = state->buf + oldOffset;
    *ptr++ = self->threadId;
    *ptr++ = (u1) methodVal;
    *ptr++ = (u1) (methodVal >> 8);
    *ptr++ = (u1) (methodVal >> 16);
    *ptr++ = (u1) (methodVal >> 24);
    *ptr++ = (u1) clockDiff;
    *ptr++ = (u1) (clockDiff >> 8);
    *ptr++ = (u1) (clockDiff >> 16);
    *ptr++ = (u1) (clockDiff >> 24);
}

/*
 * We just did something with a method.  Emit a record by setting a value
 * in a magic memory location.
 */
void dvmEmitEmulatorTrace(const Method* method, int action)
{
#ifdef UPDATE_MAGIC_PAGE
    /*
     * We store the address of the Dalvik bytecodes to the memory-mapped
     * trace page for normal Java methods.  We also trace calls to native
     * functions by storing the address of the native function to the
     * trace page.
     * Abstract methods don't have any bytecodes, so we don't trace them.
     * (Abstract methods are never called, but in Dalvik they can be
     * because we do a "late trap" to a native method to generate the
     * abstract method exception.)
     */
    if (dvmIsAbstractMethod(method))
        return;

    u4* pMagic = (u4*) gDvm.emulatorTracePage;
    u4 addr;

    if (dvmIsNativeMethod(method)) {
        /*
         * The "action" parameter is one of:
         *   0 = ENTER
         *   1 = EXIT
         *   2 = UNROLL
         * To help the trace tools reconstruct the runtime stack containing
         * a mix of Java plus native methods, we add 4 to the action if this
         * is a native method.
         */
        action += 4;

        /*
         * Get the address of the native function.
         * This isn't the right address -- how do I get it?
         * Fortunately, the trace tools can get by without the address, but
         * it would be nice to fix this.
         */
        addr = (u4) method->nativeFunc;
    } else {
        /*
         * The dexlist output shows the &DexCode.insns offset value, which
         * is offset from the start of the base DEX header. Method.insns
         * is the absolute address, effectively offset from the start of
         * the optimized DEX header. We either need to return the
         * optimized DEX base file address offset by the right amount, or
         * take the "real" address and subtract off the size of the
         * optimized DEX header.
         *
         * Would be nice to factor this out at dexlist time, but we can't count
         * on having access to the correct optimized DEX file.
         */
        assert(method->insns != NULL);
        const DexOptHeader* pOptHdr = method->clazz->pDvmDex->pDexFile->pOptHeader;
        addr = (u4) method->insns - pOptHdr->dexOffset;
    }

    *(pMagic+action) = addr;
    LOGVV("Set %p = 0x%08x (%s.%s)\n",
        pMagic+action, addr, method->clazz->descriptor, method->name);
#endif
}

/*
 * The GC calls this when it's about to start.  We add a marker to the
 * trace output so the tool can exclude the GC cost from the results.
 */
void dvmMethodTraceGCBegin(void)
{
    TRACE_METHOD_ENTER(dvmThreadSelf(), gDvm.methodTrace.gcMethod);
}
void dvmMethodTraceGCEnd(void)
{
    TRACE_METHOD_EXIT(dvmThreadSelf(), gDvm.methodTrace.gcMethod);
}

/*
 * The class loader calls this when it's loading or initializing a class.
 */
void dvmMethodTraceClassPrepBegin(void)
{
    TRACE_METHOD_ENTER(dvmThreadSelf(), gDvm.methodTrace.classPrepMethod);
}
void dvmMethodTraceClassPrepEnd(void)
{
    TRACE_METHOD_EXIT(dvmThreadSelf(), gDvm.methodTrace.classPrepMethod);
}


/*
 * Enable emulator trace info.
 */
void dvmEmulatorTraceStart(void)
{
    /* If we could not map the emulator trace page, then do not enable tracing */
    if (gDvm.emulatorTracePage == NULL)
        return;

    updateActiveProfilers(1);

    /* in theory we should make this an atomic inc; in practice not important */
    gDvm.emulatorTraceEnableCount++;
    if (gDvm.emulatorTraceEnableCount == 1)
        LOGD("--- emulator method traces enabled\n");
}

/*
 * Disable emulator trace info.
 */
void dvmEmulatorTraceStop(void)
{
    if (gDvm.emulatorTraceEnableCount == 0) {
        LOGE("ERROR: emulator tracing not enabled\n");
        return;
    }
    updateActiveProfilers(-1);
    /* in theory we should make this an atomic inc; in practice not important */
    gDvm.emulatorTraceEnableCount--;
    if (gDvm.emulatorTraceEnableCount == 0)
        LOGD("--- emulator method traces disabled\n");
}


/*
 * Start instruction counting.
 */
void dvmStartInstructionCounting()
{
    updateActiveProfilers(1);
    /* in theory we should make this an atomic inc; in practice not important */
    gDvm.instructionCountEnableCount++;
}

/*
 * Stop instruction counting.
 */
void dvmStopInstructionCounting()
{
    if (gDvm.instructionCountEnableCount == 0) {
        LOGE("ERROR: instruction counting not enabled\n");
        dvmAbort();
    }
    updateActiveProfilers(-1);
    gDvm.instructionCountEnableCount--;
}


/*
 * Start alloc counting.  Note this doesn't affect the "active profilers"
 * count, since the interpreter loop is not involved.
 */
void dvmStartAllocCounting(void)
{
    gDvm.allocProf.enabled = true;
}

/*
 * Stop alloc counting.
 */
void dvmStopAllocCounting(void)
{
    gDvm.allocProf.enabled = false;
}

#endif /*WITH_PROFILER*/