/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * DDM-related heap functions
 */
#include <sys/time.h>
#include <time.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/DlMalloc.h"
#include "alloc/HeapSource.h"

#define DEFAULT_HEAP_ID  1

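/* Values for the 'when' field of an HPIF request from the DDM server;
 * the same value is echoed back as the 'capture reason' in the reply
 * (see the HPIF chunk description below).
 */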
enum HpifWhen {
    HPIF_WHEN_NEVER = 0,
    HPIF_WHEN_NOW = 1,
    HPIF_WHEN_NEXT_GC = 2,
    HPIF_WHEN_EVERY_GC = 3
};

/*
 * Chunk HPIF (client --> server)
 *
 * Heap Info. General information about the heap,
 * suitable for a summary display.
 *
 *   [u4]: number of heaps
 *
 *   For each heap:
 *     [u4]: heap ID
 *     [u8]: timestamp in ms since Unix epoch
 *     [u1]: capture reason (same as 'when' value from server)
 *     [u4]: max heap size in bytes (-Xmx)
 *     [u4]: current heap size in bytes
 *     [u4]: current number of bytes allocated
 *     [u4]: current number of objects allocated
 */
#define HPIF_SIZE(numHeaps) \
        (sizeof(u4) + (numHeaps) * (5 * sizeof(u4) + sizeof(u1) + sizeof(u8)))
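/* With u4 = 4 bytes, u1 = 1 byte, and u8 = 8 bytes, the single heap
 * reported here needs HPIF_SIZE(1) = 4 + (5*4 + 1 + 8) = 33 bytes.
 */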
void dvmDdmSendHeapInfo(int reason, bool shouldLock)
{
    struct timeval now;
    u8 nowMs;
    u1 *buf, *b;

    buf = (u1 *)malloc(HPIF_SIZE(1));
    if (buf == NULL) {
        return;
    }
    b = buf;

    /* If there's a one-shot 'when', reset it.
     */
    if (reason == gDvm.gcHeap->ddmHpifWhen) {
        if (shouldLock && ! dvmLockHeap()) {
            ALOGW("%s(): can't lock heap to clear when", __func__);
            goto skip_when;
        }
        /* Re-check now that we hold the heap lock. */
        if (reason == gDvm.gcHeap->ddmHpifWhen) {
            if (gDvm.gcHeap->ddmHpifWhen == HPIF_WHEN_NEXT_GC) {
                gDvm.gcHeap->ddmHpifWhen = HPIF_WHEN_NEVER;
            }
        }
        if (shouldLock) {
            dvmUnlockHeap();
        }
    }
skip_when:

    /* The current time, in milliseconds since 0:00 GMT, 1/1/70.
     */
    if (gettimeofday(&now, NULL) < 0) {
        nowMs = 0;
    } else {
        nowMs = (u8)now.tv_sec * 1000 + now.tv_usec / 1000;
    }

    /* number of heaps */
    set4BE(b, 1); b += 4;

    /* For each heap (of which there is one) */
    {
        /* heap ID */
        set4BE(b, DEFAULT_HEAP_ID); b += 4;

        /* timestamp */
        set8BE(b, nowMs); b += 8;

        /* 'when' value */
        *b++ = (u1)reason;

        /* max allowed heap size in bytes */
        set4BE(b, dvmHeapSourceGetMaximumSize()); b += 4;

        /* current heap size in bytes */
        set4BE(b, dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0)); b += 4;

        /* number of bytes allocated */
        set4BE(b, dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0)); b += 4;

        /* number of objects allocated */
        set4BE(b, dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0)); b += 4;
    }
    assert((intptr_t)b == (intptr_t)buf + (intptr_t)HPIF_SIZE(1));
    dvmDbgDdmSendChunk(CHUNK_TYPE("HPIF"), b - buf, buf);
    /* dvmDbgDdmSendChunk doesn't take ownership of the buffer (walkHeap
     * below frees its own buffer after sending), so free it here to
     * avoid leaking it.
     */
    free(buf);
}

bool dvmDdmHandleHpifChunk(int when)
{
    switch (when) {
    case HPIF_WHEN_NOW:
        dvmDdmSendHeapInfo(when, true);
        break;
    case HPIF_WHEN_NEVER:
    case HPIF_WHEN_NEXT_GC:
    case HPIF_WHEN_EVERY_GC:
        if (dvmLockHeap()) {
            gDvm.gcHeap->ddmHpifWhen = when;
            dvmUnlockHeap();
        } else {
            ALOGI("%s(): can't lock heap to set when", __func__);
            return false;
        }
        break;
    default:
        ALOGI("%s(): bad when value 0x%08x", __func__, when);
        return false;
    }

    return true;
}

enum HpsgSolidity {
    SOLIDITY_FREE = 0,
    SOLIDITY_HARD = 1,
    SOLIDITY_SOFT = 2,
    SOLIDITY_WEAK = 3,
    SOLIDITY_PHANTOM = 4,
    SOLIDITY_FINALIZABLE = 5,
    SOLIDITY_SWEEP = 6,
};

enum HpsgKind {
    KIND_OBJECT = 0,
    KIND_CLASS_OBJECT = 1,
    KIND_ARRAY_1 = 2,
    KIND_ARRAY_2 = 3,
    KIND_ARRAY_4 = 4,
    KIND_ARRAY_8 = 5,
    KIND_UNKNOWN = 6,
    KIND_NATIVE = 7,
};

#define HPSG_PARTIAL (1<<7)
#define HPSG_STATE(solidity, kind) \
    ((u1)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
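/* The kind occupies bits 3-5 and the solidity bits 0-2; for example,
 * HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) == (u1)((4 << 3) | 1) == 0x21.
 */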

struct HeapChunkContext {
    void* startOfNextMemoryChunk; /* expected start of the next in-use chunk */
    u1 *buf;                      /* payload buffer for the chunk being built */
    u1 *p;                        /* write cursor within buf */
    u1 *pieceLenField;            /* back-patched by flush_hpsg_chunk */
    size_t bufLen;
    size_t totalAllocationUnits;  /* units covered by the current piece */
    int type;                     /* chunk type to send: HPSG, HPSO, or NHSG */
    bool merge;
    bool needHeader;              /* write a piece header before the next run */
};

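/* Heap contents are described in 8-byte allocation units; each
 * (state, length) byte pair in a piece body covers at most 256 units.
 */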
#define ALLOCATION_UNIT_SIZE 8

static void flush_hpsg_chunk(HeapChunkContext *ctx)
{
    /* Patch the "length of piece" field.
     */
    assert(ctx->buf <= ctx->pieceLenField &&
            ctx->pieceLenField <= ctx->p);
    set4BE(ctx->pieceLenField, ctx->totalAllocationUnits);

    /* Send the chunk.
     */
    dvmDbgDdmSendChunk(ctx->type, ctx->p - ctx->buf, ctx->buf);

    /* Reset the context.
     */
    ctx->p = ctx->buf;
    ctx->totalAllocationUnits = 0;
    ctx->needHeader = true;
    ctx->pieceLenField = NULL;
}

static void append_chunk(HeapChunkContext *ctx, u1 state, void* ptr, size_t length) {
    /* Make sure there's enough room left in the buffer.
     * We need two bytes for every full or partial run of 256
     * allocation units in the chunk, plus 17 bytes for the
     * piece header.
     */
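    /* For example, a 4 KB chunk is 512 allocation units, which take
     * two 2-byte runs, so needed = 2*2 + 17 = 21 bytes.
     */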
    {
        size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
        size_t bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
        if (bytesLeft < needed) {
            flush_hpsg_chunk(ctx);
        }
        bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
        if (bytesLeft < needed) {
            ALOGW("chunk is too big to transmit (length=%zu, needed=%zu bytes)",
                  length, needed);
            return;
        }
    }
    if (ctx->needHeader) {
        /*
         * Start a new HPSx chunk.
         */

        /* [u4]: heap ID */
        set4BE(ctx->p, DEFAULT_HEAP_ID); ctx->p += 4;

        /* [u1]: size of allocation unit, in bytes */
        *ctx->p++ = ALLOCATION_UNIT_SIZE;

        /* [u4]: virtual address of segment start */
        set4BE(ctx->p, (uintptr_t)ptr); ctx->p += 4;

        /* [u4]: offset of this piece (relative to the virtual address) */
        set4BE(ctx->p, 0); ctx->p += 4;

        /* [u4]: length of piece, in allocation units
         * We won't know this until we're done, so save the offset
         * and stuff in a dummy value.
         */
        ctx->pieceLenField = ctx->p;
        set4BE(ctx->p, 0x55555555); ctx->p += 4;

        ctx->needHeader = false;
    }
    /* Write out the chunk description.
     */
    length /= ALLOCATION_UNIT_SIZE;   // convert to allocation units
    ctx->totalAllocationUnits += length;
    while (length > 256) {
        *ctx->p++ = state | HPSG_PARTIAL;
        *ctx->p++ = 255;     // length - 1
        length -= 256;
    }
    *ctx->p++ = state;
    *ctx->p++ = length - 1;
}
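/* For example, a 600-unit chunk is emitted as three byte pairs:
 * (state|HPSG_PARTIAL, 255), (state|HPSG_PARTIAL, 255), (state, 87),
 * i.e. runs of 256 + 256 + 88 allocation units.
 */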

/*
 * Called by dlmalloc_inspect_all. If used_bytes != 0, start is the
 * start of a malloc-ed piece of memory of size used_bytes. If
 * used_bytes is 0, start is the beginning of any free space (not
 * including dlmalloc's bookkeeping) and end is the start of the next
 * dlmalloc chunk. Regions purely containing bookkeeping don't
 * trigger a callback.
 */
static void heap_chunk_callback(void* start, void* end, size_t used_bytes,
                                void* arg)
{
    u1 state;
    HeapChunkContext *ctx = (HeapChunkContext *)arg;
    UNUSED_PARAMETER(end);

    if (used_bytes == 0) {
        if (start == NULL) {
            // Reset for start of new heap.
            ctx->startOfNextMemoryChunk = NULL;
            flush_hpsg_chunk(ctx);
        }
        // Only process in-use memory so that free region information
        // also includes dlmalloc bookkeeping.
        return;
    }

    /* If we're looking at the native heap, we'll just return
     * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
     */
    bool native = ctx->type == CHUNK_TYPE("NHSG");

    if (ctx->startOfNextMemoryChunk != NULL) {
        // Transmit any pending free memory. Native free spans longer
        // than kMaxFreeLen are likely mmap'ed regions rather than
        // malloc free space, so don't report those; flush and start a
        // new segment instead.
        bool flush = true;
        if (start > ctx->startOfNextMemoryChunk) {
            const size_t kMaxFreeLen = 2 * SYSTEM_PAGE_SIZE;
            void* freeStart = ctx->startOfNextMemoryChunk;
            void* freeEnd = start;
            size_t freeLen = (char*)freeEnd - (char*)freeStart;
            if (!native || freeLen < kMaxFreeLen) {
                append_chunk(ctx, HPSG_STATE(SOLIDITY_FREE, 0),
                             freeStart, freeLen);
                flush = false;
            }
        }
        if (flush) {
            ctx->startOfNextMemoryChunk = NULL;
            flush_hpsg_chunk(ctx);
        }
    }
    const Object *obj = (const Object *)start;

    /* It's an allocated chunk.  Figure out what it is.
     */
//TODO: if ctx.merge, see if this chunk is different from the last chunk.
//      If it's the same, we should combine them.
    if (!native && dvmIsValidObject(obj)) {
        ClassObject *clazz = obj->clazz;
        if (clazz == NULL) {
            /* The object was probably just created
             * but hasn't been initialized yet.
             */
            state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
        } else if (dvmIsTheClassClass(clazz)) {
            state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
        } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
            if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
            } else {
                switch (clazz->elementClass->primitiveType) {
                case PRIM_BOOLEAN:
                case PRIM_BYTE:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
                    break;
                case PRIM_CHAR:
                case PRIM_SHORT:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
                    break;
                case PRIM_INT:
                case PRIM_FLOAT:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                    break;
                case PRIM_DOUBLE:
                case PRIM_LONG:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
                    break;
                default:
                    assert(!"Unknown GC heap object type");
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
                    break;
                }
            }
        } else {
            state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
        }
    } else {
        obj = NULL; // it's not actually an object
        state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
    }
    append_chunk(ctx, state, start, used_bytes + HEAP_SOURCE_CHUNK_OVERHEAD);
    ctx->startOfNextMemoryChunk =
        (char*)start + used_bytes + HEAP_SOURCE_CHUNK_OVERHEAD;
}

enum HpsgWhen {
    HPSG_WHEN_NEVER = 0,
    HPSG_WHEN_EVERY_GC = 1,
};
enum HpsgWhat {
    HPSG_WHAT_MERGED_OBJECTS = 0,
    HPSG_WHAT_DISTINCT_OBJECTS = 1,
};
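/* These 'when'/'what' values arrive in HPSG/NHSG control chunks and
 * are validated in dvmDdmHandleHpsgNhsgChunk below.
 */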

/*
 * Maximum chunk size.  Obtain this from the formula:
 *
 * (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
 */
#define HPSx_CHUNK_SIZE (16384 - 16)
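/* For large heaps the formula above exceeds this buffer size; in that
 * case append_chunk flushes the buffer and the dump is simply split
 * across multiple HPSx pieces.
 */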

static void walkHeap(bool merge, bool native)
{
    HeapChunkContext ctx;

    memset(&ctx, 0, sizeof(ctx));
    ctx.bufLen = HPSx_CHUNK_SIZE;
    ctx.buf = (u1 *)malloc(ctx.bufLen);
    if (ctx.buf == NULL) {
        return;
    }

    ctx.merge = merge;
    if (native) {
        ctx.type = CHUNK_TYPE("NHSG");
    } else {
        if (ctx.merge) {
            ctx.type = CHUNK_TYPE("HPSG");
        } else {
            ctx.type = CHUNK_TYPE("HPSO");
        }
    }

    ctx.p = ctx.buf;
    ctx.needHeader = true;
    if (native) {
        dlmalloc_inspect_all(heap_chunk_callback, (void*)&ctx);
    } else {
        dvmHeapSourceWalk(heap_chunk_callback, (void *)&ctx);
    }
    if (ctx.p > ctx.buf) {
        flush_hpsg_chunk(&ctx);
    }

    free(ctx.buf);
}

void dvmDdmSendHeapSegments(bool shouldLock, bool native)
{
    u1 heapId[sizeof(u4)];
    GcHeap *gcHeap = gDvm.gcHeap;
    int when, what;
    bool merge;

    /* Don't even grab the lock if there's nothing to do when we're called.
     */
    if (!native) {
        when = gcHeap->ddmHpsgWhen;
        what = gcHeap->ddmHpsgWhat;
        if (when == HPSG_WHEN_NEVER) {
            return;
        }
    } else {
        when = gcHeap->ddmNhsgWhen;
        what = gcHeap->ddmNhsgWhat;
        if (when == HPSG_WHEN_NEVER) {
            return;
        }
    }
    if (shouldLock && !dvmLockHeap()) {
        ALOGW("Can't lock heap for DDM HPSx dump");
        return;
    }

    /* Figure out what kind of chunks we'll be sending.
     */
    if (what == HPSG_WHAT_MERGED_OBJECTS) {
        merge = true;
    } else if (what == HPSG_WHAT_DISTINCT_OBJECTS) {
        merge = false;
    } else {
        assert(!"bad HPSG.what value");
        /* Don't return while still holding the heap lock. */
        if (shouldLock) {
            dvmUnlockHeap();
        }
        return;
    }

    /* First, send a heap start chunk.
     */
    set4BE(heapId, DEFAULT_HEAP_ID);
    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"),
        sizeof(u4), heapId);

    /* Send a series of heap segment chunks.
     */
    walkHeap(merge, native);

    /* Finally, send a heap end chunk.
     */
    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"),
        sizeof(u4), heapId);

    if (shouldLock) {
        dvmUnlockHeap();
    }
}

bool dvmDdmHandleHpsgNhsgChunk(int when, int what, bool native)
{
    ALOGI("%s(when %d, what %d, heap %d)", __func__, when, what,
         native);
    switch (when) {
    case HPSG_WHEN_NEVER:
    case HPSG_WHEN_EVERY_GC:
        break;
    default:
        ALOGI("%s(): bad when value 0x%08x", __func__, when);
        return false;
    }

    switch (what) {
    case HPSG_WHAT_MERGED_OBJECTS:
    case HPSG_WHAT_DISTINCT_OBJECTS:
        break;
    default:
        ALOGI("%s(): bad what value 0x%08x", __func__, what);
        return false;
    }

    if (dvmLockHeap()) {
        if (!native) {
            gDvm.gcHeap->ddmHpsgWhen = when;
            gDvm.gcHeap->ddmHpsgWhat = what;
        } else {
            gDvm.gcHeap->ddmNhsgWhen = when;
            gDvm.gcHeap->ddmNhsgWhat = what;
        }
//TODO: if what says we should dump immediately, signal (or do) it from here
        dvmUnlockHeap();
    } else {
        ALOGI("%s(): can't lock heap to set when/what", __func__);
        return false;
    }

    return true;
}