/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * DDM-related heap functions
 */
#include <sys/time.h>
#include <time.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"

#define DEFAULT_HEAP_ID  1

enum HpifWhen {
    HPIF_WHEN_NEVER = 0,
    HPIF_WHEN_NOW = 1,
    HPIF_WHEN_NEXT_GC = 2,
    HPIF_WHEN_EVERY_GC = 3
};

/*
 * Chunk HPIF (client --> server)
 *
 * Heap Info. General information about the heap,
 * suitable for a summary display.
 *
 *   [u4]: number of heaps
 *
 *   For each heap:
 *     [u4]: heap ID
 *     [u8]: timestamp in ms since Unix epoch
 *     [u1]: capture reason (same as 'when' value from server)
 *     [u4]: max heap size in bytes (-Xmx)
 *     [u4]: current heap size in bytes
 *     [u4]: current number of bytes allocated
 *     [u4]: current number of objects allocated
 */
#define HPIF_SIZE(numHeaps) \
        (sizeof(u4) + (numHeaps) * (5 * sizeof(u4) + sizeof(u1) + sizeof(u8)))
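/* For the single-heap case used below, HPIF_SIZE(1) works out to
 * 4 + (5*4 + 1 + 8) = 33 bytes.
 */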
void
dvmDdmSendHeapInfo(int reason, bool shouldLock)
{
    struct timeval now;
    u8 nowMs;
    u1 *buf, *b;

    buf = (u1 *)malloc(HPIF_SIZE(1));
    if (buf == NULL) {
        return;
    }
    b = buf;

    /* If there's a one-shot 'when', reset it.
     */
    if (reason == gDvm.gcHeap->ddmHpifWhen) {
        if (shouldLock && !dvmLockHeap()) {
            LOGW("%s(): can't lock heap to clear when\n", __func__);
            goto skip_when;
        }
        if (reason == gDvm.gcHeap->ddmHpifWhen) {
            if (gDvm.gcHeap->ddmHpifWhen == HPIF_WHEN_NEXT_GC) {
                gDvm.gcHeap->ddmHpifWhen = HPIF_WHEN_NEVER;
            }
        }
        if (shouldLock) {
            dvmUnlockHeap();
        }
    }
skip_when:

    /* The current time, in milliseconds since 0:00 GMT, 1/1/70.
     */
    if (gettimeofday(&now, NULL) < 0) {
        nowMs = 0;
    } else {
        nowMs = (u8)now.tv_sec * 1000 + now.tv_usec / 1000;
    }

    /* number of heaps */
    set4BE(b, 1); b += 4;

    /* For each heap (of which there is one) */
    {
        /* heap ID */
        set4BE(b, DEFAULT_HEAP_ID); b += 4;

        /* timestamp */
        set8BE(b, nowMs); b += 8;

        /* 'when' value */
        *b++ = (u1)reason;

        /* max allowed heap size in bytes */
        set4BE(b, gDvm.heapSizeMax); b += 4;

        /* current heap size in bytes */
        set4BE(b, dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0)); b += 4;

        /* number of bytes allocated */
        set4BE(b, dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0)); b += 4;

        /* number of objects allocated */
        set4BE(b, dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0)); b += 4;
    }
    assert((intptr_t)b == (intptr_t)buf + (intptr_t)HPIF_SIZE(1));

    dvmDbgDdmSendChunk(CHUNK_TYPE("HPIF"), b - buf, buf);

    /* dvmDbgDdmSendChunk() does not take ownership of the buffer,
     * so free it here to avoid leaking it.
     */
    free(buf);
}

bool
dvmDdmHandleHpifChunk(int when)
{
    switch (when) {
    case HPIF_WHEN_NOW:
        dvmDdmSendHeapInfo(when, true);
        break;
    case HPIF_WHEN_NEVER:
    case HPIF_WHEN_NEXT_GC:
    case HPIF_WHEN_EVERY_GC:
        if (dvmLockHeap()) {
            gDvm.gcHeap->ddmHpifWhen = when;
            dvmUnlockHeap();
        } else {
            LOGI("%s(): can't lock heap to set when\n", __func__);
            return false;
        }
        break;
    default:
        LOGI("%s(): bad when value 0x%08x\n", __func__, when);
        return false;
    }

    return true;
}

enum HpsgSolidity {
    SOLIDITY_FREE = 0,
    SOLIDITY_HARD = 1,
    SOLIDITY_SOFT = 2,
    SOLIDITY_WEAK = 3,
    SOLIDITY_PHANTOM = 4,
    SOLIDITY_FINALIZABLE = 5,
    SOLIDITY_SWEEP = 6,
};

enum HpsgKind {
    KIND_OBJECT = 0,
    KIND_CLASS_OBJECT = 1,
    KIND_ARRAY_1 = 2,
    KIND_ARRAY_2 = 3,
    KIND_ARRAY_4 = 4,
    KIND_ARRAY_8 = 5,
    KIND_UNKNOWN = 6,
    KIND_NATIVE = 7,
};

#define HPSG_PARTIAL (1<<7)
#define HPSG_STATE(solidity, kind) \
    ((u1)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
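/* For example, HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) encodes to
 * ((4 & 0x7) << 3) | (1 & 0x7) = 0x21.
 */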

typedef struct HeapChunkContext {
    u1 *buf;                    /* chunk payload buffer */
    u1 *p;                      /* write cursor within buf */
    u1 *pieceLenField;          /* location of the back-patched piece length */
    size_t bufLen;
    size_t totalAllocationUnits;
    int type;                   /* HPSG, HPSO, or NHSG */
    bool merge;
    bool needHeader;
} HeapChunkContext;

#define ALLOCATION_UNIT_SIZE 8
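/* Each record in an HPSx piece describes a run of at most 256 of
 * these 8-byte allocation units, so one record covers up to 2 KiB.
 */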

static void
flush_hpsg_chunk(HeapChunkContext *ctx)
{
    /* Patch the "length of piece" field.
     */
    assert(ctx->buf <= ctx->pieceLenField &&
            ctx->pieceLenField <= ctx->p);
    set4BE(ctx->pieceLenField, ctx->totalAllocationUnits);

    /* Send the chunk.
     */
    dvmDbgDdmSendChunk(ctx->type, ctx->p - ctx->buf, ctx->buf);

    /* Reset the context.
     */
    ctx->p = ctx->buf;
    ctx->totalAllocationUnits = 0;
    ctx->needHeader = true;
    ctx->pieceLenField = NULL;
}

static void
heap_chunk_callback(const void *chunkptr, size_t chunklen,
                    const void *userptr, size_t userlen, void *arg)
{
    HeapChunkContext *ctx = (HeapChunkContext *)arg;
    u1 state;

    UNUSED_PARAMETER(userlen);

    assert((chunklen & (ALLOCATION_UNIT_SIZE-1)) == 0);

    /* Make sure there's enough room left in the buffer.
     * We need two bytes for each run of up to 256 allocation
     * units described by this chunk.
     */
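    /* For example, a 16 KiB chunk spans 2048 allocation units and
     * needs ((2048 + 255) / 256) * 2 = 16 bytes of record space.
     */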
    {
        size_t needed = (((chunklen/ALLOCATION_UNIT_SIZE + 255) / 256) * 2);
        size_t bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
        if (bytesLeft < needed) {
            flush_hpsg_chunk(ctx);
        }

        bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
        if (bytesLeft < needed) {
            LOGW("chunk is too big to transmit (chunklen=%zd, %zd bytes)\n",
                chunklen, needed);
            return;
        }
    }

//TODO: notice when there's a gap and start a new heap, or at least a new range.
    if (ctx->needHeader) {
        /*
         * Start a new HPSx chunk.
         */

        /* [u4]: heap ID */
        set4BE(ctx->p, DEFAULT_HEAP_ID); ctx->p += 4;

        /* [u1]: size of allocation unit, in bytes */
        *ctx->p++ = ALLOCATION_UNIT_SIZE;

        /* [u4]: virtual address of segment start */
        set4BE(ctx->p, (uintptr_t)chunkptr); ctx->p += 4;

        /* [u4]: offset of this piece (relative to the virtual address) */
        set4BE(ctx->p, 0); ctx->p += 4;

        /* [u4]: length of piece, in allocation units
         * We won't know this until we're done, so save the offset
         * and stuff in a dummy value.
         */
        ctx->pieceLenField = ctx->p;
        set4BE(ctx->p, 0x55555555); ctx->p += 4;

        ctx->needHeader = false;
    }

    /* Determine the type of this chunk.
     */
    if (userptr == NULL) {
        /* It's a free chunk.
         */
        state = HPSG_STATE(SOLIDITY_FREE, 0);
    } else {
        const Object *obj = userptr;
        /* If we're looking at the native heap, we'll just return
         * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
         */
        bool native = ctx->type == CHUNK_TYPE("NHSG");

        /* It's an allocated chunk.  Figure out what it is.
         */
//TODO: if ctx.merge, see if this chunk is different from the last chunk.
//      If it's the same, we should combine them.
        if (!native && dvmIsValidObject(obj)) {
            ClassObject *clazz = obj->clazz;
            if (clazz == NULL) {
                /* The object was probably just created
                 * but hasn't been initialized yet.
                 */
                state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
            } else if (clazz == gDvm.classJavaLangClass) {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
            } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
                if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                } else {
                    switch (clazz->elementClass->primitiveType) {
                    case PRIM_BOOLEAN:
                    case PRIM_BYTE:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
                        break;
                    case PRIM_CHAR:
                    case PRIM_SHORT:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
                        break;
                    case PRIM_INT:
                    case PRIM_FLOAT:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                        break;
                    case PRIM_DOUBLE:
                    case PRIM_LONG:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
                        break;
                    default:
                        assert(!"Unknown GC heap object type");
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
                        break;
                    }
                }
            } else {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
            }
        } else {
            obj = NULL; // it's not actually an object
            state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
        }
    }

    /* Write out the chunk description.
     */
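    /* A 600-unit chunk, for example, is emitted as two HPSG_PARTIAL
     * records of 256 units each, followed by a final record of 88.
     */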
    chunklen /= ALLOCATION_UNIT_SIZE;   // convert to allocation units
    ctx->totalAllocationUnits += chunklen;
    while (chunklen > 256) {
        *ctx->p++ = state | HPSG_PARTIAL;
        *ctx->p++ = 255;     // length - 1
        chunklen -= 256;
    }
    *ctx->p++ = state;
    *ctx->p++ = chunklen - 1;
}

enum HpsgWhen {
    HPSG_WHEN_NEVER = 0,
    HPSG_WHEN_EVERY_GC = 1,
};
enum HpsgWhat {
    HPSG_WHAT_MERGED_OBJECTS = 0,
    HPSG_WHAT_DISTINCT_OBJECTS = 1,
};

/*
 * Maximum chunk size.  Obtain this from the formula:
 *
 * (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
 */
#define HPSx_CHUNK_SIZE (16384 - 16)
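/* For scale: a 16 MiB heap yields
 * (((16 * 1024 * 1024 / 8) + 255) / 256) * 2 = 16384 bytes of
 * records, so a heap on that order fills a single piece.
 */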

/* Walker for the native (dlmalloc) heap, used for the NHSG dump
 * below; declared locally rather than in a header.
 */
void dlmalloc_walk_heap(void (*)(const void*, size_t, const void*, size_t, void*), void*);

static void
walkHeap(bool merge, bool native)
{
    HeapChunkContext ctx;

    memset(&ctx, 0, sizeof(ctx));
    ctx.bufLen = HPSx_CHUNK_SIZE;
    ctx.buf = (u1 *)malloc(ctx.bufLen);
    if (ctx.buf == NULL) {
        return;
    }

    ctx.merge = merge;
    if (native) {
        ctx.type = CHUNK_TYPE("NHSG");
    } else {
        if (ctx.merge) {
            ctx.type = CHUNK_TYPE("HPSG");
        } else {
            ctx.type = CHUNK_TYPE("HPSO");
        }
    }

    ctx.p = ctx.buf;
    ctx.needHeader = true;
    if (native) {
        dlmalloc_walk_heap(heap_chunk_callback, (void *)&ctx);
    } else {
        dvmHeapSourceWalk(heap_chunk_callback, (void *)&ctx);
    }
    if (ctx.p > ctx.buf) {
        flush_hpsg_chunk(&ctx);
    }

    free(ctx.buf);
}

void
dvmDdmSendHeapSegments(bool shouldLock, bool native)
{
    u1 heapId[sizeof(u4)];
    GcHeap *gcHeap = gDvm.gcHeap;
    int when, what;
    bool merge;

    /* Don't even grab the lock if there's nothing to do when we're called.
     */
    if (!native) {
        when = gcHeap->ddmHpsgWhen;
        what = gcHeap->ddmHpsgWhat;
        if (when == HPSG_WHEN_NEVER) {
            return;
        }
    } else {
        when = gcHeap->ddmNhsgWhen;
        what = gcHeap->ddmNhsgWhat;
        if (when == HPSG_WHEN_NEVER) {
            return;
        }
    }
    if (shouldLock && !dvmLockHeap()) {
        LOGW("Can't lock heap for DDM HPSx dump\n");
        return;
    }

    /* Figure out what kind of chunks we'll be sending.
     */
    if (what == HPSG_WHAT_MERGED_OBJECTS) {
        merge = true;
    } else if (what == HPSG_WHAT_DISTINCT_OBJECTS) {
        merge = false;
    } else {
        assert(!"bad HPSG.what value");
        /* Don't return while still holding the heap lock. */
        if (shouldLock) {
            dvmUnlockHeap();
        }
        return;
    }

    /* First, send a heap start chunk.
     */
    set4BE(heapId, DEFAULT_HEAP_ID);
    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"),
        sizeof(u4), heapId);

    /* Send a series of heap segment chunks.
     */
    walkHeap(merge, native);

    /* Finally, send a heap end chunk.
     */
    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"),
        sizeof(u4), heapId);

    if (shouldLock) {
        dvmUnlockHeap();
    }
}

bool
dvmDdmHandleHpsgNhsgChunk(int when, int what, bool native)
{
    LOGI("%s(when %d, what %d, native %d)\n", __func__, when, what,
         native);
    switch (when) {
    case HPSG_WHEN_NEVER:
    case HPSG_WHEN_EVERY_GC:
        break;
    default:
        LOGI("%s(): bad when value 0x%08x\n", __func__, when);
        return false;
    }

    switch (what) {
    case HPSG_WHAT_MERGED_OBJECTS:
    case HPSG_WHAT_DISTINCT_OBJECTS:
        break;
    default:
        LOGI("%s(): bad what value 0x%08x\n", __func__, what);
        return false;
    }

    if (dvmLockHeap()) {
        if (!native) {
            gDvm.gcHeap->ddmHpsgWhen = when;
            gDvm.gcHeap->ddmHpsgWhat = what;
        } else {
            gDvm.gcHeap->ddmNhsgWhen = when;
            gDvm.gcHeap->ddmNhsgWhat = what;
        }
//TODO: if what says we should dump immediately, signal (or do) it from here
        dvmUnlockHeap();
    } else {
        LOGI("%s(): can't lock heap to set when/what\n", __func__);
        return false;
    }

    return true;
}