/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * DDM-related heap functions
 */
#include <sys/time.h>
#include <time.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/DlMalloc.h"
#include "alloc/HeapSource.h"

#define DEFAULT_HEAP_ID  1

enum HpifWhen {
    HPIF_WHEN_NEVER = 0,
    HPIF_WHEN_NOW = 1,
    HPIF_WHEN_NEXT_GC = 2,
    HPIF_WHEN_EVERY_GC = 3
};

/*
 * Chunk HPIF (client --> server)
 *
 * Heap Info. General information about the heap,
 * suitable for a summary display.
 *
 *   [u4]: number of heaps
 *
 *   For each heap:
 *     [u4]: heap ID
 *     [u8]: timestamp in ms since Unix epoch
 *     [u1]: capture reason (same as 'when' value from server)
 *     [u4]: max heap size in bytes (-Xmx)
 *     [u4]: current heap size in bytes
 *     [u4]: current number of bytes allocated
 *     [u4]: current number of objects allocated
 */
#define HPIF_SIZE(numHeaps) \
        (sizeof(u4) + (numHeaps) * (5 * sizeof(u4) + sizeof(u1) + sizeof(u8)))
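/* For a single heap, HPIF_SIZE(1) = 4 + (5*4 + 1 + 8) = 33 bytes. */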
void dvmDdmSendHeapInfo(int reason, bool shouldLock)
{
    struct timeval now;
    u8 nowMs;
    u1 *buf, *b;

    buf = (u1 *)malloc(HPIF_SIZE(1));
    if (buf == NULL) {
        return;
    }
    b = buf;

    /* If there's a one-shot 'when', reset it.
     */
    if (reason == gDvm.gcHeap->ddmHpifWhen) {
        if (shouldLock && !dvmLockHeap()) {
            ALOGW("%s(): can't lock heap to clear when", __func__);
            goto skip_when;
        }
        if (reason == gDvm.gcHeap->ddmHpifWhen) {
            if (gDvm.gcHeap->ddmHpifWhen == HPIF_WHEN_NEXT_GC) {
                gDvm.gcHeap->ddmHpifWhen = HPIF_WHEN_NEVER;
            }
        }
        if (shouldLock) {
            dvmUnlockHeap();
        }
    }
skip_when:

    /* The current time, in milliseconds since 0:00 GMT, 1/1/70.
     */
    if (gettimeofday(&now, NULL) < 0) {
        nowMs = 0;
    } else {
        nowMs = (u8)now.tv_sec * 1000 + now.tv_usec / 1000;
    }

    /* number of heaps */
    set4BE(b, 1); b += 4;

    /* For each heap (of which there is one) */
    {
        /* heap ID */
        set4BE(b, DEFAULT_HEAP_ID); b += 4;

        /* timestamp */
        set8BE(b, nowMs); b += 8;

        /* 'when' value */
        *b++ = (u1)reason;

        /* max allowed heap size in bytes */
        set4BE(b, dvmHeapSourceGetMaximumSize()); b += 4;

        /* current heap size in bytes */
        set4BE(b, dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0)); b += 4;

        /* number of bytes allocated */
        set4BE(b, dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0)); b += 4;

        /* number of objects allocated */
        set4BE(b, dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0)); b += 4;
    }
    assert((intptr_t)b == (intptr_t)buf + (intptr_t)HPIF_SIZE(1));

    dvmDbgDdmSendChunk(CHUNK_TYPE("HPIF"), b - buf, buf);
    free(buf);    /* dvmDbgDdmSendChunk() does not take ownership of buf */
}

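/*
 * Handles a HPIF chunk from the debugger: HPIF_WHEN_NOW triggers an
 * immediate dvmDdmSendHeapInfo(); the other 'when' values are recorded
 * under the heap lock so later GCs can act on them.
 */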
bool dvmDdmHandleHpifChunk(int when)
{
    switch (when) {
    case HPIF_WHEN_NOW:
        dvmDdmSendHeapInfo(when, true);
        break;
    case HPIF_WHEN_NEVER:
    case HPIF_WHEN_NEXT_GC:
    case HPIF_WHEN_EVERY_GC:
        if (dvmLockHeap()) {
            gDvm.gcHeap->ddmHpifWhen = when;
            dvmUnlockHeap();
        } else {
            ALOGI("%s(): can't lock heap to set when", __func__);
            return false;
        }
        break;
    default:
        ALOGI("%s(): bad when value 0x%08x", __func__, when);
        return false;
    }

    return true;
}

enum HpsgSolidity {
    SOLIDITY_FREE = 0,
    SOLIDITY_HARD = 1,
    SOLIDITY_SOFT = 2,
    SOLIDITY_WEAK = 3,
    SOLIDITY_PHANTOM = 4,
    SOLIDITY_FINALIZABLE = 5,
    SOLIDITY_SWEEP = 6,
};

enum HpsgKind {
    KIND_OBJECT = 0,
    KIND_CLASS_OBJECT = 1,
    KIND_ARRAY_1 = 2,
    KIND_ARRAY_2 = 3,
    KIND_ARRAY_4 = 4,
    KIND_ARRAY_8 = 5,
    KIND_UNKNOWN = 6,
    KIND_NATIVE = 7,
};
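/* The KIND_ARRAY_N values encode the array element size in bytes; see the
 * primitiveType switch in heap_chunk_callback() below. */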

#define HPSG_PARTIAL    (1<<7)
#define HPSG_STATE(solidity, kind) \
        ((u1)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))

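/* Each state byte packs the kind into bits 5..3 and the solidity into
 * bits 2..0, leaving bit 7 for the HPSG_PARTIAL continuation flag; e.g.
 * HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) == (4 << 3) | 1 == 0x21. */

/*
 * State carried across append_chunk()/flush_hpsg_chunk() calls while
 * walking a heap: the output buffer, the write cursor, and the location
 * of the "length of piece" field that is back-patched at flush time.
 */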
struct HeapChunkContext {
    void* startOfNextMemoryChunk;
    u1 *buf;
    u1 *p;
    u1 *pieceLenField;
    size_t bufLen;
    size_t totalAllocationUnits;
    int type;
    bool merge;
    bool needHeader;
};

#define ALLOCATION_UNIT_SIZE 8

static void flush_hpsg_chunk(HeapChunkContext *ctx)
{
    if (ctx->pieceLenField == NULL && ctx->needHeader) {
        /* Already flushed */
        return;
    }
    /* Patch the "length of piece" field.
     */
    assert(ctx->buf <= ctx->pieceLenField &&
           ctx->pieceLenField <= ctx->p);
    set4BE(ctx->pieceLenField, ctx->totalAllocationUnits);

    /* Send the chunk.
     */
    dvmDbgDdmSendChunk(ctx->type, ctx->p - ctx->buf, ctx->buf);

    /* Reset the context.
     */
    ctx->p = ctx->buf;
    ctx->totalAllocationUnits = 0;
    ctx->needHeader = true;
    ctx->pieceLenField = NULL;
}

static void append_chunk(HeapChunkContext *ctx, u1 state, void* ptr, size_t length) {
    /* Make sure there's enough room left in the buffer.
     * We need two bytes for every run of up to 256 allocation
     * units used by the chunk, plus 17 bytes for the header if
     * one is needed.
     */
    {
        size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
        size_t bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
        if (bytesLeft < needed) {
            flush_hpsg_chunk(ctx);
        }
        bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
        if (bytesLeft < needed) {
            ALOGW("chunk is too big to transmit (length=%zu, %zu bytes)",
                  length, needed);
            return;
        }
    }
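    /* Worked example: a 1 MiB chunk is 131072 allocation units, or 512
     * records of 256 units each, so with a header it needs
     * 512 * 2 + 17 = 1041 bytes of buffer space. */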
    if (ctx->needHeader) {
        /*
         * Start a new HPSx chunk.
         */

        /* [u4]: heap ID */
        set4BE(ctx->p, DEFAULT_HEAP_ID); ctx->p += 4;

        /* [u1]: size of allocation unit, in bytes */
        *ctx->p++ = 8;

        /* [u4]: virtual address of segment start */
        set4BE(ctx->p, (uintptr_t)ptr); ctx->p += 4;

        /* [u4]: offset of this piece (relative to the virtual address) */
        set4BE(ctx->p, 0); ctx->p += 4;

        /* [u4]: length of piece, in allocation units
         * We won't know this until we're done, so save the offset
         * and stuff in a dummy value.
         */
        ctx->pieceLenField = ctx->p;
        set4BE(ctx->p, 0x55555555); ctx->p += 4;

        ctx->needHeader = false;
    }
    /* Write out the chunk description.
     */
    length /= ALLOCATION_UNIT_SIZE;   // convert to allocation units
    ctx->totalAllocationUnits += length;
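    /* Runs longer than 256 units are split into HPSG_PARTIAL records of
     * 256 units each; record lengths are stored biased by one. For
     * example, 600 units become two (state | HPSG_PARTIAL, 255) records
     * followed by a final (state, 87) record covering 88 units. */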
    while (length > 256) {
        *ctx->p++ = state | HPSG_PARTIAL;
        *ctx->p++ = 255;    // length - 1
        length -= 256;
    }
    *ctx->p++ = state;
    *ctx->p++ = length - 1;
}

/*
 * Called by dlmalloc_inspect_all. If used_bytes != 0 then start is
 * the start of a malloc-ed piece of memory of size used_bytes. If
 * used_bytes is 0 then start is the beginning of any free space not
 * including dlmalloc's book keeping, and end is the start of the next
 * dlmalloc chunk. Regions purely containing book keeping don't
 * trigger the callback.
 */
static void heap_chunk_callback(void* start, void* end, size_t used_bytes,
                                void* arg)
{
    u1 state;
    HeapChunkContext *ctx = (HeapChunkContext *)arg;
    UNUSED_PARAMETER(end);

    if (used_bytes == 0) {
        if (start == NULL) {
            // Reset for start of new heap.
            ctx->startOfNextMemoryChunk = NULL;
            flush_hpsg_chunk(ctx);
        }
        // Only process in-use memory, so that free-region information
        // also covers dlmalloc's book keeping.
        return;
    }

    /* If we're looking at the native heap, we'll just return
     * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
     */
    bool native = ctx->type == CHUNK_TYPE("NHSG");

    if (ctx->startOfNextMemoryChunk != NULL) {
        // Transmit any pending free memory. Native free memory spanning
        // more than kMaxFreeLen is probably the result of mmap use, so
        // don't report it. If there is no free memory, start a new segment.
        bool flush = true;
        if (start > ctx->startOfNextMemoryChunk) {
            const size_t kMaxFreeLen = 2 * SYSTEM_PAGE_SIZE;
            void* freeStart = ctx->startOfNextMemoryChunk;
            void* freeEnd = start;
            size_t freeLen = (char*)freeEnd - (char*)freeStart;
            if (!native || freeLen < kMaxFreeLen) {
                append_chunk(ctx, HPSG_STATE(SOLIDITY_FREE, 0),
                             freeStart, freeLen);
                flush = false;
            }
        }
        if (flush) {
            ctx->startOfNextMemoryChunk = NULL;
            flush_hpsg_chunk(ctx);
        }
    }
    const Object *obj = (const Object *)start;

    /* It's an allocated chunk. Figure out what it is.
     */
    //TODO: if ctx->merge, see if this chunk is different from the last chunk.
    //      If it's the same, we should combine them.
    if (!native && dvmIsValidObject(obj)) {
        ClassObject *clazz = obj->clazz;
        if (clazz == NULL) {
            /* The object was probably just created
             * but hasn't been initialized yet.
             */
            state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
        } else if (dvmIsTheClassClass(clazz)) {
            state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
        } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
            if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
            } else {
                switch (clazz->elementClass->primitiveType) {
                case PRIM_BOOLEAN:
                case PRIM_BYTE:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
                    break;
                case PRIM_CHAR:
                case PRIM_SHORT:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
                    break;
                case PRIM_INT:
                case PRIM_FLOAT:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                    break;
                case PRIM_DOUBLE:
                case PRIM_LONG:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
                    break;
                default:
                    assert(!"Unknown GC heap object type");
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
                    break;
                }
            }
        } else {
            state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
        }
    } else {
        obj = NULL;    // it's not actually an object
        state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
    }
    append_chunk(ctx, state, start, used_bytes + HEAP_SOURCE_CHUNK_OVERHEAD);
    ctx->startOfNextMemoryChunk =
        (char*)start + used_bytes + HEAP_SOURCE_CHUNK_OVERHEAD;
}

enum HpsgWhen {
    HPSG_WHEN_NEVER = 0,
    HPSG_WHEN_EVERY_GC = 1,
};
enum HpsgWhat {
    HPSG_WHAT_MERGED_OBJECTS = 0,
    HPSG_WHAT_DISTINCT_OBJECTS = 1,
};

/*
 * Maximum chunk size. Obtain this from the formula:
 *
 * (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
 */
#define HPSx_CHUNK_SIZE (16384 - 16)
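/* 16368 bytes leaves room for about 8175 two-byte records after the
 * 17-byte piece header, i.e. one piece can describe roughly 16 MB of
 * heap before append_chunk() has to flush. */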

static void walkHeap(bool merge, bool native)
{
    HeapChunkContext ctx;

    memset(&ctx, 0, sizeof(ctx));
    ctx.bufLen = HPSx_CHUNK_SIZE;
    ctx.buf = (u1 *)malloc(ctx.bufLen);
    if (ctx.buf == NULL) {
        return;
    }

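    /* Pick the DDM chunk type: NHSG for the native heap; for the managed
     * heap, HPSG answers a merged-objects query and HPSO a distinct-
     * objects query. */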
    ctx.merge = merge;
    if (native) {
        ctx.type = CHUNK_TYPE("NHSG");
    } else {
        if (ctx.merge) {
            ctx.type = CHUNK_TYPE("HPSG");
        } else {
            ctx.type = CHUNK_TYPE("HPSO");
        }
    }

    ctx.p = ctx.buf;
    ctx.needHeader = true;
    if (native) {
        dlmalloc_inspect_all(heap_chunk_callback, (void*)&ctx);
    } else {
        dvmHeapSourceWalk(heap_chunk_callback, (void *)&ctx);
    }
    if (ctx.p > ctx.buf) {
        flush_hpsg_chunk(&ctx);
    }

    free(ctx.buf);
}

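/*
 * Sends one heap segment series over DDM: a HPST/NHST start chunk, the
 * HPSG/HPSO/NHSG segment chunks produced by walkHeap(), and a HPEN/NHEN
 * end chunk, all tagged with the same heap ID.
 */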
void dvmDdmSendHeapSegments(bool shouldLock, bool native)
{
    u1 heapId[sizeof(u4)];
    GcHeap *gcHeap = gDvm.gcHeap;
    int when, what;
    bool merge;

    /* Don't even grab the lock if there's nothing to do when we're called.
     */
    if (!native) {
        when = gcHeap->ddmHpsgWhen;
        what = gcHeap->ddmHpsgWhat;
        if (when == HPSG_WHEN_NEVER) {
            return;
        }
    } else {
        when = gcHeap->ddmNhsgWhen;
        what = gcHeap->ddmNhsgWhat;
        if (when == HPSG_WHEN_NEVER) {
            return;
        }
    }
    if (shouldLock && !dvmLockHeap()) {
        ALOGW("Can't lock heap for DDM HPSx dump");
        return;
    }

    /* Figure out what kind of chunks we'll be sending.
     */
    if (what == HPSG_WHAT_MERGED_OBJECTS) {
        merge = true;
    } else if (what == HPSG_WHAT_DISTINCT_OBJECTS) {
        merge = false;
    } else {
        assert(!"bad HPSG.what value");
        if (shouldLock) {
            dvmUnlockHeap();    // don't leave the heap locked on a bad value
        }
        return;
    }

    /* First, send a heap start chunk.
     */
    set4BE(heapId, DEFAULT_HEAP_ID);
    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"),
                       sizeof(u4), heapId);

    /* Send a series of heap segment chunks.
     */
    walkHeap(merge, native);

    /* Finally, send a heap end chunk.
     */
    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"),
                       sizeof(u4), heapId);

    if (shouldLock) {
        dvmUnlockHeap();
    }
}

bool dvmDdmHandleHpsgNhsgChunk(int when, int what, bool native)
{
    ALOGI("%s(): when %d, what %d, native %d", __func__, when, what,
          native);
    switch (when) {
    case HPSG_WHEN_NEVER:
    case HPSG_WHEN_EVERY_GC:
        break;
    default:
        ALOGI("%s(): bad when value 0x%08x", __func__, when);
        return false;
    }

    switch (what) {
    case HPSG_WHAT_MERGED_OBJECTS:
    case HPSG_WHAT_DISTINCT_OBJECTS:
        break;
    default:
        ALOGI("%s(): bad what value 0x%08x", __func__, what);
        return false;
    }

    if (dvmLockHeap()) {
        if (!native) {
            gDvm.gcHeap->ddmHpsgWhen = when;
            gDvm.gcHeap->ddmHpsgWhat = what;
        } else {
            gDvm.gcHeap->ddmNhsgWhen = when;
            gDvm.gcHeap->ddmNhsgWhat = what;
        }
        //TODO: if what says we should dump immediately, signal (or do) it from here
        dvmUnlockHeap();
    } else {
        ALOGI("%s(): can't lock heap to set when/what", __func__);
        return false;
    }

    return true;
}