/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * DDM-related heap functions
 */
#include <sys/time.h>
#include <time.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"

#define DEFAULT_HEAP_ID 1

enum HpifWhen {
    HPIF_WHEN_NEVER = 0,
    HPIF_WHEN_NOW = 1,
    HPIF_WHEN_NEXT_GC = 2,
    HPIF_WHEN_EVERY_GC = 3
};

/*
 * Chunk HPIF (client --> server)
 *
 * Heap Info. General information about the heap,
 * suitable for a summary display.
 *
 *   [u4]: number of heaps
 *
 *   For each heap:
 *     [u4]: heap ID
 *     [u8]: timestamp in ms since Unix epoch
 *     [u1]: capture reason (same as 'when' value from server)
 *     [u4]: max heap size in bytes (-Xmx)
 *     [u4]: current heap size in bytes
 *     [u4]: current number of bytes allocated
 *     [u4]: current number of objects allocated
 */
#define HPIF_SIZE(numHeaps) \
        (sizeof(u4) + (numHeaps) * (5 * sizeof(u4) + sizeof(u1) + sizeof(u8)))
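
/*
 * Note: for the single heap reported below, HPIF_SIZE(1) works out to
 * 4 + (5*4 + 1 + 8) = 33 bytes.
 */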
void dvmDdmSendHeapInfo(int reason, bool shouldLock)
{
    struct timeval now;
    u8 nowMs;
    u1 *buf, *b;

    buf = (u1 *)malloc(HPIF_SIZE(1));
    if (buf == NULL) {
        return;
    }
    b = buf;

    /* If there's a one-shot 'when', reset it.
     */
    if (reason == gDvm.gcHeap->ddmHpifWhen) {
        if (shouldLock && !dvmLockHeap()) {
            LOGW("%s(): can't lock heap to clear when", __func__);
            goto skip_when;
        }
        if (reason == gDvm.gcHeap->ddmHpifWhen) {
            if (gDvm.gcHeap->ddmHpifWhen == HPIF_WHEN_NEXT_GC) {
                gDvm.gcHeap->ddmHpifWhen = HPIF_WHEN_NEVER;
            }
        }
        if (shouldLock) {
            dvmUnlockHeap();
        }
    }
skip_when:

    /* The current time, in milliseconds since 0:00 GMT, 1/1/70.
     */
    if (gettimeofday(&now, NULL) < 0) {
        nowMs = 0;
    } else {
        nowMs = (u8)now.tv_sec * 1000 + now.tv_usec / 1000;
    }

    /* number of heaps */
    set4BE(b, 1); b += 4;

    /* For each heap (of which there is one) */
    {
        /* heap ID */
        set4BE(b, DEFAULT_HEAP_ID); b += 4;

        /* timestamp */
        set8BE(b, nowMs); b += 8;

        /* 'when' value */
        *b++ = (u1)reason;

        /* max allowed heap size in bytes */
        set4BE(b, dvmHeapSourceGetMaximumSize()); b += 4;

        /* current heap size in bytes */
        set4BE(b, dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0)); b += 4;

        /* number of bytes allocated */
        set4BE(b, dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0)); b += 4;

        /* number of objects allocated */
        set4BE(b, dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0)); b += 4;
    }
    assert((intptr_t)b == (intptr_t)buf + (intptr_t)HPIF_SIZE(1));

    dvmDbgDdmSendChunk(CHUNK_TYPE("HPIF"), b - buf, buf);
    free(buf);  // dvmDbgDdmSendChunk doesn't take ownership (cf. walkHeap)
}
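
/*
 * Sketch of the expected GC-side hook (illustrative only, not the code
 * in Heap.cpp): after a collection, the GC checks the armed 'when'
 * value and calls this with shouldLock=false, since it already holds
 * the heap lock:
 *
 *     if (gDvm.gcHeap->ddmHpifWhen == HPIF_WHEN_NEXT_GC ||
 *             gDvm.gcHeap->ddmHpifWhen == HPIF_WHEN_EVERY_GC) {
 *         dvmDdmSendHeapInfo(gDvm.gcHeap->ddmHpifWhen, false);
 *     }
 */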

bool dvmDdmHandleHpifChunk(int when)
{
    switch (when) {
    case HPIF_WHEN_NOW:
        dvmDdmSendHeapInfo(when, true);
        break;
    case HPIF_WHEN_NEVER:
    case HPIF_WHEN_NEXT_GC:
    case HPIF_WHEN_EVERY_GC:
        if (dvmLockHeap()) {
            gDvm.gcHeap->ddmHpifWhen = when;
            dvmUnlockHeap();
        } else {
            LOGI("%s(): can't lock heap to set when", __func__);
            return false;
        }
        break;
    default:
        LOGI("%s(): bad when value 0x%08x", __func__, when);
        return false;
    }

    return true;
}
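
/*
 * For example: an HPIF request carrying when=HPIF_WHEN_NOW produces an
 * immediate HPIF reply on the caller's thread, while HPIF_WHEN_NEXT_GC
 * or HPIF_WHEN_EVERY_GC merely arms gDvm.gcHeap->ddmHpifWhen so the GC
 * sends the info later.
 */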

enum HpsgSolidity {
    SOLIDITY_FREE = 0,
    SOLIDITY_HARD = 1,
    SOLIDITY_SOFT = 2,
    SOLIDITY_WEAK = 3,
    SOLIDITY_PHANTOM = 4,
    SOLIDITY_FINALIZABLE = 5,
    SOLIDITY_SWEEP = 6,
};

enum HpsgKind {
    KIND_OBJECT = 0,
    KIND_CLASS_OBJECT = 1,
    KIND_ARRAY_1 = 2,
    KIND_ARRAY_2 = 3,
    KIND_ARRAY_4 = 4,
    KIND_ARRAY_8 = 5,
    KIND_UNKNOWN = 6,
    KIND_NATIVE = 7,
};

#define HPSG_PARTIAL    (1<<7)
#define HPSG_STATE(solidity, kind) \
        ((u1)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
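
/*
 * A state byte thus encodes the kind in bits 5:3 and the solidity in
 * bits 2:0, with bit 7 flagging a partial run. For example (worked out
 * here for illustration), HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) is
 * (4 << 3) | 1 == 0x21.
 */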

struct HeapChunkContext {
    u1 *buf;
    u1 *p;
    u1 *pieceLenField;
    size_t bufLen;
    size_t totalAllocationUnits;
    int type;
    bool merge;
    bool needHeader;
};
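
/*
 * Roles of the context fields, as used below: 'buf' is the transmit
 * buffer and 'p' the current write position within it; 'pieceLenField'
 * remembers where the piece-length word of the current piece header
 * lives so flush_hpsg_chunk() can patch it; 'totalAllocationUnits'
 * accumulates that length; 'type' is the DDM chunk type to send;
 * 'merge' selects merged vs. distinct output (see the TODO in the
 * callback); 'needHeader' says the next record must start a new piece.
 */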

#define ALLOCATION_UNIT_SIZE 8

static void flush_hpsg_chunk(HeapChunkContext *ctx)
{
    /* Patch the "length of piece" field.
     */
    assert(ctx->buf <= ctx->pieceLenField &&
           ctx->pieceLenField <= ctx->p);
    set4BE(ctx->pieceLenField, ctx->totalAllocationUnits);

    /* Send the chunk.
     */
    dvmDbgDdmSendChunk(ctx->type, ctx->p - ctx->buf, ctx->buf);

    /* Reset the context.
     */
    ctx->p = ctx->buf;
    ctx->totalAllocationUnits = 0;
    ctx->needHeader = true;
    ctx->pieceLenField = NULL;
}

static void heap_chunk_callback(const void *chunkptr, size_t chunklen,
                                const void *userptr, size_t userlen, void *arg)
{
    HeapChunkContext *ctx = (HeapChunkContext *)arg;
    u1 state;

    UNUSED_PARAMETER(userlen);

    assert((chunklen & (ALLOCATION_UNIT_SIZE-1)) == 0);

    /* Make sure there's enough room left in the buffer.
     * We need two bytes for every fractional 256 allocation units
     * used by the chunk: e.g. a 10000-byte chunk is 1250 units, which
     * takes ceil(1250/256) = 5 (state, length) pairs = 10 bytes.
     */
    {
        size_t needed = (((chunklen/ALLOCATION_UNIT_SIZE + 255) / 256) * 2);
        size_t bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
        if (bytesLeft < needed) {
            flush_hpsg_chunk(ctx);
        }

        bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
        if (bytesLeft < needed) {
            LOGW("chunk is too big to transmit (chunklen=%zd, %zd bytes)",
                chunklen, needed);
            return;
        }
    }

    //TODO: notice when there's a gap and start a new heap, or at least a new range.
    if (ctx->needHeader) {
        /*
         * Start a new HPSx chunk.
         */

        /* [u4]: heap ID */
        set4BE(ctx->p, DEFAULT_HEAP_ID); ctx->p += 4;

        /* [u1]: size of allocation unit, in bytes */
        *ctx->p++ = 8;

        /* [u4]: virtual address of segment start */
        set4BE(ctx->p, (uintptr_t)chunkptr); ctx->p += 4;

        /* [u4]: offset of this piece (relative to the virtual address) */
        set4BE(ctx->p, 0); ctx->p += 4;

        /* [u4]: length of piece, in allocation units
         * We won't know this until we're done, so save the offset
         * and stuff in a dummy value.
         */
        ctx->pieceLenField = ctx->p;
        set4BE(ctx->p, 0x55555555); ctx->p += 4;

        ctx->needHeader = false;
    }

    /* Determine the type of this chunk.
     */
    if (userptr == NULL) {
        /* It's a free chunk.
         */
        state = HPSG_STATE(SOLIDITY_FREE, 0);
    } else {
        const Object *obj = (const Object *)userptr;
        /* If we're looking at the native heap, we'll just return
         * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
         */
        bool native = ctx->type == CHUNK_TYPE("NHSG");

        /* It's an allocated chunk. Figure out what it is.
         */
        //TODO: if ctx.merge, see if this chunk is different from the last chunk.
        //      If it's the same, we should combine them.
        if (!native && dvmIsValidObject(obj)) {
            ClassObject *clazz = obj->clazz;
            if (clazz == NULL) {
                /* The object was probably just created
                 * but hasn't been initialized yet.
                 */
                state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
            } else if (dvmIsTheClassClass(clazz)) {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
            } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
                if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                } else {
                    switch (clazz->elementClass->primitiveType) {
                    case PRIM_BOOLEAN:
                    case PRIM_BYTE:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
                        break;
                    case PRIM_CHAR:
                    case PRIM_SHORT:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
                        break;
                    case PRIM_INT:
                    case PRIM_FLOAT:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                        break;
                    case PRIM_DOUBLE:
                    case PRIM_LONG:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
                        break;
                    default:
                        assert(!"Unknown GC heap object type");
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
                        break;
                    }
                }
            } else {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
            }
        } else {
            obj = NULL; // it's not actually an object
            state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
        }
    }

    /* Write out the chunk description.
     */
    chunklen /= ALLOCATION_UNIT_SIZE;   // convert to allocation units
    ctx->totalAllocationUnits += chunklen;
    while (chunklen > 256) {
        *ctx->p++ = state | HPSG_PARTIAL;
        *ctx->p++ = 255;     // length - 1
        chunklen -= 256;
    }
    *ctx->p++ = state;
    *ctx->p++ = chunklen - 1;
}
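
/*
 * For illustration: a 4808-byte allocated chunk is 601 allocation
 * units, which the loop above emits as two partial records of 256
 * units each ((state | HPSG_PARTIAL), 255) followed by a final
 * (state, 88) record covering the remaining 89 units.
 */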

enum HpsgWhen {
    HPSG_WHEN_NEVER = 0,
    HPSG_WHEN_EVERY_GC = 1,
};
enum HpsgWhat {
    HPSG_WHAT_MERGED_OBJECTS = 0,
    HPSG_WHAT_DISTINCT_OBJECTS = 1,
};

/*
 * Maximum chunk size. Obtain this from the formula:
 *
 * (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
 */
#define HPSx_CHUNK_SIZE (16384 - 16)
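
/*
 * Sanity check (worked out here for illustration): by that formula a
 * 16MB heap needs (((16MB / 8) + 255) / 256) * 2 = 16384 bytes even in
 * the best case, just over what this buffer holds; larger heaps are
 * therefore sent as multiple pieces, since heap_chunk_callback()
 * flushes whenever the buffer fills.
 */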

extern "C" void dlmalloc_walk_heap(void(*)(const void*, size_t, const void*, size_t, void*), void*);

static void walkHeap(bool merge, bool native)
{
    HeapChunkContext ctx;

    memset(&ctx, 0, sizeof(ctx));
    ctx.bufLen = HPSx_CHUNK_SIZE;
    ctx.buf = (u1 *)malloc(ctx.bufLen);
    if (ctx.buf == NULL) {
        return;
    }

    ctx.merge = merge;
    if (native) {
        ctx.type = CHUNK_TYPE("NHSG");
    } else {
        if (ctx.merge) {
            ctx.type = CHUNK_TYPE("HPSG");
        } else {
            ctx.type = CHUNK_TYPE("HPSO");
        }
    }

    ctx.p = ctx.buf;
    ctx.needHeader = true;
    if (native) {
        dlmalloc_walk_heap(heap_chunk_callback, (void *)&ctx);
    } else {
        dvmHeapSourceWalk(heap_chunk_callback, (void *)&ctx);
    }
    if (ctx.p > ctx.buf) {
        flush_hpsg_chunk(&ctx);
    }

    free(ctx.buf);
}

void dvmDdmSendHeapSegments(bool shouldLock, bool native)
{
    u1 heapId[sizeof(u4)];
    GcHeap *gcHeap = gDvm.gcHeap;
    int when, what;
    bool merge;

    /* Don't even grab the lock if there's nothing to do when we're called.
     */
    if (!native) {
        when = gcHeap->ddmHpsgWhen;
        what = gcHeap->ddmHpsgWhat;
        if (when == HPSG_WHEN_NEVER) {
            return;
        }
    } else {
        when = gcHeap->ddmNhsgWhen;
        what = gcHeap->ddmNhsgWhat;
        if (when == HPSG_WHEN_NEVER) {
            return;
        }
    }
    if (shouldLock && !dvmLockHeap()) {
        LOGW("Can't lock heap for DDM HPSx dump");
        return;
    }

    /* Figure out what kind of chunks we'll be sending.
     */
    if (what == HPSG_WHAT_MERGED_OBJECTS) {
        merge = true;
    } else if (what == HPSG_WHAT_DISTINCT_OBJECTS) {
        merge = false;
    } else {
        assert(!"bad HPSG.what value");
        /* Don't leak the heap lock on the (release-build) error path. */
        if (shouldLock) {
            dvmUnlockHeap();
        }
        return;
    }

    /* First, send a heap start chunk.
     */
    set4BE(heapId, DEFAULT_HEAP_ID);
    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"),
        sizeof(u4), heapId);

    /* Send a series of heap segment chunks.
     */
    walkHeap(merge, native);

    /* Finally, send a heap end chunk.
     */
    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"),
        sizeof(u4), heapId);

    if (shouldLock) {
        dvmUnlockHeap();
    }
}
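
/*
 * The resulting DDM traffic for one dump is therefore a fixed framing:
 * one HPST (or NHST) chunk carrying the heap ID, one or more HPSG/HPSO
 * (or NHSG) pieces from walkHeap(), and one HPEN (or NHEN) chunk.
 */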

bool dvmDdmHandleHpsgNhsgChunk(int when, int what, bool native)
{
    LOGI("%s(when %d, what %d, heap %d)", __func__, when, what, native);
    switch (when) {
    case HPSG_WHEN_NEVER:
    case HPSG_WHEN_EVERY_GC:
        break;
    default:
        LOGI("%s(): bad when value 0x%08x", __func__, when);
        return false;
    }

    switch (what) {
    case HPSG_WHAT_MERGED_OBJECTS:
    case HPSG_WHAT_DISTINCT_OBJECTS:
        break;
    default:
        LOGI("%s(): bad what value 0x%08x", __func__, what);
        return false;
    }

    if (dvmLockHeap()) {
        if (!native) {
            gDvm.gcHeap->ddmHpsgWhen = when;
            gDvm.gcHeap->ddmHpsgWhat = what;
        } else {
            gDvm.gcHeap->ddmNhsgWhen = when;
            gDvm.gcHeap->ddmNhsgWhat = what;
        }
        //TODO: if what says we should dump immediately, signal (or do) it from here
        dvmUnlockHeap();
    } else {
        LOGI("%s(): can't lock heap to set when/what", __func__);
        return false;
    }

    return true;
}
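
/*
 * For example (hypothetical request): a DDMS client that wants merged
 * object dumps after every collection would send when=HPSG_WHEN_EVERY_GC
 * and what=HPSG_WHAT_MERGED_OBJECTS with native=false; each subsequent
 * GC is then expected to end with a dvmDdmSendHeapSegments(false, false)
 * call that emits the HPSG data.
 */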