/external/jemalloc/src/ |
D | android_je_mallinfo.c |
    26   if (arenas[i] != NULL) {  in je_mallinfo()
    27   malloc_mutex_lock(TSDN_NULL, &arenas[i]->lock);  in je_mallinfo()
    28   mi.hblkhd += arenas[i]->stats.mapped;  in je_mallinfo()
    29   mi.uordblks += arenas[i]->stats.allocated_large;  in je_mallinfo()
    30   mi.uordblks += arenas[i]->stats.allocated_huge;  in je_mallinfo()
    31   malloc_mutex_unlock(TSDN_NULL, &arenas[i]->lock);  in je_mallinfo()
    34   arena_bin_t* bin = &arenas[i]->bins[j];  in je_mallinfo()
    62   if (arenas[aidx] != NULL) {  in __mallinfo_arena_info()
    63   malloc_mutex_lock(TSDN_NULL, &arenas[aidx]->lock);  in __mallinfo_arena_info()
    64   mi.hblkhd = arenas[aidx]->stats.mapped;  in __mallinfo_arena_info()
    [all …]
|
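The je_mallinfo() hits above show the aggregation pattern this file implements: walk the arenas array, take each initialized arena's lock, and add its mapped/allocated counters into the mallinfo totals. A minimal standalone sketch of that lock-and-sum loop; the struct, NARENAS, and counter names are illustrative stand-ins, not jemalloc's real definitions:

```c
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for jemalloc's per-arena stats (not real types). */
typedef struct {
    pthread_mutex_t lock;
    size_t mapped;
    size_t allocated_large;
} arena_stats_t;

#define NARENAS 4
static arena_stats_t *arena_ptrs[NARENAS];

/* Walk the arena array, lock each initialized arena, and sum its
 * counters, mirroring the je_mallinfo() loop above. */
static size_t total_large(void) {
    size_t total = 0;
    for (int i = 0; i < NARENAS; i++) {
        if (arena_ptrs[i] == NULL)
            continue;                      /* arena never created */
        pthread_mutex_lock(&arena_ptrs[i]->lock);
        total += arena_ptrs[i]->allocated_large;
        pthread_mutex_unlock(&arena_ptrs[i]->lock);
    }
    return total;
}

int main(void) {
    arena_stats_t a0 = { PTHREAD_MUTEX_INITIALIZER, 0, 4096 };
    arena_ptrs[0] = &a0;
    printf("%zu\n", total_large());        /* prints 4096 */
    return 0;
}
```

Per-arena locking keeps the totals consistent with concurrent allocation without funneling every reader through one global lock.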
D | ctl.c |
    502  {NAME("arenas"), CHILD(named, arenas)},
    661  ctl_arena_stats_t *astats = &ctl_stats.arenas[i];  in ctl_arena_refresh()
    662  ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];  in ctl_arena_refresh()
    686  memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *  in ctl_grow()
    703  a0dalloc(ctl_stats.arenas);  in ctl_grow()
    704  ctl_stats.arenas = astats;  in ctl_grow()
    720  ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);  in ctl_refresh()
    728  ctl_stats.arenas[i].initialized = initialized;  in ctl_refresh()
    738  ctl_stats.arenas[ctl_stats.narenas].allocated_small +  in ctl_refresh()
    739  ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +  in ctl_refresh()
    [all …]
|
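The ctl_grow() hits show how this stats array is grown: allocate a larger array, memcpy the old (narenas + 1) elements (per-arena slots plus the trailing summary slot noted in the ctl.h entry further down), then a0dalloc the old array and install the new pointer. A rough sketch of that allocate-copy-swap idiom with plain libc calls; the names and types are illustrative:

```c
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for ctl_arena_stats_t. */
typedef struct { size_t allocated; } stats_sketch_t;

/* Grow a stats array that keeps one trailing summary slot, mirroring
 * ctl_grow(): allocate bigger, copy (narenas + 1) old elements, free
 * the old array, swap the pointer. Returns 0 on success. */
static int grow_stats(stats_sketch_t **stats, unsigned *narenas) {
    unsigned new_n = *narenas + 1;
    stats_sketch_t *astats = calloc(new_n + 1, sizeof(*astats));
    if (astats == NULL)
        return 1;                          /* old array still valid */
    memcpy(astats, *stats, (*narenas + 1) * sizeof(*astats));
    free(*stats);
    *stats = astats;
    *narenas = new_n;
    return 0;
}
```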
D | jemalloc.c |
    64    arena_t **arenas;  variable
    412   atomic_write_p((void **)&arenas[ind], arena);  in arena_set()
    1384  arenas = &a0;
    1385  memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
    1471  arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
    1473  if (arenas == NULL)
|
D | stats.c |
    731  arenas.lg_dirty_mult, ",")  in stats_general_print()
    734  OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",")  in stats_general_print()
|
/external/python/cpython2/Objects/ |
D | obmalloc.c |
    521  static struct arena_object* arenas = NULL;  variable
    580  if (numarenas > PY_SIZE_MAX / sizeof(*arenas))  in new_arena()
    583  nbytes = numarenas * sizeof(*arenas);  in new_arena()
    584  arenaobj = (struct arena_object *)realloc(arenas, nbytes);  in new_arena()
    587  arenas = arenaobj;  in new_arena()
    600  arenas[i].address = 0;  /* mark as unassociated */  in new_arena()
    601  arenas[i].nextarena = i < numarenas - 1 ?  in new_arena()
    602  &arenas[i+1] : NULL;  in new_arena()
    606  unused_arena_objects = &arenas[maxarenas];  in new_arena()
    739  (uptr)(P) - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE && \
    [all …]
|
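The new_arena() hits include the overflow guard at snippet line 580: before computing numarenas * sizeof(*arenas), the code checks that the product cannot exceed PY_SIZE_MAX, since a wrapped product would make realloc() silently produce a too-small array. A small sketch of the same guard in isolation; the function name is illustrative:

```c
#include <stdint.h>
#include <stdlib.h>

/* Grow an array to new_count elements of elem_size bytes each
 * (elem_size > 0). Returns the new buffer, or NULL if the byte count
 * would overflow size_t or the allocation fails; on failure the old
 * buffer is left intact, as with realloc(). */
static void *grow_checked(void *buf, size_t new_count, size_t elem_size) {
    if (new_count > SIZE_MAX / elem_size)
        return NULL;                       /* product would wrap */
    return realloc(buf, new_count * elem_size);
}
```

This is the same check as `numarenas > PY_SIZE_MAX / sizeof(*arenas)` above; cpython2 spells SIZE_MAX as PY_SIZE_MAX for portability.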
/external/jemalloc_new/ |
D | ChangeLog |
    36   + arenas.lookup (@lionkov)
    190  associated with destroyed arenas. (@jasone)
    213  + arenas.{dirty,muzzy}_decay_ms
    214  + arenas.bin.<i>.slab_size
    215  + arenas.nlextents
    216  + arenas.lextent.<i>.size
    217  + arenas.create
    222  + stats.arenas.<i>.{dirty,muzzy}_decay_ms
    223  + stats.arenas.<i>.uptime
    224  + stats.arenas.<i>.{pmuzzy,base,internal,resident}
    [all …]
|
D | TUNING.md |
    51   By default jemalloc uses multiple arenas to reduce internal lock contention.
    53   since arenas manage memory independently. When high degree of parallelism
    54   is not expected at the allocator level, lower number of arenas often
    103  * [Explicit arenas](http://jemalloc.net/jemalloc.3.html#arenas.create)
    105  Manually created arenas can help performance in various ways, e.g. by
    110  locality. In addition, explicit arenas often benefit from individually
    120  uses explicit arenas with customized extent hooks to manage 1GB huge pages
    128  from explicit binding, e.g. binding very active threads to dedicated arenas
|
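The "explicit arenas" item in these TUNING.md hits corresponds to the arenas.create mallctl paired with the MALLOCX_ARENA() flag: create a private arena, then route chosen allocations to it. A minimal sketch, assuming jemalloc's standard unprefixed public API and omitting most error handling:

```c
#include <jemalloc/jemalloc.h>
#include <stdio.h>

int main(void) {
    unsigned arena_ind;
    size_t sz = sizeof(arena_ind);

    /* Ask jemalloc to create a fresh arena; its index is returned. */
    if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
        fprintf(stderr, "arenas.create failed\n");
        return 1;
    }

    /* Route this allocation to the explicit arena instead of the
     * thread's default one. */
    void *p = mallocx(4096, MALLOCX_ARENA(arena_ind));
    if (p != NULL)
        dallocx(p, 0);
    return 0;
}
```

Binding an entire thread to the new arena instead goes through the thread.arena mallctl, which matches the "binding very active threads to dedicated arenas" suggestion at snippet line 128.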
/external/jemalloc/ |
D | ChangeLog |
    60   - Fix stats.arenas.<i>.nthreads accounting. (@interwq)
    103  - Add the stats.retained and stats.arenas.<i>.retained statistics. (@jasone)
    161  + arenas.decay_time
    162  + stats.arenas.<i>.decay_time
    198  - Refactor arenas array. In addition to fixing a fork-related deadlock, this
    212  - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for
    344  - Refactor huge allocation to be managed by arenas, so that arenas now
    348  + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc",
    349  "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests"
    351  + The "arenas.nhchunks", "arenas.hchunk.<i>.size",
    [all …]
|
D | Android.bp |
    29   // The total number of arenas will be less than or equal to this number.
    30   // The number of arenas will be calculated as 2 * the number of cpus
|
/external/python/cpython3/Objects/ |
D | obmalloc.c |
    1136  static struct arena_object* arenas = NULL;  variable
    1205  if (numarenas > SIZE_MAX / sizeof(*arenas))  in new_arena()
    1208  nbytes = numarenas * sizeof(*arenas);  in new_arena()
    1209  arenaobj = (struct arena_object *)PyMem_RawRealloc(arenas, nbytes);  in new_arena()
    1212  arenas = arenaobj;  in new_arena()
    1225  arenas[i].address = 0;  /* mark as unassociated */  in new_arena()
    1226  arenas[i].nextarena = i < numarenas - 1 ?  in new_arena()
    1227  &arenas[i+1] : NULL;  in new_arena()
    1231  unused_arena_objects = &arenas[maxarenas];  in new_arena()
    1359  (uintptr_t)p - arenas[arenaindex].address < ARENA_SIZE &&  in address_in_range()
    [all …]
|
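The address_in_range() hit at snippet line 1359 is a single-comparison bounds check: with unsigned arithmetic, `(uintptr_t)p - base < ARENA_SIZE` is equivalent to `base <= p && p < base + ARENA_SIZE`, because when p is below base the subtraction wraps to a huge value. A standalone illustration with hypothetical base and size values:

```c
#include <stdint.h>
#include <stdio.h>

/* One-comparison range check: true iff base <= addr < base + size.
 * If addr < base, the unsigned subtraction wraps to a huge value
 * that can never be smaller than size. */
static int in_range(uintptr_t addr, uintptr_t base, uintptr_t size) {
    return addr - base < size;
}

int main(void) {
    uintptr_t base = 0x10000, size = 0x4000;    /* hypothetical arena */
    printf("%d\n", in_range(0x10008, base, size));  /* 1: inside */
    printf("%d\n", in_range(0x14000, base, size));  /* 0: one past end */
    printf("%d\n", in_range(0x0fff8, base, size));  /* 0: below, wraps */
    return 0;
}
```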
/external/jemalloc_new/include/jemalloc/ |
D | jemalloc_macros.h.in |
    36  * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
    48  * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
    49  * destroyed arenas.
|
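These comments document jemalloc 5's sentinel arena indices: substituting MALLCTL_ARENAS_ALL (or MALLCTL_ARENAS_DESTROYED) for <i> in a stats.arenas.<i>.* name selects the merged or destroyed-arena statistics. A minimal sketch of building and issuing such a query, assuming the standard unprefixed API; real readers usually bump the epoch mallctl first to refresh the stats snapshot:

```c
#include <jemalloc/jemalloc.h>
#include <stdio.h>

int main(void) {
    char name[80];
    size_t allocated, sz = sizeof(allocated);

    /* MALLCTL_ARENAS_ALL stands in for <i>, merging across all arenas. */
    snprintf(name, sizeof(name), "stats.arenas.%u.small.allocated",
             (unsigned)MALLCTL_ARENAS_ALL);
    if (mallctl(name, &allocated, &sz, NULL, 0) == 0)
        printf("small allocated, all arenas: %zu\n", allocated);
    return 0;
}
```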
/external/jemalloc_new/src/ |
D | android_je_mallinfo.c |
    57   arena_t* arena = atomic_load_p(&arenas[i], ATOMIC_ACQUIRE);  in je_mallinfo()
    85   arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);  in je_mallinfo_arena_info()
    102  arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);  in je_mallinfo_bin_info()
|
/external/jemalloc_new/include/jemalloc/internal/ |
D | jemalloc_internal_externs.h | 32 extern atomic_p_t arenas[];
|
D | ctl.h | 90 ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT]; member
|
D | jemalloc_internal_inlines_a.h | 88 ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE); in arena_get()
|
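arena_get() above does an acquire load from the arenas array, pairing with the release-style store seen in arena_set() (the atomic_write_p hit in the older jemalloc.c): a reader that observes a non-NULL slot also observes the arena's initialized fields. A minimal C11 sketch of that publish/lookup pattern; the types and names are illustrative:

```c
#include <stdatomic.h>

#define NARENAS 8

typedef struct { int initialized; } arena_t;

/* Slots start NULL; each is published at most once. */
static _Atomic(arena_t *) arena_slots[NARENAS];

/* Writer: finish initializing the arena, then publish it with a
 * release store so its contents are visible to acquire readers. */
static void arena_set_sketch(unsigned ind, arena_t *arena) {
    arena->initialized = 1;
    atomic_store_explicit(&arena_slots[ind], arena, memory_order_release);
}

/* Reader: the acquire load pairs with the release store above, so a
 * non-NULL result implies arena->initialized is already visible. */
static arena_t *arena_get_sketch(unsigned ind) {
    return atomic_load_explicit(&arena_slots[ind], memory_order_acquire);
}
```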
D | private_namespace_jet.h | 7 #define arenas JEMALLOC_N(arenas) macro
|
D | private_namespace.h | 7 #define arenas JEMALLOC_N(arenas) macro
|
D | extent_inlines.h | 49 return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE); in extent_arena_get()
|
/external/jemalloc/include/jemalloc/internal/ |
D | ctl.h | 66 ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */ member
|
D | jemalloc_internal.h.in |
    456  /* Number of arenas used for automatic multiplexing of threads and arenas. */
    461  * arenas array are necessarily used; arenas are created lazily as needed.
    463  extern arena_t **arenas;
    954  ret = arenas[ind];
    956  ret = atomic_read_p((void *)&arenas[ind]);
|
D | jemalloc_internal.h |
    463  extern arena_t **arenas;
    954  ret = arenas[ind];  in arena_get()
    956  ret = atomic_read_p((void *)&arenas[ind]);  in arena_get()
|
D | private_symbols.txt | 119 arenas
|
D | private_namespace.h | 119 #define arenas JEMALLOC_N(arenas) macro
|
/external/python/cpython2/Misc/ |
D | README.valgrind | 62 The memory pymalloc manages itself is in one or more "arenas",
|
/external/python/cpython3/Misc/ |
D | README.valgrind | 65 The memory pymalloc manages itself is in one or more "arenas",
|