/external/python/cpython2/Objects/ |
D | obmalloc.c |
    521  static struct arena_object* arenas = NULL;  variable
    580  if (numarenas > PY_SIZE_MAX / sizeof(*arenas))  in new_arena()
    583  nbytes = numarenas * sizeof(*arenas);  in new_arena()
    584  arenaobj = (struct arena_object *)realloc(arenas, nbytes);  in new_arena()
    587  arenas = arenaobj;  in new_arena()
    600  arenas[i].address = 0;  /* mark as unassociated */  in new_arena()
    601  arenas[i].nextarena = i < numarenas - 1 ?  in new_arena()
    602      &arenas[i+1] : NULL;  in new_arena()
    606  unused_arena_objects = &arenas[maxarenas];  in new_arena()
    739  (uptr)(P) - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE && \
    [all …]
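
    The new_arena() hits above show how pymalloc grows its arena_object array: guard
    the element count against overflow, realloc the array, then thread the fresh slots
    onto an "unused" list. Below is a minimal stand-alone sketch of that grow-and-link
    pattern; the names and the doubling policy are simplifications, not the CPython
    code itself.

        #include <stdlib.h>
        #include <stdint.h>

        /* Hypothetical, simplified arena descriptor modelled on the excerpt above. */
        struct arena_object {
            uintptr_t address;                /* 0 means "not associated with memory" */
            struct arena_object *nextarena;   /* link in the unused list */
        };

        static struct arena_object *arenas = NULL;   /* growable descriptor array */
        static size_t maxarenas = 0;                 /* current capacity of 'arenas' */
        static struct arena_object *unused_arena_objects = NULL;

        /* Grow the descriptor array (doubling) and push the new slots onto the
         * unused list.  Returns 0 on success, -1 on overflow or OOM. */
        static int grow_arena_array(void) {
            size_t numarenas = maxarenas ? maxarenas * 2 : 16;
            if (numarenas <= maxarenas)                  /* size_t wraparound */
                return -1;
            if (numarenas > SIZE_MAX / sizeof(*arenas))  /* nbytes would overflow */
                return -1;
            size_t nbytes = numarenas * sizeof(*arenas);
            struct arena_object *arenaobj = realloc(arenas, nbytes);
            if (arenaobj == NULL)
                return -1;
            arenas = arenaobj;

            /* Thread the freshly added slots into a singly linked "unused" list. */
            for (size_t i = maxarenas; i < numarenas; ++i) {
                arenas[i].address = 0;                   /* mark as unassociated */
                arenas[i].nextarena = i < numarenas - 1 ? &arenas[i + 1] : NULL;
            }
            unused_arena_objects = &arenas[maxarenas];
            maxarenas = numarenas;
            return 0;
        }

    The unused list lets descriptor slots be recycled without ever shrinking the
    array, which is why address == 0 doubles as the "unassociated" marker in the
    real code.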
|
/external/skia/src/gpu/ |
D | GrRecordingContextPriv.h |
    45  SkArenaAlloc* recordTimeAllocator() { return fContext->arenas().recordTimeAllocator(); }  in recordTimeAllocator()
    47  return fContext->arenas().recordTimeSubRunAllocator();  in recordTimeSubRunAllocator()
    49  GrRecordingContext::Arenas arenas() { return fContext->arenas(); }  in arenas() function
|
D | GrSurfaceFillContext.h |
    198  SkArenaAlloc* arenaAlloc() { return this->arenas()->arenaAlloc(); }  in arenaAlloc()
    199  GrSubRunAllocator* subRunAlloc() { return this->arenas()->subRunAlloc(); }  in subRunAlloc()
    216  sk_sp<GrArenas> arenas() { return fWriteView.proxy()->asRenderTargetProxy()->arenas(); }  in arenas() function
|
D | GrDirectContextPriv.h |
    56  SkArenaAlloc* recordTimeAllocator() { return fContext->arenas().recordTimeAllocator(); }  in recordTimeAllocator()
    57  GrRecordingContext::Arenas arenas() { return fContext->arenas(); }  in arenas() function
|
D | GrRenderTargetProxy.h | 105 sk_sp<GrArenas> arenas() { in arenas() function
|
D | GrDrawingManager.h | 52 sk_sp<GrArenas> arenas,
|
D | GrDrawingManager.cpp |
    681  sk_sp<GrArenas> arenas,  in newOpsTask() argument
    691  std::move(arenas)));  in newOpsTask()
|
D | GrOpsTask.cpp |
    360  sk_sp<GrArenas> arenas)  in GrOpsTask() argument
    366  , fArenas{std::move(arenas)}  in GrOpsTask()
|
D | GrSurfaceFillContext.cpp | 344 this->writeSurfaceView(), this->arenas(), fFlushTimeOpsTask); in getOpsTask()
|
/external/jemalloc_new/ |
D | TUNING.md |
    51   By default jemalloc uses multiple arenas to reduce internal lock contention.
    53   since arenas manage memory independently. When high degree of parallelism
    54   is not expected at the allocator level, lower number of arenas often
    103  * [Explicit arenas](http://jemalloc.net/jemalloc.3.html#arenas.create)
    105  Manually created arenas can help performance in various ways, e.g. by
    110  locality. In addition, explicit arenas often benefit from individually
    120  uses explicit arenas with customized extent hooks to manage 1GB huge pages
    128  from explicit binding, e.g. binding very active threads to dedicated arenas
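
    The TUNING.md hits above describe creating explicit arenas and binding busy
    threads to them. A rough sketch of that workflow through the public mallctl
    interface follows; the control names ("arenas.create", "thread.arena") come
    from the jemalloc man page linked in the excerpt, but error handling and the
    allocation size are illustrative only.

        #include <jemalloc/jemalloc.h>
        #include <stdio.h>

        int main(void) {
            /* "arenas.create" hands back the index of a brand-new explicit arena. */
            unsigned arena_ind;
            size_t sz = sizeof(arena_ind);
            if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
                fprintf(stderr, "arenas.create failed\n");
                return 1;
            }

            /* Bind the calling thread to that arena so its allocations stop
             * contending with the automatic arenas. */
            if (mallctl("thread.arena", NULL, NULL, &arena_ind, sizeof(arena_ind)) != 0) {
                fprintf(stderr, "thread.arena failed\n");
                return 1;
            }

            /* Alternatively, route a single allocation there with MALLOCX_ARENA. */
            void *p = mallocx(4096, MALLOCX_ARENA(arena_ind));
            if (p != NULL)
                dallocx(p, 0);
            return 0;
        }

    Custom extent hooks (the 1GB huge-page case mentioned at line 120) are passed
    through the newp argument of "arenas.create" instead of NULL.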
|
D | ChangeLog |
    36   + arenas.lookup (@lionkov)
    190  associated with destroyed arenas. (@jasone)
    213  + arenas.{dirty,muzzy}_decay_ms
    214  + arenas.bin.<i>.slab_size
    215  + arenas.nlextents
    216  + arenas.lextent.<i>.size
    217  + arenas.create
    222  + stats.arenas.<i>.{dirty,muzzy}_decay_ms
    223  + stats.arenas.<i>.uptime
    224  + stats.arenas.<i>.{pmuzzy,base,internal,resident}
    [all …]
|
/external/python/cpython3/Objects/ |
D | obmalloc.c |
    1180  static struct arena_object* arenas = NULL;  variable
    1220  if (arenas[i].address == 0) {  in _Py_GetAllocatedBlocks()
    1224  uintptr_t base = (uintptr_t)_Py_ALIGN_UP(arenas[i].address, POOL_SIZE);  in _Py_GetAllocatedBlocks()
    1227  assert(base <= (uintptr_t) arenas[i].pool_address);  in _Py_GetAllocatedBlocks()
    1228  for (; base < (uintptr_t) arenas[i].pool_address; base += POOL_SIZE) {  in _Py_GetAllocatedBlocks()
    1269  if (numarenas > SIZE_MAX / sizeof(*arenas))  in new_arena()
    1272  nbytes = numarenas * sizeof(*arenas);  in new_arena()
    1273  arenaobj = (struct arena_object *)PyMem_RawRealloc(arenas, nbytes);  in new_arena()
    1276  arenas = arenaobj;  in new_arena()
    1289  arenas[i].address = 0;  /* mark as unassociated */  in new_arena()
    [all …]
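
    Besides the same new_arena() growth logic as the cpython2 copy, the cpython3
    hits show _Py_GetAllocatedBlocks() walking every associated arena and stepping
    through its carved-out pools in POOL_SIZE increments. A hedged sketch of that
    walk, assuming the simplified fields visible in the excerpt (address,
    pool_address) and a hypothetical count_carved_pools() helper:

        #include <stdint.h>
        #include <stddef.h>

        #define POOL_SIZE 4096    /* one pool per page, as in pymalloc */
        #define ALIGN_UP(p, a) (((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

        /* Simplified stand-ins for the real pymalloc globals. */
        struct arena_object {
            uintptr_t address;        /* 0 => slot not associated with an arena */
            uint8_t  *pool_address;   /* next pool to be carved from this arena */
        };
        static struct arena_object *arenas = NULL;
        static size_t maxarenas = 0;

        /* Count how many pools the associated arenas have handed out so far. */
        static size_t count_carved_pools(void) {
            size_t npools = 0;
            for (size_t i = 0; i < maxarenas; ++i) {
                if (arenas[i].address == 0)      /* unused arena_object slot */
                    continue;
                /* Pools start at the first POOL_SIZE boundary inside the arena. */
                uintptr_t base = ALIGN_UP(arenas[i].address, POOL_SIZE);
                for (; base < (uintptr_t)arenas[i].pool_address; base += POOL_SIZE)
                    ++npools;
            }
            return npools;
        }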
|
/external/jemalloc_new/include/jemalloc/ |
D | jemalloc_macros.h.in |
    36  * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
    48  * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
    49  * destroyed arenas.
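
    These macros are special pseudo-indices spliced into the "stats.arenas.<i>.*"
    name space. A short sketch of how MALLCTL_ARENAS_ALL is typically used to read
    a merged statistic; the stat name "pdirty" and the "epoch" refresh are from the
    jemalloc man page, but treat the snippet as illustrative rather than canonical.

        #include <jemalloc/jemalloc.h>
        #include <stdio.h>

        int main(void) {
            /* Refresh jemalloc's cached statistics before reading them. */
            uint64_t epoch = 1;
            size_t esz = sizeof(epoch);
            mallctl("epoch", &epoch, &esz, &epoch, esz);

            /* Splice the "all arenas" pseudo-index into the stats namespace. */
            char name[64];
            snprintf(name, sizeof(name), "stats.arenas.%u.pdirty",
                     (unsigned)MALLCTL_ARENAS_ALL);

            size_t pdirty;
            size_t sz = sizeof(pdirty);
            if (mallctl(name, &pdirty, &sz, NULL, 0) == 0)
                printf("dirty pages across all arenas: %zu\n", pdirty);
            return 0;
        }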
|
/external/skia/tests/ |
D | OpChainTest.cpp |
    142  CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc* arenas, const GrCaps&) override {  in onCombineIfPossible() argument
    144  SkASSERT(arenas);  in onCombineIfPossible()
    145  (void) arenas;  in onCombineIfPossible()
    208  sk_sp<GrArenas> arenas = sk_make_sp<GrArenas>();  variable
    226  arenas);
|
/external/jemalloc_new/src/ |
D | android_je_mallinfo.c |
    57   arena_t* arena = atomic_load_p(&arenas[i], ATOMIC_ACQUIRE);  in je_mallinfo()
    85   arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);  in je_mallinfo_arena_info()
    102  arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);  in je_mallinfo_bin_info()
|
/external/jemalloc_new/include/jemalloc/internal/ |
D | jemalloc_internal_externs.h | 32 extern atomic_p_t arenas[];
|
D | ctl.h | 90 ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT]; member
|
D | jemalloc_internal_inlines_a.h | 88 ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE); in arena_get()
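
    Several of these internals hits (android_je_mallinfo.c, arena_get, and
    extent_arena_get further down) read slots of the global arenas[] array with an
    acquire load. A hedged sketch of that publish/lookup pattern in plain C11
    atomics, with hypothetical names standing in for jemalloc's atomic_p_t wrappers:

        #include <stdatomic.h>
        #include <stddef.h>

        #define MALLOCX_ARENA_LIMIT 4096    /* illustrative bound, as in ctl.h */

        typedef struct arena_s arena_t;

        /* Each slot is published exactly once, when its arena finishes init. */
        static _Atomic(arena_t *) arenas[MALLOCX_ARENA_LIMIT];

        /* Publish: release-store, so a reader that observes the pointer also
         * observes the fully initialized arena it points to. */
        static void arena_publish(size_t ind, arena_t *arena) {
            atomic_store_explicit(&arenas[ind], arena, memory_order_release);
        }

        /* Lookup: acquire-load pairs with the release-store above.  A NULL
         * result just means the arena at that index has not been created yet. */
        static arena_t *arena_lookup(size_t ind) {
            return atomic_load_explicit(&arenas[ind], memory_order_acquire);
        }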
|
D | private_namespace_jet.h | 7 #define arenas JEMALLOC_N(arenas) macro
|
D | private_namespace.h | 7 #define arenas JEMALLOC_N(arenas) macro
|
D | extent_inlines.h | 49 return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE); in extent_arena_get()
|
/external/skia/include/gpu/ |
D | GrRecordingContext.h | 150 Arenas arenas() { return fArenas.get(); } in arenas() function
|
/external/python/cpython2/Misc/ |
D | README.valgrind | 62 The memory pymalloc manages itself is in one or more "arenas",
|
/external/rust/crates/intrusive-collections/ |
D | DESIGN.md | 29 …jects that have been inserted into it. As a bonus, this also works with [arenas](https://crates.io…
|
/external/python/cpython3/Misc/ |
D | README.valgrind | 73 The memory pymalloc manages itself is in one or more "arenas",
|