Home
last modified time | relevance | path

Searched refs:arenas (Results 1 – 25 of 64) sorted by relevance

123

/external/python/cpython2/Objects/
Dobmalloc.c 521 static struct arena_object* arenas = NULL; variable
580 if (numarenas > PY_SIZE_MAX / sizeof(*arenas)) in new_arena()
583 nbytes = numarenas * sizeof(*arenas); in new_arena()
584 arenaobj = (struct arena_object *)realloc(arenas, nbytes); in new_arena()
587 arenas = arenaobj; in new_arena()
600 arenas[i].address = 0; /* mark as unassociated */ in new_arena()
601 arenas[i].nextarena = i < numarenas - 1 ? in new_arena()
602 &arenas[i+1] : NULL; in new_arena()
606 unused_arena_objects = &arenas[maxarenas]; in new_arena()
739 (uptr)(P) - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE && \
[all …]
/external/skia/src/gpu/
DGrRecordingContextPriv.h 45 SkArenaAlloc* recordTimeAllocator() { return fContext->arenas().recordTimeAllocator(); } in recordTimeAllocator()
47 return fContext->arenas().recordTimeSubRunAllocator(); in recordTimeSubRunAllocator()
49 GrRecordingContext::Arenas arenas() { return fContext->arenas(); } in arenas() function
DGrSurfaceFillContext.h 198 SkArenaAlloc* arenaAlloc() { return this->arenas()->arenaAlloc(); } in arenaAlloc()
199 GrSubRunAllocator* subRunAlloc() { return this->arenas()->subRunAlloc(); } in subRunAlloc()
216 sk_sp<GrArenas> arenas() { return fWriteView.proxy()->asRenderTargetProxy()->arenas(); } in arenas() function
DGrDirectContextPriv.h 56 SkArenaAlloc* recordTimeAllocator() { return fContext->arenas().recordTimeAllocator(); } in recordTimeAllocator()
57 GrRecordingContext::Arenas arenas() { return fContext->arenas(); } in arenas() function
DGrRenderTargetProxy.h 105 sk_sp<GrArenas> arenas() { in arenas() function
DGrDrawingManager.h 52 sk_sp<GrArenas> arenas,
DGrDrawingManager.cpp 681 sk_sp<GrArenas> arenas, in newOpsTask() argument
691 std::move(arenas))); in newOpsTask()
DGrOpsTask.cpp 360 sk_sp<GrArenas> arenas) in GrOpsTask() argument
366 , fArenas{std::move(arenas)} in GrOpsTask()
DGrSurfaceFillContext.cpp 344 this->writeSurfaceView(), this->arenas(), fFlushTimeOpsTask); in getOpsTask()
/external/jemalloc_new/
DTUNING.md 51 By default jemalloc uses multiple arenas to reduce internal lock contention.
53 since arenas manage memory independently. When high degree of parallelism
54 is not expected at the allocator level, lower number of arenas often
103 * [Explicit arenas](http://jemalloc.net/jemalloc.3.html#arenas.create)
105 Manually created arenas can help performance in various ways, e.g. by
110 locality. In addition, explicit arenas often benefit from individually
120 uses explicit arenas with customized extent hooks to manage 1GB huge pages
128 from explicit binding, e.g. binding very active threads to dedicated arenas
DChangeLog 36 + arenas.lookup (@lionkov)
190 associated with destroyed arenas. (@jasone)
213 + arenas.{dirty,muzzy}_decay_ms
214 + arenas.bin.<i>.slab_size
215 + arenas.nlextents
216 + arenas.lextent.<i>.size
217 + arenas.create
222 + stats.arenas.<i>.{dirty,muzzy}_decay_ms
223 + stats.arenas.<i>.uptime
224 + stats.arenas.<i>.{pmuzzy,base,internal,resident}
[all …]
/external/python/cpython3/Objects/
Dobmalloc.c 1180 static struct arena_object* arenas = NULL; variable
1220 if (arenas[i].address == 0) { in _Py_GetAllocatedBlocks()
1224 uintptr_t base = (uintptr_t)_Py_ALIGN_UP(arenas[i].address, POOL_SIZE); in _Py_GetAllocatedBlocks()
1227 assert(base <= (uintptr_t) arenas[i].pool_address); in _Py_GetAllocatedBlocks()
1228 for (; base < (uintptr_t) arenas[i].pool_address; base += POOL_SIZE) { in _Py_GetAllocatedBlocks()
1269 if (numarenas > SIZE_MAX / sizeof(*arenas)) in new_arena()
1272 nbytes = numarenas * sizeof(*arenas); in new_arena()
1273 arenaobj = (struct arena_object *)PyMem_RawRealloc(arenas, nbytes); in new_arena()
1276 arenas = arenaobj; in new_arena()
1289 arenas[i].address = 0; /* mark as unassociated */ in new_arena()
[all …]
/external/jemalloc_new/include/jemalloc/
Djemalloc_macros.h.in 36 * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
48 * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
49 * destroyed arenas.
/external/skia/tests/
DOpChainTest.cpp 142 CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc* arenas, const GrCaps&) override { in onCombineIfPossible() argument
144 SkASSERT(arenas); in onCombineIfPossible()
145 (void) arenas; in onCombineIfPossible()
208 sk_sp<GrArenas> arenas = sk_make_sp<GrArenas>(); variable
226 arenas);
/external/jemalloc_new/src/
Dandroid_je_mallinfo.c 57 arena_t* arena = atomic_load_p(&arenas[i], ATOMIC_ACQUIRE); in je_mallinfo()
85 arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE); in je_mallinfo_arena_info()
102 arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE); in je_mallinfo_bin_info()
/external/jemalloc_new/include/jemalloc/internal/
Djemalloc_internal_externs.h 32 extern atomic_p_t arenas[];
Dctl.h 90 ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT]; member
Djemalloc_internal_inlines_a.h 88 ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE); in arena_get()
Dprivate_namespace_jet.h 7 #define arenas JEMALLOC_N(arenas) macro
Dprivate_namespace.h 7 #define arenas JEMALLOC_N(arenas) macro
Dextent_inlines.h 49 return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE); in extent_arena_get()
/external/skia/include/gpu/
DGrRecordingContext.h 150 Arenas arenas() { return fArenas.get(); } in arenas() function
/external/python/cpython2/Misc/
DREADME.valgrind 62 The memory pymalloc manages itself is in one or more "arenas",
/external/rust/crates/intrusive-collections/
DDESIGN.md 29 …jects that have been inserted into it. As a bonus, this also works with [arenas](https://crates.io…
/external/python/cpython3/Misc/
DREADME.valgrind 73 The memory pymalloc manages itself is in one or more "arenas",

123