Lines Matching +full:vm +full:- +full:valgrind +full:- +full:pointer
86 /* bpo-35053: Declare tracemalloc configuration here rather than
97 return a pointer with no memory behind it, which would break pymalloc. in _PyMem_RawMalloc()
109 would return a pointer with no memory behind it, which would break in _PyMem_RawCalloc()
153 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); in _PyObject_ArenaMmap()
249 return -1; in pymem_set_default_allocator()
276 /* PYTHONMALLOC is empty or is not set or ignored (-E/-I command line in _PyMem_GetAllocatorName()
302 return -1; in _PyMem_GetAllocatorName()
362 return -1; in _PyMem_SetupAllocators()
531 allocator->ctx = NULL; in PyMem_GetAllocator()
532 allocator->malloc = NULL; in PyMem_GetAllocator()
533 allocator->calloc = NULL; in PyMem_GetAllocator()
534 allocator->realloc = NULL; in PyMem_GetAllocator()
535 allocator->free = NULL; in PyMem_GetAllocator()
662 if (len > (size_t)PY_SSIZE_T_MAX / sizeof(wchar_t) - 1) { in _PyMem_RawWcsdup()
746 the valgrind checks */
758 #include <valgrind/valgrind.h>
760 /* -1 indicates that we haven't checked that we're running on valgrind yet. */
761 static int running_on_valgrind = -1;
770 unless the object-specific allocators implement a proprietary allocation
775 Object-specific allocators
778 +3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
781 +2 | ####### Object memory ####### | <------ Internal buffers ------> |
784 +1 | <----- Python memory (under PyMem manager's control) ------> | |
786 [ Underlying general-purpose allocator (ex: C library malloc) ]
787 0 | <------ Virtual memory allocated for the python process -------> |
791 [ OS-specific Virtual Memory Manager (VMM) ]
792 -1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
795 -2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |
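
The layer numbers in this diagram correspond to the public allocator families of the C API. As a rough illustration (an embedding sketch assuming a standard CPython build linked against libpython; none of this code is part of obmalloc.c), the calls below exercise layers +1 and +2, all of which ultimately draw from the layer-0 C allocator:

    #include <Python.h>

    int main(void)
    {
        Py_Initialize();

        /* Layer +1: Python's raw and mem allocators. PyMem_Raw* is documented
         * as safe to call without holding the GIL (and even before
         * Py_Initialize()); PyMem_* requires the GIL. */
        void *raw = PyMem_RawMalloc(64);
        void *mem = PyMem_Malloc(64);

        /* Layer +2: the object allocator. Small requests go through pymalloc,
         * and object constructors such as PyList_New() draw from this layer. */
        void *obj = PyObject_Malloc(64);
        PyObject *list = PyList_New(0);

        Py_DECREF(list);
        PyObject_Free(obj);
        PyMem_Free(mem);
        PyMem_RawFree(raw);

        Py_Finalize();
        return 0;
    }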
800 /* A fast, special-purpose memory allocator for small blocks, to be used
801 on top of a general-purpose malloc -- heavily based on previous art. */
803 /* Vladimir Marangozov -- August 2000 */
806 * "Memory management is where the rubber meets the road -- if we do the wrong
822 * For small requests, the allocator sub-allocates <Big> blocks of memory.
830 * particular size class. In other words, there is a fixed-size allocator
840 * for memory intensive programs which allocate mainly small-sized blocks.
844 * Request in bytes     Size of allocated block      Size class idx
845 * ----------------------------------------------------------------
846 *        1-8                     8                       0
847 *        9-16                   16                       1
848 *       17-24                   24                       2
849 *       25-32                   32                       3
850 *       33-40                   40                       4
851 *       41-48                   48                       5
852 *       49-56                   56                       6
853 *       57-64                   64                       7
854 *       65-72                   72                       8
855 *        ...                   ...                     ...
856 *      497-504                 504                      62
857 *      505-512                 512                      63
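
Concretely, the size class index for a small request is (nbytes - 1) >> ALIGNMENT_SHIFT, the same expression used later in pymalloc_alloc(). A standalone sketch, assuming the default small-object configuration shown in the table (8-byte alignment, 512-byte threshold, 64 classes):

    #include <assert.h>
    #include <stddef.h>

    /* Assumed defaults matching the table above. */
    #define ALIGNMENT_SHIFT          3
    #define SMALL_REQUEST_THRESHOLD  512

    /* Size class index for a small request (same formula as pymalloc_alloc). */
    static unsigned int size_class(size_t nbytes)
    {
        assert(0 < nbytes && nbytes <= SMALL_REQUEST_THRESHOLD);
        return (unsigned int)(nbytes - 1) >> ALIGNMENT_SHIFT;
    }

    /* Block size actually handed out for a class (INDEX2SIZE in obmalloc.c). */
    static size_t block_size(unsigned int idx)
    {
        return (size_t)(idx + 1) << ALIGNMENT_SHIFT;
    }

    int main(void)
    {
        assert(size_class(1)   == 0  && block_size(0)  == 8);    /* 1-8     -> 8   */
        assert(size_class(14)  == 1  && block_size(1)  == 16);   /* 9-16    -> 16  */
        assert(size_class(512) == 63 && block_size(63) == 512);  /* 505-512 -> 512 */
        return 0;
    }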
866 * -- Main tunable settings section --
870 * Alignment of addresses returned to the user. 8-bytes alignment works
871 * on most current architectures (with 32-bit or 64-bit address buses).
895 * will be allocated from preallocated memory pools on 64-bit.
913 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
924 #define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */
929 /* Use radix-tree to track arena memory regions, for address_in_range().
931 * using -DWITH_PYMALLOC_RADIX_TREE=0 */
936 /* on 64-bit platforms use larger pools and arenas if we can */
939 /* large pools only supported if radix-tree is enabled */
945 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
963 #define ARENA_SIZE_MASK (ARENA_SIZE - 1)
978 #define POOL_SIZE_MASK (POOL_SIZE - 1)
992 * -- End of tunable settings section --
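
For orientation, a sketch of the geometry these settings imply. The values below assume the large 64-bit radix-tree configuration (2^20-byte arenas, 2^14-byte pools); the traditional configuration uses 2^18-byte arenas and 2^12-byte pools, and either way an arena holds 64 pools (MAX_POOLS_IN_ARENA):

    #include <stdio.h>

    /* Assumed large 64-bit configuration; the bit widths are an assumption,
     * the macro names mirror obmalloc.c. */
    #define ARENA_BITS 20
    #define POOL_BITS  14
    #define ARENA_SIZE (1 << ARENA_BITS)
    #define POOL_SIZE  (1 << POOL_BITS)
    #define MAX_POOLS_IN_ARENA (ARENA_SIZE / POOL_SIZE)

    int main(void)
    {
        printf("arena=%d bytes, pool=%d bytes, pools/arena=%d\n",
               ARENA_SIZE, POOL_SIZE, MAX_POOLS_IN_ARENA);  /* 1048576, 16384, 64 */
        return 0;
    }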
1024 /* Pool-aligned pointer to the next pool to be carved off. */
1027 /* The number of available pools in the arena: free pools + never-
1035 /* Singly-linked list of available pools. */
1040 * arena_objects in the singly-linked `unused_arena_objects` list.
1045 * doubly-linked `usable_arenas` list, which is maintained in
1060 /* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
1064 #define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
1069 * Pool table -- headed, circular, doubly-linked lists of partially used pools.
1074 16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.
1103 and linked to the front of its arena_object's singly-linked freepools list,
1113 Blocks within pools are again carved out as needed. pool->freeblock points to
1114 the start of a singly-linked list of free blocks within the pool. When a
1118 set up, returning the first such block, and setting pool->freeblock to a
1119 one-block list holding the second such block. This is consistent with that
1124 available for allocating, and pool->freeblock is not NULL. If pool->freeblock
1126 blocks, that means we simply haven't yet gotten to one of the higher-address
1142 nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is
1151 contains is a fudged-up pointer p such that *if* C believes it's a poolp
1152 pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
1156 minimize the amount of cache required to hold this heavily-referenced table
1157 (which only *needs* the two interpool pointer members of a pool_header). OTOH,
1159 free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
1164 #define PTA(x) ((poolp )((uint8_t *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
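
The effect of the PTA() offset is easier to see in isolation. The sketch below uses a simplified pool_header whose field order puts nextpool and prevpool at offsets 2*sizeof(block *) and 3*sizeof(block *), as the comments above require; the struct layout and NB_SMALL_SIZE_CLASSES value are assumptions standing in for the real definitions, and the original's "not a strictly legal pointer" caveat applies here too:

    #include <assert.h>
    #include <stdint.h>

    typedef uint8_t block;
    typedef unsigned int uint;

    /* Simplified stand-in for pymalloc's pool_header: nextpool/prevpool land
     * at offsets 2*sizeof(block *) and 3*sizeof(block *). */
    typedef struct pool_header {
        union { block *_padding; uint count; } ref;
        block *freeblock;
        struct pool_header *nextpool;
        struct pool_header *prevpool;
        uint arenaindex, szidx, nextoffset, maxnextoffset;
    } *poolp;

    #define NB_SMALL_SIZE_CLASSES 64
    #define PTA(pools, x) ((poolp)((uint8_t *)&(pools)[2*(x)] - 2*sizeof(block *)))

    int main(void)
    {
        /* usedpools[2*i] and usedpools[2*i+1] hold the list-head pointers for
         * size class i; the PTA offset makes ->nextpool/->prevpool land there. */
        poolp usedpools[2 * NB_SMALL_SIZE_CLASSES];
        for (int i = 0; i < NB_SMALL_SIZE_CLASSES; i++) {
            usedpools[2*i] = usedpools[2*i + 1] = PTA(usedpools, i);
        }
        for (int i = 0; i < NB_SMALL_SIZE_CLASSES; i++) {
            poolp p = PTA(usedpools, i);
            /* An empty headed list: the fudged-up header points at itself. */
            assert(p->nextpool == p && p->prevpool == p);
        }
        return 0;
    }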
1209 This is a singly-linked list of the arena_objects that are currently not
1217 This is a doubly-linked list of the arena_objects associated with arenas
1219 or have not been used before. The list is sorted to have the most-
1230 used to be done by one-at-a-time linear search when an arena's number of
1234 hundreds of thousands. See bpo-37029.
1247 /* The head of the singly-linked, NULL-terminated list of available
1252 /* The head of the doubly-linked, NULL-terminated at each end, list of
1293 n += p->ref.count; in _Py_GetAllocatedBlocks()
1306 64-bit pointers, IGNORE_BITS=0 and 2^20 arena size:
1307 15 -> MAP_TOP_BITS
1308 15 -> MAP_MID_BITS
1309 14 -> MAP_BOT_BITS
1310 20 -> ideal aligned arena
1311 ----
1314 64-bit pointers, IGNORE_BITS=16, and 2^20 arena size:
1315 16 -> IGNORE_BITS
1316 10 -> MAP_TOP_BITS
1317 10 -> MAP_MID_BITS
1318 8 -> MAP_BOT_BITS
1319 20 -> ideal aligned arena
1320 ----
1323 32-bit pointers and 2^18 arena size:
1324 14 -> MAP_BOT_BITS
1325 18 -> ideal aligned arena
1326 ----
1333 /* number of bits in a pointer */
1337 * radix tree. Setting this to zero is the safe default. For most 64-bit
1339 * user-space virtual memory addresses that have significant information in
1357 /* Currently this code works for 64-bit or 32-bit pointers only. */
1358 #error "obmalloc radix tree requires 64-bit or 32-bit pointers."
1368 #define ADDRESS_BITS (POINTER_BITS - IGNORE_BITS)
1372 #define INTERIOR_BITS ((ADDRESS_BITS - ARENA_BITS + 2) / 3)
1379 #define MAP_TOP_MASK (MAP_TOP_LENGTH - 1)
1383 #define MAP_MID_MASK (MAP_MID_LENGTH - 1)
1385 #define MAP_BOT_BITS (ADDRESS_BITS - ARENA_BITS - 2*INTERIOR_BITS)
1387 #define MAP_BOT_MASK (MAP_BOT_LENGTH - 1)
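
A quick way to check the bit budgets quoted above is to evaluate the same formulas standalone. The sketch assumes the 64-bit case with IGNORE_BITS=0 and 2^20 arenas, and assumes MAP_TOP_BITS and MAP_MID_BITS both equal INTERIOR_BITS (those two defines do not appear in this listing):

    #include <stdio.h>

    #define POINTER_BITS 64
    #define IGNORE_BITS  0
    #define ARENA_BITS   20
    #define ADDRESS_BITS  (POINTER_BITS - IGNORE_BITS)             /* 64 */
    #define INTERIOR_BITS ((ADDRESS_BITS - ARENA_BITS + 2) / 3)    /* 15 */
    #define MAP_TOP_BITS  INTERIOR_BITS                            /* 15 */
    #define MAP_MID_BITS  INTERIOR_BITS                            /* 15 */
    #define MAP_BOT_BITS  (ADDRESS_BITS - ARENA_BITS - 2*INTERIOR_BITS)  /* 14 */

    int main(void)
    {
        /* Reproduces the 15/15/14/20 breakdown; the pieces sum to 64. */
        printf("top=%d mid=%d bot=%d arena=%d total=%d\n",
               MAP_TOP_BITS, MAP_MID_BITS, MAP_BOT_BITS, ARENA_BITS,
               MAP_TOP_BITS + MAP_MID_BITS + MAP_BOT_BITS + ARENA_BITS);
        return 0;
    }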
1399 /* Return the ignored part of the pointer address. Those bits should be same
1446 /* Return a pointer to a bottom tree node, return NULL if it doesn't exist or
1467 if (arena_map_root.ptrs[i1]->ptrs[i2] == NULL) { in arena_map_get()
1475 arena_map_root.ptrs[i1]->ptrs[i2] = n; in arena_map_get()
1478 return arena_map_root.ptrs[i1]->ptrs[i2]; in arena_map_get()
1487 * the tree compared to similar radix tree page-map schemes. In
1494 * pointer address is inside an actual arena, we have to check two ideal
1495 * arena addresses. E.g. if pointer is 357, we need to check 200 and
1523 n_hi->arenas[i3].tail_hi = is_used ? -1 : 0; in arena_map_mark_used()
1532 n_hi->arenas[i3].tail_hi = is_used ? tail : 0; in arena_map_mark_used()
1534 /* If arena_base is a legit arena address, so is arena_base_next - 1 in arena_map_mark_used()
1542 n_hi->arenas[i3].tail_hi = 0; in arena_map_mark_used()
1546 n_lo->arenas[i3_next].tail_lo = is_used ? tail : 0; in arena_map_mark_used()
1551 /* Return true if 'p' is a pointer inside an obmalloc arena.
1561 /* ARENA_BITS must be < 32 so that the tail is a non-negative int32_t. */ in arena_map_is_used()
1562 int32_t hi = n->arenas[i3].tail_hi; in arena_map_is_used()
1563 int32_t lo = n->arenas[i3].tail_lo; in arena_map_is_used()
1584 static int debug_stats = -1; in new_arena()
1586 if (debug_stats == -1) { in new_arena()
1627 arenas[i].nextarena = i < numarenas - 1 ? in new_arena()
1639 unused_arena_objects = arenaobj->nextarena; in new_arena()
1640 assert(arenaobj->address == 0); in new_arena()
1655 arenaobj->nextarena = unused_arena_objects; in new_arena()
1659 arenaobj->address = (uintptr_t)address; in new_arena()
1665 arenaobj->freepools = NULL; in new_arena()
1666 /* pool_address <- first pool-aligned address in the arena in new_arena()
1667 nfreepools <- number of whole pools that fit after alignment */ in new_arena()
1668 arenaobj->pool_address = (block*)arenaobj->address; in new_arena()
1669 arenaobj->nfreepools = MAX_POOLS_IN_ARENA; in new_arena()
1670 excess = (uint)(arenaobj->address & POOL_SIZE_MASK); in new_arena()
1672 --arenaobj->nfreepools; in new_arena()
1673 arenaobj->pool_address += POOL_SIZE - excess; in new_arena()
1675 arenaobj->ntotalpools = arenaobj->nfreepools; in new_arena()
1700 called on every alloc/realloc/free, micro-efficiency is important here).
1703 arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if
1709 0 <= P-B < ARENA_SIZE
1716 (POOL)->arenaindex < maxarenas must be false, saving us from trying to index
1720 arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
1728 call to the system malloc() or realloc()). (POOL)->arenaindex may be anything
1729 in this case -- it may even be uninitialized trash. If the trash arenaindex
1754 corresponded to a currently-allocated arena, so the "P is not controlled by
1759 memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
1773 // by Python, it is important that pool->arenaindex is read only once, as in address_in_range()
1775 // the GIL. The following dance forces the compiler to read pool->arenaindex in address_in_range()
1777 uint arenaindex = *((volatile uint *)&pool->arenaindex); in address_in_range()
1779 (uintptr_t)p - arenas[arenaindex].address < ARENA_SIZE && in address_in_range()
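
The single unsigned comparison in address_in_range() implements the "0 <= P-B < ARENA_SIZE" test described above: if P is below the arena base, the subtraction wraps to a huge unsigned value and the comparison fails. A minimal sketch (the base address and the 2^20 arena size are arbitrary assumptions):

    #include <assert.h>
    #include <stdint.h>

    #define ARENA_SIZE (1 << 20)   /* assumed 1 MiB arena, as in the 64-bit case */

    /* "B <= P && P < B + ARENA_SIZE" collapses to one unsigned comparison. */
    static int in_arena(uintptr_t p, uintptr_t base)
    {
        return p - base < ARENA_SIZE;
    }

    int main(void)
    {
        uintptr_t base = 0x200000;                  /* hypothetical arena base */
        assert(in_arena(base, base));               /* first byte of the arena  */
        assert(in_arena(base + ARENA_SIZE - 1, base));
        assert(!in_arena(base + ARENA_SIZE, base)); /* one past the end         */
        assert(!in_arena(base - 1, base));          /* below: wraps, so outside */
        return 0;
    }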
1792 if (UNLIKELY(pool->nextoffset <= pool->maxnextoffset)) { in pymalloc_pool_extend()
1794 pool->freeblock = (block*)pool + pool->nextoffset; in pymalloc_pool_extend()
1795 pool->nextoffset += INDEX2SIZE(size); in pymalloc_pool_extend()
1796 *(block **)(pool->freeblock) = NULL; in pymalloc_pool_extend()
1802 next = pool->nextpool; in pymalloc_pool_extend()
1803 pool = pool->prevpool; in pymalloc_pool_extend()
1804 next->prevpool = pool; in pymalloc_pool_extend()
1805 pool->nextpool = next; in pymalloc_pool_extend()
1828 usable_arenas->nextarena = usable_arenas->prevarena = NULL; in allocate_from_new_pool()
1829 assert(nfp2lasta[usable_arenas->nfreepools] == NULL); in allocate_from_new_pool()
1830 nfp2lasta[usable_arenas->nfreepools] = usable_arenas; in allocate_from_new_pool()
1832 assert(usable_arenas->address != 0); in allocate_from_new_pool()
1839 assert(usable_arenas->nfreepools > 0); in allocate_from_new_pool()
1840 if (nfp2lasta[usable_arenas->nfreepools] == usable_arenas) { in allocate_from_new_pool()
1842 nfp2lasta[usable_arenas->nfreepools] = NULL; in allocate_from_new_pool()
1845 if (usable_arenas->nfreepools > 1) { in allocate_from_new_pool()
1846 assert(nfp2lasta[usable_arenas->nfreepools - 1] == NULL); in allocate_from_new_pool()
1847 nfp2lasta[usable_arenas->nfreepools - 1] = usable_arenas; in allocate_from_new_pool()
1851 poolp pool = usable_arenas->freepools; in allocate_from_new_pool()
1854 usable_arenas->freepools = pool->nextpool; in allocate_from_new_pool()
1855 usable_arenas->nfreepools--; in allocate_from_new_pool()
1856 if (UNLIKELY(usable_arenas->nfreepools == 0)) { in allocate_from_new_pool()
1858 assert(usable_arenas->freepools == NULL); in allocate_from_new_pool()
1859 assert(usable_arenas->nextarena == NULL || in allocate_from_new_pool()
1860 usable_arenas->nextarena->prevarena == in allocate_from_new_pool()
1862 usable_arenas = usable_arenas->nextarena; in allocate_from_new_pool()
1864 usable_arenas->prevarena = NULL; in allocate_from_new_pool()
1865 assert(usable_arenas->address != 0); in allocate_from_new_pool()
1874 assert(usable_arenas->freepools != NULL || in allocate_from_new_pool()
1875 usable_arenas->pool_address <= in allocate_from_new_pool()
1876 (block*)usable_arenas->address + in allocate_from_new_pool()
1877 ARENA_SIZE - POOL_SIZE); in allocate_from_new_pool()
1882 assert(usable_arenas->nfreepools > 0); in allocate_from_new_pool()
1883 assert(usable_arenas->freepools == NULL); in allocate_from_new_pool()
1884 pool = (poolp)usable_arenas->pool_address; in allocate_from_new_pool()
1885 assert((block*)pool <= (block*)usable_arenas->address + in allocate_from_new_pool()
1886 ARENA_SIZE - POOL_SIZE); in allocate_from_new_pool()
1887 pool->arenaindex = (uint)(usable_arenas - arenas); in allocate_from_new_pool()
1888 assert(&arenas[pool->arenaindex] == usable_arenas); in allocate_from_new_pool()
1889 pool->szidx = DUMMY_SIZE_IDX; in allocate_from_new_pool()
1890 usable_arenas->pool_address += POOL_SIZE; in allocate_from_new_pool()
1891 --usable_arenas->nfreepools; in allocate_from_new_pool()
1893 if (usable_arenas->nfreepools == 0) { in allocate_from_new_pool()
1894 assert(usable_arenas->nextarena == NULL || in allocate_from_new_pool()
1895 usable_arenas->nextarena->prevarena == in allocate_from_new_pool()
1898 usable_arenas = usable_arenas->nextarena; in allocate_from_new_pool()
1900 usable_arenas->prevarena = NULL; in allocate_from_new_pool()
1901 assert(usable_arenas->address != 0); in allocate_from_new_pool()
1909 pool->nextpool = next; in allocate_from_new_pool()
1910 pool->prevpool = next; in allocate_from_new_pool()
1911 next->nextpool = pool; in allocate_from_new_pool()
1912 next->prevpool = pool; in allocate_from_new_pool()
1913 pool->ref.count = 1; in allocate_from_new_pool()
1914 if (pool->szidx == size) { in allocate_from_new_pool()
1919 bp = pool->freeblock; in allocate_from_new_pool()
1921 pool->freeblock = *(block **)bp; in allocate_from_new_pool()
1929 pool->szidx = size; in allocate_from_new_pool()
1932 pool->nextoffset = POOL_OVERHEAD + (size << 1); in allocate_from_new_pool()
1933 pool->maxnextoffset = POOL_SIZE - size; in allocate_from_new_pool()
1934 pool->freeblock = bp + size; in allocate_from_new_pool()
1935 *(block **)(pool->freeblock) = NULL; in allocate_from_new_pool()
1941 Return a pointer to newly allocated memory if pymalloc allocated memory.
1951 if (UNLIKELY(running_on_valgrind == -1)) { in pymalloc_alloc()
1966 uint size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT; in pymalloc_alloc()
1970 if (LIKELY(pool != pool->nextpool)) { in pymalloc_alloc()
1975 ++pool->ref.count; in pymalloc_alloc()
1976 bp = pool->freeblock; in pymalloc_alloc()
1979 if (UNLIKELY((pool->freeblock = *(block **)bp) == NULL)) { in pymalloc_alloc()
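
The pop above (pool->freeblock = *(block **)bp) works because each free block stores the address of the next free block in its own first word. A self-contained sketch of that embedded free list, with the pool bookkeeping left out and the block size and count chosen arbitrarily:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Four fake 32-byte blocks; the union keeps them pointer-aligned. */
        union blk { uint8_t bytes[32]; uint8_t *next; } storage[4];
        uint8_t *freeblock = NULL;

        /* "Free" each block: write the old list head into the block itself. */
        for (int i = 0; i < 4; i++) {
            uint8_t *p = storage[i].bytes;
            *(uint8_t **)p = freeblock;
            freeblock = p;
        }

        /* "Allocate": the same two steps as pymalloc_alloc() --
         * bp = pool->freeblock; pool->freeblock = *(block **)bp; */
        uint8_t *bp = freeblock;
        freeblock = *(uint8_t **)bp;

        assert(bp == storage[3].bytes);        /* most recently freed block  */
        assert(freeblock == storage[2].bytes); /* new head of the free list  */
        return 0;
    }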
2034 assert(pool->ref.count > 0); /* else the pool is empty */ in insert_to_usedpool()
2036 uint size = pool->szidx; in insert_to_usedpool()
2038 poolp prev = next->prevpool; in insert_to_usedpool()
2040 /* insert pool before next: prev <-> pool <-> next */ in insert_to_usedpool()
2041 pool->nextpool = next; in insert_to_usedpool()
2042 pool->prevpool = prev; in insert_to_usedpool()
2043 next->prevpool = pool; in insert_to_usedpool()
2044 prev->nextpool = pool; in insert_to_usedpool()
2050 poolp next = pool->nextpool; in insert_to_freepool()
2051 poolp prev = pool->prevpool; in insert_to_freepool()
2052 next->prevpool = prev; in insert_to_freepool()
2053 prev->nextpool = next; in insert_to_freepool()
2055 /* Link the pool to freepools. This is a singly-linked in insert_to_freepool()
2056 * list, and pool->prevpool isn't used there. in insert_to_freepool()
2058 struct arena_object *ao = &arenas[pool->arenaindex]; in insert_to_freepool()
2059 pool->nextpool = ao->freepools; in insert_to_freepool()
2060 ao->freepools = pool; in insert_to_freepool()
2061 uint nf = ao->nfreepools; in insert_to_freepool()
2070 lastnf->nfreepools == nf && in insert_to_freepool()
2071 (lastnf->nextarena == NULL || in insert_to_freepool()
2072 nf < lastnf->nextarena->nfreepools))); in insert_to_freepool()
2074 struct arena_object* p = ao->prevarena; in insert_to_freepool()
2075 nfp2lasta[nf] = (p != NULL && p->nfreepools == nf) ? p : NULL; in insert_to_freepool()
2077 ao->nfreepools = ++nf; in insert_to_freepool()
2087 * arena on every iteration. See bpo-37257. in insert_to_freepool()
2096 if (nf == ao->ntotalpools && ao->nextarena != NULL) { in insert_to_freepool()
2099 assert(ao->prevarena == NULL || in insert_to_freepool()
2100 ao->prevarena->address != 0); in insert_to_freepool()
2101 assert(ao->nextarena == NULL || in insert_to_freepool()
2102 ao->nextarena->address != 0); in insert_to_freepool()
2104 /* Fix the pointer in the prevarena, or the in insert_to_freepool()
2105 * usable_arenas pointer. in insert_to_freepool()
2107 if (ao->prevarena == NULL) { in insert_to_freepool()
2108 usable_arenas = ao->nextarena; in insert_to_freepool()
2110 usable_arenas->address != 0); in insert_to_freepool()
2113 assert(ao->prevarena->nextarena == ao); in insert_to_freepool()
2114 ao->prevarena->nextarena = in insert_to_freepool()
2115 ao->nextarena; in insert_to_freepool()
2117 /* Fix the pointer in the nextarena. */ in insert_to_freepool()
2118 if (ao->nextarena != NULL) { in insert_to_freepool()
2119 assert(ao->nextarena->prevarena == ao); in insert_to_freepool()
2120 ao->nextarena->prevarena = in insert_to_freepool()
2121 ao->prevarena; in insert_to_freepool()
2126 ao->nextarena = unused_arena_objects; in insert_to_freepool()
2131 arena_map_mark_used(ao->address, 0); in insert_to_freepool()
2136 (void *)ao->address, ARENA_SIZE); in insert_to_freepool()
2137 ao->address = 0; /* mark unassociated */ in insert_to_freepool()
2138 --narenas_currently_allocated; in insert_to_freepool()
2146 * ao->nfreepools was 0 before, ao isn't in insert_to_freepool()
2149 ao->nextarena = usable_arenas; in insert_to_freepool()
2150 ao->prevarena = NULL; in insert_to_freepool()
2152 usable_arenas->prevarena = ao; in insert_to_freepool()
2154 assert(usable_arenas->address != 0); in insert_to_freepool()
2166 * a few un-scientific tests, it seems like this in insert_to_freepool()
2181 assert(ao->nextarena != NULL); in insert_to_freepool()
2188 if (ao->prevarena != NULL) { in insert_to_freepool()
2190 assert(ao->prevarena->nextarena == ao); in insert_to_freepool()
2191 ao->prevarena->nextarena = ao->nextarena; in insert_to_freepool()
2196 usable_arenas = ao->nextarena; in insert_to_freepool()
2198 ao->nextarena->prevarena = ao->prevarena; in insert_to_freepool()
2200 ao->prevarena = lastnf; in insert_to_freepool()
2201 ao->nextarena = lastnf->nextarena; in insert_to_freepool()
2202 if (ao->nextarena != NULL) { in insert_to_freepool()
2203 ao->nextarena->prevarena = ao; in insert_to_freepool()
2205 lastnf->nextarena = ao; in insert_to_freepool()
2207 assert(ao->nextarena == NULL || nf <= ao->nextarena->nfreepools); in insert_to_freepool()
2208 assert(ao->prevarena == NULL || nf > ao->prevarena->nfreepools); in insert_to_freepool()
2209 assert(ao->nextarena == NULL || ao->nextarena->prevarena == ao); in insert_to_freepool()
2210 assert((usable_arenas == ao && ao->prevarena == NULL) in insert_to_freepool()
2211 || ao->prevarena->nextarena == ao); in insert_to_freepool()
2237 * was full and is in no list -- it's not in the freeblocks in pymalloc_free()
2240 assert(pool->ref.count > 0); /* else it was empty */ in pymalloc_free()
2241 block *lastfree = pool->freeblock; in pymalloc_free()
2243 pool->freeblock = (block *)p; in pymalloc_free()
2244 pool->ref.count--; in pymalloc_free()
2260 if (LIKELY(pool->ref.count != 0)) { in pymalloc_free()
2286 raw_allocated_blocks--; in _PyObject_Free()
2294 free(p), and return a non-NULL result.
2296 Return 1 if pymalloc reallocated memory and wrote the new pointer into
2310 /* Treat running_on_valgrind == -1 the same as 0 */ in pymalloc_realloc()
2322 from the C-managed block to one of our blocks, and there's no in pymalloc_realloc()
2327 C-managed block is "at the end" of allocated VM space, so that a in pymalloc_realloc()
2334 size = INDEX2SIZE(pool->szidx); in pymalloc_realloc()
2393 /* An x-platform debugging allocator. This doesn't manage memory directly,
2421 /* Read sizeof(size_t) bytes at p as a big-endian size_t. */
2429 for (i = SST; --i > 0; ++q) in read_size_t()
2434 /* Write n as a big-endian size_t, MSB at address p, LSB at
2435 * p + sizeof(size_t) - 1.
2440 uint8_t *q = (uint8_t *)p + SST - 1; in write_size_t()
2443 for (i = SST; --i >= 0; --q) { in write_size_t()
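
Only fragments of these helpers match the search; a self-contained reconstruction of the pair (a sketch based on the lines above, not a verbatim copy) shows the big-endian round trip:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define SST sizeof(size_t)

    /* Big-endian serialization of a size_t: MSB at p[0], LSB at p[SST-1],
     * independent of the host byte order. */
    static void write_size_t(void *p, size_t n)
    {
        uint8_t *q = (uint8_t *)p + SST - 1;
        for (size_t i = SST; i-- > 0; --q) {
            *q = (uint8_t)(n & 0xff);
            n >>= 8;
        }
    }

    static size_t read_size_t(const void *p)
    {
        const uint8_t *q = (const uint8_t *)p;
        size_t result = *q++;
        for (size_t i = SST; --i > 0; ++q) {
            result = (result << 8) | *q;
        }
        return result;
    }

    int main(void)
    {
        uint8_t buf[sizeof(size_t)];
        write_size_t(buf, (size_t)0xCAFEF00D);
        assert(read_size_t(buf) == (size_t)0xCAFEF00D);
        return 0;
    }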
2453 Number of bytes originally asked for. This is a size_t, big-endian (easier
2458 Copies of PYMEM_FORBIDDENBYTE. Used to catch under- writes and reads.
2462 &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc
2465 Copies of PYMEM_FORBIDDENBYTE. Used to catch over- writes and reads.
2469 This is a big-endian size_t.
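
Putting the layout together: for a request of N bytes the debug allocator asks the underlying allocator for N + PYMEM_DEBUG_EXTRA_BYTES (4*sizeof(size_t)) bytes and hands the caller a pointer 2*sizeof(size_t) past the start. A sketch that prints the offsets described above (the request size of 100 is an arbitrary example):

    #include <stdio.h>
    #include <stddef.h>

    #define SST sizeof(size_t)
    #define PYMEM_DEBUG_EXTRA_BYTES (4 * SST)  /* 2*SST header + 2*SST trailer */

    /* Offsets within one debug block, relative to the raw pointer q returned
     * by the underlying allocator; the caller only ever sees q + 2*SST. */
    int main(void)
    {
        size_t nbytes = 100;   /* hypothetical request size */

        printf("raw size requested : %zu\n", nbytes + PYMEM_DEBUG_EXTRA_BYTES);
        printf("size field         : q[0 .. %zu]   (big-endian size_t)\n", SST - 1);
        printf("api id byte        : q[%zu]\n", SST);
        printf("leading forbidden  : q[%zu .. %zu] (SST-1 bytes)\n", SST + 1, 2*SST - 1);
        printf("user data          : q[%zu .. %zu]\n", 2*SST, 2*SST + nbytes - 1);
        printf("trailing forbidden : q[%zu .. %zu] (SST bytes)\n",
               2*SST + nbytes, 3*SST + nbytes - 1);
        printf("serial number      : q[%zu .. %zu] (big-endian size_t)\n",
               3*SST + nbytes, 4*SST + nbytes - 1);
        return 0;
    }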
2483 uint8_t *data; /* p + 2*SST == pointer to data bytes */ in _PyMem_DebugRawAlloc()
2484 uint8_t *tail; /* data + nbytes == pointer to tail pad bytes */ in _PyMem_DebugRawAlloc()
2487 if (nbytes > (size_t)PY_SSIZE_T_MAX - PYMEM_DEBUG_EXTRA_BYTES) { in _PyMem_DebugRawAlloc()
2494 ^--- p ^--- data ^--- tail in _PyMem_DebugRawAlloc()
2497 F: Forbidden bytes (size_t - 1 bytes before, size_t bytes after) in _PyMem_DebugRawAlloc()
2505 p = (uint8_t *)api->alloc.calloc(api->alloc.ctx, 1, total); in _PyMem_DebugRawAlloc()
2508 p = (uint8_t *)api->alloc.malloc(api->alloc.ctx, total); in _PyMem_DebugRawAlloc()
2519 /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */ in _PyMem_DebugRawAlloc()
2521 p[SST] = (uint8_t)api->api_id; in _PyMem_DebugRawAlloc()
2522 memset(p + SST + 1, PYMEM_FORBIDDENBYTE, SST-1); in _PyMem_DebugRawAlloc()
2568 uint8_t *q = (uint8_t *)p - 2*SST; /* address returned from malloc */ in _PyMem_DebugRawFree()
2571 _PyMem_DebugCheckAddress(__func__, api->api_id, p); in _PyMem_DebugRawFree()
2575 api->alloc.free(api->alloc.ctx, q); in _PyMem_DebugRawFree()
2588 uint8_t *data; /* pointer to data bytes */ in _PyMem_DebugRawRealloc()
2590 uint8_t *tail; /* data + nbytes == pointer to tail pad bytes */ in _PyMem_DebugRawRealloc()
2596 _PyMem_DebugCheckAddress(__func__, api->api_id, p); in _PyMem_DebugRawRealloc()
2599 head = data - 2*SST; in _PyMem_DebugRawRealloc()
2601 if (nbytes > (size_t)PY_SSIZE_T_MAX - PYMEM_DEBUG_EXTRA_BYTES) { in _PyMem_DebugRawRealloc()
2616 memset(data - 2 * SST, PYMEM_DEADBYTE, in _PyMem_DebugRawRealloc()
2622 memcpy(&save[ERASED_SIZE], tail - ERASED_SIZE, ERASED_SIZE); in _PyMem_DebugRawRealloc()
2623 memset(tail - ERASED_SIZE, PYMEM_DEADBYTE, in _PyMem_DebugRawRealloc()
2624 ERASED_SIZE + PYMEM_DEBUG_EXTRA_BYTES - 2 * SST); in _PyMem_DebugRawRealloc()
2628 r = (uint8_t *)api->alloc.realloc(api->alloc.ctx, head, total); in _PyMem_DebugRawRealloc()
2644 head[SST] = (uint8_t)api->api_id; in _PyMem_DebugRawRealloc()
2645 memset(head + SST + 1, PYMEM_FORBIDDENBYTE, SST-1); in _PyMem_DebugRawRealloc()
2658 size_t i = original_nbytes - ERASED_SIZE; in _PyMem_DebugRawRealloc()
2662 Py_MIN(nbytes - i, ERASED_SIZE)); in _PyMem_DebugRawRealloc()
2673 nbytes - original_nbytes); in _PyMem_DebugRawRealloc()
2736 id = (char)q[-SST]; in _PyMem_DebugCheckAddress()
2746 * corruption, the number-of-bytes field may be nuts, and checking in _PyMem_DebugCheckAddress()
2749 for (i = SST-1; i >= 1; --i) { in _PyMem_DebugCheckAddress()
2750 if (*(q-i) != PYMEM_FORBIDDENBYTE) { in _PyMem_DebugCheckAddress()
2756 nbytes = read_size_t(q - 2*SST); in _PyMem_DebugCheckAddress()
2782 id = (char)q[-SST]; in _PyObject_DebugDumpAddress()
2785 nbytes = read_size_t(q - 2*SST); in _PyObject_DebugDumpAddress()
2789 fprintf(stderr, " The %d pad bytes at p-%d are ", SST-1, SST-1); in _PyObject_DebugDumpAddress()
2791 for (i = 1; i <= SST-1; ++i) { in _PyObject_DebugDumpAddress()
2792 if (*(q-i) != PYMEM_FORBIDDENBYTE) { in _PyObject_DebugDumpAddress()
2802 for (i = SST-1; i >= 1; --i) { in _PyObject_DebugDumpAddress()
2803 const uint8_t byte = *(q-i); in _PyObject_DebugDumpAddress()
2804 fprintf(stderr, " at p-%d: 0x%02x", i, byte); in _PyObject_DebugDumpAddress()
2858 if (tail - q > 8) { in _PyObject_DebugDumpAddress()
2860 q = tail - 8; in _PyObject_DebugDumpAddress()
2890 buf[i--] = '\0'; in printone()
2891 buf[i--] = '\n'; in printone()
2895 unsigned int digit = (unsigned int)(value - nextvalue * 10); in printone()
2897 buf[i--] = (char)(digit + '0'); in printone()
2898 --k; in printone()
2901 buf[i--] = ','; in printone()
2906 buf[i--] = ' '; in printone()
2931 * The list may be NULL-terminated, or circular. Return 1 if target is in
2944 list = list->nextpool; in pool_is_in_list()
2987 /* running total -- should equal narenas * ARENA_SIZE */ in _PyObject_DebugMallocStats()
2999 * will be living in full pools -- would be a shame to miss them. in _PyObject_DebugMallocStats()
3022 const uint sz = p->szidx; in _PyObject_DebugMallocStats()
3025 if (p->ref.count == 0) { in _PyObject_DebugMallocStats()
3033 numblocks[sz] += p->ref.count; in _PyObject_DebugMallocStats()
3034 freeblocks = NUMBLOCKS(sz) - p->ref.count; in _PyObject_DebugMallocStats()
3046 "----- ---- --------- ------------- ------------\n", in _PyObject_DebugMallocStats()
3063 quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size); in _PyObject_DebugMallocStats()
3072 (void)printone(out, "# arenas reclaimed", ntimes_arena_allocated - narenas); in _PyObject_DebugMallocStats()