#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
#define JEMALLOC_INTERNAL_ARENA_STATS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/size_classes.h"

/*
 * On architectures that support 64-bit atomics, we use atomic updates for our
 * 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
 * externally.
 */
#ifdef JEMALLOC_ATOMIC_U64
typedef atomic_u64_t arena_stats_u64_t;
#else
/* Must hold the arena stats mutex while reading or writing. */
typedef uint64_t arena_stats_u64_t;
#endif
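
/*
 * Usage sketch (illustrative only; "foo" stands in for any arena_stats_u64_t
 * counter defined below):
 *
 *	arena_stats_lock(tsdn, &stats);
 *	uint64_t v = arena_stats_read_u64(tsdn, &stats, &stats.foo);
 *	arena_stats_unlock(tsdn, &stats);
 *
 * When JEMALLOC_ATOMIC_U64 is defined, the lock/unlock calls compile to
 * no-ops and the read is a single relaxed atomic load.
 */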

typedef struct arena_stats_large_s arena_stats_large_t;
struct arena_stats_large_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
	arena_stats_u64_t	nmalloc;
	arena_stats_u64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	arena_stats_u64_t	nrequests; /* Partially derived. */

	/* Current number of allocations of this size class. */
	size_t			curlextents; /* Derived. */
};

typedef struct arena_stats_decay_s arena_stats_decay_t;
struct arena_stats_decay_s {
	/* Total number of purge sweeps. */
	arena_stats_u64_t	npurge;
	/* Total number of madvise calls made. */
	arena_stats_u64_t	nmadvise;
	/* Total number of pages purged. */
	arena_stats_u64_t	purged;
};

/*
 * Arena stats.  Note that fields marked "derived" are not directly maintained
 * within the arena code; rather their values are derived during stats merge
 * requests.
 */
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_t		mtx;
#endif

	/* Number of bytes currently mapped, excluding retained memory. */
	atomic_zu_t		mapped; /* Partially derived. */

	/*
	 * Number of unused virtual memory bytes currently retained.  Retained
	 * bytes are technically mapped (though always decommitted or purged),
	 * but they are excluded from the mapped statistic (above).
	 */
	atomic_zu_t		retained; /* Derived. */

	arena_stats_decay_t	decay_dirty;
	arena_stats_decay_t	decay_muzzy;

	atomic_zu_t		base; /* Derived. */
	atomic_zu_t		internal;
	atomic_zu_t		resident; /* Derived. */
	atomic_zu_t		metadata_thp;

	atomic_zu_t		allocated_large; /* Derived. */
	arena_stats_u64_t	nmalloc_large; /* Derived. */
	arena_stats_u64_t	ndalloc_large; /* Derived. */
	arena_stats_u64_t	nrequests_large; /* Derived. */

	/* Number of bytes cached in tcache associated with this arena. */
	atomic_zu_t		tcache_bytes; /* Derived. */

	mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];

	/* One element for each large size class. */
	arena_stats_large_t	lstats[NSIZES - NBINS];

	/* Arena uptime. */
	nstime_t		uptime;
};

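/*
 * Initialize arena stats.  The caller must pass zeroed memory (asserted in
 * debug builds).  Returns true on error, i.e. mutex initialization failure.
 */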
static inline bool
arena_stats_init(UNUSED tsdn_t *tsdn, arena_stats_t *arena_stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
			assert(((char *)arena_stats)[i] == 0);
		}
	}
#ifndef JEMALLOC_ATOMIC_U64
	if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
		return true;
	}
#endif
	/* Memory is zeroed, so there is no need to clear stats. */
	return false;
}

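/*
 * Acquire/release the stats mutex on platforms without 64-bit atomics; no-ops
 * when JEMALLOC_ATOMIC_U64 is defined.
 */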
static inline void
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_lock(tsdn, &arena_stats->mtx);
#endif
}

static inline void
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_unlock(tsdn, &arena_stats->mtx);
#endif
}

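/*
 * Read a 64-bit counter: a relaxed atomic load when 64-bit atomics are
 * available, otherwise a plain load that requires the stats mutex to be held.
 */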
static inline uint64_t
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return *p;
#endif
}

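/* Add x to a 64-bit counter; the stats mutex is required without atomics. */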
static inline void
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p += x;
#endif
}

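/*
 * Subtract x from a 64-bit counter, asserting that the counter does not
 * underflow.
 */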
UNUSED static inline void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p -= x;
	assert(*p + x >= *p);
#endif
}

/*
 * Non-atomically sets *dst += src.  *dst needs external synchronization.
 * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
 * the types here are atomic).
 */
static inline void
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
	atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
#else
	*dst += src;
#endif
}

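/*
 * Read a size_t stat.  The load is always atomic; without 64-bit atomics the
 * stats mutex must additionally be held for a consistent snapshot.
 */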
static inline size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_zu(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return atomic_load_zu(p, ATOMIC_RELAXED);
#endif
}

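/* Add x to a size_t stat; the stats mutex is required without atomics. */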
static inline void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
    size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
#endif
}

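/* Subtract x from a size_t stat, asserting no underflow in the atomic path. */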
static inline void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
    size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
#endif
}

/* Like the _u64 variant, needs an externally synchronized *dst. */
static inline void
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
	atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}

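/*
 * Add nrequests to the nrequests counter for large size class szind, locking
 * the stats mutex as needed (e.g. when tcache periodically merges its
 * per-thread counts into this counter).
 */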
static inline void
arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
    szind_t szind, uint64_t nrequests) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
	    NBINS].nrequests, nrequests);
	arena_stats_unlock(tsdn, arena_stats);
}

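/* Add size bytes to the mapped statistic, locking as needed. */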
static inline void
arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
	arena_stats_unlock(tsdn, arena_stats);
}

#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */