1 #include "test/jemalloc_test.h"
2
/* Force decay-based purging with a 1 second decay time for all tests. */
const char *malloc_conf = "purge:decay,decay_time:1";

/* Saved originals of the nstime hooks, restored after each mocked test. */
static nstime_monotonic_t *nstime_monotonic_orig;
static nstime_update_t *nstime_update_orig;

/* Mock clock state: update-call count, reported time, monotonicity flag. */
static unsigned nupdates_mock;
static nstime_t time_mock;
static bool monotonic_mock;
11
12 static bool
nstime_monotonic_mock(void)13 nstime_monotonic_mock(void)
14 {
15
16 return (monotonic_mock);
17 }
18
19 static bool
nstime_update_mock(nstime_t * time)20 nstime_update_mock(nstime_t *time)
21 {
22
23 nupdates_mock++;
24 if (monotonic_mock)
25 nstime_copy(time, &time_mock);
26 return (!monotonic_mock);
27 }
28
/*
 * Verify that the per-arena decay ticker advances across every public
 * allocation/deallocation entry point: the standard C API (huge size
 * class), the *allocx() API (huge/large/small, tcache disabled), and
 * tcache fill/flush (large/small) when tcache support is compiled in.
 */
TEST_BEGIN(test_decay_ticks)
{
	ticker_t *decay_ticker;
	unsigned tick0, tick1;	/* Ticker readings before/after each call. */
	size_t sz, huge0, large0;
	void *p;

	test_skip_if(opt_purge != purge_mode_decay);

	/* This thread's decay ticker for arena 0. */
	decay_ticker = decay_ticker_get(tsd_fetch(), 0);
	assert_ptr_not_null(decay_ticker,
	    "Unexpected failure getting decay ticker");

	/* Look up the smallest huge and large size classes. */
	sz = sizeof(size_t);
	assert_d_eq(mallctl("arenas.hchunk.0.size", (void *)&huge0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");
	assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	/*
	 * Test the standard APIs using a huge size class, since we can't
	 * control tcache interactions (except by completely disabling tcache
	 * for the entire test program).
	 */

	/* malloc(). */
	tick0 = ticker_read(decay_ticker);
	p = malloc(huge0);
	assert_ptr_not_null(p, "Unexpected malloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
	/* free(). */
	tick0 = ticker_read(decay_ticker);
	free(p);
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");

	/* calloc(). */
	tick0 = ticker_read(decay_ticker);
	p = calloc(1, huge0);
	assert_ptr_not_null(p, "Unexpected calloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
	free(p);

	/* posix_memalign(). */
	tick0 = ticker_read(decay_ticker);
	assert_d_eq(posix_memalign(&p, sizeof(size_t), huge0), 0,
	    "Unexpected posix_memalign() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0,
	    "Expected ticker to tick during posix_memalign()");
	free(p);

	/* aligned_alloc(). */
	tick0 = ticker_read(decay_ticker);
	p = aligned_alloc(sizeof(size_t), huge0);
	assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0,
	    "Expected ticker to tick during aligned_alloc()");
	free(p);

	/* realloc(). */
	/* Allocate. */
	tick0 = ticker_read(decay_ticker);
	p = realloc(NULL, huge0);
	assert_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Reallocate. */
	tick0 = ticker_read(decay_ticker);
	p = realloc(p, huge0);
	assert_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Deallocate (realloc to size 0 acts as free here). */
	tick0 = ticker_read(decay_ticker);
	realloc(p, 0);
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");

	/*
	 * Test the *allocx() APIs using huge, large, and small size classes,
	 * with tcache explicitly disabled.
	 */
	{
		unsigned i;
		size_t allocx_sizes[3];
		allocx_sizes[0] = huge0;
		allocx_sizes[1] = large0;
		allocx_sizes[2] = 1;

		for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
			sz = allocx_sizes[i];

			/* mallocx(). */
			tick0 = ticker_read(decay_ticker);
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during mallocx() (sz=%zu)",
			    sz);
			/* rallocx(). */
			tick0 = ticker_read(decay_ticker);
			p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected rallocx() failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during rallocx() (sz=%zu)",
			    sz);
			/* xallocx(). */
			tick0 = ticker_read(decay_ticker);
			xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during xallocx() (sz=%zu)",
			    sz);
			/* dallocx(). */
			tick0 = ticker_read(decay_ticker);
			dallocx(p, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during dallocx() (sz=%zu)",
			    sz);
			/* sdallocx(). */
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			tick0 = ticker_read(decay_ticker);
			sdallocx(p, sz, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during sdallocx() "
			    "(sz=%zu)", sz);
		}
	}

	/*
	 * Test tcache fill/flush interactions for large and small size classes,
	 * using an explicit tcache.
	 */
	if (config_tcache) {
		unsigned tcache_ind, i;
		size_t tcache_sizes[2];
		tcache_sizes[0] = large0;
		tcache_sizes[1] = 1;

		sz = sizeof(unsigned);
		assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
		    NULL, 0), 0, "Unexpected mallctl failure");

		for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
			sz = tcache_sizes[i];

			/* tcache fill. */
			tick0 = ticker_read(decay_ticker);
			p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during tcache fill "
			    "(sz=%zu)", sz);
			/* tcache flush. */
			dallocx(p, MALLOCX_TCACHE(tcache_ind));
			tick0 = ticker_read(decay_ticker);
			assert_d_eq(mallctl("tcache.flush", NULL, NULL,
			    (void *)&tcache_ind, sizeof(unsigned)), 0,
			    "Unexpected mallctl failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during tcache flush "
			    "(sz=%zu)", sz);
		}
	}
}
TEST_END
206
/*
 * Verify that decay-driven purging actually occurs: free many large
 * objects while the clock is mocked/frozen, then restore the real clock
 * and alloc/dalloc in a tight loop until stats.arenas.0.npurge rises (or
 * one full decay period elapses, which fails the test).
 */
TEST_BEGIN(test_decay_ticker)
{
#define NPS 1024
	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];
	uint64_t epoch;
	uint64_t npurge0 = 0;
	uint64_t npurge1 = 0;
	size_t sz, large;
	unsigned i, nupdates0;
	nstime_t time, decay_time, deadline;

	test_skip_if(opt_purge != purge_mode_decay);

	/*
	 * Allocate a bunch of large objects, pause the clock, deallocate the
	 * objects, restore the clock, then [md]allocx() in a tight loop to
	 * verify the ticker triggers purging.
	 */

	/* Pick a size that bypasses tcache (or the smallest large class). */
	if (config_tcache) {
		size_t tcache_max;

		sz = sizeof(size_t);
		assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
		    &sz, NULL, 0), 0, "Unexpected mallctl failure");
		large = nallocx(tcache_max + 1, flags);
	} else {
		sz = sizeof(size_t);
		assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large, &sz,
		    NULL, 0), 0, "Unexpected mallctl failure");
	}

	/* Start from a purged state and record the npurge baseline. */
	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl failure");
	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
	    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
	sz = sizeof(uint64_t);
	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz,
	    NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");

	for (i = 0; i < NPS; i++) {
		ps[i] = mallocx(large, flags);
		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
	}

	/* Freeze the mock clock at the current time. */
	nupdates_mock = 0;
	nstime_init(&time_mock, 0);
	nstime_update(&time_mock);
	monotonic_mock = true;

	/* Install the mock time hooks. */
	nstime_monotonic_orig = nstime_monotonic;
	nstime_update_orig = nstime_update;
	nstime_monotonic = nstime_monotonic_mock;
	nstime_update = nstime_update_mock;

	/* Deallocate under the frozen clock; each decay must read the time. */
	for (i = 0; i < NPS; i++) {
		dallocx(ps[i], flags);
		nupdates0 = nupdates_mock;
		assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
		    "Unexpected arena.0.decay failure");
		assert_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

	/* Restore the real clock. */
	nstime_monotonic = nstime_monotonic_orig;
	nstime_update = nstime_update_orig;

	/* Tick repeatedly until purging happens or the deadline passes. */
	nstime_init(&time, 0);
	nstime_update(&time);
	nstime_init2(&decay_time, opt_decay_time, 0);
	nstime_copy(&deadline, &time);
	nstime_add(&deadline, &decay_time);
	do {
		for (i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) {
			void *p = mallocx(1, flags);
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			dallocx(p, flags);
		}
		assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
		    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
		sz = sizeof(uint64_t);
		assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1,
		    &sz, NULL, 0), config_stats ? 0 : ENOENT,
		    "Unexpected mallctl result");

		nstime_update(&time);
	} while (nstime_compare(&time, &deadline) <= 0 && npurge1 == npurge0);

	if (config_stats)
		assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
#undef NPS
}
TEST_END
301
/*
 * Verify that a non-monotonic clock does not trigger spurious purging:
 * with nstime_update() mocked to report failure (non-monotonic time),
 * decay activity must leave stats.arenas.0.npurge unchanged.
 */
TEST_BEGIN(test_decay_nonmonotonic)
{
#define NPS (SMOOTHSTEP_NSTEPS + 1)
	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];
	uint64_t epoch;
	uint64_t npurge0 = 0;
	uint64_t npurge1 = 0;
	size_t sz, large0;
	unsigned i, nupdates0;

	test_skip_if(opt_purge != purge_mode_decay);

	/* Smallest large size class. */
	sz = sizeof(size_t);
	assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	/* Start from a purged state and record the npurge baseline. */
	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl failure");
	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
	    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
	sz = sizeof(uint64_t);
	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz,
	    NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");

	/* Configure the mock clock as non-monotonic and install the hooks. */
	nupdates_mock = 0;
	nstime_init(&time_mock, 0);
	nstime_update(&time_mock);
	monotonic_mock = false;

	nstime_monotonic_orig = nstime_monotonic;
	nstime_update_orig = nstime_update;
	nstime_monotonic = nstime_monotonic_mock;
	nstime_update = nstime_update_mock;

	for (i = 0; i < NPS; i++) {
		ps[i] = mallocx(large0, flags);
		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
	}

	/* Each decay must still consult the clock, even though it fails. */
	for (i = 0; i < NPS; i++) {
		dallocx(ps[i], flags);
		nupdates0 = nupdates_mock;
		assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
		    "Unexpected arena.0.decay failure");
		assert_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
	    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
	sz = sizeof(uint64_t);
	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1, &sz,
	    NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");

	/* Decay saw no time pass, so no purging may have occurred. */
	if (config_stats)
		assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");

	/* Restore the real clock. */
	nstime_monotonic = nstime_monotonic_orig;
	nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END
365
/* Test-harness entry point: run all decay tests in sequence. */
int
main(void)
{

	return (test(
	    test_decay_ticks,
	    test_decay_ticker,
	    test_decay_nonmonotonic));
}
375