#ifndef JEMALLOC_INTERNAL_INLINES_A_H
#define JEMALLOC_INTERNAL_INLINES_A_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/ticker.h"

JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
malloc_getcpu(void) {
	assert(have_percpu_arena);
#if defined(JEMALLOC_HAVE_SCHED_GETCPU)
	return (malloc_cpuid_t)sched_getcpu();
#else
	not_reached();
	return -1;
#endif
}

/* Return the chosen arena index based on current cpu. */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_choose(void) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));

	malloc_cpuid_t cpuid = malloc_getcpu();
	assert(cpuid >= 0);

	unsigned arena_ind;
	if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
	    2)) {
		arena_ind = cpuid;
	} else {
		assert(opt_percpu_arena == per_phycpu_arena);
		/* Hyper threads on the same physical CPU share arena. */
		arena_ind = cpuid - ncpus / 2;
	}

	return arena_ind;
}
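
/*
 * Illustrative example (not authoritative; assumes the common enumeration
 * where hyperthread siblings are cpuid and cpuid + ncpus / 2): with
 * ncpus == 8 and opt_percpu_arena == per_phycpu_arena,
 *
 *   cpuid 0..3 -> arena_ind 0..3
 *   cpuid 4..7 -> arena_ind 0..3 (sibling hyperthreads reuse the same arena)
 *
 * With opt_percpu_arena == percpu_arena, arena_ind == cpuid for every CPU.
 */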

/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
JEMALLOC_ALWAYS_INLINE unsigned
percpu_arena_ind_limit(percpu_arena_mode_t mode) {
	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
	if (mode == per_phycpu_arena && ncpus > 1) {
		if (ncpus % 2) {
			/* This likely means a misconfig. */
			return ncpus / 2 + 1;
		}
		return ncpus / 2;
	} else {
		return ncpus;
	}
}
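
/*
 * Illustrative example: with ncpus == 8 and mode == per_phycpu_arena the
 * limit is 4, i.e. arenas[0...4); with an odd ncpus such as 7 (the likely
 * misconfig above) it is 7 / 2 + 1 == 4; with mode == percpu_arena the
 * limit is simply ncpus.
 */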

static inline arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
	arena_tdata_t *tdata;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);

	if (unlikely(arenas_tdata == NULL)) {
		/* arenas_tdata hasn't been initialized yet. */
		return arena_tdata_get_hard(tsd, ind);
	}
	if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
		/*
		 * ind is invalid, the cache is stale (too small), or tdata is
		 * yet to be initialized.
		 */
		return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
		    NULL);
	}

	tdata = &arenas_tdata[ind];
	if (likely(tdata != NULL) || !refresh_if_missing) {
		return tdata;
	}
	return arena_tdata_get_hard(tsd, ind);
}

static inline arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
	arena_t *ret;

	assert(ind < MALLOCX_ARENA_LIMIT);

	ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
	if (unlikely(ret == NULL)) {
		if (init_if_missing) {
			ret = arena_init(tsdn, ind,
			    (extent_hooks_t *)&extent_hooks_default);
		}
	}
	return ret;
}
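
/*
 * Usage sketch (illustrative; "tsdn" stands for whatever tsdn_t the caller
 * already holds):
 *
 *   arena_t *a = arena_get(tsdn, ind, false);
 *
 * returns NULL when arenas[ind] has not been initialized, while
 *
 *   arena_t *b = arena_get(tsdn, ind, true);
 *
 * initializes arenas[ind] on demand with the default extent hooks.
 */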

static inline ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind) {
	arena_tdata_t *tdata;

	tdata = arena_tdata_get(tsd, ind, true);
	if (unlikely(tdata == NULL)) {
		return NULL;
	}
	return &tdata->decay_ticker;
}

JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
	assert(binind < NBINS);
	return &tcache->bins_small[binind];
}

JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
	assert(binind >= NBINS && binind < nhbins);
	return &tcache->bins_large[binind - NBINS];
}

JEMALLOC_ALWAYS_INLINE bool
tcache_available(tsd_t *tsd) {
	/*
	 * The thread-specific auto tcache might be unavailable if: 1) tcache
	 * initialization is in progress, or 2) it is disabled through the
	 * thread.tcache.enabled mallctl or config options.  This check covers
	 * all cases.
	 */
	if (likely(tsd_tcache_enabled_get(tsd))) {
		/* Associated arena == NULL implies tcache init in progress. */
		assert(tsd_tcachep_get(tsd)->arena == NULL ||
		    tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail !=
		    NULL);
		return true;
	}

	return false;
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd) {
	if (!tcache_available(tsd)) {
		return NULL;
	}

	return tsd_tcachep_get(tsd);
}
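
/*
 * Usage sketch (illustrative; "binind" is assumed to be a valid small size
 * class index, i.e. binind < NBINS):
 *
 *   tcache_t *tcache = tcache_get(tsd);
 *   if (tcache != NULL) {
 *           cache_bin_t *bin = tcache_small_bin_get(tcache, binind);
 *           ...
 *   }
 *
 * tcache_get() returns NULL whenever tcache_available() is false, so callers
 * have to handle the NULL result themselves.
 */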

static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
	/* arena is the current context.  Reentry from a0 is not allowed. */
	assert(arena != arena_get(tsd_tsdn(tsd), 0, false));

	bool fast = tsd_fast(tsd);
	assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
	++*tsd_reentrancy_levelp_get(tsd);
	if (fast) {
		/* Prepare slow path for reentrancy. */
		tsd_slow_update(tsd);
		assert(tsd->state == tsd_state_nominal_slow);
	}
}

static inline void
post_reentrancy(tsd_t *tsd) {
	int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
	assert(*reentrancy_level > 0);
	if (--*reentrancy_level == 0) {
		tsd_slow_update(tsd);
	}
}
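
/*
 * Usage sketch (illustrative; "arena" is the arena of the current operation
 * and "callback" stands for any code that may reenter the allocator, e.g. a
 * user-supplied hook):
 *
 *   pre_reentrancy(tsd, arena);
 *   callback();
 *   post_reentrancy(tsd);
 *
 * The pair bumps and then drops the thread's reentrancy level, keeping tsd
 * on the slow (reentrancy-safe) path while the callback runs.
 */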

#endif /* JEMALLOC_INTERNAL_INLINES_A_H */