#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H

#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"

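/*
 * Read the calling thread's tcache-enabled flag from thread-specific data.
 */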
static inline bool
tcache_enabled_get(tsd_t *tsd) {
	return tsd_tcache_enabled_get(tsd);
}

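/*
 * Enable or disable the calling thread's tcache.  Enabling lazily
 * initializes the tcache data; disabling flushes and destroys it via
 * tcache_cleanup().  The tsd flag is committed last because the calls
 * above consult the current state.
 */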
static inline void
tcache_enabled_set(tsd_t *tsd, bool enabled) {
	bool was_enabled = tsd_tcache_enabled_get(tsd);

	if (!was_enabled && enabled) {
		tsd_tcache_data_init(tsd);
	} else if (was_enabled && !enabled) {
		tcache_cleanup(tsd);
	}
	/* Commit the state last.  Above calls check current state. */
	tsd_tcache_enabled_set(tsd, enabled);
	tsd_slow_update(tsd);
}

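/*
 * Periodic tcache maintenance: advance the GC ticker on every event, and
 * hand off to tcache_event_hard() when it fires.  This is a no-op when
 * TCACHE_GC_INCR is 0.
 */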
JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache) {
	if (TCACHE_GC_INCR == 0) {
		return;
	}

	if (unlikely(ticker_tick(&tcache->gc_ticker))) {
		tcache_event_hard(tsd, tcache);
	}
}

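/*
 * Fast-path allocation for small size classes: pop an object from the
 * cached bin when possible, otherwise refill the bin from the arena via
 * tcache_alloc_small_hard().  Junk/zero fill is only considered on the
 * slow path or when zero is requested.  A minimal caller sketch
 * (hypothetical; real callers also resolve tsd/tcache and fall back to
 * the arena on NULL):
 *
 *	szind_t ind = sz_size2index(size);
 *	void *p = tcache_alloc_small(tsd, NULL, tcache, size, ind,
 *	    false, true);
 */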
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    UNUSED size_t size, szind_t binind, bool zero, bool slow_path) {
	void *ret;
	cache_bin_t *bin;
	bool tcache_success;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	assert(binind < NBINS);
	bin = tcache_small_bin_get(tcache, binind);
	ret = cache_bin_alloc_easy(bin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		bool tcache_hard_success;
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
		    bin, binind, &tcache_hard_success);
		if (!tcache_hard_success) {
			return NULL;
		}
	}

	assert(ret);
	/*
	 * Only compute usize if required.  The checks in the following if
	 * statement are all static.
	 */
	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
		usize = sz_index2size(binind);
		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
	}

	if (likely(!zero)) {
		if (slow_path && config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret, &bin_infos[binind],
				    false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &bin_infos[binind], true);
		}
		memset(ret, 0, usize);
	}

#if defined(ANDROID_ENABLE_TCACHE_STATS)
	if (config_stats) {
		bin->tstats.nrequests++;
	}
#endif
	if (config_prof) {
		tcache->prof_accumbytes += usize;
	}
	tcache_event(tsd, tcache);
	return ret;
}

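/*
 * Fast-path allocation for cacheable large size classes (NBINS <= binind <
 * nhbins).  On a cache miss, allocate directly with large_malloc() instead
 * of refilling the bin, since speculatively creating large objects is
 * expensive.
 */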
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
	void *ret;
	cache_bin_t *bin;
	bool tcache_success;

	assert(binind >= NBINS && binind < nhbins);
	bin = tcache_large_bin_get(tcache, binind);
	ret = cache_bin_alloc_easy(bin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
		if (ret == NULL) {
			return NULL;
		}
	} else {
		size_t usize JEMALLOC_CC_SILENCE_INIT(0);

		/* Only compute usize on demand. */
		if (config_prof || (slow_path && config_fill) ||
		    unlikely(zero)) {
			usize = sz_index2size(binind);
			assert(usize <= tcache_maxclass);
		}

		if (likely(!zero)) {
			if (slow_path && config_fill) {
				if (unlikely(opt_junk_alloc)) {
					memset(ret, JEMALLOC_ALLOC_JUNK,
					    usize);
				} else if (unlikely(opt_zero)) {
					memset(ret, 0, usize);
				}
			}
		} else {
			memset(ret, 0, usize);
		}

#if defined(ANDROID_ENABLE_TCACHE_STATS)
		if (config_stats) {
			bin->tstats.nrequests++;
		}
#endif
		if (config_prof) {
			tcache->prof_accumbytes += usize;
		}
	}

	tcache_event(tsd, tcache);
	return ret;
}

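/*
 * Return a small object to its tcache bin, first flushing the bin down to
 * half capacity if it is full.  The available-object stack grows downward
 * from bin->avail, so the newest object lands at slot -ncached.
 */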
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	cache_bin_t *bin;
	cache_bin_info_t *bin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);

	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, &bin_infos[binind]);
	}

	bin = tcache_small_bin_get(tcache, binind);
	bin_info = &tcache_bin_info[binind];
	if (unlikely(bin->ncached == bin_info->ncached_max)) {
		tcache_bin_flush_small(tsd, tcache, bin, binind,
		    (bin_info->ncached_max >> 1));
	}
	assert(bin->ncached < bin_info->ncached_max);
	bin->ncached++;
	*(bin->avail - bin->ncached) = ptr;

	tcache_event(tsd, tcache);
}

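/*
 * Large-object analogue of tcache_dalloc_small(); the asserts restrict it
 * to objects in (SMALL_MAXCLASS, tcache_maxclass].
 */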
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	cache_bin_t *bin;
	cache_bin_info_t *bin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		large_dalloc_junk(ptr, sz_index2size(binind));
	}

	bin = tcache_large_bin_get(tcache, binind);
	bin_info = &tcache_bin_info[binind];
	if (unlikely(bin->ncached == bin_info->ncached_max)) {
		tcache_bin_flush_large(tsd, bin, binind,
		    (bin_info->ncached_max >> 1), tcache);
	}
	assert(bin->ncached < bin_info->ncached_max);
	bin->ncached++;
	*(bin->avail - bin->ncached) = ptr;

	tcache_event(tsd, tcache);
}

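/*
 * Look up an explicitly created tcache by index, lazily creating it on
 * first use.  The index is assumed to come from the "tcache.create"
 * mallctl; a hypothetical usage sketch:
 *
 *	tcache_t *tcache = tcaches_get(tsd, ind);
 */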
JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL)) {
		elm->tcache = tcache_create_explicit(tsd);
	}
	return elm->tcache;
}

#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */