#define	JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char	*dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/*
 * Current dss precedence default, used when creating new arenas.  NB: This is
 * stored as unsigned rather than dss_prec_t because in principle there's no
 * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
 * atomic operations to synchronize the setting.
 */
static unsigned		dss_prec_default = (unsigned)DSS_PREC_DEFAULT;

/* Base address of the DSS. */
static void		*dss_base;
/* Atomic boolean indicating whether the DSS is exhausted. */
static unsigned		dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
static void		*dss_max;

/******************************************************************************/

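/*
 * Note on the sbrk() semantics relied upon below: sbrk(0) returns the current
 * program break without moving it, and sbrk(incr) with incr > 0 extends the
 * DSS, returning the previous break on success or (void *)-1 on failure.
 */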
static void *
chunk_dss_sbrk(intptr_t increment)
{

#ifdef JEMALLOC_DSS
	return (sbrk(increment));
#else
	not_implemented();
	return (NULL);
#endif
}

dss_prec_t
chunk_dss_prec_get(void)
{
	dss_prec_t ret;

	if (!have_dss)
		return (dss_prec_disabled);
	ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
	return (ret);
}

bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
	return (false);
}

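/*
 * A sketch of the race that chunk_dss_max_update() resolves:
 * chunk_alloc_dss() optimistically raises dss_max past the true break before
 * its sbrk() call completes, so a concurrent reader can observe
 * dss_max > sbrk(0); the spin loop below waits until the writer either
 * commits the extension or rolls dss_max back.
 */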
static void *
chunk_dss_max_update(void *new_addr)
{
	void *max_cur;
	spin_t spinner;

	/*
	 * Get the current end of the DSS as max_cur and ensure that dss_max is
	 * up to date.
	 */
	spin_init(&spinner);
	while (true) {
		void *max_prev = atomic_read_p(&dss_max);

		max_cur = chunk_dss_sbrk(0);
		if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
			/*
			 * Another thread optimistically updated dss_max.  Wait
			 * for it to finish.
			 */
			spin_adaptive(&spinner);
			continue;
		}
		if (!atomic_cas_p(&dss_max, max_prev, max_cur))
			break;
	}
	/* Fixed new_addr can only be supported if it is at the edge of DSS. */
	if (new_addr != NULL && max_cur != new_addr)
		return (NULL);

	return (max_cur);
}

void *
chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit)
{
	cassert(have_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
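	/*
	 * E.g., on an LP64 system, a (bogus) request of 2^63 bytes would cast
	 * to a negative increment.
	 */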
	if ((intptr_t)size < 0)
		return (NULL);

	if (!atomic_read_u(&dss_exhausted)) {
		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		while (true) {
			void *ret, *cpad, *max_cur, *dss_next, *dss_prev;
			size_t gap_size, cpad_size;
			intptr_t incr;

			max_cur = chunk_dss_max_update(new_addr);
			if (max_cur == NULL)
				goto label_oom;

			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
			dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max)
				goto label_oom; /* Wrap-around. */
			incr = gap_size + cpad_size + size;
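			/*
			 * Worked example with assumed values (2 MiB
			 * chunksize): if dss_max == 0x10a3000 and
			 * alignment == 0x400000, then gap_size == 0x15d000,
			 * cpad == 0x1200000 (the chunk-aligned end of the
			 * gap), ret == 0x1400000, cpad_size == 0x200000
			 * (one full recyclable chunk), and incr covers the
			 * gap, the pad, and the requested size.
			 */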

			/*
			 * Optimistically update dss_max, and roll back below if
			 * sbrk() fails.  No other thread will try to extend the
			 * DSS while dss_max is greater than the current DSS
			 * max reported by sbrk(0).
			 */
			if (atomic_cas_p(&dss_max, max_cur, dss_next))
				continue;

			/* Try to allocate. */
			dss_prev = chunk_dss_sbrk(incr);
			if (dss_prev == max_cur) {
				/* Success. */
				if (cpad_size != 0) {
					chunk_hooks_t chunk_hooks =
					    CHUNK_HOOKS_INITIALIZER;
					chunk_dalloc_wrapper(tsdn, arena,
					    &chunk_hooks, cpad, cpad_size,
					    arena_extent_sn_next(arena), false,
					    true);
				}
				if (*zero) {
					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
					    ret, size);
					memset(ret, 0, size);
				}
				if (!*commit)
					*commit = pages_decommit(ret, size);
				return (ret);
			}

			/*
			 * Failure, whether due to OOM or a race with a raw
			 * sbrk() call from outside the allocator.  Try to roll
			 * back the optimistic dss_max update; if rollback
			 * fails, it's because another caller of this function
			 * has succeeded since this invocation started, in
			 * which case rollback is not necessary.
			 */
			atomic_cas_p(&dss_max, dss_next, max_cur);
			if (dss_prev == (void *)-1) {
				/* OOM. */
				atomic_write_u(&dss_exhausted, (unsigned)true);
				goto label_oom;
			}
		}
	}
label_oom:
	return (NULL);
}
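
/*
 * Illustrative usage (a sketch; tsdn and arena are assumed to come from the
 * caller's context):
 *
 *	bool zero = true, commit = true;
 *	void *chunk = chunk_alloc_dss(tsdn, arena, NULL, chunksize,
 *	    chunksize, &zero, &commit);
 *
 * A NULL result means the DSS is disabled, exhausted, or was contended; chunk
 * allocation then typically falls back to mmap().
 */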

static bool
chunk_in_dss_helper(void *chunk, void *max)
{

	return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk <
	    (uintptr_t)max);
}

bool
chunk_in_dss(void *chunk)
{

	cassert(have_dss);

	return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max)));
}

bool
chunk_dss_mergeable(void *chunk_a, void *chunk_b)
{
	void *max;

	cassert(have_dss);

	max = atomic_read_p(&dss_max);
	return (chunk_in_dss_helper(chunk_a, max) ==
	    chunk_in_dss_helper(chunk_b, max));
}

void
chunk_dss_boot(void)
{

	cassert(have_dss);

	dss_base = chunk_dss_sbrk(0);
	dss_exhausted = (unsigned)(dss_base == (void *)-1);
	dss_max = dss_base;
}

/******************************************************************************/