/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "pb_slab.h"

#include "util/u_math.h"
#include "util/u_memory.h"

/* All slab allocations from the same heap and with the same size belong
 * to the same group.
 */
struct pb_slab_group
{
   /* Slabs with allocation candidates. Typically, slabs in this list should
    * have some free entries.
    *
    * However, when the head becomes full we purposefully keep it around
    * until the next allocation attempt, at which time we try a reclaim.
    * The intention is to keep serving allocations from the same slab as long
    * as possible for better locality.
    *
    * Due to a race in new slab allocation, additional slabs in this list
    * can be fully allocated as well.
    */
   struct list_head slabs;
};
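
/* A minimal sketch of how a driver typically provides slabs to this manager.
 * "struct my_slab", "struct my_buffer" and the field layout are hypothetical,
 * not part of this file: the driver embeds struct pb_slab and hands out
 * pb_slab_entry suballocations carved from one backing buffer.
 *
 *    struct my_slab {
 *       struct pb_slab base;            // linked into a pb_slab_group
 *       struct my_buffer *buffer;       // backing storage for all entries
 *       struct pb_slab_entry *entries;  // one entry per suballocation
 *    };
 */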


static void
pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
   struct pb_slab *slab = entry->slab;

   list_del(&entry->head); /* remove from reclaim list */
   list_add(&entry->head, &slab->free);
   slab->num_free++;

   /* Add the slab to the group's list if it isn't already linked. */
   if (!list_is_linked(&slab->head)) {
      struct pb_slab_group *group = &slabs->groups[entry->group_index];
      list_addtail(&slab->head, &group->slabs);
   }

   if (slab->num_free >= slab->num_entries) {
      list_del(&slab->head);
      slabs->slab_free(slabs->priv, slab);
   }
}

#define MAX_FAILED_RECLAIMS 2

static void
pb_slabs_reclaim_locked(struct pb_slabs *slabs)
{
   struct pb_slab_entry *entry, *next;
   unsigned num_failed_reclaims = 0;
   LIST_FOR_EACH_ENTRY_SAFE(entry, next, &slabs->reclaim, head) {
      if (slabs->can_reclaim(slabs->priv, entry)) {
         pb_slab_reclaim(slabs, entry);
         /* There are typically three possible scenarios when reclaiming:
          * - all entries reclaimed
          * - no entries reclaimed
          * - all but one entry reclaimed
          * In the scenario where a slab contains many (10+) unused entries,
          * the driver should not walk the entire list: if the first few
          * entries fail to reclaim, walking the rest is likely to yield
          * zero reclaims anyway.
          */
      } else if (++num_failed_reclaims >= MAX_FAILED_RECLAIMS) {
         break;
      }
   }
}

static void
pb_slabs_reclaim_all_locked(struct pb_slabs *slabs)
{
   struct pb_slab_entry *entry, *next;
   LIST_FOR_EACH_ENTRY_SAFE(entry, next, &slabs->reclaim, head) {
      if (slabs->can_reclaim(slabs->priv, entry)) {
         pb_slab_reclaim(slabs, entry);
      }
   }
}

/* Allocate a slab entry of the given size from the given heap.
 *
 * This will try to reuse entries that have previously been freed. However,
 * if no entries are free (or all free entries are still "in flight" as
 * determined by the can_reclaim callback function), a new slab will be
 * requested via the slab_alloc callback.
 *
 * Note that slab_free can also be called by this function.
 */
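/* A minimal usage sketch (my_slabs, the 64-byte size and heap 0 are
 * illustrative, not part of this file):
 *
 *    struct pb_slab_entry *entry =
 *       pb_slab_alloc_reclaimed(&my_slabs, 64, 0, false);
 *    if (entry) {
 *       ... use the buffer region associated with entry ...
 *       pb_slab_free(&my_slabs, entry);   // moved to the reclaim list
 *    }
 */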
struct pb_slab_entry *
pb_slab_alloc_reclaimed(struct pb_slabs *slabs, unsigned size, unsigned heap, bool reclaim_all)
{
   unsigned order = MAX2(slabs->min_order, util_logbase2_ceil(size));
   unsigned group_index;
   struct pb_slab_group *group;
   struct pb_slab *slab;
   struct pb_slab_entry *entry;
   unsigned entry_size = 1 << order;
   bool three_fourths = false;

   /* If the size is <= 3/4 of the power-of-two entry size, use a slab whose
    * entries are 3/4 of that size to reduce overallocation.
    */
   if (slabs->allow_three_fourths_allocations && size <= entry_size * 3 / 4) {
      entry_size = entry_size * 3 / 4;
      three_fourths = true;
   }
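
   /* Worked example (assuming three-fourths allocations are enabled and
    * min_order <= 11): a 1500-byte request rounds up to order 11, i.e.
    * entry_size == 2048. Since 1500 <= 2048 * 3 / 4 == 1536, the request is
    * served from the 1536-byte three-fourths group instead, wasting 36 bytes
    * rather than 548.
    */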

   assert(order < slabs->min_order + slabs->num_orders);
   assert(heap < slabs->num_heaps);

   group_index = (heap * slabs->num_orders + (order - slabs->min_order)) *
                 (1 + slabs->allow_three_fourths_allocations) + three_fourths;
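   /* For example, with min_order == 8, num_orders == 5 and three-fourths
    * groups enabled, a heap-1 request of order 11 that falls into the 3/4
    * group maps to group_index == (1 * 5 + (11 - 8)) * 2 + 1 == 17.
    */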
   group = &slabs->groups[group_index];

   simple_mtx_lock(&slabs->mutex);

   /* If there is no candidate slab at all, or the first slab has no free
    * entries, try reclaiming entries.
    */
   if (list_is_empty(&group->slabs) ||
       list_is_empty(&list_entry(group->slabs.next, struct pb_slab, head)->free)) {
      if (reclaim_all)
         pb_slabs_reclaim_all_locked(slabs);
      else
         pb_slabs_reclaim_locked(slabs);
   }

   /* Remove slabs without free entries. */
   while (!list_is_empty(&group->slabs)) {
      slab = list_entry(group->slabs.next, struct pb_slab, head);
      if (!list_is_empty(&slab->free))
         break;

      list_del(&slab->head);
   }

   if (list_is_empty(&group->slabs)) {
      /* Drop the mutex temporarily to prevent a deadlock where the allocation
       * calls back into slab functions (most likely to happen for
       * pb_slab_reclaim if memory is low).
       *
       * There's a chance that racing threads will end up allocating multiple
       * slabs for the same group, but that doesn't hurt correctness.
       */
      simple_mtx_unlock(&slabs->mutex);
      slab = slabs->slab_alloc(slabs->priv, heap, entry_size, group_index);
      if (!slab)
         return NULL;
      simple_mtx_lock(&slabs->mutex);

      list_add(&slab->head, &group->slabs);
   }

   entry = list_entry(slab->free.next, struct pb_slab_entry, head);
   list_del(&entry->head);
   slab->num_free--;

   simple_mtx_unlock(&slabs->mutex);

   return entry;
}

struct pb_slab_entry *
pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
{
   return pb_slab_alloc_reclaimed(slabs, size, heap, false);
}

/* Free the given slab entry.
 *
 * The entry may still be in use, e.g. by in-flight command submissions. The
 * can_reclaim callback function will be called to determine whether the entry
 * can be handed out again by pb_slab_alloc.
 */
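/* Sketch of the intended pattern. The command-stream and fence calls below
 * are hypothetical driver code, not part of this API: the entry is freed
 * right after the submission that uses it, and the driver's can_reclaim
 * callback later checks the associated fence.
 *
 *    my_cs_add_buffer(cs, my_entry_buffer(entry));
 *    my_cs_submit(cs, &fence);
 *    pb_slab_free(&my_slabs, entry);   // still in flight; reclaimed later
 */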
void
pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
   simple_mtx_lock(&slabs->mutex);
   list_addtail(&entry->head, &slabs->reclaim);
   simple_mtx_unlock(&slabs->mutex);
}

/* Check if any of the entries handed to pb_slab_free are ready to be reused.
 *
 * This may end up freeing some slabs, so it is useful for returning memory
 * that is no longer used. However, calling this function is not strictly
 * required, since pb_slab_alloc will eventually do the same thing.
 */
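/* For example, a driver might call this once per frame or from a low-memory
 * path to return idle slabs to the backing allocator sooner (my_slabs is
 * illustrative):
 *
 *    pb_slabs_reclaim(&my_slabs);
 */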
void
pb_slabs_reclaim(struct pb_slabs *slabs)
{
   simple_mtx_lock(&slabs->mutex);
   pb_slabs_reclaim_locked(slabs);
   simple_mtx_unlock(&slabs->mutex);
}

/* Initialize the slab manager.
 *
 * The minimum and maximum sizes of slab entries are 2^min_order and
 * 2^max_order, respectively.
 *
 * priv will be passed to the given callback functions.
 */
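/* A minimal initialization sketch. The my_* names are hypothetical; the
 * callback signatures follow the slab_alloc_fn, slab_free_fn and
 * slab_can_reclaim_fn typedefs from pb_slab.h, matching how the callbacks
 * are invoked in this file:
 *
 *    static struct pb_slab *my_slab_alloc(void *priv, unsigned heap,
 *                                         unsigned entry_size,
 *                                         unsigned group_index);
 *    static void my_slab_free(void *priv, struct pb_slab *slab);
 *    static bool my_can_reclaim(void *priv, struct pb_slab_entry *entry);
 *
 *    // entry sizes from 2^8 = 256 bytes up to 2^20 = 1 MiB, one heap,
 *    // no three-fourths groups
 *    if (!pb_slabs_init(&my_slabs, 8, 20, 1, false, my_priv,
 *                       my_can_reclaim, my_slab_alloc, my_slab_free))
 *       return false;
 */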
bool
pb_slabs_init(struct pb_slabs *slabs,
              unsigned min_order, unsigned max_order,
              unsigned num_heaps, bool allow_three_fourth_allocations,
              void *priv,
              slab_can_reclaim_fn *can_reclaim,
              slab_alloc_fn *slab_alloc,
              slab_free_fn *slab_free)
{
   unsigned num_groups;
   unsigned i;

   assert(min_order <= max_order);
   assert(max_order < sizeof(unsigned) * 8 - 1);

   slabs->min_order = min_order;
   slabs->num_orders = max_order - min_order + 1;
   slabs->num_heaps = num_heaps;
   slabs->allow_three_fourths_allocations = allow_three_fourth_allocations;

   slabs->priv = priv;
   slabs->can_reclaim = can_reclaim;
   slabs->slab_alloc = slab_alloc;
   slabs->slab_free = slab_free;

   list_inithead(&slabs->reclaim);

   num_groups = slabs->num_orders * slabs->num_heaps *
                (1 + allow_three_fourth_allocations);
   slabs->groups = CALLOC(num_groups, sizeof(*slabs->groups));
   if (!slabs->groups)
      return false;

   for (i = 0; i < num_groups; ++i) {
      struct pb_slab_group *group = &slabs->groups[i];
      list_inithead(&group->slabs);
   }

   (void) simple_mtx_init(&slabs->mutex, mtx_plain);

   return true;
}

/* Shut down the slab manager.
 *
 * This will free all allocated slabs and internal structures, even if some
 * of the slab entries are still in flight (i.e. if can_reclaim would return
 * false).
 */
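/* Typical teardown order (my_slabs and my_priv_destroy are illustrative):
 * deinitialize the slab manager before destroying whatever priv and the
 * slab_free callback still rely on.
 *
 *    pb_slabs_deinit(&my_slabs);
 *    my_priv_destroy(my_priv);
 */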
void
pb_slabs_deinit(struct pb_slabs *slabs)
{
   /* Reclaim all slab entries (even those that are still in flight). This
    * implicitly calls slab_free for everything.
    */
   while (!list_is_empty(&slabs->reclaim)) {
      struct pb_slab_entry *entry =
         list_entry(slabs->reclaim.next, struct pb_slab_entry, head);
      pb_slab_reclaim(slabs, entry);
   }

   FREE(slabs->groups);
   simple_mtx_destroy(&slabs->mutex);
}