/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/kernel.h>
#include <linux/slab.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @depth: Number of bits being used in @word/@cleared
	 */
	unsigned long depth;

	/**
	 * @word: word holding free bits
	 */
	unsigned long word ____cacheline_aligned_in_smp;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;

	/**
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait_cnt: Number of frees remaining before we wake up.
	 */
	atomic_t wait_cnt;

	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/**
	 * @ws_active: count of currently active ws waitqueues
	 */
	atomic_t ws_active;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
	 */
	unsigned int min_shallow_depth;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 * @alloc_hint: If true, apply percpu hint for where to start searching for
 *              a free bit.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin, bool alloc_hint);
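
/*
 * Example (illustrative sketch, not part of the upstream docs): a caller
 * that wants a 128-bit map on the local node, with the default word size
 * and the per-cpu allocation hint enabled, might do:
 *
 *	struct sbitmap sb;
 *	int ret, nr;
 *
 *	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
 *				false, true);
 *	if (ret)
 *		return ret;
 *
 *	nr = sbitmap_get(&sb);
 *	if (nr >= 0)
 *		sbitmap_put(&sb, nr);
 *	sbitmap_free(&sb);
 *
 * sbitmap_get(), sbitmap_put() and sbitmap_free() are declared below.
 */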

/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	free_percpu(sb->alloc_hint);
	kfree(sb->map);
	sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb);

/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);
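
/*
 * Example (illustrative sketch): splitting one bitmap between two priority
 * classes, where the low-priority class may use at most half of each word.
 * "high_priority" is a hypothetical per-request flag, not part of this API:
 *
 *	int nr;
 *
 *	if (high_priority)
 *		nr = sbitmap_get(sb);
 *	else
 *		nr = sbitmap_get_shallow(sb, 1UL << (sb->shift - 1));
 *	if (nr < 0)
 *		return -EBUSY;
 */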

/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
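
/*
 * Worked example (editorial note): with sb->shift == 6 (64 bits per word),
 * bit number 130 maps to word index 130 >> 6 == 2 and bit offset
 * 130 & 63 == 2, i.e. bit 2 of sb->map[2].word.
 */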

typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @start: Where to start the iteration.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   sb->map[index].depth - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}
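
/*
 * Example (illustrative sketch): counting the set bits via the iterator.
 * The callback returns true so the walk continues over the whole map.
 * "count_fn" and "count" are hypothetical names used only for this sketch:
 *
 *	static bool count_fn(struct sbitmap *sb, unsigned int bitnr,
 *			     void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	sbitmap_for_each_set(sb, count_fn, &count);
 */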

static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special, since it doesn't actually clear the bit; rather, it
 * sets the corresponding bit in the ->cleared mask instead. This pairs with
 * the caller doing sbitmap_deferred_clear() if a given index is full, which
 * will clear the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}

/*
 * Pairs with sbitmap_get(): this frees the bit via the deferred-clear
 * mechanism and also updates the per-cpu allocation hint.
 */
static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
{
	sbitmap_deferred_clear_bit(sb, bitnr);

	if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
		*raw_cpu_ptr(sb->alloc_hint) = bitnr;
}

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline int sbitmap_calculate_shift(unsigned int depth)
{
	int shift = ilog2(BITS_PER_LONG);

	/*
	 * If the bitmap is small, shrink the number of bits per word so
	 * we spread over a few cachelines, at least. If less than 4
	 * bits, just forget about it, it's not going to work optimally
	 * anyway.
	 */
	if (depth >= 4) {
		while ((4U << shift) > depth)
			shift--;
	}

	return shift;
}
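
/*
 * Worked example (editorial note): on a 64-bit machine, shift starts at
 * ilog2(64) == 6. For depth == 32, the loop runs while (4 << shift) > 32,
 * lowering shift to 3, i.e. 8 bits per word spread over 4 cachelines.
 */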

/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_weight() - Return the number of set, non-cleared bits in a &struct
 * sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: The number of bits which are set and not cleared.
 */
unsigned int sbitmap_weight(const struct sbitmap *sb);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_get().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node);
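
/*
 * Example (illustrative sketch): initializing a 256-bit queue with the
 * default word size, then tearing it down again:
 *
 *	struct sbitmap_queue sbq;
 *	int ret;
 *
 *	ret = sbitmap_queue_init_node(&sbq, 256, -1, false, GFP_KERNEL,
 *				      NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *	...
 *	sbitmap_queue_free(&sbq);
 *
 * sbitmap_queue_free() is declared below.
 */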

/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 *
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}
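
/*
 * Example (illustrative sketch): a successful sbitmap_queue_get() must
 * eventually be balanced by sbitmap_queue_clear() with the same bit number
 * and the returned CPU:
 *
 *	unsigned int cpu;
 *	int nr;
 *
 *	nr = sbitmap_queue_get(sbq, &cpu);
 *	if (nr < 0)
 *		return -EBUSY;
 *	...
 *	sbitmap_queue_clear(sbq, nr, cpu);
 *
 * sbitmap_queue_clear() is declared below.
 */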

/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
					    unsigned int *cpu,
					    unsigned int shallow_depth)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
	put_cpu();
	return nr;
}

/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth);
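
/*
 * Example (illustrative sketch): if the smallest limit that will ever be
 * passed to the shallow allocators is 8, declare that right after init so
 * the wake batch is sized correctly:
 *
 *	ret = sbitmap_queue_init_node(&sbq, 256, -1, false, GFP_KERNEL,
 *				      NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *	sbitmap_queue_min_shallow_depth(&sbq, 8);
 *	...
 *	nr = sbitmap_queue_get_shallow(&sbq, &cpu, 8);
 */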

/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);

static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)						\
	struct sbq_wait name = {					\
		.sbq = NULL,						\
		.wait = {						\
			.private	= current,			\
			.func		= autoremove_wake_function,	\
			.entry		= LIST_HEAD_INIT((name).wait.entry), \
		}							\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait);
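
/*
 * Example (illustrative sketch): a sleeping allocation loop built from the
 * pieces above. This is a simplified version of the pattern used by callers
 * such as blk-mq, not code lifted from any of them. "wait_index" is assumed
 * to be a per-user atomic_t, as described at sbq_wait_ptr():
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws;
 *	unsigned int cpu;
 *	int nr;
 *
 *	ws = sbq_wait_ptr(sbq, &wait_index);
 *	do {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = sbitmap_queue_get(sbq, &cpu);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */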

/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state.
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue().
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

#endif /* __LINUX_SCALE_BITMAP_H */