Lines Matching +full:depth +full:-
1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2013-2014 Jens Axboe
21 spin_lock_irqsave(&sb->map[index].swap_lock, flags); in sbitmap_deferred_clear()
23 if (!sb->map[index].cleared) in sbitmap_deferred_clear()
29 mask = xchg(&sb->map[index].cleared, 0); in sbitmap_deferred_clear()
35 val = sb->map[index].word; in sbitmap_deferred_clear()
36 } while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val); in sbitmap_deferred_clear()
40 spin_unlock_irqrestore(&sb->map[index].swap_lock, flags); in sbitmap_deferred_clear()
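The lines above are the heart of sbitmap_deferred_clear(): freed bits are parked in a per-word ->cleared mask and only folded back into the allocation word ->word when an allocator actually needs them, which keeps the hot free path from dirtying ->word. The fold snatches the pending mask with xchg() and then strips those bits out of ->word in a cmpxchg() retry loop, all under the word's swap_lock. A minimal userspace sketch of the same fold, using C11 atomics and made-up names (the swap_lock serialization is omitted):

#include <stdatomic.h>
#include <stdbool.h>

struct word {
        _Atomic unsigned long bits;     /* allocation word: 1 = bit in use */
        _Atomic unsigned long cleared;  /* bits freed since the last fold  */
};

/* Fold pending cleared bits back into the allocation word. */
static bool fold_cleared(struct word *w)
{
        unsigned long mask, val;

        if (!atomic_load(&w->cleared))
                return false;

        /* Take ownership of everything freed so far. */
        mask = atomic_exchange(&w->cleared, 0);

        /* Drop those bits from the allocation word, retrying on races. */
        val = atomic_load(&w->bits);
        while (!atomic_compare_exchange_weak(&w->bits, &val, val & ~mask))
                ;

        return true;
}

int main(void)
{
        struct word w = { .bits = 0xffUL, .cleared = 0x0fUL };

        fold_cleared(&w);       /* bits becomes 0xf0 */
        return 0;
}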
44 int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, in sbitmap_init_node() argument
58 if (depth >= 4) { in sbitmap_init_node()
59 while ((4U << shift) > depth) in sbitmap_init_node()
60 shift--; in sbitmap_init_node()
65 return -EINVAL; in sbitmap_init_node()
67 sb->shift = shift; in sbitmap_init_node()
68 sb->depth = depth; in sbitmap_init_node()
69 sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); in sbitmap_init_node()
71 if (depth == 0) { in sbitmap_init_node()
72 sb->map = NULL; in sbitmap_init_node()
76 sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node); in sbitmap_init_node()
77 if (!sb->map) in sbitmap_init_node()
78 return -ENOMEM; in sbitmap_init_node()
80 for (i = 0; i < sb->map_nr; i++) { in sbitmap_init_node()
81 sb->map[i].depth = min(depth, bits_per_word); in sbitmap_init_node()
82 depth -= sb->map[i].depth; in sbitmap_init_node()
83 spin_lock_init(&sb->map[i].swap_lock); in sbitmap_init_node()
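sbitmap_init_node() first shrinks the word shift so that any depth of at least 4 is spread over at least four words (4U << shift must not exceed depth), then carves the depth into map_nr = DIV_ROUND_UP(depth, bits_per_word) words, where only the last word may be partial. A standalone sketch of that layout arithmetic, assuming 64-bit words and a starting shift of 6 (the kernel derives the real default from BITS_PER_LONG):

#include <stdio.h>

/* Mirror of the shift/word-count arithmetic; made-up helper name. */
static void show_layout(unsigned int depth)
{
        int shift = 6;                  /* assumed default: log2(64) */
        unsigned int bits_per_word, map_nr, i;

        /* Use smaller words for small depths so the map still spans >= 4 words. */
        if (depth >= 4)
                while ((4U << shift) > depth)
                        shift--;

        bits_per_word = 1U << shift;
        map_nr = (depth + bits_per_word - 1) / bits_per_word;  /* DIV_ROUND_UP */

        printf("depth=%u shift=%d words=%u\n", depth, shift, map_nr);
        for (i = 0; i < map_nr; i++) {
                unsigned int d = depth < bits_per_word ? depth : bits_per_word;

                printf("  word %u: %u bits\n", i, d);
                depth -= d;
        }
}

int main(void)
{
        show_layout(42);        /* shift drops to 3: five 8-bit words plus one 2-bit word */
        return 0;
}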
89 void sbitmap_resize(struct sbitmap *sb, unsigned int depth) in sbitmap_resize() argument
91 unsigned int bits_per_word = 1U << sb->shift; in sbitmap_resize()
94 for (i = 0; i < sb->map_nr; i++) in sbitmap_resize()
97 sb->depth = depth; in sbitmap_resize()
98 sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); in sbitmap_resize()
100 for (i = 0; i < sb->map_nr; i++) { in sbitmap_resize()
101 sb->map[i].depth = min(depth, bits_per_word); in sbitmap_resize()
102 depth -= sb->map[i].depth; in sbitmap_resize()
107 static int __sbitmap_get_word(unsigned long *word, unsigned long depth, in __sbitmap_get_word() argument
114 nr = find_next_zero_bit(word, depth, hint); in __sbitmap_get_word()
115 if (unlikely(nr >= depth)) { in __sbitmap_get_word()
125 return -1; in __sbitmap_get_word()
132 if (hint >= depth - 1) in __sbitmap_get_word()
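Within one word, __sbitmap_get_word() runs find_next_zero_bit() from the caller's hint; if that falls off the end of the word it retries once from bit 0 (but only when there was a nonzero hint, otherwise the word really is full), and the hint itself wraps to 0 once it reaches the last usable bit. A simplified sketch of that hint-and-wrap scan; the real function also does an atomic test_and_set_bit() and loops if it loses that race:

#include <assert.h>
#include <stdbool.h>

/* Find a clear bit in a depth-bit word, preferring bits >= hint.
 * Returns the bit index, or -1 if the word is full. Made-up helper. */
static int get_bit_from_word(unsigned long word, unsigned int depth,
                             unsigned int hint)
{
        bool wrapped = false;
        unsigned int nr;

        for (;;) {
                /* one-word stand-in for find_next_zero_bit() */
                for (nr = hint; nr < depth; nr++)
                        if (!(word & (1UL << nr)))
                                return (int)nr;

                /* Ran off the end: retry once from bit 0 if we had a hint. */
                if (hint && !wrapped) {
                        hint = 0;
                        wrapped = true;
                        continue;
                }
                return -1;
        }
}

int main(void)
{
        assert(get_bit_from_word(0x0fUL, 8, 2) == 4);   /* first zero at/after hint */
        assert(get_bit_from_word(0xf0UL, 8, 6) == 0);   /* wraps back to bit 0      */
        assert(get_bit_from_word(0xffUL, 8, 3) == -1);  /* word full                */
        return 0;
}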
145 nr = __sbitmap_get_word(&sb->map[index].word, in sbitmap_find_bit_in_index()
146 sb->map[index].depth, alloc_hint, in sbitmap_find_bit_in_index()
148 if (nr != -1) in sbitmap_find_bit_in_index()
160 int nr = -1; in sbitmap_get()
174 for (i = 0; i < sb->map_nr; i++) { in sbitmap_get()
177 if (nr != -1) { in sbitmap_get()
178 nr += index << sb->shift; in sbitmap_get()
184 if (++index >= sb->map_nr) in sbitmap_get()
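sbitmap_get() starts at the word picked by the allocation hint and walks all map_nr words, wrapping the index back to 0, so a failed allocation has scanned every word exactly once; a successful word-local hit is converted back into a global bit number with nr += index << sb->shift. A tiny check of that global/per-word index arithmetic, using shift = 3 (8-bit words) as an example:

#include <assert.h>

int main(void)
{
        unsigned int shift = 3;                         /* 8 bits per word */
        unsigned int global = 42;

        unsigned int word  = global >> shift;                   /* word 5 */
        unsigned int local = global & ((1U << shift) - 1);      /* bit 2  */

        /* matches: nr += index << sb->shift */
        assert((word << shift) + local == global);
        return 0;
}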
196 int nr = -1; in sbitmap_get_shallow()
200 for (i = 0; i < sb->map_nr; i++) { in sbitmap_get_shallow()
202 nr = __sbitmap_get_word(&sb->map[index].word, in sbitmap_get_shallow()
203 min(sb->map[index].depth, shallow_depth), in sbitmap_get_shallow()
205 if (nr != -1) { in sbitmap_get_shallow()
206 nr += index << sb->shift; in sbitmap_get_shallow()
215 alloc_hint = index << sb->shift; in sbitmap_get_shallow()
217 if (index >= sb->map_nr) { in sbitmap_get_shallow()
231 for (i = 0; i < sb->map_nr; i++) { in sbitmap_any_bit_set()
232 if (sb->map[i].word & ~sb->map[i].cleared) in sbitmap_any_bit_set()
243 for (i = 0; i < sb->map_nr; i++) { in __sbitmap_weight()
244 const struct sbitmap_word *word = &sb->map[i]; in __sbitmap_weight()
247 weight += bitmap_weight(&word->word, word->depth); in __sbitmap_weight()
249 weight += bitmap_weight(&word->cleared, word->depth); in __sbitmap_weight()
266 seq_printf(m, "depth=%u\n", sb->depth); in sbitmap_show()
267 seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb)); in sbitmap_show()
269 seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift); in sbitmap_show()
270 seq_printf(m, "map_nr=%u\n", sb->map_nr); in sbitmap_show()
293 for (i = 0; i < sb->map_nr; i++) { in sbitmap_bitmap_show()
294 unsigned long word = READ_ONCE(sb->map[i].word); in sbitmap_bitmap_show()
295 unsigned long cleared = READ_ONCE(sb->map[i].cleared); in sbitmap_bitmap_show()
296 unsigned int word_bits = READ_ONCE(sb->map[i].depth); in sbitmap_bitmap_show()
301 unsigned int bits = min(8 - byte_bits, word_bits); in sbitmap_bitmap_show()
303 byte |= (word & (BIT(bits) - 1)) << byte_bits; in sbitmap_bitmap_show()
312 word_bits -= bits; in sbitmap_bitmap_show()
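sbitmap_bitmap_show() renders the live bitmap (word & ~cleared) as hex: bits are shifted out of each word and packed into bytes, taking min(8 - byte_bits, word_bits) bits per step so a word boundary can fall in the middle of a byte, and each completed byte goes into the dump buffer. A userspace sketch of the same packing loop, printing bytes instead of filling a seq_file buffer:

#include <stdio.h>

/* Pack variable-width words into bytes and hex-dump them, in the spirit
 * of sbitmap_bitmap_show(). Purely illustrative, made-up helper names. */
static void hex_dump_bits(const unsigned long *words,
                          const unsigned int *depths, unsigned int nr_words)
{
        unsigned char byte = 0;
        unsigned int byte_bits = 0;
        unsigned int i;

        for (i = 0; i < nr_words; i++) {
                unsigned long word = words[i];
                unsigned int word_bits = depths[i];

                while (word_bits) {
                        /* take as many bits as still fit in the current byte */
                        unsigned int bits = 8 - byte_bits;

                        if (bits > word_bits)
                                bits = word_bits;

                        byte |= (word & ((1UL << bits) - 1)) << byte_bits;
                        byte_bits += bits;
                        word >>= bits;
                        word_bits -= bits;

                        if (byte_bits == 8) {
                                printf("%02x ", byte);
                                byte = 0;
                                byte_bits = 0;
                        }
                }
        }
        if (byte_bits)
                printf("%02x", byte);
        printf("\n");
}

int main(void)
{
        unsigned long words[] = { 0x2a, 0x3 };  /* live bits, i.e. word & ~cleared */
        unsigned int depths[] = { 8, 2 };       /* last word is partial            */

        hex_dump_bits(words, depths, 2);        /* prints "2a 03" */
        return 0;
}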
325 unsigned int depth) in sbq_calc_wake_batch() argument
332 * batch size is small enough that the full depth of the bitmap, in sbq_calc_wake_batch()
333 * potentially limited by a shallow depth, is enough to wake up all of in sbq_calc_wake_batch()
337 * be a partial word. There are depth / bits_per_word full words and in sbq_calc_wake_batch()
338 * depth % bits_per_word bits left over. In bitwise arithmetic: in sbq_calc_wake_batch()
341 * depth / bits_per_word = depth >> shift in sbq_calc_wake_batch()
342 * depth % bits_per_word = depth & ((1 << shift) - 1) in sbq_calc_wake_batch()
344 * Each word can be limited to sbq->min_shallow_depth bits. in sbq_calc_wake_batch()
346 shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth); in sbq_calc_wake_batch()
347 depth = ((depth >> sbq->sb.shift) * shallow_depth + in sbq_calc_wake_batch()
348 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth)); in sbq_calc_wake_batch()
349 wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1, in sbq_calc_wake_batch()
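sbq_calc_wake_batch() sizes the wakeup batch so that SBQ_WAIT_QUEUES batches cover the whole usable depth even when a shallow-depth limit caps each word: usable bits = full words times the per-word shallow limit, plus the capped partial word, and the batch is that divided by SBQ_WAIT_QUEUES, clamped to [1, SBQ_WAKE_BATCH]. A worked example, assuming both constants are 8 (their values in include/linux/sbitmap.h in this era):

#include <limits.h>
#include <stdio.h>

#define SBQ_WAIT_QUEUES 8       /* assumed to match include/linux/sbitmap.h */
#define SBQ_WAKE_BATCH  8

static unsigned int calc_wake_batch(unsigned int depth, unsigned int shift,
                                    unsigned int min_shallow_depth)
{
        unsigned int bits_per_word = 1U << shift;
        unsigned int shallow = min_shallow_depth < bits_per_word ?
                               min_shallow_depth : bits_per_word;
        unsigned int rem = depth & (bits_per_word - 1);
        unsigned int usable;

        /* full words * per-word limit, plus the capped partial word */
        usable = (depth >> shift) * shallow + (rem < shallow ? rem : shallow);

        usable /= SBQ_WAIT_QUEUES;
        if (usable < 1)
                usable = 1;
        if (usable > SBQ_WAKE_BATCH)
                usable = SBQ_WAKE_BATCH;
        return usable;
}

int main(void)
{
        /* depth 128, 64-bit words, no shallow limit: 128 / 8 = 16 -> clamped to 8 */
        printf("%u\n", calc_wake_batch(128, 6, UINT_MAX));
        /* depth 128, at most 4 usable bits per word: 2 * 4 / 8 = 1 */
        printf("%u\n", calc_wake_batch(128, 6, 4));
        return 0;
}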
355 int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, in sbitmap_queue_init_node() argument
361 ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node); in sbitmap_queue_init_node()
365 sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags); in sbitmap_queue_init_node()
366 if (!sbq->alloc_hint) { in sbitmap_queue_init_node()
367 sbitmap_free(&sbq->sb); in sbitmap_queue_init_node()
368 return -ENOMEM; in sbitmap_queue_init_node()
371 if (depth && !round_robin) { in sbitmap_queue_init_node()
373 *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth; in sbitmap_queue_init_node()
376 sbq->min_shallow_depth = UINT_MAX; in sbitmap_queue_init_node()
377 sbq->wake_batch = sbq_calc_wake_batch(sbq, depth); in sbitmap_queue_init_node()
378 atomic_set(&sbq->wake_index, 0); in sbitmap_queue_init_node()
379 atomic_set(&sbq->ws_active, 0); in sbitmap_queue_init_node()
381 sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); in sbitmap_queue_init_node()
382 if (!sbq->ws) { in sbitmap_queue_init_node()
383 free_percpu(sbq->alloc_hint); in sbitmap_queue_init_node()
384 sbitmap_free(&sbq->sb); in sbitmap_queue_init_node()
385 return -ENOMEM; in sbitmap_queue_init_node()
389 init_waitqueue_head(&sbq->ws[i].wait); in sbitmap_queue_init_node()
390 atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch); in sbitmap_queue_init_node()
393 sbq->round_robin = round_robin; in sbitmap_queue_init_node()
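When round-robin allocation is off, sbitmap_queue_init_node() seeds every CPU's allocation hint with a random value below depth, so CPUs start their searches in different words instead of all contending for bit 0; each of the SBQ_WAIT_QUEUES wait queues starts with wait_cnt set to the computed wake_batch. A toy sketch of that per-CPU seeding policy, with rand() standing in for prandom_u32() and a made-up CPU count:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NCPUS 4                 /* hypothetical CPU count for the sketch */

int main(void)
{
        unsigned int depth = 128;
        unsigned int hint[NCPUS];
        int cpu;

        srand((unsigned int)time(NULL));

        /* one independent starting point per CPU, anywhere in [0, depth) */
        for (cpu = 0; cpu < NCPUS; cpu++) {
                hint[cpu] = (unsigned int)rand() % depth;
                printf("cpu %d starts searching at bit %u\n", cpu, hint[cpu]);
        }
        return 0;
}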
399 unsigned int depth) in sbitmap_queue_update_wake_batch() argument
401 unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth); in sbitmap_queue_update_wake_batch()
404 if (sbq->wake_batch != wake_batch) { in sbitmap_queue_update_wake_batch()
405 WRITE_ONCE(sbq->wake_batch, wake_batch); in sbitmap_queue_update_wake_batch()
413 atomic_set(&sbq->ws[i].wait_cnt, 1); in sbitmap_queue_update_wake_batch()
417 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth) in sbitmap_queue_resize() argument
419 sbitmap_queue_update_wake_batch(sbq, depth); in sbitmap_queue_resize()
420 sbitmap_resize(&sbq->sb, depth); in sbitmap_queue_resize()
426 unsigned int hint, depth; in __sbitmap_queue_get() local
429 hint = this_cpu_read(*sbq->alloc_hint); in __sbitmap_queue_get()
430 depth = READ_ONCE(sbq->sb.depth); in __sbitmap_queue_get()
431 if (unlikely(hint >= depth)) { in __sbitmap_queue_get()
432 hint = depth ? prandom_u32() % depth : 0; in __sbitmap_queue_get()
433 this_cpu_write(*sbq->alloc_hint, hint); in __sbitmap_queue_get()
435 nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin); in __sbitmap_queue_get()
437 if (nr == -1) { in __sbitmap_queue_get()
439 this_cpu_write(*sbq->alloc_hint, 0); in __sbitmap_queue_get()
440 } else if (nr == hint || unlikely(sbq->round_robin)) { in __sbitmap_queue_get()
443 if (hint >= depth - 1) in __sbitmap_queue_get()
445 this_cpu_write(*sbq->alloc_hint, hint); in __sbitmap_queue_get()
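__sbitmap_queue_get() reads this CPU's hint, re-randomizes it if a resize left it beyond the current depth, and after the attempt updates it: a failed allocation resets the hint to 0, while a hit on the hinted bit (or any hit in round-robin mode) pushes the hint to nr + 1, wrapping to 0 near the end of the map. A small sketch of just that post-allocation hint policy (hypothetical helper, not the kernel function):

#include <assert.h>

/* Post-allocation hint update, in the spirit of __sbitmap_queue_get().
 * nr is the allocated bit (or -1); returns the new per-CPU hint. */
static unsigned int update_hint(int nr, unsigned int hint,
                                unsigned int depth, int round_robin)
{
        if (nr == -1)
                return 0;                       /* map looked full: restart low */

        if ((unsigned int)nr == hint || round_robin) {
                hint = (unsigned int)nr + 1;    /* move past the bit we just took */
                if (hint >= depth - 1)
                        hint = 0;               /* wrap near the end of the map */
        }
        return hint;
}

int main(void)
{
        assert(update_hint(-1, 37, 128, 0) == 0);       /* failure resets the hint  */
        assert(update_hint(37, 37, 128, 0) == 38);      /* took the hinted bit      */
        assert(update_hint(50, 37, 128, 0) == 37);      /* other hits keep the hint */
        return 0;
}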
455 unsigned int hint, depth; in __sbitmap_queue_get_shallow() local
458 WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth); in __sbitmap_queue_get_shallow()
460 hint = this_cpu_read(*sbq->alloc_hint); in __sbitmap_queue_get_shallow()
461 depth = READ_ONCE(sbq->sb.depth); in __sbitmap_queue_get_shallow()
462 if (unlikely(hint >= depth)) { in __sbitmap_queue_get_shallow()
463 hint = depth ? prandom_u32() % depth : 0; in __sbitmap_queue_get_shallow()
464 this_cpu_write(*sbq->alloc_hint, hint); in __sbitmap_queue_get_shallow()
466 nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth); in __sbitmap_queue_get_shallow()
468 if (nr == -1) { in __sbitmap_queue_get_shallow()
470 this_cpu_write(*sbq->alloc_hint, 0); in __sbitmap_queue_get_shallow()
471 } else if (nr == hint || unlikely(sbq->round_robin)) { in __sbitmap_queue_get_shallow()
474 if (hint >= depth - 1) in __sbitmap_queue_get_shallow()
476 this_cpu_write(*sbq->alloc_hint, hint); in __sbitmap_queue_get_shallow()
486 sbq->min_shallow_depth = min_shallow_depth; in sbitmap_queue_min_shallow_depth()
487 sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth); in sbitmap_queue_min_shallow_depth()
495 if (!atomic_read(&sbq->ws_active)) in sbq_wake_ptr()
498 wake_index = atomic_read(&sbq->wake_index); in sbq_wake_ptr()
500 struct sbq_wait_state *ws = &sbq->ws[wake_index]; in sbq_wake_ptr()
502 if (waitqueue_active(&ws->wait)) { in sbq_wake_ptr()
503 if (wake_index != atomic_read(&sbq->wake_index)) in sbq_wake_ptr()
504 atomic_set(&sbq->wake_index, wake_index); in sbq_wake_ptr()
524 wait_cnt = atomic_dec_return(&ws->wait_cnt); in __sbq_wake_up()
528 wake_batch = READ_ONCE(sbq->wake_batch); in __sbq_wake_up()
542 ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch); in __sbq_wake_up()
544 sbq_index_atomic_inc(&sbq->wake_index); in __sbq_wake_up()
545 wake_up_nr(&ws->wait, wake_batch); in __sbq_wake_up()
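__sbq_wake_up() is why wake_batch matters: every cleared bit decrements the active queue's wait_cnt, but waiters are only woken once the counter hits zero, at which point one caller wins a cmpxchg() that recharges the counter to wake_batch, advances wake_index to the next queue, and wakes wake_batch tasks in one go. A single-queue sketch of that batching with C11 atomics and made-up names (the real code also re-reads wake_batch and copes with the counter going negative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int wait_cnt;
static const int wake_batch = 4;        /* made-up batch size for the sketch */

/* Called once per freed bit; returns true when a batched wakeup fires. */
static bool on_bit_cleared(void)
{
        int cnt = atomic_fetch_sub(&wait_cnt, 1) - 1;

        if (cnt > 0)
                return false;                   /* batch not complete yet */

        /* Exactly one caller wins the recharge and does the batched wakeup. */
        if (atomic_compare_exchange_strong(&wait_cnt, &cnt, wake_batch)) {
                /* kernel: wake_up_nr(&ws->wait, wake_batch) */
                printf("wake %d waiters\n", wake_batch);
                return true;
        }
        return false;
}

int main(void)
{
        atomic_init(&wait_cnt, wake_batch);

        for (int i = 0; i < 8; i++)
                on_bit_cleared();               /* fires after the 4th and 8th free */
        return 0;
}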
569 * of blk_mq) by this bit for avoiding race with re-allocation, in sbitmap_queue_clear()
576 sbitmap_deferred_clear_bit(&sbq->sb, nr); in sbitmap_queue_clear()
587 if (likely(!sbq->round_robin && nr < sbq->sb.depth)) in sbitmap_queue_clear()
588 *per_cpu_ptr(sbq->alloc_hint, cpu) = nr; in sbitmap_queue_clear()
601 wake_index = atomic_read(&sbq->wake_index); in sbitmap_queue_wake_all()
603 struct sbq_wait_state *ws = &sbq->ws[wake_index]; in sbitmap_queue_wake_all()
605 if (waitqueue_active(&ws->wait)) in sbitmap_queue_wake_all()
606 wake_up(&ws->wait); in sbitmap_queue_wake_all()
618 sbitmap_show(&sbq->sb, m); in sbitmap_queue_show()
626 seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i)); in sbitmap_queue_show()
630 seq_printf(m, "wake_batch=%u\n", sbq->wake_batch); in sbitmap_queue_show()
631 seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index)); in sbitmap_queue_show()
632 seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active)); in sbitmap_queue_show()
636 struct sbq_wait_state *ws = &sbq->ws[i]; in sbitmap_queue_show()
639 atomic_read(&ws->wait_cnt), in sbitmap_queue_show()
640 waitqueue_active(&ws->wait) ? "active" : "inactive"); in sbitmap_queue_show()
644 seq_printf(m, "round_robin=%d\n", sbq->round_robin); in sbitmap_queue_show()
645 seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth); in sbitmap_queue_show()
653 if (!sbq_wait->sbq) { in sbitmap_add_wait_queue()
654 sbq_wait->sbq = sbq; in sbitmap_add_wait_queue()
655 atomic_inc(&sbq->ws_active); in sbitmap_add_wait_queue()
656 add_wait_queue(&ws->wait, &sbq_wait->wait); in sbitmap_add_wait_queue()
663 list_del_init(&sbq_wait->wait.entry); in sbitmap_del_wait_queue()
664 if (sbq_wait->sbq) { in sbitmap_del_wait_queue()
665 atomic_dec(&sbq_wait->sbq->ws_active); in sbitmap_del_wait_queue()
666 sbq_wait->sbq = NULL; in sbitmap_del_wait_queue()
675 if (!sbq_wait->sbq) { in sbitmap_prepare_to_wait()
676 atomic_inc(&sbq->ws_active); in sbitmap_prepare_to_wait()
677 sbq_wait->sbq = sbq; in sbitmap_prepare_to_wait()
679 prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state); in sbitmap_prepare_to_wait()
686 finish_wait(&ws->wait, &sbq_wait->wait); in sbitmap_finish_wait()
687 if (sbq_wait->sbq) { in sbitmap_finish_wait()
688 atomic_dec(&sbq->ws_active); in sbitmap_finish_wait()
689 sbq_wait->sbq = NULL; in sbitmap_finish_wait()
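The sbitmap_add/del/prepare/finish_wait helpers above let a caller attach its own struct sbq_wait to a wait queue while keeping ws_active accurate, which is what sbq_wake_ptr() checks before scanning for a queue to wake. A hedged sketch of how a consumer (in the spirit of blk-mq, not literal kernel code) might combine them with __sbitmap_queue_get() into an allocate-or-sleep loop; the wait-queue choice is simplified to ws[0]:

/* Hedged sketch of a consumer's allocate-or-sleep loop, loosely modelled on
 * what blk-mq does with these helpers; not literal kernel code. Real callers
 * rotate through the SBQ_WAIT_QUEUES wait queues instead of using ws[0]. */
static int get_bit_or_sleep(struct sbitmap_queue *sbq)
{
        DEFINE_SBQ_WAIT(wait);
        struct sbq_wait_state *ws = &sbq->ws[0];
        int nr;

        do {
                nr = __sbitmap_queue_get(sbq);
                if (nr >= 0)
                        break;

                /* queue ourselves and bump ws_active (see the helpers above) */
                sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);

                /* re-check after queueing to close the lost-wakeup window */
                nr = __sbitmap_queue_get(sbq);
                if (nr >= 0)
                        break;

                io_schedule();
        } while (1);

        sbitmap_finish_wait(sbq, ws, &wait);
        return nr;
}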