Lines Matching +full:depth +full:-
1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2013-2014 Jens Axboe
14 unsigned depth = sb->depth; in init_alloc_hint() local
16 sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags); in init_alloc_hint()
17 if (!sb->alloc_hint) in init_alloc_hint()
18 return -ENOMEM; in init_alloc_hint()
20 if (depth && !sb->round_robin) { in init_alloc_hint()
24 *per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth); in init_alloc_hint()
30 unsigned int depth) in update_alloc_hint_before_get() argument
34 hint = this_cpu_read(*sb->alloc_hint); in update_alloc_hint_before_get()
35 if (unlikely(hint >= depth)) { in update_alloc_hint_before_get()
36 hint = depth ? get_random_u32_below(depth) : 0; in update_alloc_hint_before_get()
37 this_cpu_write(*sb->alloc_hint, hint); in update_alloc_hint_before_get()
44 unsigned int depth, in update_alloc_hint_after_get() argument
48 if (nr == -1) { in update_alloc_hint_after_get()
50 this_cpu_write(*sb->alloc_hint, 0); in update_alloc_hint_after_get()
51 } else if (nr == hint || unlikely(sb->round_robin)) { in update_alloc_hint_after_get()
54 if (hint >= depth - 1) in update_alloc_hint_after_get()
56 this_cpu_write(*sb->alloc_hint, hint); in update_alloc_hint_after_get()
67 if (!READ_ONCE(map->cleared)) in sbitmap_deferred_clear()
73 mask = xchg(&map->cleared, 0); in sbitmap_deferred_clear()
78 atomic_long_andnot(mask, (atomic_long_t *)&map->word); in sbitmap_deferred_clear()
79 BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word)); in sbitmap_deferred_clear()
83 int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, in sbitmap_init_node() argument
90 shift = sbitmap_calculate_shift(depth); in sbitmap_init_node()
94 return -EINVAL; in sbitmap_init_node()
96 sb->shift = shift; in sbitmap_init_node()
97 sb->depth = depth; in sbitmap_init_node()
98 sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); in sbitmap_init_node()
99 sb->round_robin = round_robin; in sbitmap_init_node()
101 if (depth == 0) { in sbitmap_init_node()
102 sb->map = NULL; in sbitmap_init_node()
108 return -ENOMEM; in sbitmap_init_node()
110 sb->alloc_hint = NULL; in sbitmap_init_node()
113 sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node); in sbitmap_init_node()
114 if (!sb->map) { in sbitmap_init_node()
115 free_percpu(sb->alloc_hint); in sbitmap_init_node()
116 return -ENOMEM; in sbitmap_init_node()
123 void sbitmap_resize(struct sbitmap *sb, unsigned int depth) in sbitmap_resize() argument
125 unsigned int bits_per_word = 1U << sb->shift; in sbitmap_resize()
128 for (i = 0; i < sb->map_nr; i++) in sbitmap_resize()
129 sbitmap_deferred_clear(&sb->map[i]); in sbitmap_resize()
131 sb->depth = depth; in sbitmap_resize()
132 sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); in sbitmap_resize()
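
The init and resize paths above both size the word array by rounding depth up to whole words (map_nr = DIV_ROUND_UP(depth, 1 << shift)). A standalone userspace sketch of that split, with illustrative values rather than code from the file:

#include <stdio.h>

/* Round-up division, as the kernel's DIV_ROUND_UP() does. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int depth = 100;	/* requested number of bits */
	unsigned int shift = 6;		/* 64-bit words => shift of 6 */
	unsigned int bits_per_word = 1U << shift;
	unsigned int map_nr = DIV_ROUND_UP(depth, bits_per_word);

	/* 100 bits over 64-bit words: one full word plus a 36-bit partial word. */
	printf("map_nr=%u, bits in last word=%u\n",
	       map_nr, depth - (map_nr - 1) * bits_per_word);
	return 0;
}
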
136 static int __sbitmap_get_word(unsigned long *word, unsigned long depth, in __sbitmap_get_word() argument
145 nr = find_next_zero_bit(word, depth, hint); in __sbitmap_get_word()
146 if (unlikely(nr >= depth)) { in __sbitmap_get_word()
156 return -1; in __sbitmap_get_word()
163 if (hint >= depth - 1) in __sbitmap_get_word()
171 unsigned int depth, in sbitmap_find_bit_in_word() argument
178 nr = __sbitmap_get_word(&map->word, depth, in sbitmap_find_bit_in_word()
180 if (nr != -1) in sbitmap_find_bit_in_word()
190 unsigned int depth, in sbitmap_find_bit() argument
196 int nr = -1; in sbitmap_find_bit()
198 for (i = 0; i < sb->map_nr; i++) { in sbitmap_find_bit()
199 nr = sbitmap_find_bit_in_word(&sb->map[index], in sbitmap_find_bit()
202 depth), in sbitmap_find_bit()
205 if (nr != -1) { in sbitmap_find_bit()
206 nr += index << sb->shift; in sbitmap_find_bit()
212 if (++index >= sb->map_nr) in sbitmap_find_bit()
230 if (sb->round_robin) in __sbitmap_get()
236 !sb->round_robin); in __sbitmap_get()
242 unsigned int hint, depth; in sbitmap_get() local
244 if (WARN_ON_ONCE(unlikely(!sb->alloc_hint))) in sbitmap_get()
245 return -1; in sbitmap_get()
247 depth = READ_ONCE(sb->depth); in sbitmap_get()
248 hint = update_alloc_hint_before_get(sb, depth); in sbitmap_get()
250 update_alloc_hint_after_get(sb, depth, hint, nr); in sbitmap_get()
271 unsigned int hint, depth; in sbitmap_get_shallow() local
273 if (WARN_ON_ONCE(unlikely(!sb->alloc_hint))) in sbitmap_get_shallow()
274 return -1; in sbitmap_get_shallow()
276 depth = READ_ONCE(sb->depth); in sbitmap_get_shallow()
277 hint = update_alloc_hint_before_get(sb, depth); in sbitmap_get_shallow()
279 update_alloc_hint_after_get(sb, depth, hint, nr); in sbitmap_get_shallow()
289 for (i = 0; i < sb->map_nr; i++) { in sbitmap_any_bit_set()
290 if (sb->map[i].word & ~sb->map[i].cleared) in sbitmap_any_bit_set()
301 for (i = 0; i < sb->map_nr; i++) { in __sbitmap_weight()
302 const struct sbitmap_word *word = &sb->map[i]; in __sbitmap_weight()
306 weight += bitmap_weight(&word->word, word_depth); in __sbitmap_weight()
308 weight += bitmap_weight(&word->cleared, word_depth); in __sbitmap_weight()
320 return __sbitmap_weight(sb, true) - sbitmap_cleared(sb); in sbitmap_weight()
326 seq_printf(m, "depth=%u\n", sb->depth); in sbitmap_show()
329 seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift); in sbitmap_show()
330 seq_printf(m, "map_nr=%u\n", sb->map_nr); in sbitmap_show()
353 for (i = 0; i < sb->map_nr; i++) { in sbitmap_bitmap_show()
354 unsigned long word = READ_ONCE(sb->map[i].word); in sbitmap_bitmap_show()
355 unsigned long cleared = READ_ONCE(sb->map[i].cleared); in sbitmap_bitmap_show()
361 unsigned int bits = min(8 - byte_bits, word_bits); in sbitmap_bitmap_show()
363 byte |= (word & (BIT(bits) - 1)) << byte_bits; in sbitmap_bitmap_show()
372 word_bits -= bits; in sbitmap_bitmap_show()
385 unsigned int depth) in sbq_calc_wake_batch() argument
392 * batch size is small enough that the full depth of the bitmap, in sbq_calc_wake_batch()
393 * potentially limited by a shallow depth, is enough to wake up all of in sbq_calc_wake_batch()
397 * be a partial word. There are depth / bits_per_word full words and in sbq_calc_wake_batch()
398 * depth % bits_per_word bits left over. In bitwise arithmetic: in sbq_calc_wake_batch()
401 * depth / bits_per_word = depth >> shift in sbq_calc_wake_batch()
402 * depth % bits_per_word = depth & ((1 << shift) - 1) in sbq_calc_wake_batch()
404 * Each word can be limited to sbq->min_shallow_depth bits. in sbq_calc_wake_batch()
406 shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth); in sbq_calc_wake_batch()
407 depth = ((depth >> sbq->sb.shift) * shallow_depth + in sbq_calc_wake_batch()
408 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth)); in sbq_calc_wake_batch()
409 wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1, in sbq_calc_wake_batch()
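
The comment and arithmetic above compute how many bits remain reachable once min_shallow_depth caps each word, then spread that over the wait queues. A standalone sketch of the same arithmetic; the function and variable names are illustrative, and the constants mirror SBQ_WAIT_QUEUES/SBQ_WAKE_BATCH from include/linux/sbitmap.h:

#include <stdio.h>

#define SBQ_WAIT_QUEUES	8
#define SBQ_WAKE_BATCH	8

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int calc_wake_batch(unsigned int depth, unsigned int shift,
				    unsigned int min_shallow_depth)
{
	/* Each word contributes at most shallow_depth usable bits. */
	unsigned int shallow_depth = min_u(1U << shift, min_shallow_depth);
	unsigned int full_words = depth >> shift;		/* depth / bits_per_word */
	unsigned int leftover = depth & ((1U << shift) - 1);	/* depth % bits_per_word */
	unsigned int usable = full_words * shallow_depth +
			      min_u(leftover, shallow_depth);
	unsigned int batch = usable / SBQ_WAIT_QUEUES;

	/* Clamp to [1, SBQ_WAKE_BATCH], as clamp_t() does above. */
	if (batch < 1)
		batch = 1;
	if (batch > SBQ_WAKE_BATCH)
		batch = SBQ_WAKE_BATCH;
	return batch;
}

int main(void)
{
	/* 128 bits, 64-bit words, no shallow limit: 128 / 8 = 16, clamped to 8. */
	printf("%u\n", calc_wake_batch(128, 6, ~0U));
	/* min_shallow_depth = 16 caps each word at 16 usable bits: 32 / 8 = 4. */
	printf("%u\n", calc_wake_batch(128, 6, 16));
	return 0;
}
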
415 int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, in sbitmap_queue_init_node() argument
421 ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node, in sbitmap_queue_init_node()
426 sbq->min_shallow_depth = UINT_MAX; in sbitmap_queue_init_node()
427 sbq->wake_batch = sbq_calc_wake_batch(sbq, depth); in sbitmap_queue_init_node()
428 atomic_set(&sbq->wake_index, 0); in sbitmap_queue_init_node()
429 atomic_set(&sbq->ws_active, 0); in sbitmap_queue_init_node()
430 atomic_set(&sbq->completion_cnt, 0); in sbitmap_queue_init_node()
431 atomic_set(&sbq->wakeup_cnt, 0); in sbitmap_queue_init_node()
433 sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); in sbitmap_queue_init_node()
434 if (!sbq->ws) { in sbitmap_queue_init_node()
435 sbitmap_free(&sbq->sb); in sbitmap_queue_init_node()
436 return -ENOMEM; in sbitmap_queue_init_node()
440 init_waitqueue_head(&sbq->ws[i].wait); in sbitmap_queue_init_node()
447 unsigned int depth) in sbitmap_queue_update_wake_batch() argument
451 wake_batch = sbq_calc_wake_batch(sbq, depth); in sbitmap_queue_update_wake_batch()
452 if (sbq->wake_batch != wake_batch) in sbitmap_queue_update_wake_batch()
453 WRITE_ONCE(sbq->wake_batch, wake_batch); in sbitmap_queue_update_wake_batch()
460 unsigned int depth = (sbq->sb.depth + users - 1) / users; in sbitmap_queue_recalculate_wake_batch() local
462 wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES, in sbitmap_queue_recalculate_wake_batch()
465 WRITE_ONCE(sbq->wake_batch, wake_batch); in sbitmap_queue_recalculate_wake_batch()
469 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth) in sbitmap_queue_resize() argument
471 sbitmap_queue_update_wake_batch(sbq, depth); in sbitmap_queue_resize()
472 sbitmap_resize(&sbq->sb, depth); in sbitmap_queue_resize()
478 return sbitmap_get(&sbq->sb); in __sbitmap_queue_get()
485 struct sbitmap *sb = &sbq->sb; in __sbitmap_queue_get_batch()
486 unsigned int hint, depth; in __sbitmap_queue_get_batch() local
490 if (unlikely(sb->round_robin)) in __sbitmap_queue_get_batch()
493 depth = READ_ONCE(sb->depth); in __sbitmap_queue_get_batch()
494 hint = update_alloc_hint_before_get(sb, depth); in __sbitmap_queue_get_batch()
498 for (i = 0; i < sb->map_nr; i++) { in __sbitmap_queue_get_batch()
499 struct sbitmap_word *map = &sb->map[index]; in __sbitmap_queue_get_batch()
504 if (map->word == (1UL << (map_depth - 1)) - 1) in __sbitmap_queue_get_batch()
507 nr = find_first_zero_bit(&map->word, map_depth); in __sbitmap_queue_get_batch()
509 atomic_long_t *ptr = (atomic_long_t *) &map->word; in __sbitmap_queue_get_batch()
512 get_mask = ((1UL << nr_tags) - 1) << nr; in __sbitmap_queue_get_batch()
513 val = READ_ONCE(map->word); in __sbitmap_queue_get_batch()
519 *offset = nr + (index << sb->shift); in __sbitmap_queue_get_batch()
520 update_alloc_hint_after_get(sb, depth, hint, in __sbitmap_queue_get_batch()
521 *offset + nr_tags - 1); in __sbitmap_queue_get_batch()
527 if (++index >= sb->map_nr) in __sbitmap_queue_get_batch()
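
The batch path above claims a run of nr_tags consecutive bits with a single atomic update by building a contiguous mask starting at the first free bit. A minimal userspace sketch of just that mask arithmetic, with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned long word = 0x7UL;	/* bits 0-2 already allocated */
	unsigned int nr = 3;		/* first zero bit in the word */
	unsigned int nr_tags = 4;	/* tags wanted in one batch */

	/* nr_tags set bits, shifted up to start at bit nr: covers bits 3..6. */
	unsigned long get_mask = ((1UL << nr_tags) - 1) << nr;

	printf("get_mask=0x%lx, word after claim=0x%lx\n",
	       get_mask, word | get_mask);
	return 0;
}
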
537 WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth); in sbitmap_queue_get_shallow()
539 return sbitmap_get_shallow(&sbq->sb, shallow_depth); in sbitmap_queue_get_shallow()
546 sbq->min_shallow_depth = min_shallow_depth; in sbitmap_queue_min_shallow_depth()
547 sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth); in sbitmap_queue_min_shallow_depth()
555 if (!atomic_read(&sbq->ws_active)) in __sbitmap_queue_wake_up()
558 wake_index = atomic_read(&sbq->wake_index); in __sbitmap_queue_wake_up()
560 struct sbq_wait_state *ws = &sbq->ws[wake_index]; in __sbitmap_queue_wake_up()
570 if (waitqueue_active(&ws->wait)) { in __sbitmap_queue_wake_up()
571 woken = wake_up_nr(&ws->wait, nr); in __sbitmap_queue_wake_up()
574 nr -= woken; in __sbitmap_queue_wake_up()
578 if (wake_index != atomic_read(&sbq->wake_index)) in __sbitmap_queue_wake_up()
579 atomic_set(&sbq->wake_index, wake_index); in __sbitmap_queue_wake_up()
584 unsigned int wake_batch = READ_ONCE(sbq->wake_batch); in sbitmap_queue_wake_up()
587 if (!atomic_read(&sbq->ws_active)) in sbitmap_queue_wake_up()
590 atomic_add(nr, &sbq->completion_cnt); in sbitmap_queue_wake_up()
591 wakeups = atomic_read(&sbq->wakeup_cnt); in sbitmap_queue_wake_up()
594 if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch) in sbitmap_queue_wake_up()
596 } while (!atomic_try_cmpxchg(&sbq->wakeup_cnt, in sbitmap_queue_wake_up()
605 if (likely(!sb->round_robin && tag < sb->depth)) in sbitmap_update_cpu_hint()
606 data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag); in sbitmap_update_cpu_hint()
612 struct sbitmap *sb = &sbq->sb; in sbitmap_queue_clear_batch()
619 const int tag = tags[i] - offset; in sbitmap_queue_clear_batch()
623 this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word; in sbitmap_queue_clear_batch()
639 sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(), in sbitmap_queue_clear_batch()
640 tags[nr_tags - 1] - offset); in sbitmap_queue_clear_batch()
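
The clear-batch lines above translate between a global tag number and a (word index, bit-in-word) pair using the bitmap's shift. A small userspace sketch of that mapping, mirroring the SB_NR_TO_INDEX()/SB_NR_TO_BIT() macros from include/linux/sbitmap.h, with illustrative values:

#include <stdio.h>

static unsigned int nr_to_index(unsigned int nr, unsigned int shift)
{
	return nr >> shift;			/* which word */
}

static unsigned int nr_to_bit(unsigned int nr, unsigned int shift)
{
	return nr & ((1U << shift) - 1);	/* which bit in that word */
}

int main(void)
{
	unsigned int shift = 6;	/* 64 bits per word */
	unsigned int nr = 70;	/* global bit (tag) number */
	unsigned int index = nr_to_index(nr, shift);
	unsigned int bit = nr_to_bit(nr, shift);

	/* 70 lands in word 1, bit 6; the reverse is bit + (index << shift). */
	printf("index=%u bit=%u back=%u\n", index, bit, bit + (index << shift));
	return 0;
}
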
650 * of blk_mq) by this bit for avoiding race with re-allocation, in sbitmap_queue_clear()
657 sbitmap_deferred_clear_bit(&sbq->sb, nr); in sbitmap_queue_clear()
667 sbitmap_update_cpu_hint(&sbq->sb, cpu, nr); in sbitmap_queue_clear()
680 wake_index = atomic_read(&sbq->wake_index); in sbitmap_queue_wake_all()
682 struct sbq_wait_state *ws = &sbq->ws[wake_index]; in sbitmap_queue_wake_all()
684 if (waitqueue_active(&ws->wait)) in sbitmap_queue_wake_all()
685 wake_up(&ws->wait); in sbitmap_queue_wake_all()
697 sbitmap_show(&sbq->sb, m); in sbitmap_queue_show()
705 seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i)); in sbitmap_queue_show()
709 seq_printf(m, "wake_batch=%u\n", sbq->wake_batch); in sbitmap_queue_show()
710 seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index)); in sbitmap_queue_show()
711 seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active)); in sbitmap_queue_show()
715 struct sbq_wait_state *ws = &sbq->ws[i]; in sbitmap_queue_show()
717 waitqueue_active(&ws->wait) ? "active" : "inactive"); in sbitmap_queue_show()
721 seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin); in sbitmap_queue_show()
722 seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth); in sbitmap_queue_show()
730 if (!sbq_wait->sbq) { in sbitmap_add_wait_queue()
731 sbq_wait->sbq = sbq; in sbitmap_add_wait_queue()
732 atomic_inc(&sbq->ws_active); in sbitmap_add_wait_queue()
733 add_wait_queue(&ws->wait, &sbq_wait->wait); in sbitmap_add_wait_queue()
740 list_del_init(&sbq_wait->wait.entry); in sbitmap_del_wait_queue()
741 if (sbq_wait->sbq) { in sbitmap_del_wait_queue()
742 atomic_dec(&sbq_wait->sbq->ws_active); in sbitmap_del_wait_queue()
743 sbq_wait->sbq = NULL; in sbitmap_del_wait_queue()
752 if (!sbq_wait->sbq) { in sbitmap_prepare_to_wait()
753 atomic_inc(&sbq->ws_active); in sbitmap_prepare_to_wait()
754 sbq_wait->sbq = sbq; in sbitmap_prepare_to_wait()
756 prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state); in sbitmap_prepare_to_wait()
763 finish_wait(&ws->wait, &sbq_wait->wait); in sbitmap_finish_wait()
764 if (sbq_wait->sbq) { in sbitmap_finish_wait()
765 atomic_dec(&sbq->ws_active); in sbitmap_finish_wait()
766 sbq_wait->sbq = NULL; in sbitmap_finish_wait()
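
Taken together, the matches above trace how depth flows from sbitmap_queue_init_node() into the per-word bitmap, the wake batching, and the wait queues. A hedged module-style sketch of a minimal consumer; the module and identifier names are illustrative, not taken from the file, and everything beyond init/teardown is trimmed:

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/numa.h>
#include <linux/sbitmap.h>

static struct sbitmap_queue example_sbq;

static int __init example_init(void)
{
	/* 128 tags; a negative shift lets sbitmap pick bits_per_word itself. */
	int ret = sbitmap_queue_init_node(&example_sbq, 128, -1, false,
					  GFP_KERNEL, NUMA_NO_NODE);
	if (ret)
		return ret;

	/* A real consumer would pair __sbitmap_queue_get() with
	 * sbitmap_queue_clear() on completion, and shrink the usable
	 * depth later via sbitmap_queue_resize() if needed. */
	return 0;
}

static void __exit example_exit(void)
{
	sbitmap_queue_free(&example_sbq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
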