Lines matching "sync" in pblk-rb.c
// SPDX-License-Identifier: GPL-2.0
/*
 * pblk-rb.c - pblk's write buffer
 */
/* pblk_rb_data_free() */
	list_for_each_entry_safe(p, t, &rb->pages, list) {
		free_pages((unsigned long)page_address(p->pages), p->order);
		list_del(&p->list);

/* pblk_rb_free() */
	vfree(rb->entries);
/*
 * pblk_rb_calculate_size -- calculate the size of the write buffer
 */
	unsigned int thr_sz = 1 << (get_count_order(threshold + NVM_MAX_VLBA));
	/* ... */
	max_io = (1 << max((int)(get_count_order(max_sz)),
			   (int)(get_count_order(NVM_MAX_VLBA << 1))));
	/* ... */
		max_io <<= 1;
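/*
 * A userspace sketch of the sizing rule above, with invented helper
 * names: count_order() stands in for the kernel's get_count_order()
 * (ceil(log2(n))), and NVM_MAX_VLBA is assumed to be 64 as in the
 * LightNVM headers of this era. The buffer is rounded up to a power of
 * two large enough for the threshold plus one maximal vector I/O.
 */
#include <stdio.h>

#define NVM_MAX_VLBA 64u	/* max logical blocks per vector command */

static unsigned int count_order(unsigned int n)	/* ceil(log2(n)) */
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

static unsigned int calc_rb_size(unsigned int nr_entries, unsigned int threshold)
{
	unsigned int thr_sz = 1u << count_order(threshold + NVM_MAX_VLBA);
	unsigned int max_sz = (thr_sz > nr_entries) ? thr_sz : nr_entries;
	unsigned int a = count_order(max_sz);
	unsigned int b = count_order(NVM_MAX_VLBA << 1);
	unsigned int max_io = 1u << ((a > b) ? a : b);

	if (nr_entries + NVM_MAX_VLBA > max_io)
		max_io <<= 1;	/* keep one max I/O of headroom */
	return max_io;
}

int main(void)
{
	/* 256 requested entries, threshold 8: thr_sz = 128, max_io = 256,
	 * then 256 + 64 > 256 doubles the buffer to 512 entries. */
	printf("%u\n", calc_rb_size(256, 8));	/* prints 512 */
	return 0;
}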
/*
 * ...
 * (Documentation/core-api/circular-buffers.rst)
 */
/* pblk_rb_init() */
	unsigned int max_order = MAX_ORDER - 1;
	/* ... */
		return -ENOMEM;
	/* ... */
	rb->entries = entries;
	rb->seg_size = (1 << power_seg_sz);
	rb->nr_entries = (1 << power_size);
	rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
	rb->back_thres = threshold;
	rb->flush_point = EMPTY_ENTRY;

	spin_lock_init(&rb->w_lock);
	spin_lock_init(&rb->s_lock);

	INIT_LIST_HEAD(&rb->pages);
	/* ... */
		iter = (1 << (alloc_order - max_order));
	/* ... */
		iter = 1;
	/* ... */
			return -ENOMEM;
	/* ... */
		page_set->order = order;
		page_set->pages = alloc_pages(GFP_KERNEL, order);
		if (!page_set->pages) {
			/* ... */
			return -ENOMEM;
		}
		kaddr = page_address(page_set->pages);

		entry = &rb->entries[init_entry];
		entry->data = kaddr;
		entry->cacheline = pblk_cacheline_to_addr(init_entry++);
		entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;

		set_size = (1 << order);
		for (i = 1; i < set_size; i++) {
			entry = &rb->entries[init_entry];
			entry->cacheline = pblk_cacheline_to_addr(init_entry++);
			entry->data = kaddr + (i * rb->seg_size);
			entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
			bio_list_init(&entry->w_ctx.bios);
		}

		list_add_tail(&page_set->list, &rb->pages);
		iter--;
	/* ... */
	atomic_set(&rb->inflight_flush_point, 0);

	/*
	 * Initialize rate-limiter, which controls access to the write buffer
	 * ...
	 */
	pblk_rl_init(&pblk->rl, rb->nr_entries, threshold);
/* clean_wctx() */
	flags = READ_ONCE(w_ctx->flags);
	/* ... */
	smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
	pblk_ppa_set_empty(&w_ctx->ppa);
	w_ctx->lba = ADDR_EMPTY;
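/*
 * The flags word above is the synchronization point for each entry: it
 * cycles PBLK_WRITABLE_ENTRY -> PBLK_WRITTEN_DATA ->
 * PBLK_SUBMITTED_ENTRY -> back to PBLK_WRITABLE_ENTRY, and the
 * transitions in this file are published with smp_store_release(). A
 * minimal userspace analogue using C11 atomics (names invented, not
 * pblk code and not a line-for-line translation): the release store
 * pairs with an acquire load, so a reader that observes the new flags
 * also observes the fields written before them.
 */
#include <stdatomic.h>
#include <stdint.h>

#define WRITABLE_ENTRY	(1 << 0)

struct w_ctx_sketch {
	uint64_t lba;		/* plain field, published via flags */
	_Atomic int flags;
};

static void recycle(struct w_ctx_sketch *w)
{
	w->lba = UINT64_MAX;	/* ADDR_EMPTY stand-in */
	atomic_store_explicit(&w->flags, WRITABLE_ENTRY,
			      memory_order_release);	/* smp_store_release() */
}

static int is_writable(struct w_ctx_sketch *w)
{
	/* acquire pairs with the release; lba is stable if this is set */
	return atomic_load_explicit(&w->flags, memory_order_acquire)
			& WRITABLE_ENTRY;
}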
/* pblk_rb_space() */
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int sync = READ_ONCE(rb->sync);

	return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);

/* pblk_rb_ptr_wrap() */
	return (p + nr_entries) & (rb->nr_entries - 1);

/* pblk_rb_read_count() */
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int subm = READ_ONCE(rb->subm);

	return pblk_rb_ring_count(mem, subm, rb->nr_entries);

/* pblk_rb_sync_count() */
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int sync = READ_ONCE(rb->sync);

	return pblk_rb_ring_count(mem, sync, rb->nr_entries);
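/*
 * The helpers above follow the circ_buf.h convention: head and tail
 * only ever advance modulo a power-of-two size, so "& (size - 1)"
 * replaces the modulo and occupancy can be computed from a snapshot of
 * the two indices without a lock. A small self-contained sketch
 * (RING_SIZE and the helper names are invented):
 */
#include <assert.h>

#define RING_SIZE 8u			/* must be a power of two */

static unsigned int ring_wrap(unsigned int p, unsigned int n)
{
	return (p + n) & (RING_SIZE - 1);	/* cf. pblk_rb_ptr_wrap() */
}

static unsigned int ring_count(unsigned int head, unsigned int tail)
{
	return (head - tail) & (RING_SIZE - 1);	/* cf. CIRC_CNT() */
}

static unsigned int ring_space(unsigned int head, unsigned int tail)
{
	/* one slot stays empty so that full != empty, cf. CIRC_SPACE() */
	return (tail - head - 1) & (RING_SIZE - 1);
}

int main(void)
{
	unsigned int mem = ring_wrap(6, 4);	/* head wrapped: 6 + 4 -> 2 */
	unsigned int sync = 6;			/* tail not yet wrapped */

	assert(mem == 2);
	assert(ring_count(mem, sync) == 4);	/* entries awaiting sync */
	assert(ring_space(mem, sync) == 3);	/* slots free for writers */
	return 0;
}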
/* pblk_rb_read_commit() */
	subm = READ_ONCE(rb->subm);
	/* ... */
	smp_store_release(&rb->subm, pblk_rb_ptr_wrap(rb, subm, nr_entries));
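/*
 * The release store on rb->subm above publishes the new submission
 * pointer; anyone who reads the index afterwards sees everything the
 * committer did first. A generic userspace reduction of that
 * publish-after-fill idiom (names invented, not pblk code):
 */
#include <stdatomic.h>

#define SLOTS 8u
static int slot_data[SLOTS];
static _Atomic unsigned int committed;	/* plays the role of rb->subm */

static void producer_commit(unsigned int first, int value)
{
	slot_data[first & (SLOTS - 1)] = value;	/* fill before publish */
	atomic_store_explicit(&committed, (first + 1) & (SLOTS - 1),
			      memory_order_release);
}

static unsigned int consumer_snapshot(void)
{
	/* after this load, slot contents up to the index are visible */
	return atomic_load_explicit(&committed, memory_order_acquire);
}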
/* __pblk_rb_update_l2p() */
		entry = &rb->entries[rb->l2p_update];
		w_ctx = &entry->w_ctx;

		flags = READ_ONCE(entry->w_ctx.flags);
		/* ... */
			WARN(1, "pblk: unknown IO type\n");

		pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
				    entry->cacheline);

		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		atomic_dec(&line->sec_to_update);
		kref_put(&line->ref, pblk_line_put);
		/* ... */
		rb->l2p_update = pblk_rb_ptr_wrap(rb, rb->l2p_update, 1);
	/* ... */
	pblk_rl_out(&pblk->rl, user_io, gc_io);
/*
 * When we move the l2p_update pointer, we update the l2p table - lookups will
 * point to the physical address instead of to the cacheline in the write
 * buffer from this moment on.
 */
static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
			      unsigned int mem, unsigned int sync)
	/* ... */
	lockdep_assert_held(&rb->w_lock);
	/* ... */
	space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
	/* ... */
	count = nr_entries - space;
	/* l2p_update used exclusively under rb->w_lock */
/* pblk_rb_sync_l2p() */
	unsigned int sync;
	/* ... */
	spin_lock(&rb->w_lock);
	/* ... */
	sync = smp_load_acquire(&rb->sync);

	to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
	/* ... */
	spin_unlock(&rb->w_lock);
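/*
 * Worked example for the lazy policy in pblk_rb_update_l2p() above
 * (userspace sketch, invented name): the l2p table is only moved off
 * the cacheline when the writer is about to overwrite un-synced slots.
 * With size 8, mem = 5 and l2p_update = 2, the space ahead of the
 * writer is (2 - 5 - 1) & 7 = 4 slots; a reservation of 6 entries
 * therefore forces 6 - 4 = 2 map updates first.
 */
static unsigned int l2p_updates_needed(unsigned int mem,
				       unsigned int l2p_update,
				       unsigned int nr_entries,
				       unsigned int size)
{
	unsigned int space = (l2p_update - mem - 1) & (size - 1);

	return (space > nr_entries) ? 0 : nr_entries - space;
}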
/* __pblk_rb_write_entry() */
	memcpy(entry->data, data, rb->seg_size);

	entry->w_ctx.lba = w_ctx.lba;
	entry->w_ctx.ppa = w_ctx.ppa;
/* pblk_rb_write_entry_user() */
	entry = &rb->entries[ring_pos];
	flags = READ_ONCE(entry->w_ctx.flags);
	/* ... */
	pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);
	/* ... */
	smp_store_release(&entry->w_ctx.flags, flags);
/* pblk_rb_write_entry_gc() */
	entry = &rb->entries[ring_pos];
	flags = READ_ONCE(entry->w_ctx.flags);
	/* ... */
	if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))
		entry->w_ctx.lba = ADDR_EMPTY;
	/* ... */
	smp_store_release(&entry->w_ctx.flags, flags);
/* pblk_rb_flush_point_set() */
	unsigned int sync, flush_point;
	/* ... */
	sync = READ_ONCE(rb->sync);

	if (pos == sync) {
	/* ... */
	atomic_inc(&rb->inflight_flush_point);
	/* ... */
	flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
	entry = &rb->entries[flush_point];
	/* ... */
	smp_store_release(&rb->flush_point, flush_point);
	/* ... */
	bio_list_add(&entry->w_ctx.bios, bio);
	/* ... */
	return bio ? 1 : 0;
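/*
 * The flush point set above is the last entry written before `pos`
 * (the current mem pointer), wrapping to the top of the ring when
 * pos == 0; if sync has already caught up to pos there is nothing
 * outstanding and the flush can complete immediately. A one-line
 * sketch with worked values (helper name invented):
 */
static unsigned int flush_point_for(unsigned int pos, unsigned int size)
{
	return (pos == 0) ? size - 1 : pos - 1;
}

/* flush_point_for(5, 8) == 4; flush_point_for(0, 8) == 7 */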
/* __pblk_rb_may_write() */
	unsigned int sync;
	/* ... */
	sync = READ_ONCE(rb->sync);
	mem = READ_ONCE(rb->mem);

	threshold = nr_entries + rb->back_thres;

	if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < threshold)
	/* ... */
	if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
	/* ... */
	return 1;

/* pblk_rb_may_write() */
	smp_store_release(&rb->mem, pblk_rb_ptr_wrap(rb, *pos, nr_entries));
	return 1;
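/*
 * Sketch of the admission check in __pblk_rb_may_write() (invented
 * name, userspace arithmetic only): a reservation of nr_entries is
 * admitted only while back_thres slots would still remain free, so the
 * write head always keeps that distance from the sync tail. With
 * back_thres = 64, a 16-entry write needs at least 80 free slots.
 */
static int may_write(unsigned int space, unsigned int nr_entries,
		     unsigned int back_thres)
{
	return space >= nr_entries + back_thres;
}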
/* pblk_rb_flush() */
	unsigned int mem = READ_ONCE(rb->mem);

/* pblk_rb_may_write_flush() */
	if (bio->bi_opf & REQ_PREFLUSH) {
		/* ... */
		atomic64_inc(&pblk->nr_flush);
		if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
	/* ... */
	smp_store_release(&rb->mem, mem);

	return 1;
/*
 * ...
 * buffer (rate-limiter).
 */
/* pblk_rb_may_write_user() */
	spin_lock(&rb->w_lock);
	io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
	/* ... */
		spin_unlock(&rb->w_lock);
	/* ... */
		spin_unlock(&rb->w_lock);
	/* ... */
	pblk_rl_user_in(&pblk->rl, nr_entries);
	spin_unlock(&rb->w_lock);

/* pblk_rb_may_write_gc() */
	spin_lock(&rb->w_lock);
	if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
		spin_unlock(&rb->w_lock);
	/* ... */
	spin_unlock(&rb->w_lock);
	/* ... */
	pblk_rl_gc_in(&pblk->rl, nr_entries);
	spin_unlock(&rb->w_lock);

	return 1;
/* pblk_rb_read_to_bio() */
	struct request_queue *q = pblk->dev->q;
	/* ... */
	struct bio *bio = rqd->bio;
	/* ... */
	pad = nr_entries - count;
	/* ... */
	pad += (pblk->min_write_pgs - pblk->min_write_pgs_data);

	c_ctx->sentry = pos;
	c_ctx->nr_valid = to_read;
	c_ctx->nr_padded = pad;
	/* ... */
		entry = &rb->entries[pos];
		/* ... */
		flags = READ_ONCE(entry->w_ctx.flags);
		/* ... */
		page = virt_to_page(entry->data);
		/* ... */
			smp_store_release(&entry->w_ctx.flags, flags);
		/* ... */
		if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
							rb->seg_size) {
			/* ... */
			smp_store_release(&entry->w_ctx.flags, flags);
		/* ... */
		smp_store_release(&entry->w_ctx.flags, flags);

		pos = pblk_rb_ptr_wrap(rb, pos, 1);
	/* ... */
	if (pad < pblk->min_write_pgs)
		atomic64_inc(&pblk->pad_dist[pad - 1]);
	/* ... */
	atomic64_add(pad, &pblk->pad_wa);
	/* ... */
	atomic_long_add(pad, &pblk->padded_writes);
/* pblk_rb_copy_to_bio() */
	int ret = 1;
	/* ... */
	BUG_ON(pos >= rb->nr_entries);

	entry = &rb->entries[pos];
	w_ctx = &entry->w_ctx;
	flags = READ_ONCE(w_ctx->flags);

	spin_lock(&rb->w_lock);
	spin_lock(&pblk->trans_lock);
	/* ... */
	spin_unlock(&pblk->trans_lock);
	/* ... */
	if (!pblk_ppa_comp(l2p_ppa, ppa) || w_ctx->lba != lba ||
	/* ... */
	memcpy(data, entry->data, rb->seg_size);
	/* ... */
	spin_unlock(&rb->w_lock);
/* pblk_rb_w_ctx() */
	return &rb->entries[entry].w_ctx;

/* pblk_rb_sync_init() */
	__acquires(&rb->s_lock)
	/* ... */
		spin_lock_irqsave(&rb->s_lock, *flags);
	/* ... */
		spin_lock_irq(&rb->s_lock);

	return rb->sync;

/* pblk_rb_sync_end() */
	__releases(&rb->s_lock)
	/* ... */
	lockdep_assert_held(&rb->s_lock);
	/* ... */
		spin_unlock_irqrestore(&rb->s_lock, *flags);
	/* ... */
		spin_unlock_irq(&rb->s_lock);
/* pblk_rb_sync_advance() */
	unsigned int sync, flush_point;
	lockdep_assert_held(&rb->s_lock);

	sync = READ_ONCE(rb->sync);
	flush_point = READ_ONCE(rb->flush_point);
	/* ... */
		secs_to_flush = pblk_rb_ring_count(flush_point, sync,
						   rb->nr_entries);
	/* ... */
			smp_store_release(&rb->flush_point, EMPTY_ENTRY);
	/* ... */
	sync = pblk_rb_ptr_wrap(rb, sync, nr_entries);
	/* ... */
	smp_store_release(&rb->sync, sync);

	return sync;
/* pblk_rb_flush_point_count() */
	unsigned int subm, sync, flush_point;
	/* ... */
	flush_point = smp_load_acquire(&rb->flush_point);
	/* ... */
	sync = smp_load_acquire(&rb->sync);

	subm = READ_ONCE(rb->subm);
	submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);

	/* The sync point itself counts as a sector to sync */
	to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;

	return (submitted < to_flush) ? (to_flush - submitted) : 0;
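/*
 * Worked example for the count above (invented name): with 64 entries,
 * sync = 10, subm = 14 and flush_point = 15, (14 - 10) & 63 = 4 entries
 * are already submitted and ((15 - 10) & 63) + 1 = 6 must reach the
 * flush point, so 2 entries still have to be submitted before the
 * flush can complete.
 */
static unsigned int flush_debt(unsigned int subm, unsigned int sync,
			       unsigned int flush_point, unsigned int size)
{
	unsigned int submitted = (subm - sync) & (size - 1);
	unsigned int to_flush = ((flush_point - sync) & (size - 1)) + 1;

	return (submitted < to_flush) ? to_flush - submitted : 0;
}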
/* pblk_rb_tear_down_check() */
	spin_lock(&rb->w_lock);
	spin_lock_irq(&rb->s_lock);

	if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
				(rb->sync == rb->l2p_update) &&
				(rb->flush_point == EMPTY_ENTRY)) {
	/* ... */
	if (!rb->entries) {
		ret = 1;
	/* ... */
	for (i = 0; i < rb->nr_entries; i++) {
		entry = &rb->entries[i];

		if (!entry->data) {
			ret = 1;
	/* ... */
	spin_unlock_irq(&rb->s_lock);
	spin_unlock(&rb->w_lock);
/* pblk_rb_wrap_pos() */
	return (pos & (rb->nr_entries - 1));

/* pblk_rb_pos_oob() */
	return (pos >= rb->nr_entries);
/* pblk_rb_sysfs() */
	spin_lock_irq(&rb->s_lock);
	list_for_each_entry(c, &pblk->compl_list, list)
	/* ... */
	spin_unlock_irq(&rb->s_lock);

	if (rb->flush_point != EMPTY_ENTRY)
		/* ... */
			"%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
			rb->nr_entries,
			rb->mem,
			rb->subm,
			rb->sync,
			rb->l2p_update,
			/* ... */
			atomic_read(&rb->inflight_flush_point),
			/* ... */
			rb->flush_point,
	/* ... */
		/* ... */
			"%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n",
			rb->nr_entries,
			rb->mem,
			rb->subm,
			rb->sync,
			rb->l2p_update,
			/* ... */
			atomic_read(&rb->inflight_flush_point),