Lines Matching +full:write +full:- +full:protect
16 * pblk-rb.c - pblk's write buffer
30 list_for_each_entry_safe(p, t, &rb->pages, list) { in pblk_rb_data_free()
31 free_pages((unsigned long)page_address(p->pages), p->order); in pblk_rb_data_free()
32 list_del(&p->list); in pblk_rb_data_free()
41 * (Documentation/core-api/circular-buffers.rst)
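The write buffer follows the lockless producer/consumer discipline described in that document: an index is only published with smp_store_release() after the data it covers has been written, and the other side reads it with smp_load_acquire() (or a plain READ_ONCE() where only an approximate count is needed), which is the pattern visible throughout the functions below. A minimal user-space sketch of the same discipline, using C11 atomics in place of the kernel macros (the names and the 64-byte record size are illustrative, not taken from pblk):

    #include <stdatomic.h>
    #include <string.h>

    #define RB_SIZE 128                     /* power of two, as in pblk */

    struct ring {
        _Atomic unsigned int head;          /* written only by the producer */
        _Atomic unsigned int tail;          /* written only by the consumer */
        char slots[RB_SIZE][64];
    };

    /* Producer: copy the record first, then publish the new head (release). */
    static int ring_put(struct ring *rb, const char rec[64])
    {
        unsigned int head = atomic_load_explicit(&rb->head, memory_order_relaxed);
        unsigned int tail = atomic_load_explicit(&rb->tail, memory_order_acquire);

        if (((head + 1) & (RB_SIZE - 1)) == tail)
            return -1;                      /* full */

        memcpy(rb->slots[head], rec, 64);
        atomic_store_explicit(&rb->head, (head + 1) & (RB_SIZE - 1),
                              memory_order_release);
        return 0;
    }

    /* Consumer: acquire the head before touching the slot it publishes. */
    static int ring_get(struct ring *rb, char out[64])
    {
        unsigned int tail = atomic_load_explicit(&rb->tail, memory_order_relaxed);
        unsigned int head = atomic_load_explicit(&rb->head, memory_order_acquire);

        if (tail == head)
            return -1;                      /* empty */

        memcpy(out, rb->slots[tail], 64);
        atomic_store_explicit(&rb->tail, (tail + 1) & (RB_SIZE - 1),
                              memory_order_release);
        return 0;
    }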
49 unsigned int max_order = MAX_ORDER - 1; in pblk_rb_init()
53 rb->entries = rb_entry_base; in pblk_rb_init()
54 rb->seg_size = (1 << power_seg_sz); in pblk_rb_init()
55 rb->nr_entries = (1 << power_size); in pblk_rb_init()
56 rb->mem = rb->subm = rb->sync = rb->l2p_update = 0; in pblk_rb_init()
57 rb->flush_point = EMPTY_ENTRY; in pblk_rb_init()
59 spin_lock_init(&rb->w_lock); in pblk_rb_init()
60 spin_lock_init(&rb->s_lock); in pblk_rb_init()
62 INIT_LIST_HEAD(&rb->pages); in pblk_rb_init()
66 iter = (1 << (alloc_order - max_order)); in pblk_rb_init()
82 return -ENOMEM; in pblk_rb_init()
85 page_set->order = order; in pblk_rb_init()
86 page_set->pages = alloc_pages(GFP_KERNEL, order); in pblk_rb_init()
87 if (!page_set->pages) { in pblk_rb_init()
91 return -ENOMEM; in pblk_rb_init()
93 kaddr = page_address(page_set->pages); in pblk_rb_init()
95 entry = &rb->entries[init_entry]; in pblk_rb_init()
96 entry->data = kaddr; in pblk_rb_init()
97 entry->cacheline = pblk_cacheline_to_addr(init_entry++); in pblk_rb_init()
98 entry->w_ctx.flags = PBLK_WRITABLE_ENTRY; in pblk_rb_init()
102 entry = &rb->entries[init_entry]; in pblk_rb_init()
103 entry->cacheline = pblk_cacheline_to_addr(init_entry++); in pblk_rb_init()
104 entry->data = kaddr + (i * rb->seg_size); in pblk_rb_init()
105 entry->w_ctx.flags = PBLK_WRITABLE_ENTRY; in pblk_rb_init()
106 bio_list_init(&entry->w_ctx.bios); in pblk_rb_init()
109 list_add_tail(&page_set->list, &rb->pages); in pblk_rb_init()
110 iter--; in pblk_rb_init()
115 atomic_set(&rb->inflight_flush_point, 0); in pblk_rb_init()
119 * Initialize rate-limiter, which controls access to the write buffer in pblk_rb_init()
122 pblk_rl_init(&pblk->rl, rb->nr_entries); in pblk_rb_init()
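The allocation loop above wants the whole buffer backed by pages (one 4 KiB segment per entry) but caps each alloc_pages() call at MAX_ORDER - 1, so the buffer is assembled from `iter` chunks of 2^order pages each. A rough self-contained sketch of that chunk arithmetic (the helper name is made up; it mirrors the order/iter computation in pblk_rb_init()):

    /* Number of alloc_pages() chunks pblk_rb_init() issues: the buffer
     * needs 2^alloc_order pages, but each allocation is capped at
     * 2^max_order pages, with max_order = MAX_ORDER - 1. */
    static unsigned int rb_alloc_chunks(unsigned int alloc_order,
                                        unsigned int max_order)
    {
        if (alloc_order < max_order)
            return 1;                            /* a single chunk suffices */
        return 1u << (alloc_order - max_order);  /* "iter" in the code above */
    }

For example, assuming the usual MAX_ORDER of 11 (so max_order = 10) and a buffer of 2^15 entries, this yields 1 << (15 - 10) = 32 allocations.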
128 * pblk_rb_calculate_size -- calculate the size of the write buffer
132 /* Alloc a write buffer that can at least fit 128 entries */ in pblk_rb_calculate_size()
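The size is therefore rounded up to a power of two no smaller than 128, which keeps the index masking used throughout the file (pos & (rb->nr_entries - 1)) valid. A hedged sketch of that rounding without the kernel bit helpers (the function name is illustrative):

    /* Round nr_entries up to a power of two, with a floor of 128 (2^7),
     * matching the "at least 128 entries" rule in the comment above. */
    static unsigned int rb_size_pow2(unsigned int nr_entries)
    {
        unsigned int order = 7;                  /* 1 << 7 == 128 */

        while ((1u << order) < nr_entries)
            order++;
        return 1u << order;
    }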
138 return rb->entries; in pblk_rb_entries_ref()
145 flags = READ_ONCE(w_ctx->flags); in clean_wctx()
149 /* Release flags on context. Protect from writes and reads */ in clean_wctx()
150 smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY); in clean_wctx()
151 pblk_ppa_set_empty(&w_ctx->ppa); in clean_wctx()
152 w_ctx->lba = ADDR_EMPTY; in clean_wctx()
165 unsigned int mem = READ_ONCE(rb->mem); in pblk_rb_space()
166 unsigned int sync = READ_ONCE(rb->sync); in pblk_rb_space()
168 return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries); in pblk_rb_space()
177 unsigned int mem = READ_ONCE(rb->mem); in pblk_rb_read_count()
178 unsigned int subm = READ_ONCE(rb->subm); in pblk_rb_read_count()
180 return pblk_rb_ring_count(mem, subm, rb->nr_entries); in pblk_rb_read_count()
185 unsigned int mem = READ_ONCE(rb->mem); in pblk_rb_sync_count()
186 unsigned int sync = READ_ONCE(rb->sync); in pblk_rb_sync_count()
188 return pblk_rb_ring_count(mem, sync, rb->nr_entries); in pblk_rb_sync_count()
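pblk_rb_ring_count() and pblk_rb_ring_space() used above are thin wrappers around the usual CIRC_CNT()/CIRC_SPACE() arithmetic for a power-of-two ring, which is why rb->nr_entries is passed in everywhere. A minimal sketch of the same calculation (helper names are illustrative):

    /* Occupied entries between tail and head in a ring of 'size' entries,
     * where size is a power of two. */
    static unsigned int ring_count(unsigned int head, unsigned int tail,
                                   unsigned int size)
    {
        return (head - tail) & (size - 1);
    }

    /* Free entries. One slot stays unused so that a full ring remains
     * distinguishable from an empty one, as CIRC_SPACE() does. */
    static unsigned int ring_space(unsigned int head, unsigned int tail,
                                   unsigned int size)
    {
        return ring_count(tail, head + 1, size);
    }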
195 subm = READ_ONCE(rb->subm); in pblk_rb_read_commit()
197 smp_store_release(&rb->subm, in pblk_rb_read_commit()
198 (subm + nr_entries) & (rb->nr_entries - 1)); in pblk_rb_read_commit()
214 entry = &rb->entries[rb->l2p_update]; in __pblk_rb_update_l2p()
215 w_ctx = &entry->w_ctx; in __pblk_rb_update_l2p()
217 flags = READ_ONCE(entry->w_ctx.flags); in __pblk_rb_update_l2p()
225 pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa, in __pblk_rb_update_l2p()
226 entry->cacheline); in __pblk_rb_update_l2p()
228 line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)]; in __pblk_rb_update_l2p()
229 kref_put(&line->ref, pblk_line_put); in __pblk_rb_update_l2p()
231 rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1); in __pblk_rb_update_l2p()
234 pblk_rl_out(&pblk->rl, user_io, gc_io); in __pblk_rb_update_l2p()
240 * When we move the l2p_update pointer, we update the l2p table - lookups will
241 * point to the physical address instead of to the cacheline in the write buffer
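In other words, while an entry lives in the write buffer its L2P slot holds a cacheline address that points back into the ring (set by pblk_update_map_cache() on the write path), and __pblk_rb_update_l2p() above swaps that slot for the device ppa once the data is safe on media. A hedged conceptual sketch of this two-state lookup (types and field names are illustrative, not pblk's):

    /* An L2P slot either points into the write buffer (cached) or at a
     * physical address on the device. */
    struct l2p_slot {
        unsigned int is_cached : 1;
        union {
            unsigned int rb_pos;       /* ring position while buffered */
            unsigned long long ppa;    /* device address once persisted */
        };
    };

    /* As the l2p_update pointer passes an entry, reads that hit this LBA
     * are redirected from the ring buffer to the device. */
    static void l2p_point_to_device(struct l2p_slot *slot,
                                    unsigned long long ppa)
    {
        slot->ppa = ppa;
        slot->is_cached = 0;
    }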
250 lockdep_assert_held(&rb->w_lock); in pblk_rb_update_l2p()
253 space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries); in pblk_rb_update_l2p()
257 count = nr_entries - space; in pblk_rb_update_l2p()
258 /* l2p_update used exclusively under rb->w_lock */ in pblk_rb_update_l2p()
266 * Update the l2p entry for all sectors stored on the write buffer. This means
268 * to the cacheline in the write buffer.
275 spin_lock(&rb->w_lock); in pblk_rb_sync_l2p()
277 /* Protect from reads and writes */ in pblk_rb_sync_l2p()
278 sync = smp_load_acquire(&rb->sync); in pblk_rb_sync_l2p()
280 to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries); in pblk_rb_sync_l2p()
283 spin_unlock(&rb->w_lock); in pblk_rb_sync_l2p()
287 * Write @nr_entries to ring buffer from @data buffer if there is enough space.
289 * buffer, thus the write will fail if not all incoming data can be copied.
296 memcpy(entry->data, data, rb->seg_size); in __pblk_rb_write_entry()
298 entry->w_ctx.lba = w_ctx.lba; in __pblk_rb_write_entry()
299 entry->w_ctx.ppa = w_ctx.ppa; in __pblk_rb_write_entry()
309 entry = &rb->entries[ring_pos]; in pblk_rb_write_entry_user()
310 flags = READ_ONCE(entry->w_ctx.flags); in pblk_rb_write_entry_user()
318 pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline); in pblk_rb_write_entry_user()
321 /* Release flags on write context. Protect from writes */ in pblk_rb_write_entry_user()
322 smp_store_release(&entry->w_ctx.flags, flags); in pblk_rb_write_entry_user()
333 entry = &rb->entries[ring_pos]; in pblk_rb_write_entry_gc()
334 flags = READ_ONCE(entry->w_ctx.flags); in pblk_rb_write_entry_gc()
342 if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr)) in pblk_rb_write_entry_gc()
343 entry->w_ctx.lba = ADDR_EMPTY; in pblk_rb_write_entry_gc()
347 /* Release flags on write context. Protect from writes */ in pblk_rb_write_entry_gc()
348 smp_store_release(&entry->w_ctx.flags, flags); in pblk_rb_write_entry_gc()
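Callers use these helpers as a reserve-then-fill pair: pblk_rb_may_write_user() (further below) reserves nr_entries slots and advances rb->mem, after which each slot is filled with pblk_rb_write_entry_user(). A simplified sketch of that calling pattern as the cache-write path uses it; error handling, REQ_PREFLUSH handling and accounting are omitted, and the wrapper itself is hypothetical:

    /* Hypothetical wrapper: copy one 4 KiB segment per reserved slot. */
    static int rb_write_user_sketch(struct pblk *pblk, struct bio *bio,
                                    sector_t lba, unsigned int nr_entries)
    {
        struct pblk_w_ctx w_ctx = { .flags = PBLK_IOTYPE_USER };
        unsigned int bpos, pos, i;

        pblk_ppa_set_empty(&w_ctx.ppa);

        if (pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos) !=
                                                                NVM_IO_OK)
            return -EAGAIN;

        for (i = 0; i < nr_entries; i++) {
            w_ctx.lba = lba + i;
            pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + i);
            pblk_rb_write_entry_user(&pblk->rwb, bio_data(bio), w_ctx, pos);
            bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
        }
        return 0;
    }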
358 sync = READ_ONCE(rb->sync); in pblk_rb_flush_point_set()
366 atomic_inc(&rb->inflight_flush_point); in pblk_rb_flush_point_set()
369 flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1); in pblk_rb_flush_point_set()
370 entry = &rb->entries[flush_point]; in pblk_rb_flush_point_set()
372 /* Protect flush points */ in pblk_rb_flush_point_set()
373 smp_store_release(&rb->flush_point, flush_point); in pblk_rb_flush_point_set()
376 bio_list_add(&entry->w_ctx.bios, bio); in pblk_rb_flush_point_set()
389 sync = READ_ONCE(rb->sync); in __pblk_rb_may_write()
390 mem = READ_ONCE(rb->mem); in __pblk_rb_may_write()
392 if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < nr_entries) in __pblk_rb_may_write()
409 /* Protect from read count */ in pblk_rb_may_write()
410 smp_store_release(&rb->mem, (*pos + nr_entries) & (rb->nr_entries - 1)); in pblk_rb_may_write()
417 unsigned int mem = READ_ONCE(rb->mem); in pblk_rb_flush()
434 mem = (*pos + nr_entries) & (rb->nr_entries - 1); in pblk_rb_may_write_flush()
437 if (bio->bi_opf & REQ_PREFLUSH) { in pblk_rb_may_write_flush()
440 atomic64_inc(&pblk->nr_flush); in pblk_rb_may_write_flush()
441 if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem)) in pblk_rb_may_write_flush()
445 /* Protect from read count */ in pblk_rb_may_write_flush()
446 smp_store_release(&rb->mem, mem); in pblk_rb_may_write_flush()
452 * Atomically check that (i) there is space on the write buffer for the
453 * incoming I/O, and (ii) the current I/O type has enough budget in the write
454 * buffer (rate-limiter).
462 spin_lock(&rb->w_lock); in pblk_rb_may_write_user()
463 io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries); in pblk_rb_may_write_user()
465 spin_unlock(&rb->w_lock); in pblk_rb_may_write_user()
470 spin_unlock(&rb->w_lock); in pblk_rb_may_write_user()
474 pblk_rl_user_in(&pblk->rl, nr_entries); in pblk_rb_may_write_user()
475 spin_unlock(&rb->w_lock); in pblk_rb_may_write_user()
488 spin_lock(&rb->w_lock); in pblk_rb_may_write_gc()
489 if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) { in pblk_rb_may_write_gc()
490 spin_unlock(&rb->w_lock); in pblk_rb_may_write_gc()
495 spin_unlock(&rb->w_lock); in pblk_rb_may_write_gc()
499 pblk_rl_gc_in(&pblk->rl, nr_entries); in pblk_rb_may_write_gc()
500 spin_unlock(&rb->w_lock); in pblk_rb_may_write_gc()
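Both variants follow the same gate: take rb->w_lock, ask the rate-limiter whether this I/O class may insert, confirm there is ring space, and only then charge the rate-limiter, all under one lock so the check stays atomic as the comment above describes. A compact self-contained model of that admission check (illustrative types, not pblk's):

    #include <pthread.h>

    struct gate {
        pthread_mutex_t lock;
        unsigned int space;      /* free ring entries */
        unsigned int budget;     /* remaining rate-limiter credit */
    };

    /* Admit nr entries only if both the budget and the ring have room;
     * charge both under the same lock so the check cannot race. */
    static int gate_may_insert(struct gate *g, unsigned int nr)
    {
        int ok = 0;

        pthread_mutex_lock(&g->lock);
        if (g->budget >= nr && g->space >= nr) {
            g->budget -= nr;
            g->space -= nr;
            ok = 1;
        }
        pthread_mutex_unlock(&g->lock);
        return ok;
    }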
507 * copy, a page reference into the write buffer is added to the bio instead.
509 * This function is used by the write thread to form the write bio that will
510 * persist data on the write buffer to the media.
517 struct request_queue *q = pblk->dev->q; in pblk_rb_read_to_bio()
519 struct bio *bio = rqd->bio; in pblk_rb_read_to_bio()
527 pad = nr_entries - count; in pblk_rb_read_to_bio()
531 c_ctx->sentry = pos; in pblk_rb_read_to_bio()
532 c_ctx->nr_valid = to_read; in pblk_rb_read_to_bio()
533 c_ctx->nr_padded = pad; in pblk_rb_read_to_bio()
536 entry = &rb->entries[pos]; in pblk_rb_read_to_bio()
538 /* A write has been allowed into the buffer, but data is still in pblk_rb_read_to_bio()
542 flags = READ_ONCE(entry->w_ctx.flags); in pblk_rb_read_to_bio()
548 page = virt_to_page(entry->data); in pblk_rb_read_to_bio()
550 pblk_err(pblk, "could not allocate write bio page\n"); in pblk_rb_read_to_bio()
553 /* Release flags on context. Protect from writes */ in pblk_rb_read_to_bio()
554 smp_store_release(&entry->w_ctx.flags, flags); in pblk_rb_read_to_bio()
558 if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) != in pblk_rb_read_to_bio()
559 rb->seg_size) { in pblk_rb_read_to_bio()
560 pblk_err(pblk, "could not add page to write bio\n"); in pblk_rb_read_to_bio()
563 /* Release flags on context. Protect from writes */ in pblk_rb_read_to_bio()
564 smp_store_release(&entry->w_ctx.flags, flags); in pblk_rb_read_to_bio()
571 /* Release flags on context. Protect from writes */ in pblk_rb_read_to_bio()
572 smp_store_release(&entry->w_ctx.flags, flags); in pblk_rb_read_to_bio()
574 pos = (pos + 1) & (rb->nr_entries - 1); in pblk_rb_read_to_bio()
579 pblk_err(pblk, "could not pad page in write bio\n"); in pblk_rb_read_to_bio()
583 if (pad < pblk->min_write_pgs) in pblk_rb_read_to_bio()
584 atomic64_inc(&pblk->pad_dist[pad - 1]); in pblk_rb_read_to_bio()
588 atomic64_add(pad, &pblk->pad_wa); in pblk_rb_read_to_bio()
592 atomic_long_add(pad, &pblk->padded_writes); in pblk_rb_read_to_bio()
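The padding above exists because the media can only be written in multiples of pblk->min_write_pgs: when fewer valid entries are available than the request needs, the remainder is filled with pad pages and accounted in pad_dist/pad_wa. A hedged sketch of how much padding a partially filled request needs (illustrative helper, not the actual sizing code):

    /* Entries of padding required to reach a multiple of the minimum
     * write size when only 'valid' entries are available. */
    static unsigned int pad_needed(unsigned int valid,
                                   unsigned int min_write_pgs)
    {
        unsigned int rounded = ((valid + min_write_pgs - 1) / min_write_pgs)
                                * min_write_pgs;

        return rounded - valid;
    }

For a device with min_write_pgs = 8 and 13 valid entries, for instance, this yields 3 pad entries.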
618 BUG_ON(pos >= rb->nr_entries); in pblk_rb_copy_to_bio()
620 entry = &rb->entries[pos]; in pblk_rb_copy_to_bio()
621 w_ctx = &entry->w_ctx; in pblk_rb_copy_to_bio()
622 flags = READ_ONCE(w_ctx->flags); in pblk_rb_copy_to_bio()
624 spin_lock(&rb->w_lock); in pblk_rb_copy_to_bio()
625 spin_lock(&pblk->trans_lock); in pblk_rb_copy_to_bio()
627 spin_unlock(&pblk->trans_lock); in pblk_rb_copy_to_bio()
630 if (!pblk_ppa_comp(l2p_ppa, ppa) || w_ctx->lba != lba || in pblk_rb_copy_to_bio()
645 memcpy(data, entry->data, rb->seg_size); in pblk_rb_copy_to_bio()
648 spin_unlock(&rb->w_lock); in pblk_rb_copy_to_bio()
654 unsigned int entry = pos & (rb->nr_entries - 1); in pblk_rb_w_ctx()
656 return &rb->entries[entry].w_ctx; in pblk_rb_w_ctx()
660 __acquires(&rb->s_lock) in pblk_rb_sync_init()
663 spin_lock_irqsave(&rb->s_lock, *flags); in pblk_rb_sync_init()
665 spin_lock_irq(&rb->s_lock); in pblk_rb_sync_init()
667 return rb->sync; in pblk_rb_sync_init()
671 __releases(&rb->s_lock) in pblk_rb_sync_end()
673 lockdep_assert_held(&rb->s_lock); in pblk_rb_sync_end()
676 spin_unlock_irqrestore(&rb->s_lock, *flags); in pblk_rb_sync_end()
678 spin_unlock_irq(&rb->s_lock); in pblk_rb_sync_end()
684 lockdep_assert_held(&rb->s_lock); in pblk_rb_sync_advance()
686 sync = READ_ONCE(rb->sync); in pblk_rb_sync_advance()
687 flush_point = READ_ONCE(rb->flush_point); in pblk_rb_sync_advance()
693 rb->nr_entries); in pblk_rb_sync_advance()
695 /* Protect flush points */ in pblk_rb_sync_advance()
696 smp_store_release(&rb->flush_point, EMPTY_ENTRY); in pblk_rb_sync_advance()
700 sync = (sync + nr_entries) & (rb->nr_entries - 1); in pblk_rb_sync_advance()
702 /* Protect from counts */ in pblk_rb_sync_advance()
703 smp_store_release(&rb->sync, sync); in pblk_rb_sync_advance()
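Taken together with pblk_rb_flush_point_set() above, the mechanism parks a REQ_PREFLUSH bio on the last entry written so far and only completes it once the sync pointer has advanced past that entry. A small self-contained model of that bookkeeping (illustrative names, not the pblk structures):

    #define EMPTY (~0u)

    struct flush_model {
        unsigned int nr_entries;    /* power of two */
        unsigned int sync;          /* next entry to be persisted */
        unsigned int flush_point;   /* entry a flush waits on, or EMPTY */
    };

    /* Arm a flush on the entry just before 'mem' (the next free slot). */
    static void flush_set(struct flush_model *m, unsigned int mem)
    {
        m->flush_point = (mem == 0) ? m->nr_entries - 1 : mem - 1;
    }

    /* Account 'nr' newly persisted entries; returns 1 if the pending
     * flush (if any) is now covered and can be completed. */
    static int flush_advance(struct flush_model *m, unsigned int nr)
    {
        int done = 0;

        if (m->flush_point != EMPTY) {
            unsigned int to_flush =
                ((m->flush_point - m->sync) & (m->nr_entries - 1)) + 1;

            if (nr >= to_flush) {
                m->flush_point = EMPTY;
                done = 1;
            }
        }
        m->sync = (m->sync + nr) & (m->nr_entries - 1);
        return done;
    }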
714 /* Protect flush points */ in pblk_rb_flush_point_count()
715 flush_point = smp_load_acquire(&rb->flush_point); in pblk_rb_flush_point_count()
719 /* Protect syncs */ in pblk_rb_flush_point_count()
720 sync = smp_load_acquire(&rb->sync); in pblk_rb_flush_point_count()
722 subm = READ_ONCE(rb->subm); in pblk_rb_flush_point_count()
723 submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries); in pblk_rb_flush_point_count()
726 to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1; in pblk_rb_flush_point_count()
728 return (submitted < to_flush) ? (to_flush - submitted) : 0; in pblk_rb_flush_point_count()
733 * corresponds to the given ppa. This is necessary since write requests can be
747 sync = READ_ONCE(rb->sync); in pblk_rb_sync_scan_entry()
748 subm = READ_ONCE(rb->subm); in pblk_rb_sync_scan_entry()
749 count = pblk_rb_ring_count(subm, sync, rb->nr_entries); in pblk_rb_sync_scan_entry()
752 sync = (sync + 1) & (rb->nr_entries - 1); in pblk_rb_sync_scan_entry()
763 spin_lock(&rb->w_lock); in pblk_rb_tear_down_check()
764 spin_lock_irq(&rb->s_lock); in pblk_rb_tear_down_check()
766 if ((rb->mem == rb->subm) && (rb->subm == rb->sync) && in pblk_rb_tear_down_check()
767 (rb->sync == rb->l2p_update) && in pblk_rb_tear_down_check()
768 (rb->flush_point == EMPTY_ENTRY)) { in pblk_rb_tear_down_check()
772 if (!rb->entries) { in pblk_rb_tear_down_check()
777 for (i = 0; i < rb->nr_entries; i++) { in pblk_rb_tear_down_check()
778 entry = &rb->entries[i]; in pblk_rb_tear_down_check()
780 if (!entry->data) { in pblk_rb_tear_down_check()
787 spin_unlock_irq(&rb->s_lock); in pblk_rb_tear_down_check()
788 spin_unlock(&rb->w_lock); in pblk_rb_tear_down_check()
795 return (pos & (rb->nr_entries - 1)); in pblk_rb_wrap_pos()
800 return (pos >= rb->nr_entries); in pblk_rb_pos_oob()
810 spin_lock_irq(&rb->s_lock); in pblk_rb_sysfs()
811 list_for_each_entry(c, &pblk->compl_list, list) in pblk_rb_sysfs()
813 spin_unlock_irq(&rb->s_lock); in pblk_rb_sysfs()
815 if (rb->flush_point != EMPTY_ENTRY) in pblk_rb_sysfs()
817 "%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n", in pblk_rb_sysfs()
818 rb->nr_entries, in pblk_rb_sysfs()
819 rb->mem, in pblk_rb_sysfs()
820 rb->subm, in pblk_rb_sysfs()
821 rb->sync, in pblk_rb_sysfs()
822 rb->l2p_update, in pblk_rb_sysfs()
824 atomic_read(&rb->inflight_flush_point), in pblk_rb_sysfs()
828 rb->flush_point, in pblk_rb_sysfs()
835 "%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n", in pblk_rb_sysfs()
836 rb->nr_entries, in pblk_rb_sysfs()
837 rb->mem, in pblk_rb_sysfs()
838 rb->subm, in pblk_rb_sysfs()
839 rb->sync, in pblk_rb_sysfs()
840 rb->l2p_update, in pblk_rb_sysfs()
842 atomic_read(&rb->inflight_flush_point), in pblk_rb_sysfs()