
Lines Matching full:rb (drivers/lightnvm/pblk-rb.c)

16  * pblk-rb.c - pblk's write buffer
25 void pblk_rb_data_free(struct pblk_rb *rb) in pblk_rb_data_free() argument
30 list_for_each_entry_safe(p, t, &rb->pages, list) { in pblk_rb_data_free()
43 int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base, in pblk_rb_init() argument
46 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_init()
53 rb->entries = rb_entry_base; in pblk_rb_init()
54 rb->seg_size = (1 << power_seg_sz); in pblk_rb_init()
55 rb->nr_entries = (1 << power_size); in pblk_rb_init()
56 rb->mem = rb->subm = rb->sync = rb->l2p_update = 0; in pblk_rb_init()
57 rb->flush_point = EMPTY_ENTRY; in pblk_rb_init()
59 spin_lock_init(&rb->w_lock); in pblk_rb_init()
60 spin_lock_init(&rb->s_lock); in pblk_rb_init()
62 INIT_LIST_HEAD(&rb->pages); in pblk_rb_init()
89 pblk_rb_data_free(rb); in pblk_rb_init()
95 entry = &rb->entries[init_entry]; in pblk_rb_init()
102 entry = &rb->entries[init_entry]; in pblk_rb_init()
104 entry->data = kaddr + (i * rb->seg_size); in pblk_rb_init()
109 list_add_tail(&page_set->list, &rb->pages); in pblk_rb_init()
115 atomic_set(&rb->inflight_flush_point, 0); in pblk_rb_init()
122 pblk_rl_init(&pblk->rl, rb->nr_entries); in pblk_rb_init()
136 void *pblk_rb_entries_ref(struct pblk_rb *rb) in pblk_rb_entries_ref() argument
138 return rb->entries; in pblk_rb_entries_ref()
156  #define pblk_rb_ring_space(rb, head, tail, size) \ argument
					(CIRC_SPACE(head, tail, size))
163 static unsigned int pblk_rb_space(struct pblk_rb *rb) in pblk_rb_space() argument
165 unsigned int mem = READ_ONCE(rb->mem); in pblk_rb_space()
166 unsigned int sync = READ_ONCE(rb->sync); in pblk_rb_space()
168 return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries); in pblk_rb_space()
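pblk_rb_space() and the pblk_rb_ring_count() callers below are standard masked ring arithmetic; in the kernel source, pblk_rb_ring_count() and pblk_rb_ring_space() wrap CIRC_CNT() and CIRC_SPACE() from <linux/circ_buf.h>. A minimal self-contained sketch of that arithmetic, assuming a power-of-two ring size as pblk_rb_init() enforces with (1 << power_size):

/* Masked ring arithmetic; size must be a power of two. */
static inline unsigned int ring_count(unsigned int head, unsigned int tail,
				      unsigned int size)
{
	/* occupied entries between tail and head, wrap-safe */
	return (head - tail) & (size - 1);
}

static inline unsigned int ring_space(unsigned int head, unsigned int tail,
				      unsigned int size)
{
	/* one slot stays free so that head == tail means "empty" */
	return ring_count(tail, head + 1, size);
}

Because indices wrap by masking, the producer and consumer can advance head and tail independently and still compute a consistent count without sharing a lock.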
175 unsigned int pblk_rb_read_count(struct pblk_rb *rb) in pblk_rb_read_count() argument
177 unsigned int mem = READ_ONCE(rb->mem); in pblk_rb_read_count()
178 unsigned int subm = READ_ONCE(rb->subm); in pblk_rb_read_count()
180 return pblk_rb_ring_count(mem, subm, rb->nr_entries); in pblk_rb_read_count()
183 unsigned int pblk_rb_sync_count(struct pblk_rb *rb) in pblk_rb_sync_count() argument
185 unsigned int mem = READ_ONCE(rb->mem); in pblk_rb_sync_count()
186 unsigned int sync = READ_ONCE(rb->sync); in pblk_rb_sync_count()
188 return pblk_rb_ring_count(mem, sync, rb->nr_entries); in pblk_rb_sync_count()
191 unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries) in pblk_rb_read_commit() argument
195 subm = READ_ONCE(rb->subm); in pblk_rb_read_commit()
197 smp_store_release(&rb->subm, in pblk_rb_read_commit()
198 (subm + nr_entries) & (rb->nr_entries - 1)); in pblk_rb_read_commit()
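pblk_rb_read_commit() advances the submission pointer with smp_store_release() so that everything written to the entries beforehand is visible to whoever observes the new subm value with a paired acquire (or READ_ONCE() on this lockless path). A userspace analog of that publish pattern using C11 atomics; the struct and names here are illustrative, not pblk's:

#include <stdatomic.h>

struct ring {
	_Atomic unsigned int head;	/* producer-owned, kept masked */
	unsigned int size;		/* power of two */
	int *slots;
};

static void ring_publish(struct ring *r, int val)
{
	unsigned int head = atomic_load_explicit(&r->head,
						 memory_order_relaxed);

	r->slots[head] = val;		/* fill the entry first */

	/* release store: the slot write above cannot be reordered past
	 * the pointer update; pairs with an acquire load on the reader,
	 * mirroring smp_store_release()/smp_load_acquire() */
	atomic_store_explicit(&r->head, (head + 1) & (r->size - 1),
			      memory_order_release);
}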
203 static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update) in __pblk_rb_update_l2p() argument
205 struct pblk *pblk = container_of(rb, struct pblk, rwb); in __pblk_rb_update_l2p()
214 entry = &rb->entries[rb->l2p_update]; in __pblk_rb_update_l2p()
231 rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1); in __pblk_rb_update_l2p()
244 static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries, in pblk_rb_update_l2p() argument
250 lockdep_assert_held(&rb->w_lock); in pblk_rb_update_l2p()
253 space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries); in pblk_rb_update_l2p()
258 /* l2p_update used exclusively under rb->w_lock */ in pblk_rb_update_l2p()
259 ret = __pblk_rb_update_l2p(rb, count); in pblk_rb_update_l2p()
270 void pblk_rb_sync_l2p(struct pblk_rb *rb) in pblk_rb_sync_l2p() argument
275 spin_lock(&rb->w_lock); in pblk_rb_sync_l2p()
278 sync = smp_load_acquire(&rb->sync); in pblk_rb_sync_l2p()
280 to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries); in pblk_rb_sync_l2p()
281 __pblk_rb_update_l2p(rb, to_update); in pblk_rb_sync_l2p()
283 spin_unlock(&rb->w_lock); in pblk_rb_sync_l2p()
292 static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data, in __pblk_rb_write_entry() argument
296 memcpy(entry->data, data, rb->seg_size); in __pblk_rb_write_entry()
302 void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data, in pblk_rb_write_entry_user() argument
305 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_write_entry_user()
309 entry = &rb->entries[ring_pos]; in pblk_rb_write_entry_user()
316 __pblk_rb_write_entry(rb, data, w_ctx, entry); in pblk_rb_write_entry_user()
325 void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data, in pblk_rb_write_entry_gc() argument
329 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_write_entry_gc()
333 entry = &rb->entries[ring_pos]; in pblk_rb_write_entry_gc()
340 __pblk_rb_write_entry(rb, data, w_ctx, entry); in pblk_rb_write_entry_gc()
351 static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio, in pblk_rb_flush_point_set() argument
357 pblk_rb_sync_init(rb, NULL); in pblk_rb_flush_point_set()
358 sync = READ_ONCE(rb->sync); in pblk_rb_flush_point_set()
361 pblk_rb_sync_end(rb, NULL); in pblk_rb_flush_point_set()
366 atomic_inc(&rb->inflight_flush_point); in pblk_rb_flush_point_set()
369 flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1); in pblk_rb_flush_point_set()
370 entry = &rb->entries[flush_point]; in pblk_rb_flush_point_set()
373 smp_store_release(&rb->flush_point, flush_point); in pblk_rb_flush_point_set()
378 pblk_rb_sync_end(rb, NULL); in pblk_rb_flush_point_set()
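pblk_rb_flush_point_set() arms a marker on the last written entry so a flush bio can complete only after everything up to that entry has reached the media; if sync has already caught up with pos there is nothing outstanding. A sketch of the wrap-aware marker logic in the same kernel idiom (the ring struct and field names are assumptions carried over from the sketches above):

/* Returns 0 when the buffer is already synced, 1 when a point was armed. */
static int flush_point_set(struct ring *r, unsigned int pos)
{
	unsigned int sync = READ_ONCE(r->sync);
	unsigned int flush_point;

	if (pos == sync)
		return 0;	/* nothing to wait for */

	/* mark the last occupied entry, wrapping at the ring start */
	flush_point = (pos == 0) ? (r->size - 1) : (pos - 1);
	smp_store_release(&r->flush_point, flush_point);

	return 1;
}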
383 static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries, in __pblk_rb_may_write() argument
389 sync = READ_ONCE(rb->sync); in __pblk_rb_may_write()
390 mem = READ_ONCE(rb->mem); in __pblk_rb_may_write()
392 if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < nr_entries) in __pblk_rb_may_write()
395 if (pblk_rb_update_l2p(rb, nr_entries, mem, sync)) in __pblk_rb_may_write()
403 static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries, in pblk_rb_may_write() argument
406 if (!__pblk_rb_may_write(rb, nr_entries, pos)) in pblk_rb_may_write()
410 smp_store_release(&rb->mem, (*pos + nr_entries) & (rb->nr_entries - 1)); in pblk_rb_may_write()
414 void pblk_rb_flush(struct pblk_rb *rb) in pblk_rb_flush() argument
416 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_flush()
417 unsigned int mem = READ_ONCE(rb->mem); in pblk_rb_flush()
419 if (pblk_rb_flush_point_set(rb, NULL, mem)) in pblk_rb_flush()
425 static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries, in pblk_rb_may_write_flush() argument
431 if (!__pblk_rb_may_write(rb, nr_entries, pos)) in pblk_rb_may_write_flush()
434 mem = (*pos + nr_entries) & (rb->nr_entries - 1); in pblk_rb_may_write_flush()
438 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_may_write_flush()
446 smp_store_release(&rb->mem, mem); in pblk_rb_may_write_flush()
456 int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio, in pblk_rb_may_write_user() argument
459 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_may_write_user()
462 spin_lock(&rb->w_lock); in pblk_rb_may_write_user()
465 spin_unlock(&rb->w_lock); in pblk_rb_may_write_user()
469 if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) { in pblk_rb_may_write_user()
470 spin_unlock(&rb->w_lock); in pblk_rb_may_write_user()
475 spin_unlock(&rb->w_lock); in pblk_rb_may_write_user()
483 int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries, in pblk_rb_may_write_gc() argument
486 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_may_write_gc()
488 spin_lock(&rb->w_lock); in pblk_rb_may_write_gc()
490 spin_unlock(&rb->w_lock); in pblk_rb_may_write_gc()
494 if (!pblk_rb_may_write(rb, nr_entries, pos)) { in pblk_rb_may_write_gc()
495 spin_unlock(&rb->w_lock); in pblk_rb_may_write_gc()
500 spin_unlock(&rb->w_lock); in pblk_rb_may_write_gc()
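Both pblk_rb_may_write_user() and pblk_rb_may_write_gc() follow the same reservation protocol: take rb->w_lock, ask the rate limiter for credit (the elided lines call pblk_rl_user_may_insert() and pblk_rl_gc_may_insert() respectively), and only then claim ring positions. A condensed sketch of the pattern; the rate-limiter and reservation helpers below are stand-ins, not pblk functions:

static int ring_reserve(struct ring *r, unsigned int nr_entries,
			unsigned int *pos)
{
	int ret = 0;

	spin_lock(&r->w_lock);

	/* stand-in for the pblk_rl_*_may_insert() credit check */
	if (!rate_limit_may_insert(nr_entries)) {
		spin_unlock(&r->w_lock);
		return -EBUSY;
	}

	/* space check plus l2p catch-up, then publish the new mem
	 * pointer with a release store (see pblk_rb_may_write()) */
	if (!ring_may_write(r, nr_entries, pos))
		ret = -ENOSPC;

	spin_unlock(&r->w_lock);
	return ret;
}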
506 * Read available entries on rb and add them to the given bio. To avoid a memory
512 unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd, in pblk_rb_read_to_bio() argument
516 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_read_to_bio()
536 entry = &rb->entries[pos]; in pblk_rb_read_to_bio()
558 if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) != in pblk_rb_read_to_bio()
559 rb->seg_size) { in pblk_rb_read_to_bio()
574 pos = (pos + 1) & (rb->nr_entries - 1); in pblk_rb_read_to_bio()
603 int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, in pblk_rb_copy_to_bio() argument
606 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_copy_to_bio()
618 BUG_ON(pos >= rb->nr_entries); in pblk_rb_copy_to_bio()
620 entry = &rb->entries[pos]; in pblk_rb_copy_to_bio()
624 spin_lock(&rb->w_lock); in pblk_rb_copy_to_bio()
645 memcpy(data, entry->data, rb->seg_size); in pblk_rb_copy_to_bio()
648 spin_unlock(&rb->w_lock); in pblk_rb_copy_to_bio()
652 struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos) in pblk_rb_w_ctx() argument
654 unsigned int entry = pos & (rb->nr_entries - 1); in pblk_rb_w_ctx()
656 return &rb->entries[entry].w_ctx; in pblk_rb_w_ctx()
659 unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags) in pblk_rb_sync_init() argument
660 __acquires(&rb->s_lock) in pblk_rb_sync_init()
663 spin_lock_irqsave(&rb->s_lock, *flags); in pblk_rb_sync_init()
665 spin_lock_irq(&rb->s_lock); in pblk_rb_sync_init()
667 return rb->sync; in pblk_rb_sync_init()
670 void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags) in pblk_rb_sync_end() argument
671 __releases(&rb->s_lock) in pblk_rb_sync_end()
673 lockdep_assert_held(&rb->s_lock); in pblk_rb_sync_end()
676 spin_unlock_irqrestore(&rb->s_lock, *flags); in pblk_rb_sync_end()
678 spin_unlock_irq(&rb->s_lock); in pblk_rb_sync_end()
681 unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries) in pblk_rb_sync_advance() argument
684 lockdep_assert_held(&rb->s_lock); in pblk_rb_sync_advance()
686 sync = READ_ONCE(rb->sync); in pblk_rb_sync_advance()
687 flush_point = READ_ONCE(rb->flush_point); in pblk_rb_sync_advance()
693 rb->nr_entries); in pblk_rb_sync_advance()
696 smp_store_release(&rb->flush_point, EMPTY_ENTRY); in pblk_rb_sync_advance()
700 sync = (sync + nr_entries) & (rb->nr_entries - 1); in pblk_rb_sync_advance()
703 smp_store_release(&rb->sync, sync); in pblk_rb_sync_advance()
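pblk_rb_sync_advance() is the completion-side counterpart: with rb->s_lock held, it retires the flush point once the range being synced crosses it, then publishes the new sync pointer for the counting helpers. A sketch reusing the ring helpers from the earlier examples (the flush_point field and EMPTY_ENTRY sentinel are assumed to match pblk's):

/* Called with the sync lock held after nr_entries writes hit the media. */
static unsigned int sync_advance(struct ring *r, unsigned int nr_entries)
{
	unsigned int sync = READ_ONCE(r->sync);
	unsigned int flush_point = READ_ONCE(r->flush_point);

	/* retire the flush point if this completion crosses it */
	if (flush_point != EMPTY_ENTRY &&
	    ring_count(flush_point, sync, r->size) < nr_entries)
		smp_store_release(&r->flush_point, EMPTY_ENTRY);

	sync = (sync + nr_entries) & (r->size - 1);
	smp_store_release(&r->sync, sync);	/* publish to readers */

	return sync;
}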
709 unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb) in pblk_rb_flush_point_count() argument
715 flush_point = smp_load_acquire(&rb->flush_point); in pblk_rb_flush_point_count()
720 sync = smp_load_acquire(&rb->sync); in pblk_rb_flush_point_count()
722 subm = READ_ONCE(rb->subm); in pblk_rb_flush_point_count()
723 submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries); in pblk_rb_flush_point_count()
726 to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1; in pblk_rb_flush_point_count()
741 struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb, in pblk_rb_sync_scan_entry() argument
747 sync = READ_ONCE(rb->sync); in pblk_rb_sync_scan_entry()
748 subm = READ_ONCE(rb->subm); in pblk_rb_sync_scan_entry()
749 count = pblk_rb_ring_count(subm, sync, rb->nr_entries); in pblk_rb_sync_scan_entry()
752 sync = (sync + 1) & (rb->nr_entries - 1); in pblk_rb_sync_scan_entry()
757 int pblk_rb_tear_down_check(struct pblk_rb *rb) in pblk_rb_tear_down_check() argument
763 spin_lock(&rb->w_lock); in pblk_rb_tear_down_check()
764 spin_lock_irq(&rb->s_lock); in pblk_rb_tear_down_check()
766 if ((rb->mem == rb->subm) && (rb->subm == rb->sync) && in pblk_rb_tear_down_check()
767 (rb->sync == rb->l2p_update) && in pblk_rb_tear_down_check()
768 (rb->flush_point == EMPTY_ENTRY)) { in pblk_rb_tear_down_check()
772 if (!rb->entries) { in pblk_rb_tear_down_check()
777 for (i = 0; i < rb->nr_entries; i++) { in pblk_rb_tear_down_check()
778 entry = &rb->entries[i]; in pblk_rb_tear_down_check()
787 spin_unlock_irq(&rb->s_lock); in pblk_rb_tear_down_check()
788 spin_unlock(&rb->w_lock); in pblk_rb_tear_down_check()
793 unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos) in pblk_rb_wrap_pos() argument
795 return (pos & (rb->nr_entries - 1)); in pblk_rb_wrap_pos()
798 int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos) in pblk_rb_pos_oob() argument
800 return (pos >= rb->nr_entries); in pblk_rb_pos_oob()
803 ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf) in pblk_rb_sysfs() argument
805 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_sysfs()
810 spin_lock_irq(&rb->s_lock); in pblk_rb_sysfs()
813 spin_unlock_irq(&rb->s_lock); in pblk_rb_sysfs()
815 if (rb->flush_point != EMPTY_ENTRY) in pblk_rb_sysfs()
818 rb->nr_entries, in pblk_rb_sysfs()
819 rb->mem, in pblk_rb_sysfs()
820 rb->subm, in pblk_rb_sysfs()
821 rb->sync, in pblk_rb_sysfs()
822 rb->l2p_update, in pblk_rb_sysfs()
824 atomic_read(&rb->inflight_flush_point), in pblk_rb_sysfs()
828 rb->flush_point, in pblk_rb_sysfs()
829 pblk_rb_read_count(rb), in pblk_rb_sysfs()
830 pblk_rb_space(rb), in pblk_rb_sysfs()
831 pblk_rb_flush_point_count(rb), in pblk_rb_sysfs()
836 rb->nr_entries, in pblk_rb_sysfs()
837 rb->mem, in pblk_rb_sysfs()
838 rb->subm, in pblk_rb_sysfs()
839 rb->sync, in pblk_rb_sysfs()
840 rb->l2p_update, in pblk_rb_sysfs()
842 atomic_read(&rb->inflight_flush_point), in pblk_rb_sysfs()
846 pblk_rb_read_count(rb), in pblk_rb_sysfs()
847 pblk_rb_space(rb), in pblk_rb_sysfs()
848 pblk_rb_flush_point_count(rb), in pblk_rb_sysfs()