Lines Matching refs:rqd

23 				struct nvm_rq *rqd, unsigned long flags);
68 struct nvm_rq *rqd; in rrpc_inflight_laddr_acquire() local
71 rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC); in rrpc_inflight_laddr_acquire()
72 if (!rqd) in rrpc_inflight_laddr_acquire()
75 inf = rrpc_get_inflight_rq(rqd); in rrpc_inflight_laddr_acquire()
77 mempool_free(rqd, rrpc->rq_pool); in rrpc_inflight_laddr_acquire()
81 return rqd; in rrpc_inflight_laddr_acquire()
84 static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd) in rrpc_inflight_laddr_release() argument
86 struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd); in rrpc_inflight_laddr_release()
90 mempool_free(rqd, rrpc->rq_pool); in rrpc_inflight_laddr_release()
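
The acquire/release matches above form a standard mempool pairing: an nvm_rq is taken from rrpc->rq_pool, the inflight descriptor embedded in it is registered for the logical range, and the request goes back to the pool either when the range is already in flight or when the holder releases it. A sketch of that shape, reconstructed only from the matched lines; the rrpc_lock_laddr()/rrpc_unlock_laddr() helpers and the ERR_PTR(-ENOMEM) return (inferred from the IS_ERR() checks in rrpc_discard() and rrpc_move_valid_pages()) are assumptions:

	/* Sketch reconstructed from the matches; the laddr lock helpers and
	 * their signatures are assumptions, not taken from this listing. */
	static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
	{
		struct nvm_rq *rqd;
		struct rrpc_inflight_rq *inf;

		rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
		if (!rqd)
			return ERR_PTR(-ENOMEM);

		inf = rrpc_get_inflight_rq(rqd);
		if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
			/* range already in flight: give the request back */
			mempool_free(rqd, rrpc->rq_pool);
			return NULL;
		}

		return rqd;
	}

	static void rrpc_inflight_laddr_release(struct rrpc *rrpc,
						struct nvm_rq *rqd)
	{
		struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

		rrpc_unlock_laddr(rrpc, inf);	/* assumed counterpart to the lock */
		mempool_free(rqd, rrpc->rq_pool);
	}

Callers therefore see two distinct failure modes: an ERR_PTR() when the pool allocation fails and NULL when the range is busy, which is why rrpc_move_valid_pages() tests IS_ERR_OR_NULL() while rrpc_discard() handles the two cases separately.
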
97 struct nvm_rq *rqd; in rrpc_discard() local
100 rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len); in rrpc_discard()
101 if (rqd) in rrpc_discard()
107 if (IS_ERR(rqd)) { in rrpc_discard()
114 rrpc_inflight_laddr_release(rrpc, rqd); in rrpc_discard()
285 struct nvm_rq *rqd; in rrpc_move_valid_pages() local
324 rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1); in rrpc_move_valid_pages()
325 if (IS_ERR_OR_NULL(rqd)) { in rrpc_move_valid_pages()
342 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) { in rrpc_move_valid_pages()
344 rrpc_inflight_laddr_release(rrpc, rqd); in rrpc_move_valid_pages()
349 rrpc_inflight_laddr_release(rrpc, rqd); in rrpc_move_valid_pages()
366 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) { in rrpc_move_valid_pages()
368 rrpc_inflight_laddr_release(rrpc, rqd); in rrpc_move_valid_pages()
373 rrpc_inflight_laddr_release(rrpc, rqd); in rrpc_move_valid_pages()
697 static void rrpc_end_io(struct nvm_rq *rqd) in rrpc_end_io() argument
699 struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance); in rrpc_end_io()
700 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd); in rrpc_end_io()
701 uint8_t npages = rqd->nr_ppas; in rrpc_end_io()
702 sector_t laddr = rrpc_get_laddr(rqd->bio) - npages; in rrpc_end_io()
704 if (bio_data_dir(rqd->bio) == WRITE) in rrpc_end_io()
707 bio_put(rqd->bio); in rrpc_end_io()
712 rrpc_unlock_rq(rrpc, rqd); in rrpc_end_io()
715 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list); in rrpc_end_io()
717 mempool_free(rqd, rrpc->rq_pool); in rrpc_end_io()
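
The rrpc_end_io() matches outline the completion path: recover the rrpc instance from rqd->ins, do write-side accounting, drop the bio reference, and undo the submission-time state in reverse order before returning the request to the mempool. A hedged reconstruction; the rrpc_end_io_write() helper and the early return for GC requests are assumptions inferred from the surrounding matches:

	static void rrpc_end_io(struct nvm_rq *rqd)
	{
		struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
		struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
		uint8_t npages = rqd->nr_ppas;
		sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

		if (bio_data_dir(rqd->bio) == WRITE)
			rrpc_end_io_write(rrpc, rrqd, laddr, npages); /* assumed helper */

		bio_put(rqd->bio);

		if (rrqd->flags & NVM_IOTYPE_GC)	/* GC holds no inflight lock (assumption) */
			return;

		rrpc_unlock_rq(rrpc, rqd);

		if (npages > 1)
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);

		mempool_free(rqd, rrpc->rq_pool);
	}
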
721 struct nvm_rq *rqd, unsigned long flags, int npages) in rrpc_read_ppalist_rq() argument
723 struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd); in rrpc_read_ppalist_rq()
729 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) { in rrpc_read_ppalist_rq()
730 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list); in rrpc_read_ppalist_rq()
740 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev, in rrpc_read_ppalist_rq()
745 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, in rrpc_read_ppalist_rq()
746 rqd->dma_ppa_list); in rrpc_read_ppalist_rq()
751 rqd->opcode = NVM_OP_HBREAD; in rrpc_read_ppalist_rq()
756 static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd, in rrpc_read_rq() argument
759 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd); in rrpc_read_rq()
764 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) in rrpc_read_rq()
771 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr); in rrpc_read_rq()
774 rrpc_unlock_rq(rrpc, rqd); in rrpc_read_rq()
778 rqd->opcode = NVM_OP_HBREAD; in rrpc_read_rq()
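
For a single-page read the matches show the logical-to-physical lookup resolved inline: a mapped address is translated with rrpc_ppa_to_gaddr() into rqd->ppa_addr and the opcode becomes NVM_OP_HBREAD; an unmapped address just drops the inflight lock again. A sketch under the assumption that the lookup table is rrpc->trans_map and that the function returns the NVM_IO_* codes consumed by rrpc_make_rq():

	static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
	{
		struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
		int is_gc = flags & NVM_IOTYPE_GC;
		sector_t laddr = rrpc_get_laddr(bio);
		struct rrpc_addr *gp;

		if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
			return NVM_IO_REQUEUE;

		gp = &rrpc->trans_map[laddr];	/* assumed translation table */
		if (!gp->rblk) {
			/* never written: nothing to read back */
			rrpc_unlock_rq(rrpc, rqd);
			return NVM_IO_DONE;
		}

		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
		rqd->opcode = NVM_OP_HBREAD;
		rrqd->addr = gp;	/* per-request bookkeeping; assumed */

		return NVM_IO_OK;
	}

The rrpc_read_ppalist_rq() matches above repeat the same translation once per page, filling rqd->ppa_list[i] and freeing the DMA list again on any early exit.
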
785 struct nvm_rq *rqd, unsigned long flags, int npages) in rrpc_write_ppalist_rq() argument
787 struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd); in rrpc_write_ppalist_rq()
793 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) { in rrpc_write_ppalist_rq()
794 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list); in rrpc_write_ppalist_rq()
804 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, in rrpc_write_ppalist_rq()
805 rqd->dma_ppa_list); in rrpc_write_ppalist_rq()
810 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev, in rrpc_write_ppalist_rq()
814 rqd->opcode = NVM_OP_HBWRITE; in rrpc_write_ppalist_rq()
820 struct nvm_rq *rqd, unsigned long flags) in rrpc_write_rq() argument
822 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd); in rrpc_write_rq()
827 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) in rrpc_write_rq()
833 rrpc_unlock_rq(rrpc, rqd); in rrpc_write_rq()
838 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr); in rrpc_write_rq()
839 rqd->opcode = NVM_OP_HBWRITE; in rrpc_write_rq()
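
The write-side counterpart differs only in where the physical address comes from: instead of looking up an existing mapping, a new page has to be allocated for the logical address before its generic address is written into rqd->ppa_addr with opcode NVM_OP_HBWRITE. A sketch; the rrpc_map_page() allocator and the requeue-on-pressure behaviour are assumptions:

	static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				 struct nvm_rq *rqd, unsigned long flags)
	{
		struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
		int is_gc = flags & NVM_IOTYPE_GC;
		sector_t laddr = rrpc_get_laddr(bio);
		struct rrpc_addr *p;

		if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
			return NVM_IO_REQUEUE;

		p = rrpc_map_page(rrpc, laddr, is_gc);	/* assumed allocator */
		if (!p) {
			/* no free page right now: back off and retry later */
			rrpc_unlock_rq(rrpc, rqd);
			return NVM_IO_REQUEUE;
		}

		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
		rqd->opcode = NVM_OP_HBWRITE;
		rrqd->addr = p;	/* assumed bookkeeping */

		return NVM_IO_OK;
	}
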
846 struct nvm_rq *rqd, unsigned long flags, uint8_t npages) in rrpc_setup_rq() argument
849 rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL, in rrpc_setup_rq()
850 &rqd->dma_ppa_list); in rrpc_setup_rq()
851 if (!rqd->ppa_list) { in rrpc_setup_rq()
857 return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags, in rrpc_setup_rq()
860 return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages); in rrpc_setup_rq()
864 return rrpc_write_rq(rrpc, bio, rqd, flags); in rrpc_setup_rq()
866 return rrpc_read_rq(rrpc, bio, rqd, flags); in rrpc_setup_rq()
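
rrpc_setup_rq() is the dispatcher these matches outline: a request spanning more than one page first gets a DMA-able PPA list via nvm_dev_dma_alloc() and is then routed to the ppalist variant; a single-page request goes straight to rrpc_write_rq()/rrpc_read_rq(). A sketch of the branch structure; the direction test is written as bio_data_dir() here, and the exact predicate in the source is an assumption:

	static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
	{
		if (npages > 1) {
			rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
							  &rqd->dma_ppa_list);
			if (!rqd->ppa_list)
				return NVM_IO_ERR;

			if (bio_data_dir(bio) == WRITE)
				return rrpc_write_ppalist_rq(rrpc, bio, rqd,
							     flags, npages);
			return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
		}

		if (bio_data_dir(bio) == WRITE)
			return rrpc_write_rq(rrpc, bio, rqd, flags);
		return rrpc_read_rq(rrpc, bio, rqd, flags);
	}
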
870 struct nvm_rq *rqd, unsigned long flags) in rrpc_submit_io() argument
873 struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd); in rrpc_submit_io()
882 err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages); in rrpc_submit_io()
887 rqd->bio = bio; in rrpc_submit_io()
888 rqd->ins = &rrpc->instance; in rrpc_submit_io()
889 rqd->nr_ppas = nr_pages; in rrpc_submit_io()
892 err = nvm_submit_io(rrpc->dev, rqd); in rrpc_submit_io()
897 rrpc_unlock_rq(rrpc, rqd); in rrpc_submit_io()
898 if (rqd->nr_ppas > 1) in rrpc_submit_io()
900 rqd->ppa_list, rqd->dma_ppa_list); in rrpc_submit_io()
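
Submission ties the pieces together: set up the request, attach the bio, point rqd->ins back at the rrpc instance so rrpc_end_io() can recover it, record the page count, and hand the nvm_rq to nvm_submit_io(). On failure the error path mirrors the completion path, dropping the inflight lock and freeing the PPA list allocated in rrpc_setup_rq(). A hedged sketch; the page-count helper and the bio size checks of the original are assumed or omitted:

	static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				  struct nvm_rq *rqd, unsigned long flags)
	{
		struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
		uint8_t nr_pages = rrpc_get_pages(bio);	/* assumed helper */
		int err;

		err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
		if (err)
			return err;

		bio_get(bio);
		rqd->bio = bio;
		rqd->ins = &rrpc->instance;
		rqd->nr_ppas = nr_pages;
		rrq->flags = flags;	/* consulted again in rrpc_end_io(); assumed */

		err = nvm_submit_io(rrpc->dev, rqd);
		if (err) {
			bio_put(bio);
			if (!(flags & NVM_IOTYPE_GC)) {
				rrpc_unlock_rq(rrpc, rqd);
				if (rqd->nr_ppas > 1)
					nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							 rqd->dma_ppa_list);
			}
			return NVM_IO_ERR;
		}

		return NVM_IO_OK;
	}
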
911 struct nvm_rq *rqd; in rrpc_make_rq() local
919 rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL); in rrpc_make_rq()
920 if (!rqd) { in rrpc_make_rq()
925 memset(rqd, 0, sizeof(struct nvm_rq)); in rrpc_make_rq()
927 err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE); in rrpc_make_rq()
945 mempool_free(rqd, rrpc->rq_pool); in rrpc_make_rq()
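
Finally, rrpc_make_rq() is the block-layer entry point the last group of matches belongs to: every incoming bio gets an nvm_rq from the mempool, zeroed and submitted; any outcome other than a successfully queued request returns the nvm_rq to the pool immediately, while the success case leaves freeing to rrpc_end_io(). A sketch of that flow; the blk_qc_t make_request signature and the NVM_IO_* result handling are assumptions about the kernel version:

	static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
	{
		struct rrpc *rrpc = q->queuedata;
		struct nvm_rq *rqd;
		int err;

		rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
		if (!rqd) {
			bio_io_error(bio);
			return BLK_QC_T_NONE;
		}
		memset(rqd, 0, sizeof(struct nvm_rq));

		err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
		switch (err) {
		case NVM_IO_OK:
			return BLK_QC_T_NONE;	/* freed later in rrpc_end_io() */
		case NVM_IO_ERR:
			bio_io_error(bio);
			break;
		case NVM_IO_DONE:
			bio_endio(bio);
			break;
		case NVM_IO_REQUEUE:
			/* requeue handling not visible in this listing */
			break;
		}

		mempool_free(rqd, rrpc->rq_pool);
		return BLK_QC_T_NONE;
	}
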