Searched refs: nr_ppas (Results 1 – 8 of 8), sorted by relevance
/drivers/lightnvm/
pblk-read.c
   48  nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,  in pblk_read_ppalist_rq()
  113  int nr_lbas = rqd->nr_ppas;  in pblk_read_check_seq()
  176  WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");  in pblk_read_check_rand()
  206  atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);  in __pblk_end_io_read()
  207  atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);  in __pblk_end_io_read()
  278  rqd->nr_ppas = nr_secs;  in pblk_submit_read()
  308  if (from_cache && nr_secs == rqd->nr_ppas) {  in pblk_submit_read()
  313  } else if (nr_secs != rqd->nr_ppas) {  in pblk_submit_read()
  332  rqd->nr_ppas = nr_secs;  in pblk_submit_read()
  333  if (rqd->nr_ppas == 1)  in pblk_submit_read()
  [all …]
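Note: nr_ppas is the field of struct nvm_rq (the lightnvm request) that records how many physical page addresses, i.e. device sectors, the request covers. The pblk read path above sets it from the L2P lookup result, shrinks it when part of the request was served from the write cache (lines 278 and 332), and drops to the single-address form when only one sector remains (line 333). An abbreviated sketch of the request structure from include/linux/lightnvm.h, reconstructed from memory, so exact field types and ordering should be treated as an approximation:

    struct nvm_rq {
    	struct nvm_tgt_dev *dev;
    	struct bio *bio;

    	union {
    		struct ppa_addr ppa_addr;	/* used when nr_ppas == 1 */
    		dma_addr_t dma_ppa_list;
    	};
    	struct ppa_addr *ppa_list;		/* used when nr_ppas > 1 */

    	uint16_t nr_ppas;			/* number of physical page addresses in this request */
    	/* ... metadata, flags, and completion fields omitted ... */
    };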
core.c
  585  struct ppa_addr *ppa_list, int nr_ppas)  in nvm_ppa_tgt_to_dev() (argument)
  589  for (i = 0; i < nr_ppas; i++) {  in nvm_ppa_tgt_to_dev()
  596  struct ppa_addr *ppa_list, int nr_ppas)  in nvm_ppa_dev_to_tgt() (argument)
  600  for (i = 0; i < nr_ppas; i++) {  in nvm_ppa_dev_to_tgt()
  610  nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);  in nvm_rq_tgt_to_dev()
  617  nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);  in nvm_rq_dev_to_tgt()
  672  const struct ppa_addr *ppas, int nr_ppas)  in nvm_set_rqd_ppalist() (argument)
  679  if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {  in nvm_set_rqd_ppalist()
  680  rqd->nr_ppas = nr_ppas;  in nvm_set_rqd_ppalist()
  686  rqd->nr_ppas = nr_ppas;  in nvm_set_rqd_ppalist()
  [all …]
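Note: the core.c hits show the generic pattern for translating a request between target and device address spaces: nvm_ppa_tgt_to_dev()/nvm_ppa_dev_to_tgt() walk an array of nr_ppas addresses, and nvm_rq_tgt_to_dev()/nvm_rq_dev_to_tgt() pass rqd->nr_ppas to cover a whole request. A minimal sketch of that loop shape; the per-address conversion step is a placeholder, not taken from the listing:

    static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
    			       struct ppa_addr *ppa_list, int nr_ppas)
    {
    	int i;

    	/* Translate every address the caller passed, in place. */
    	for (i = 0; i < nr_ppas; i++)
    		ppa_list[i] = tgt_to_dev_one(tgt_dev, ppa_list[i]);	/* placeholder helper */
    }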
pblk-write.c
   57  atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);  in pblk_end_w_bio()
  219  pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);  in pblk_submit_rec()
  292  sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);  in pblk_end_io_write_meta()
  307  rqd->nr_ppas = nr_secs;  in pblk_alloc_w_rq()
  398  for (i = 0; i < rqd->nr_ppas; ) {  in pblk_submit_meta_io()
pblk-map.c
  108  for (i = off; i < rqd->nr_ppas; i += min) {  in pblk_map_rq()
  139  for (i = 0; i < rqd->nr_ppas; i += min) {  in pblk_map_erase_rq()
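Note: both mapping loops advance in strides of min rather than one sector at a time, so each iteration maps a group of sectors (the device's minimal write granularity) until rqd->nr_ppas sectors are covered. A sketch of that stride pattern; the per-group mapping helper and its error handling are assumptions, not taken from the listing:

    /* Map 'min' sectors per iteration until the whole request is mapped. */
    for (i = off; i < rqd->nr_ppas; i += min) {
    	ret = map_one_group(pblk, rqd, i, min);	/* placeholder per-group helper */
    	if (ret)
    		return ret;
    }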
pblk.h
 1217  if (rqd->nr_ppas == 1) {  in pblk_print_failed_rqd()
 1222  while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,  in pblk_print_failed_rqd()
 1223  bit + 1)) < rqd->nr_ppas) {  in pblk_print_failed_rqd()
 1231  struct ppa_addr *ppas, int nr_ppas)  in pblk_boundary_ppa_checks() (argument)
 1237  for (i = 0; i < nr_ppas; i++) {  in pblk_boundary_ppa_checks()
 1270  if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {  in pblk_check_io()
 1279  for (i = 0; i < rqd->nr_ppas; i++) {  in pblk_check_io()
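Note: pblk_print_failed_rqd() uses rqd->nr_ppas as the width of the ppa_status failure bitmap: a single-PPA request is reported directly, while a multi-PPA request is scanned with find_next_bit() to locate every failed sector. A sketch of that scan; the reporting helper is a placeholder and the single-PPA branch assumes the inline ppa_addr form described above:

    unsigned long *status = (void *)&rqd->ppa_status;
    int bit = -1;

    if (rqd->nr_ppas == 1) {
    	report_failed_ppa(&rqd->ppa_addr);	/* placeholder reporting helper */
    	return;
    }

    /* Each set bit in ppa_status marks one failed entry of ppa_list. */
    while ((bit = find_next_bit(status, rqd->nr_ppas, bit + 1)) < rqd->nr_ppas)
    	report_failed_ppa(&rqd->ppa_list[bit]);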
pblk-recovery.c
  225  rqd->nr_ppas = rq_ppas;  in pblk_recov_pad_line()
  232  for (i = 0; i < rqd->nr_ppas; ) {  in pblk_recov_pad_line()
  396  rqd->nr_ppas = rq_ppas;  in pblk_recov_scan_oob()
  405  for (i = 0; i < rqd->nr_ppas; ) {  in pblk_recov_scan_oob()
  452  for (i = 0; i < rqd->nr_ppas; i++) {  in pblk_recov_scan_oob()
pblk-core.c
  250  if (rqd->nr_ppas == 1)  in pblk_alloc_rqd_meta()
  530  for (i = 0; i < rqd->nr_ppas; i++) {  in pblk_check_chunk_state_update()
  692  rqd.nr_ppas = lm->smeta_sec;  in pblk_line_smeta_read()
  734  rqd.nr_ppas = lm->smeta_sec;  in pblk_line_smeta_write()
  801  rqd.nr_ppas = rq_ppas;  in pblk_line_emeta_read()
  804  for (i = 0; i < rqd.nr_ppas; ) {  in pblk_line_emeta_read()
  860  rqd->nr_ppas = 1;  in pblk_setup_e_rq()
 1449  for (i = 0; i < rqd->nr_ppas; i++)  in pblk_rq_to_line_put()
 2144  for (; i < rqd->nr_ppas; i++)  in pblk_get_packed_meta()
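Note: the pblk-core.c hits illustrate the single-versus-list convention end to end: erase requests are built with exactly one address (rqd->nr_ppas = 1 in pblk_setup_e_rq()), metadata I/O sizes the request from the line layout (rqd.nr_ppas = lm->smeta_sec), and cleanup paths iterate over all nr_ppas entries. The accessor this convention implies is sketched below; the helper name follows the lightnvm style but is written from memory, so treat it as an assumption rather than a verbatim copy of include/linux/lightnvm.h:

    /* Single-PPA requests keep the address inline; larger requests use the list. */
    static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
    {
    	return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
    }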
/drivers/nvme/host/
lightnvm.c
  532  int nr_ppas, int type)  in nvme_nvm_set_bb_tbl() (argument)
  541  c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);  in nvme_nvm_set_bb_tbl()
  632  c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);  in nvme_nvm_rqtocmd()
  690  ret = blk_rq_map_kern(q, rq, buf, geo->csecs * rqd->nr_ppas,  in nvme_nvm_submit_io()
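Note: on the NVMe side nr_ppas becomes the command's zero-based count: both the bad-block-table update (set_bb.nlb) and the physical read/write command (ph_rw.length) carry nr_ppas - 1, and the data buffer mapped with blk_rq_map_kern() is geo->csecs * nr_ppas bytes, one controller sector per address. A minimal sketch of that encoding; the GFP flag is an assumption since the listing truncates the call:

    /* NVMe count fields are zero-based: a value of 0 means one sector/PPA. */
    c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);

    /* One csecs-sized sector of payload per physical page address. */
    ret = blk_rq_map_kern(q, rq, buf, geo->csecs * rqd->nr_ppas, GFP_KERNEL);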