Lines Matching refs:pblk (each entry shows the source line number, the matching code, and the enclosing function; "argument" and "local" mark hits where pblk is declared as a function argument or a local variable, while untagged hits are ordinary uses)

22 static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,  in pblk_end_w_bio()  argument
26 struct pblk_rb *rwb = &pblk->rwb; in pblk_end_w_bio()
53 pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid, in pblk_end_w_bio()
57 atomic_long_add(rqd->nr_ppas, &pblk->sync_writes); in pblk_end_w_bio()
60 ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid); in pblk_end_w_bio()
63 pblk_free_rqd(pblk, rqd, PBLK_WRITE); in pblk_end_w_bio()
68 static unsigned long pblk_end_queued_w_bio(struct pblk *pblk, in pblk_end_queued_w_bio() argument
73 return pblk_end_w_bio(pblk, rqd, c_ctx); in pblk_end_queued_w_bio()
76 static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd, in pblk_complete_write() argument
84 atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes); in pblk_complete_write()
86 pblk_up_rq(pblk, c_ctx->lun_bitmap); in pblk_complete_write()
88 pos = pblk_rb_sync_init(&pblk->rwb, &flags); in pblk_complete_write()
90 pos = pblk_end_w_bio(pblk, rqd, c_ctx); in pblk_complete_write()
93 list_for_each_entry_safe(c, r, &pblk->compl_list, list) { in pblk_complete_write()
96 pos = pblk_end_queued_w_bio(pblk, rqd, c); in pblk_complete_write()
102 list_add_tail(&c_ctx->list, &pblk->compl_list); in pblk_complete_write()
104 pblk_rb_sync_end(&pblk->rwb, &flags); in pblk_complete_write()
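
The matches above, from pblk_end_w_bio(), pblk_end_queued_w_bio() and pblk_complete_write(), belong to the write-completion path: a completed write may only advance the ring buffer's sync pointer in order, so out-of-order completions are parked on pblk->compl_list under the sync lock and drained once the earlier writes arrive. Below is a minimal user-space sketch of that complete-in-order/park-the-rest pattern; compl_ctx, sync_pos and parked are simplified stand-ins (not the kernel's pblk_rb/pblk_c_ctx structures), and the ring-buffer lock taken via pblk_rb_sync_init()/pblk_rb_sync_end() is omitted.

    /* Illustrative model only: in-order completion against a sync pointer. */
    #include <stdio.h>
    #include <stdlib.h>

    struct compl_ctx {
        unsigned int pos;           /* first ring-buffer entry covered */
        unsigned int nr;            /* number of entries covered */
        struct compl_ctx *next;     /* stand-in for the kernel list_head */
    };

    static unsigned int sync_pos;      /* models the rb sync pointer */
    static struct compl_ctx *parked;   /* models pblk->compl_list */

    static unsigned int end_w_bio(struct compl_ctx *c)
    {
        sync_pos = c->pos + c->nr;     /* pblk_rb_sync_advance() analogue */
        free(c);
        return sync_pos;
    }

    static void complete_write(struct compl_ctx *c)
    {
        struct compl_ctx **pp;
        unsigned int pos;

        if (c->pos != sync_pos) {      /* out of order: park it for later */
            c->next = parked;
            parked = c;
            return;
        }

        pos = end_w_bio(c);
        for (pp = &parked; *pp; ) {    /* drain now-contiguous parked entries */
            if ((*pp)->pos == pos) {
                struct compl_ctx *r = *pp;

                *pp = r->next;
                pos = end_w_bio(r);
                pp = &parked;          /* rescan after making progress */
            } else {
                pp = &(*pp)->next;
            }
        }
    }

    int main(void)
    {
        struct compl_ctx *a = calloc(1, sizeof(*a));
        struct compl_ctx *b = calloc(1, sizeof(*b));

        a->pos = 0; a->nr = 4;
        b->pos = 4; b->nr = 4;
        complete_write(b);             /* arrives first: parked */
        complete_write(a);             /* unblocks both, sync_pos ends at 8 */
        printf("sync_pos = %u\n", sync_pos);
        return 0;
    }

In the kernel the parked contexts sit on a proper list (list_add_tail()/list_for_each_entry_safe(), visible in the matches above), but the effect is the same: the sync pointer only ever moves contiguously.
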
108 static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa, in pblk_map_remaining() argument
119 line = pblk_ppa_to_line(pblk, *ppa); in pblk_map_remaining()
120 lba_list = emeta_to_lbas(pblk, line->emeta->buf); in pblk_map_remaining()
125 paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa); in pblk_map_remaining()
138 done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa); in pblk_map_remaining()
147 static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry, in pblk_prepare_resubmit() argument
150 struct pblk_rb *rb = &pblk->rwb; in pblk_prepare_resubmit()
158 spin_lock(&pblk->trans_lock); in pblk_prepare_resubmit()
165 ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba); in pblk_prepare_resubmit()
179 line = pblk_ppa_to_line(pblk, w_ctx->ppa); in pblk_prepare_resubmit()
183 spin_unlock(&pblk->trans_lock); in pblk_prepare_resubmit()
186 static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx) in pblk_queue_resubmit() argument
199 spin_lock(&pblk->resubmit_lock); in pblk_queue_resubmit()
200 list_add_tail(&r_ctx->list, &pblk->resubmit_list); in pblk_queue_resubmit()
201 spin_unlock(&pblk->resubmit_lock); in pblk_queue_resubmit()
204 atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes); in pblk_queue_resubmit()
212 struct pblk *pblk = recovery->pblk; in pblk_submit_rec() local
217 pblk_log_write_err(pblk, rqd); in pblk_submit_rec()
219 pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas); in pblk_submit_rec()
220 pblk_queue_resubmit(pblk, c_ctx); in pblk_submit_rec()
222 pblk_up_rq(pblk, c_ctx->lun_bitmap); in pblk_submit_rec()
224 pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid, in pblk_submit_rec()
227 pblk_free_rqd(pblk, rqd, PBLK_WRITE); in pblk_submit_rec()
228 mempool_free(recovery, &pblk->rec_pool); in pblk_submit_rec()
230 atomic_dec(&pblk->inflight_io); in pblk_submit_rec()
231 pblk_write_kick(pblk); in pblk_submit_rec()
235 static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd) in pblk_end_w_fail() argument
239 recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC); in pblk_end_w_fail()
241 pblk_err(pblk, "could not allocate recovery work\n"); in pblk_end_w_fail()
245 recovery->pblk = pblk; in pblk_end_w_fail()
249 queue_work(pblk->close_wq, &recovery->ws_rec); in pblk_end_w_fail()
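
pblk_end_w_fail() runs from the write request's completion context, so it does no recovery work itself: it allocates a recovery item from pblk->rec_pool with GFP_ATOMIC and queues it on pblk->close_wq. The heavy lifting happens later in pblk_submit_rec(), which logs the error, pads out the remainder of the failed line (pblk_map_remaining()), parks the write context on pblk->resubmit_list for the writer thread (pblk_queue_resubmit()), releases the LUN semaphore and the request's resources, and kicks the writer. The sketch below models only that two-stage split; recovery_item, pending and the helper names are hypothetical, with a plain list standing in for the mempool and workqueue.

    /* Illustrative model only: defer recovery out of the completion context. */
    #include <stdio.h>
    #include <stdlib.h>

    struct recovery_item {
        int failed_rq_id;                   /* stand-in for the failed nvm_rq */
        struct recovery_item *next;
    };

    static struct recovery_item *pending;   /* models the close_wq backlog */

    /* Phase 1: completion context. Must not sleep, so only queue the work. */
    static void end_w_fail(int rq_id)
    {
        struct recovery_item *rec = malloc(sizeof(*rec)); /* GFP_ATOMIC analogue */

        if (!rec) {
            fprintf(stderr, "could not allocate recovery work\n");
            return;
        }
        rec->failed_rq_id = rq_id;
        rec->next = pending;
        pending = rec;                      /* queue_work() analogue */
    }

    /* Phase 2: worker context. Pad, requeue the data, release, kick the writer. */
    static void submit_rec(struct recovery_item *rec)
    {
        printf("rq %d: map remaining sectors, queue for resubmission, "
               "release LUN, free resources, kick writer\n", rec->failed_rq_id);
        free(rec);
    }

    int main(void)
    {
        end_w_fail(7);                      /* a write request reports failure */
        while (pending) {                   /* later, the workqueue runs */
            struct recovery_item *rec = pending;

            pending = rec->next;
            submit_rec(rec);
        }
        return 0;
    }
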
254 struct pblk *pblk = rqd->private; in pblk_end_io_write() local
258 pblk_end_w_fail(pblk, rqd); in pblk_end_io_write()
262 pblk_check_chunk_state_update(pblk, rqd); in pblk_end_io_write()
268 pblk_complete_write(pblk, rqd, c_ctx); in pblk_end_io_write()
269 atomic_dec(&pblk->inflight_io); in pblk_end_io_write()
274 struct pblk *pblk = rqd->private; in pblk_end_io_write_meta() local
281 pblk_up_chunk(pblk, ppa_list[0]); in pblk_end_io_write_meta()
284 pblk_log_write_err(pblk, rqd); in pblk_end_io_write_meta()
285 pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id); in pblk_end_io_write_meta()
289 pblk_check_chunk_state_update(pblk, rqd); in pblk_end_io_write_meta()
294 pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws, in pblk_end_io_write_meta()
295 GFP_ATOMIC, pblk->close_wq); in pblk_end_io_write_meta()
297 pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT); in pblk_end_io_write_meta()
299 atomic_dec(&pblk->inflight_io); in pblk_end_io_write_meta()
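
pblk_end_io_write() and pblk_end_io_write_meta() are the two completion callbacks installed on write requests: the data path either hands the request to the recovery machinery on error (pblk_end_w_fail()) or completes it in order (pblk_complete_write()), while the metadata path releases the chunk semaphore and, presumably once the last emeta write for the line has synced, schedules pblk_line_close_ws on pblk->close_wq. A small sketch of that "last completion closes the line" accounting follows; EMETA_TOTAL and emeta_synced are simplified stand-ins for the per-line emeta bookkeeping.

    /* Illustrative model only: close the line when its final metadata
     * write completes. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define EMETA_TOTAL 16u                 /* emeta sectors for the line */

    static atomic_uint emeta_synced;

    static void line_close_ws(void)
    {
        printf("line close scheduled on close_wq\n");
    }

    /* Runs once per completed metadata write request. */
    static void end_io_write_meta(unsigned int nr_ppas)
    {
        /* the chunk semaphore would be released here (pblk_up_chunk() analogue) */
        unsigned int synced = atomic_fetch_add(&emeta_synced, nr_ppas) + nr_ppas;

        if (synced == EMETA_TOTAL)          /* last emeta write for the line */
            line_close_ws();
    }

    int main(void)
    {
        end_io_write_meta(8);
        end_io_write_meta(8);               /* second half completes: close */
        return 0;
    }
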
302 static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd, in pblk_alloc_w_rq() argument
309 rqd->private = pblk; in pblk_alloc_w_rq()
312 return pblk_alloc_rqd_meta(pblk, rqd); in pblk_alloc_w_rq()
315 static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd, in pblk_setup_w_rq() argument
318 struct pblk_line_meta *lm = &pblk->lm; in pblk_setup_w_rq()
319 struct pblk_line *e_line = pblk_line_get_erase(pblk); in pblk_setup_w_rq()
332 ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write); in pblk_setup_w_rq()
339 ret = pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, in pblk_setup_w_rq()
342 ret = pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, in pblk_setup_w_rq()
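
pblk_alloc_w_rq() fills in the common fields of a write request, including rqd->private = pblk and the end_io callback supplied by the caller (pblk_end_io_write for data, pblk_end_io_write_meta for emeta), before allocating the request's metadata buffer; pblk_setup_w_rq() builds on that and maps the sectors, using the erase variant when a line is pending erase. The sketch below shows only the callback-registration half with a hypothetical request type, not the kernel's nvm_rq.

    /* Illustrative model only: a request carries its completion callback
     * and an opaque private pointer, both set at allocation time. */
    #include <stdio.h>

    struct rq;
    typedef void (*end_io_fn)(struct rq *rq);

    struct rq {
        int nr_secs;
        void *private;              /* rqd->private analogue (the pblk instance) */
        end_io_fn end_io;           /* rqd->end_io analogue */
    };

    static void end_io_write(struct rq *rq)
    {
        printf("data write done: %d sectors (private=%p)\n",
               rq->nr_secs, rq->private);
    }

    static int alloc_w_rq(void *pblk, struct rq *rq, int nr_secs, end_io_fn fn)
    {
        rq->nr_secs = nr_secs;
        rq->private = pblk;         /* completion side recovers pblk from here */
        rq->end_io = fn;
        return 0;                   /* metadata buffer allocation omitted */
    }

    int main(void)
    {
        int pblk_dummy;             /* stands in for struct pblk */
        struct rq rq;

        alloc_w_rq(&pblk_dummy, &rq, 8, end_io_write);
        rq.end_io(&rq);             /* device completion would invoke this */
        return 0;
    }
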
348 static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail, in pblk_calc_secs_to_sync() argument
353 secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush, true); in pblk_calc_secs_to_sync()
359 pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n", in pblk_calc_secs_to_sync()
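
pblk_calc_secs_to_sync() wraps pblk_calc_secs() and reports a "bad sector calculation" when the result is inconsistent with what is available. The policy is, roughly, to write in multiples of the minimum write size: cap at the maximum write size, otherwise round the available sectors down to a multiple of the minimum, and when a flush is pending with fewer than the minimum available, still emit one minimum-sized (padded) write. The sketch below encodes that rounding rule plus the clamp applied afterwards in pblk_submit_write() (only as many ring-buffer sectors as are actually available get committed); the exact kernel policy may differ in detail, so treat the numbers as illustrative.

    /* Illustrative rounding policy, assuming min/max write sizes in sectors. */
    #include <stdio.h>

    static int calc_secs(int secs_avail, int secs_to_flush, int min, int max)
    {
        if (secs_avail >= max)
            return max;                      /* full-sized write */
        if (secs_avail >= min)
            return (secs_avail / min) * min; /* round down to a multiple of min */
        if (secs_to_flush)
            return min;                      /* flush pending: pad up to min */
        return 0;                            /* not enough data, no flush: wait */
    }

    int main(void)
    {
        int min = 8, max = 64;
        int cases[][2] = { { 70, 0 }, { 21, 0 }, { 3, 0 }, { 3, 1 } };

        for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
            int avail = cases[i][0], flush = cases[i][1];
            int to_sync = calc_secs(avail, flush, min, max);
            /* the writer commits at most what the buffer actually holds */
            int to_commit = to_sync > avail ? avail : to_sync;

            printf("avail=%d flush=%d -> sync=%d commit=%d\n",
                   avail, flush, to_sync, to_commit);
        }
        return 0;
    }

The last case (3 sectors buffered, flush pending) is the interesting one: 8 sectors go to the device, but only the 3 real sectors are committed out of the ring buffer.
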
367 int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line) in pblk_submit_meta_io() argument
369 struct nvm_tgt_dev *dev = pblk->dev; in pblk_submit_meta_io()
371 struct pblk_line_mgmt *l_mg = &pblk->l_mg; in pblk_submit_meta_io()
372 struct pblk_line_meta *lm = &pblk->lm; in pblk_submit_meta_io()
379 int rq_ppas = pblk->min_write_pgs; in pblk_submit_meta_io()
385 rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT); in pblk_submit_meta_io()
393 ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta); in pblk_submit_meta_io()
400 paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas); in pblk_submit_meta_io()
403 ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id); in pblk_submit_meta_io()
412 pblk_down_chunk(pblk, ppa_list[0]); in pblk_submit_meta_io()
414 ret = pblk_submit_io(pblk, rqd, data); in pblk_submit_meta_io()
416 pblk_err(pblk, "emeta I/O submission failed: %d\n", ret); in pblk_submit_meta_io()
423 pblk_up_chunk(pblk, ppa_list[0]); in pblk_submit_meta_io()
425 pblk_dealloc_page(pblk, meta_line, rq_ppas); in pblk_submit_meta_io()
429 pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT); in pblk_submit_meta_io()
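
pblk_submit_meta_io() shows the goto-style unwinding this file uses throughout: allocate an internal write request, set it up, reserve emeta pages on the line, map the PPA list, take the chunk semaphore, submit; each failure point releases exactly what was acquired so far (pblk_up_chunk(), pblk_dealloc_page(), pblk_free_rqd()) in reverse order. Below is a generic sketch of that pattern; the acquire_*()/release_*() helpers are hypothetical stand-ins, not the pblk functions.

    /* Illustrative model only: staged setup with reverse-order unwinding. */
    #include <stdio.h>

    static int acquire_rqd(void)      { return 0; }
    static int reserve_pages(void)    { return 0; }
    static int acquire_chunk(void)    { return 0; }
    static int submit_io(void)        { return -1; }   /* pretend submission fails */

    static void release_chunk(void)   { printf("up chunk\n"); }
    static void release_pages(void)   { printf("dealloc pages\n"); }
    static void release_rqd(void)     { printf("free rqd\n"); }

    static int submit_meta_io(void)
    {
        int ret;

        ret = acquire_rqd();
        if (ret)
            return ret;
        ret = reserve_pages();
        if (ret)
            goto fail_free_rqd;
        ret = acquire_chunk();
        if (ret)
            goto fail_dealloc_pages;
        ret = submit_io();
        if (ret) {
            fprintf(stderr, "emeta I/O submission failed: %d\n", ret);
            goto fail_up_chunk;
        }
        return 0;

    fail_up_chunk:
        release_chunk();
    fail_dealloc_pages:
        release_pages();
    fail_free_rqd:
        release_rqd();
        return ret;
    }

    int main(void)
    {
        return submit_meta_io() ? 1 : 0;
    }
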
433 static inline bool pblk_valid_meta_ppa(struct pblk *pblk, in pblk_valid_meta_ppa() argument
437 struct nvm_tgt_dev *dev = pblk->dev; in pblk_valid_meta_ppa()
440 struct pblk_line *data_line = pblk_line_get_data(pblk); in pblk_valid_meta_ppa()
454 paddr = pblk_lookup_page(pblk, meta_line); in pblk_valid_meta_ppa()
455 ppa = addr_to_gen_ppa(pblk, paddr, 0); in pblk_valid_meta_ppa()
456 ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0); in pblk_valid_meta_ppa()
469 static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk, in pblk_should_submit_meta_io() argument
472 struct pblk_line_meta *lm = &pblk->lm; in pblk_should_submit_meta_io()
473 struct pblk_line_mgmt *l_mg = &pblk->l_mg; in pblk_should_submit_meta_io()
488 if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd)) in pblk_should_submit_meta_io()
494 static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd) in pblk_submit_io_set() argument
503 err = pblk_setup_w_rq(pblk, rqd, &erase_ppa); in pblk_submit_io_set()
505 pblk_err(pblk, "could not setup write request: %d\n", err); in pblk_submit_io_set()
509 meta_line = pblk_should_submit_meta_io(pblk, rqd); in pblk_submit_io_set()
512 err = pblk_submit_io(pblk, rqd, NULL); in pblk_submit_io_set()
514 pblk_err(pblk, "data I/O submission failed: %d\n", err); in pblk_submit_io_set()
520 if (pblk_blk_erase_async(pblk, erase_ppa)) { in pblk_submit_io_set()
521 struct pblk_line *e_line = pblk_line_get_erase(pblk); in pblk_submit_io_set()
522 struct nvm_tgt_dev *dev = pblk->dev; in pblk_submit_io_set()
534 err = pblk_submit_meta_io(pblk, meta_line); in pblk_submit_io_set()
536 pblk_err(pblk, "metadata I/O submission failed: %d", in pblk_submit_io_set()
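
pblk_submit_io_set() ties the pieces together: it sets up the write request (which may also select a physical address to erase), asks pblk_should_submit_meta_io() whether a metadata line can be written without colliding with the data I/O, submits the data, then fires the erase asynchronously and, if applicable, the metadata write. A condensed sketch of that ordering follows; io_plan and the stubbed submit helpers are hypothetical, not the kernel signatures.

    /* Illustrative model only: ordering of data, erase and metadata submission. */
    #include <stdbool.h>
    #include <stdio.h>

    struct io_plan {
        bool has_erase;     /* setup decided a block must be erased */
        bool has_meta;      /* a metadata line is ready and non-colliding */
    };

    static int setup_w_rq(struct io_plan *p)   { p->has_erase = true; return 0; }
    static bool should_submit_meta(void)       { return true; }
    static int submit_data_io(void)            { printf("data I/O\n"); return 0; }
    static int submit_erase_async(void)        { printf("erase (async)\n"); return 0; }
    static int submit_meta_io(void)            { printf("metadata I/O\n"); return 0; }

    static int submit_io_set(void)
    {
        struct io_plan plan = { 0 };
        int err;

        err = setup_w_rq(&plan);               /* map sectors, maybe pick erase ppa */
        if (err) {
            fprintf(stderr, "could not setup write request: %d\n", err);
            return err;
        }

        plan.has_meta = should_submit_meta();  /* decided before the data goes out */

        err = submit_data_io();
        if (err) {
            fprintf(stderr, "data I/O submission failed: %d\n", err);
            return err;
        }

        if (plan.has_erase && submit_erase_async())
            fprintf(stderr, "erase submission failed (the real code handles the bad block here)\n");

        if (plan.has_meta) {
            err = submit_meta_io();
            if (err)
                fprintf(stderr, "metadata I/O submission failed: %d\n", err);
        }
        return 0;
    }

    int main(void)
    {
        return submit_io_set();
    }
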
545 static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd) in pblk_free_write_rqd() argument
551 pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid, in pblk_free_write_rqd()
555 static int pblk_submit_write(struct pblk *pblk, int *secs_left) in pblk_submit_write() argument
566 spin_lock(&pblk->resubmit_lock); in pblk_submit_write()
567 resubmit = !list_empty(&pblk->resubmit_list); in pblk_submit_write()
568 spin_unlock(&pblk->resubmit_lock); in pblk_submit_write()
574 spin_lock(&pblk->resubmit_lock); in pblk_submit_write()
575 r_ctx = list_first_entry(&pblk->resubmit_list, in pblk_submit_write()
578 spin_unlock(&pblk->resubmit_lock); in pblk_submit_write()
583 pblk_prepare_resubmit(pblk, pos, secs_avail); in pblk_submit_write()
584 secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, in pblk_submit_write()
593 secs_avail = pblk_rb_read_count(&pblk->rwb); in pblk_submit_write()
597 secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb); in pblk_submit_write()
598 if (!secs_to_flush && secs_avail < pblk->min_write_pgs_data) in pblk_submit_write()
601 secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, in pblk_submit_write()
603 if (secs_to_sync > pblk->max_write_pgs) { in pblk_submit_write()
604 pblk_err(pblk, "bad buffer sync calculation\n"); in pblk_submit_write()
610 pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com); in pblk_submit_write()
613 packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data); in pblk_submit_write()
619 rqd = pblk_alloc_rqd(pblk, PBLK_WRITE); in pblk_submit_write()
622 if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync, in pblk_submit_write()
624 pblk_err(pblk, "corrupted write bio\n"); in pblk_submit_write()
628 if (pblk_submit_io_set(pblk, rqd)) in pblk_submit_write()
632 atomic_long_add(secs_to_sync, &pblk->sub_writes); in pblk_submit_write()
639 pblk_free_write_rqd(pblk, rqd); in pblk_submit_write()
642 pblk_free_rqd(pblk, rqd, PBLK_WRITE); in pblk_submit_write()
649 struct pblk *pblk = data; in pblk_write_ts() local
655 write_failure = pblk_submit_write(pblk, &secs_left); in pblk_write_ts()
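
pblk_write_ts() is the body of the writer kernel thread: it repeatedly calls pblk_submit_write() to drain the ring buffer (checking the resubmit list first, per the matches in pblk_submit_write() above) and sleeps when nothing is left, to be woken again by pblk_write_kick(). The user-space model below reproduces that kick/sleep loop with a pthread condition variable standing in for the kernel's kthread and wake-up machinery; secs_buffered is a simplified stand-in for the ring-buffer read count.

    /* Illustrative model only: a writer loop that drains work and sleeps
     * until kicked. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
    static int secs_buffered;       /* models data sitting in the ring buffer */
    static bool stop;

    /* Models pblk_submit_write(): returns how many sectors were submitted.
     * Called with the lock held in this sketch. */
    static int submit_write(void)
    {
        int n = secs_buffered;

        secs_buffered = 0;
        if (n)
            printf("writer: submitted %d sectors\n", n);
        return n;
    }

    static void *write_ts(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!stop) {
            if (submit_write())                /* keep draining while there is work */
                continue;
            pthread_cond_wait(&kick, &lock);   /* sleep until kicked */
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    /* Models pblk_write_kick(), called by writers and by the recovery path. */
    static void write_kick(int secs)
    {
        pthread_mutex_lock(&lock);
        secs_buffered += secs;
        pthread_cond_signal(&kick);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t ts;

        pthread_create(&ts, NULL, write_ts, NULL);
        write_kick(16);
        write_kick(8);
        usleep(100 * 1000);          /* let the writer drain */

        pthread_mutex_lock(&lock);
        stop = true;
        pthread_cond_signal(&kick);
        pthread_mutex_unlock(&lock);
        pthread_join(ts, NULL);
        return 0;
    }
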