Lines matching +full:lines +full:initial +full:states (drivers/lightnvm/pblk.h)

1 /* SPDX-License-Identifier: GPL-2.0 */
5 * Initial release: Matias Bjorling <matias@cnexlabs.com>
17 * Implementation of a Physical Block-device target for Open-channel SSDs.
25 #include <linux/blk-mq.h>
59 PBLK_WRITE_INT, /* Internal write - no write buffer */
60 PBLK_READ_RECOV, /* Recovery read - errors allowed */
92 /* The number of GC lists and the rate-limiter states go together. This way the
93 * rate-limiter can dictate how much GC is needed based on resource utilization.
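
The pairing described here means each rate-limiter state selects one GC list. A minimal sketch of that one-to-one mapping; the state names and values are assumptions drawn from context, not quoted from this excerpt:

/* Hypothetical pairing of rate-limiter states with GC lists. Only the
 * one-to-one correspondence is the point; names are illustrative.
 */
#define PBLK_GC_NR_LISTS 4		/* werr, high, mid, low */

enum pblk_rl_state {
	PBLK_RL_OFF,		/* free run: user I/O not throttled */
	PBLK_RL_WERR,		/* write-error lines must be GC'ed first */
	PBLK_RL_HIGH,		/* few free lines left: pick from gc_high_list */
	PBLK_RL_MID,		/* pick from gc_mid_list */
	PBLK_RL_LOW,		/* resources plentiful: pick from gc_low_list */
};
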
109 struct list_head list; /* Head for out-of-order completion */
140 struct bio_list bios; /* Original bios - used for completion
165 unsigned int mem; /* Write offset - points to next
168 unsigned int subm; /* Read offset - points to last entry
172 unsigned int sync; /* Synced - backpointer that signals
176 unsigned int flush_point; /* Sync point - last entry that must be
180 unsigned int l2p_update; /* l2p update point - next entry for
185 unsigned int nr_entries; /* Number of entries in write buffer -
195 * geo->mw_cunits on a per chunk basis
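
The mem/subm/sync/flush_point/l2p_update offsets above are cursors into a single ring: user writes advance mem, submission to the device consumes up to subm, completions advance sync, and the L2P table is only updated for entries that are safely on media. A compact userspace model of the resulting invariant (sizes and numbers are illustrative):

#include <assert.h>
#include <stdio.h>

#define RB_ENTRIES 64			/* power-of-2 ring size (illustrative) */
#define RB_MASK    (RB_ENTRIES - 1)

/* Each cursor only moves forward and they stay ordered:
 * l2p_update <= sync <= subm <= mem (modulo the ring size).
 */
struct rb_model {
	unsigned int mem;		/* next writable entry (producer) */
	unsigned int subm;		/* last entry submitted to the media */
	unsigned int sync;		/* last entry persisted on the media */
	unsigned int l2p_update;	/* next entry to update in the L2P map */
};

static unsigned int rb_space(const struct rb_model *rb)
{
	/* Entries between l2p_update and mem are still in use. */
	return RB_ENTRIES - ((rb->mem - rb->l2p_update) & RB_MASK) - 1;
}

int main(void)
{
	struct rb_model rb = { .mem = 8, .subm = 4, .sync = 2, .l2p_update = 2 };

	/* 64 entries - 6 in flight - 1 guard slot = 57 free. */
	assert(rb_space(&rb) == 57);
	printf("free entries: %u\n", rb_space(&rb));
	return 0;
}
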
226 /* These states are not protected by a lock since (i) they are in the
243 atomic_t read_inflight_gc; /* Number of lines with inflight GC reads */
244 atomic_t pipeline_gc; /* Number of lines in the GC pipeline -
258 unsigned int high; /* Upper threshold for rate limiter (free run -
267 * given as a power-of-2. This guarantees that
271 * pblk->max_write_pgs size, which in NVMe is
278 int rb_state; /* Rate-limiter current state */
290 atomic_t werr_lines; /* Number of write error lines that need GC */
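
The fragments above mention that a rate-limiter window is pblk->max_write_pgs in size and that the window count is kept as a power of two. The arithmetic for the NVMe case hinted at in the comment, assuming a 4 KB exposed page (the 64 and the exponent below are illustrative):

#include <stdio.h>

int main(void)
{
	int max_write_pgs = 64;		/* sectors per rate-limiter window */
	int exposed_page = 4096;	/* 4 KB exposed page (assumed) */
	int rb_windows_pw = 7;		/* power-of-2 exponent (made up) */

	/* 64 sectors x 4 KB = 256 KB per window. */
	printf("window size: %d KB\n", max_write_pgs * exposed_page / 1024);
	printf("windows in buffer: %d\n", 1 << rb_windows_pw);
	return 0;
}
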
379 * 4. u32 valid sector count (vsc) for all lines (~0U: free line)
412 int mem; /* Write offset - points to next
415 atomic_t sync; /* Synced - backpointer that signals the
486 PBLK_EMETA_TYPE_LLBA = 2, /* lba list - type: __le64 */
487 PBLK_EMETA_TYPE_VSC = 3, /* vsc list - type: __le32 */
491 int nr_lines; /* Total number of full lines */
492 int nr_free_lines; /* Number of full lines in free list */
494 /* Free lists - use free_lock */
495 struct list_head free_list; /* Full lines ready to use */
496 struct list_head corrupt_list; /* Full lines corrupted */
497 struct list_head bad_list; /* Full lines bad */
499 /* GC lists - use gc_lock */
501 struct list_head gc_high_list; /* Full lines ready to GC, high isc */
502 struct list_head gc_mid_list; /* Full lines ready to GC, mid isc */
503 struct list_head gc_low_list; /* Full lines ready to GC, low isc */
507 struct list_head gc_full_list; /* Full lines ready to GC, no valid */
508 struct list_head gc_empty_list; /* Full lines closed, all sectors valid */
515 struct list_head emeta_list; /* Lines queued to schedule emeta */
517 __le32 *vsc_list; /* Valid sector counts for all lines */
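
The lists above bucket closed lines by how much valid data they still hold, so the GC can always grab the cheapest victim. A sketch of how a line could be routed by its valid sector count (vsc); the thresholds are invented for illustration:

enum gc_list { GC_FULL, GC_HIGH, GC_MID, GC_LOW, GC_EMPTY };

/* Hypothetical routing of a closed line: fewer valid sectors means
 * less data to move, so the line lands on a "hotter" GC list.
 */
static enum gc_list pick_gc_list(unsigned int vsc, unsigned int sec_per_line)
{
	if (vsc == 0)
		return GC_FULL;		/* nothing valid: free after erase */
	if (vsc == sec_per_line)
		return GC_EMPTY;	/* everything valid: no gain in GC */
	if (vsc < sec_per_line / 4)
		return GC_HIGH;		/* mostly invalid: cheap victim */
	if (vsc < sec_per_line / 2)
		return GC_MID;
	return GC_LOW;
}
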
519 /* Pre-allocated metadata for data lines */
580 /* Internal format to support non-power-of-2 device formats */
600 struct pblk_line *lines; /* Line array */
619 int op; /* Percentage of device used for over-provisioning */
620 int op_blks; /* Number of blocks used for over-provisioning */
645 /* Non-persistent debug counters, 4 KB sector I/Os */
712 pr_err("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
714 pr_info("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
716 pr_warn("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
718 pr_debug("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
886 #define PBLK_GC_L_QD 4 /* Queue depth for inflight GC lines */
931 return c_ctx - sizeof(struct nvm_rq); in nvm_rq_from_c_ctx()
936 return emeta->bb_bitmap; in emeta_to_bb()
942 return emeta->bb_bitmap + lm->blk_bitmap_len; in emeta_to_wa()
947 return ((void *)emeta + pblk->lm.emeta_len[1]); in emeta_to_lbas()
952 return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]); in emeta_to_vsc()
957 return le32_to_cpu(*line->vsc); in pblk_line_vsc()
968 return &pblk->lines[pblk_ppa_to_line_id(p)]; in pblk_ppa_to_line()
973 return p.a.lun * geo->num_ch + p.a.ch; in pblk_ppa_to_pos()
979 struct nvm_tgt_dev *dev = pblk->dev; in addr_to_gen_ppa()
980 struct nvm_geo *geo = &dev->geo; in addr_to_gen_ppa()
983 if (geo->version == NVM_OCSSD_SPEC_12) { in addr_to_gen_ppa()
984 struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf; in addr_to_gen_ppa()
988 ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset; in addr_to_gen_ppa()
989 ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset; in addr_to_gen_ppa()
990 ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset; in addr_to_gen_ppa()
991 ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset; in addr_to_gen_ppa()
992 ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset; in addr_to_gen_ppa()
994 struct pblk_addrf *uaddrf = &pblk->uaddrf; in addr_to_gen_ppa()
1001 paddr = div_u64_rem(paddr, uaddrf->sec_stripe, &secs); in addr_to_gen_ppa()
1004 paddr = div_u64_rem(paddr, uaddrf->ch_stripe, &chnls); in addr_to_gen_ppa()
1007 paddr = div_u64_rem(paddr, uaddrf->lun_stripe, &luns); in addr_to_gen_ppa()
1010 ppa.m.sec += uaddrf->sec_stripe * paddr; in addr_to_gen_ppa()
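
For the OCSSD 2.0 branch, addr_to_gen_ppa() peels a line-relative address apart with successive divmods over the striping factors. A userspace model of that decomposition; the stripe widths in main() are illustrative:

#include <stdio.h>

struct ppa20 { unsigned grp, pu, sec; };

static struct ppa20 addr_to_ppa20(unsigned long long paddr,
				  unsigned sec_stripe,	/* sectors per write unit */
				  unsigned ch_stripe,	/* channels */
				  unsigned lun_stripe)	/* LUNs per channel */
{
	struct ppa20 p;

	p.sec = paddr % sec_stripe; paddr /= sec_stripe;
	p.grp = paddr % ch_stripe;  paddr /= ch_stripe;
	p.pu  = paddr % lun_stripe; paddr /= lun_stripe;
	p.sec += sec_stripe * paddr;	/* leftover full stripes stack in sec */
	return p;
}

int main(void)
{
	/* 4-sector write units, 8 channels, 4 LUNs (made-up geometry). */
	struct ppa20 p = addr_to_ppa20(1234, 4, 8, 4);

	printf("grp=%u pu=%u sec=%u\n", p.grp, p.pu, p.sec);	/* grp=4 pu=2 sec=38 */
	return 0;
}
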
1019 struct nvm_tgt_dev *dev = pblk->dev; in pblk_dev_ppa_to_chunk()
1020 struct nvm_geo *geo = &dev->geo; in pblk_dev_ppa_to_chunk()
1024 return &line->chks[pos]; in pblk_dev_ppa_to_chunk()
1030 struct nvm_tgt_dev *dev = pblk->dev; in pblk_dev_ppa_to_chunk_addr()
1032 return dev_to_chunk_addr(dev->parent, &pblk->addrf, p); in pblk_dev_ppa_to_chunk_addr()
1038 struct nvm_tgt_dev *dev = pblk->dev; in pblk_dev_ppa_to_line_addr()
1039 struct nvm_geo *geo = &dev->geo; in pblk_dev_ppa_to_line_addr()
1042 if (geo->version == NVM_OCSSD_SPEC_12) { in pblk_dev_ppa_to_line_addr()
1043 struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf; in pblk_dev_ppa_to_line_addr()
1045 paddr = (u64)p.g.ch << ppaf->ch_offset; in pblk_dev_ppa_to_line_addr()
1046 paddr |= (u64)p.g.lun << ppaf->lun_offset; in pblk_dev_ppa_to_line_addr()
1047 paddr |= (u64)p.g.pg << ppaf->pg_offset; in pblk_dev_ppa_to_line_addr()
1048 paddr |= (u64)p.g.pl << ppaf->pln_offset; in pblk_dev_ppa_to_line_addr()
1049 paddr |= (u64)p.g.sec << ppaf->sec_offset; in pblk_dev_ppa_to_line_addr()
1051 struct pblk_addrf *uaddrf = &pblk->uaddrf; in pblk_dev_ppa_to_line_addr()
1055 paddr = (u64)p.m.grp * uaddrf->sec_stripe; in pblk_dev_ppa_to_line_addr()
1056 paddr += (u64)p.m.pu * uaddrf->sec_lun_stripe; in pblk_dev_ppa_to_line_addr()
1058 secs = div_u64_rem(secs, uaddrf->sec_stripe, &sec_stripe); in pblk_dev_ppa_to_line_addr()
1059 paddr += secs * uaddrf->sec_ws_stripe; in pblk_dev_ppa_to_line_addr()
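
pblk_dev_ppa_to_line_addr() is the inverse walk: the PPA components are scaled back by the precomputed stripe products. A sketch mirroring the 2.0 branch, where sec_lun_stripe and sec_ws_stripe are assumed to be sec_stripe * ch_stripe and sec_stripe * ch_stripe * lun_stripe. Feeding in the components from the previous sketch (grp=4, pu=2, sec=38) reproduces paddr = 1234:

/* Inverse of addr_to_ppa20() above, mirroring the 2.0 branch of
 * pblk_dev_ppa_to_line_addr().
 */
static unsigned long long ppa20_to_addr(struct ppa20 p,
					unsigned sec_stripe,
					unsigned ch_stripe,
					unsigned lun_stripe)
{
	unsigned long long sec_lun_stripe = (unsigned long long)sec_stripe * ch_stripe;
	unsigned long long sec_ws_stripe = sec_lun_stripe * lun_stripe;
	unsigned long long paddr;

	paddr  = (unsigned long long)p.grp * sec_stripe;
	paddr += (unsigned long long)p.pu * sec_lun_stripe;
	paddr += (unsigned long long)(p.sec / sec_stripe) * sec_ws_stripe;
	paddr += p.sec % sec_stripe;	/* sector within the write unit */
	return paddr;
}
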
1068 struct nvm_tgt_dev *dev = pblk->dev; in pblk_ppa32_to_ppa64()
1070 return nvm_ppa32_to_ppa64(dev->parent, &pblk->addrf, ppa32); in pblk_ppa32_to_ppa64()
1075 struct nvm_tgt_dev *dev = pblk->dev; in pblk_ppa64_to_ppa32()
1077 return nvm_ppa64_to_ppa32(dev->parent, &pblk->addrf, ppa64); in pblk_ppa64_to_ppa32()
1085 if (pblk->addrf_len < 32) { in pblk_trans_map_get()
1086 u32 *map = (u32 *)pblk->trans_map; in pblk_trans_map_get()
1090 struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map; in pblk_trans_map_get()
1101 if (pblk->addrf_len < 32) { in pblk_trans_map_set()
1102 u32 *map = (u32 *)pblk->trans_map; in pblk_trans_map_set()
1106 u64 *map = (u64 *)pblk->trans_map; in pblk_trans_map_set()
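
pblk_trans_map_get()/set() keep the whole logical-to-physical table as packed 32-bit entries when the device address format fits in under 32 bits, halving the table's footprint; otherwise full 64-bit PPAs are stored. A userspace model of the two layouts (the widen/narrow casts stand in for pblk_ppa32_to_ppa64() and its inverse):

#include <stdint.h>
#include <stddef.h>

struct l2p {
	int addrf_len;		/* bits needed for a device address */
	void *trans_map;	/* u32[] when addrf_len < 32, else u64[] */
};

static uint64_t l2p_get(const struct l2p *m, size_t lba)
{
	if (m->addrf_len < 32)
		return ((const uint32_t *)m->trans_map)[lba];	/* widen */
	return ((const uint64_t *)m->trans_map)[lba];
}

static void l2p_set(struct l2p *m, size_t lba, uint64_t ppa)
{
	if (m->addrf_len < 32)
		((uint32_t *)m->trans_map)[lba] = (uint32_t)ppa;	/* narrow */
	else
		((uint64_t *)m->trans_map)[lba] = ppa;
}
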
1119 ppa_addr->ppa = ADDR_EMPTY; in pblk_ppa_set_empty()
1153 sizeof(struct line_header) - sizeof(crc)); in pblk_calc_meta_header_crc()
1161 struct pblk_line_meta *lm = &pblk->lm; in pblk_calc_smeta_crc()
1166 lm->smeta_len - in pblk_calc_smeta_crc()
1167 sizeof(struct line_header) - sizeof(crc)); in pblk_calc_smeta_crc()
1175 struct pblk_line_meta *lm = &pblk->lm; in pblk_calc_emeta_crc()
1180 lm->emeta_len[0] - in pblk_calc_emeta_crc()
1181 sizeof(struct line_header) - sizeof(crc)); in pblk_calc_emeta_crc()
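
All three CRC helpers follow one pattern: start from ~0 and checksum the buffer beginning just past its leading crc field, so the stored checksum never covers itself. A sketch of that pattern; crc32_le() is declared here as a stand-in for the kernel's (zlib's crc32 would do in userspace):

#include <stdint.h>
#include <stddef.h>

/* Stand-in declaration; link against any little-endian CRC32. */
uint32_t crc32_le(uint32_t crc, const unsigned char *buf, size_t len);

struct line_header {
	uint32_t crc;		/* first field, excluded from the sum */
	/* ... identity, version and id fields follow ... */
};

static uint32_t calc_header_crc(const struct line_header *hdr)
{
	uint32_t crc = ~(uint32_t)0;

	return crc32_le(crc, (const unsigned char *)hdr + sizeof(hdr->crc),
			sizeof(*hdr) - sizeof(hdr->crc));
}
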
1188 return !(nr_secs % pblk->min_write_pgs); in pblk_io_aligned()
1195 struct nvm_geo *geo = &pblk->dev->geo; in print_ppa()
1197 if (p->c.is_cached) { in print_ppa()
1199 msg, error, (u64)p->c.line); in print_ppa()
1200 } else if (geo->version == NVM_OCSSD_SPEC_12) { in print_ppa()
1203 p->g.ch, p->g.lun, p->g.blk, in print_ppa()
1204 p->g.pg, p->g.pl, p->g.sec); in print_ppa()
1208 p->m.grp, p->m.pu, p->m.chk, p->m.sec); in print_ppa()
1215 int bit = -1; in pblk_print_failed_rqd()
1217 if (rqd->nr_ppas == 1) { in pblk_print_failed_rqd()
1218 print_ppa(pblk, &rqd->ppa_addr, "rqd", error); in pblk_print_failed_rqd()
1222 while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas, in pblk_print_failed_rqd()
1223 bit + 1)) < rqd->nr_ppas) { in pblk_print_failed_rqd()
1224 print_ppa(pblk, &rqd->ppa_list[bit], "rqd", error); in pblk_print_failed_rqd()
1227 pblk_err(pblk, "error:%d, ppa_status:%llx\n", error, rqd->ppa_status); in pblk_print_failed_rqd()
1233 struct nvm_geo *geo = &tgt_dev->geo; in pblk_boundary_ppa_checks()
1240 if (geo->version == NVM_OCSSD_SPEC_12) { in pblk_boundary_ppa_checks()
1241 if (!ppa->c.is_cached && in pblk_boundary_ppa_checks()
1242 ppa->g.ch < geo->num_ch && in pblk_boundary_ppa_checks()
1243 ppa->g.lun < geo->num_lun && in pblk_boundary_ppa_checks()
1244 ppa->g.pl < geo->num_pln && in pblk_boundary_ppa_checks()
1245 ppa->g.blk < geo->num_chk && in pblk_boundary_ppa_checks()
1246 ppa->g.pg < geo->num_pg && in pblk_boundary_ppa_checks()
1247 ppa->g.sec < geo->ws_min) in pblk_boundary_ppa_checks()
1250 if (!ppa->c.is_cached && in pblk_boundary_ppa_checks()
1251 ppa->m.grp < geo->num_ch && in pblk_boundary_ppa_checks()
1252 ppa->m.pu < geo->num_lun && in pblk_boundary_ppa_checks()
1253 ppa->m.chk < geo->num_chk && in pblk_boundary_ppa_checks()
1254 ppa->m.sec < geo->clba) in pblk_boundary_ppa_checks()
1258 print_ppa(tgt_dev->q->queuedata, ppa, "boundary", i); in pblk_boundary_ppa_checks()
1267 struct nvm_tgt_dev *dev = pblk->dev; in pblk_check_io()
1270 if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) { in pblk_check_io()
1272 return -EINVAL; in pblk_check_io()
1275 if (rqd->opcode == NVM_OP_PWRITE) { in pblk_check_io()
1279 for (i = 0; i < rqd->nr_ppas; i++) { in pblk_check_io()
1282 spin_lock(&line->lock); in pblk_check_io()
1283 if (line->state != PBLK_LINESTATE_OPEN) { in pblk_check_io()
1285 line->id, line->state); in pblk_check_io()
1287 spin_unlock(&line->lock); in pblk_check_io()
1288 return -EINVAL; in pblk_check_io()
1290 spin_unlock(&line->lock); in pblk_check_io()
1300 struct pblk_line_meta *lm = &pblk->lm; in pblk_boundary_paddr_checks()
1302 if (paddr > lm->sec_per_line) in pblk_boundary_paddr_checks()
1310 return bio->bi_iter.bi_idx; in pblk_get_bi_idx()
1315 return bio->bi_iter.bi_sector / NR_PHY_IN_LOG; in pblk_get_lba()
1320 return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE; in pblk_get_secs()
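
pblk_get_lba() and pblk_get_secs() are unit conversions from the block layer's 512-byte sectors and byte counts into pblk's exposed pages; assuming the usual 4 KB exposed page, NR_PHY_IN_LOG is 4096 / 512 = 8. A worked check:

#include <assert.h>

#define PBLK_EXPOSED_PAGE_SIZE	4096		/* assumed 4 KB */
#define NR_PHY_IN_LOG		(PBLK_EXPOSED_PAGE_SIZE / 512)

int main(void)
{
	unsigned long bi_sector = 2048;		/* bio start, 512 B units */
	unsigned int bi_size = 16384;		/* bio length in bytes */

	assert(bi_sector / NR_PHY_IN_LOG == 256);	/* pblk_get_lba() */
	assert(bi_size / PBLK_EXPOSED_PAGE_SIZE == 4);	/* pblk_get_secs() */
	return 0;
}
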
1325 struct gendisk *disk = pblk->disk; in pblk_disk_name()
1327 return disk->disk_name; in pblk_disk_name()
1332 struct pblk_line_meta *lm = &pblk->lm; in pblk_get_min_chks()
1333 /* In a worst-case scenario every line will have OP invalid sectors. in pblk_get_min_chks()
1334 * We will then need a minimum of 1/OP lines to free up a single line in pblk_get_min_chks()
1337 return DIV_ROUND_UP(100, pblk->op) * lm->blk_per_line; in pblk_get_min_chks()
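
Worked example of the reasoning above, with made-up numbers: if op = 10 (10% over-provisioning) and a line spans 64 blocks, then DIV_ROUND_UP(100, 10) * 64 = 10 * 64 = 640 chunks must stay in reserve so that, even when every line carries OP invalid sectors, GC can always free up one full line.
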
1344 max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size) in pblk_get_meta()
1350 return max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size) in pblk_dma_meta_size()
1356 return pblk->oob_meta_size >= sizeof(struct pblk_sec_meta); in pblk_is_oob_meta_supported()