
Lines matching references to ns (struct nvmet_ns) in the NVMe target block-device backend, drivers/nvme/target/io-cmd-bdev.c

51 void nvmet_bdev_ns_disable(struct nvmet_ns *ns)  in nvmet_bdev_ns_disable()  argument
53 if (ns->bdev) { in nvmet_bdev_ns_disable()
54 blkdev_put(ns->bdev, NULL); in nvmet_bdev_ns_disable()
55 ns->bdev = NULL; in nvmet_bdev_ns_disable()
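
The three hits above cover essentially the whole of nvmet_bdev_ns_disable(): drop the block-device reference taken at enable time and clear the pointer. A minimal reconstruction, with only the closing braces assumed:

void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
        /* Release the bdev reference and forget it so enable can run again. */
        if (ns->bdev) {
                blkdev_put(ns->bdev, NULL);
                ns->bdev = NULL;
        }
}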
59 static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns) in nvmet_bdev_ns_enable_integrity() argument
61 struct blk_integrity *bi = bdev_get_integrity(ns->bdev); in nvmet_bdev_ns_enable_integrity()
64 ns->metadata_size = bi->tuple_size; in nvmet_bdev_ns_enable_integrity()
66 ns->pi_type = NVME_NS_DPS_PI_TYPE1; in nvmet_bdev_ns_enable_integrity()
68 ns->pi_type = NVME_NS_DPS_PI_TYPE3; in nvmet_bdev_ns_enable_integrity()
71 ns->metadata_size = 0; in nvmet_bdev_ns_enable_integrity()
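
The integrity hits show how the block layer's integrity profile is translated into an NVMe protection-information type on the namespace. The sketch below fills in the branch conditions, which are not among the matched lines; comparing against the t10_pi_type1_crc / t10_pi_type3_crc profiles is an assumption about how the profile is identified.

static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
        struct blk_integrity *bi = bdev_get_integrity(ns->bdev);

        if (!bi)
                return;

        ns->metadata_size = bi->tuple_size;
        if (bi->profile == &t10_pi_type1_crc)           /* assumed condition */
                ns->pi_type = NVME_NS_DPS_PI_TYPE1;
        else if (bi->profile == &t10_pi_type3_crc)      /* assumed condition */
                ns->pi_type = NVME_NS_DPS_PI_TYPE3;
        else
                /* Unsupported metadata format: expose no metadata at all. */
                ns->metadata_size = 0;
}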
75 int nvmet_bdev_ns_enable(struct nvmet_ns *ns) in nvmet_bdev_ns_enable() argument
84 if (ns->buffered_io) in nvmet_bdev_ns_enable()
87 ns->bdev = blkdev_get_by_path(ns->device_path, in nvmet_bdev_ns_enable()
89 if (IS_ERR(ns->bdev)) { in nvmet_bdev_ns_enable()
90 ret = PTR_ERR(ns->bdev); in nvmet_bdev_ns_enable()
93 ns->device_path, PTR_ERR(ns->bdev)); in nvmet_bdev_ns_enable()
95 ns->bdev = NULL; in nvmet_bdev_ns_enable()
98 ns->size = bdev_nr_bytes(ns->bdev); in nvmet_bdev_ns_enable()
99 ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev)); in nvmet_bdev_ns_enable()
101 ns->pi_type = 0; in nvmet_bdev_ns_enable()
102 ns->metadata_size = 0; in nvmet_bdev_ns_enable()
104 nvmet_bdev_ns_enable_integrity(ns); in nvmet_bdev_ns_enable()
106 if (bdev_is_zoned(ns->bdev)) { in nvmet_bdev_ns_enable()
107 if (!nvmet_bdev_zns_enable(ns)) { in nvmet_bdev_ns_enable()
108 nvmet_bdev_ns_disable(ns); in nvmet_bdev_ns_enable()
111 ns->csi = NVME_CSI_ZNS; in nvmet_bdev_ns_enable()
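
Read in order, the nvmet_bdev_ns_enable() hits outline the enable path: reject buffered_io namespaces (those go through the file backend), open the device by path, cache the size and block-size shift, reset then probe protection information, and switch the command set to ZNS for zoned devices. The sketch below follows that flow; the open flags, error codes, pr_err wording and the integrity config guard are assumptions, not part of the matched lines.

int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
        int ret;

        /* buffered_io means the user wants the file-backed I/O path instead. */
        if (ns->buffered_io)
                return -ENOTBLK;                        /* assumed error code */

        ns->bdev = blkdev_get_by_path(ns->device_path,
                        BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL); /* assumed flags */
        if (IS_ERR(ns->bdev)) {
                ret = PTR_ERR(ns->bdev);
                pr_err("failed to open block device %s: (%ld)\n",
                                ns->device_path, PTR_ERR(ns->bdev));
                ns->bdev = NULL;
                return ret;
        }

        ns->size = bdev_nr_bytes(ns->bdev);
        ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

        ns->pi_type = 0;
        ns->metadata_size = 0;
        if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))       /* assumed guard */
                nvmet_bdev_ns_enable_integrity(ns);

        if (bdev_is_zoned(ns->bdev)) {
                if (!nvmet_bdev_zns_enable(ns)) {
                        nvmet_bdev_ns_disable(ns);
                        return -EINVAL;                 /* assumed error code */
                }
                ns->csi = NVME_CSI_ZNS;
        }

        return 0;
}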
117 void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns) in nvmet_bdev_ns_revalidate() argument
119 ns->size = bdev_nr_bytes(ns->bdev); in nvmet_bdev_ns_revalidate()
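
Revalidation is a one-liner as far as ns is concerned; line 119 above is effectively the whole body:

void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
{
        /* Pick up a capacity change on the underlying block device. */
        ns->size = bdev_nr_bytes(ns->bdev);
}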
196 bi = bdev_get_integrity(req->ns->bdev); in nvmet_bdev_alloc_bip()
274 sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba); in nvmet_bdev_execute_rw()
278 bio_init(bio, req->ns->bdev, req->inline_bvec, in nvmet_bdev_execute_rw()
281 bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf, in nvmet_bdev_execute_rw()
307 bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), in nvmet_bdev_execute_rw()
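
The read/write hits show the two bio allocation paths in nvmet_bdev_execute_rw(): a pre-initialized inline bio backed by the request's inline bio_vec array for small transfers, and bio_alloc() against the namespace bdev otherwise (line 307 is the same pattern for chained bios when one bio cannot take all segments). A fragment sketching that decision, with the inline-bvec helper, the inline_bio field and the gfp flags assumed:

        sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);

        if (nvmet_use_inline_bvec(req)) {               /* assumed helper */
                /* Small transfer: reuse the bio and bio_vecs embedded in the request. */
                bio = &req->b.inline_bio;               /* assumed field name */
                bio_init(bio, req->ns->bdev, req->inline_bvec,
                         ARRAY_SIZE(req->inline_bvec), opf);
        } else {
                bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
                                GFP_KERNEL);            /* assumed gfp flags */
        }
        bio->bi_iter.bi_sector = sector;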
335 if (!bdev_write_cache(req->ns->bdev)) { in nvmet_bdev_execute_flush()
343 bio_init(bio, req->ns->bdev, req->inline_bvec, in nvmet_bdev_execute_flush()
353 if (!bdev_write_cache(req->ns->bdev)) in nvmet_bdev_flush()
356 if (blkdev_issue_flush(req->ns->bdev)) in nvmet_bdev_flush()
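
Both flush paths first check bdev_write_cache(): if the underlying device has no volatile write cache there is nothing to flush. nvmet_bdev_flush() (lines 353/356) then reduces to a synchronous blkdev_issue_flush(); the sketch below assumes the NVMe status codes used for the return values.

u16 nvmet_bdev_flush(struct nvmet_req *req)
{
        /* No volatile write cache: a flush is a no-op. */
        if (!bdev_write_cache(req->ns->bdev))
                return 0;

        if (blkdev_issue_flush(req->ns->bdev))
                return NVME_SC_INTERNAL | NVME_SC_DNR;  /* assumed status */
        return 0;
}

The asynchronous variant at lines 335/343, nvmet_bdev_execute_flush(), instead builds a flush bio on the request's inline bio_vec array and submits it, completing the request from the bio end_io callback.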
364 struct nvmet_ns *ns = req->ns; in nvmet_bdev_discard_range() local
367 ret = __blkdev_issue_discard(ns->bdev, in nvmet_bdev_discard_range()
368 nvmet_lba_to_sect(ns, range->slba), in nvmet_bdev_discard_range()
369 le32_to_cpu(range->nlb) << (ns->blksize_shift - 9), in nvmet_bdev_discard_range()
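
The discard hits show the unit conversions: the starting LBA goes through nvmet_lba_to_sect(), and the block count (nlb) is shifted by (blksize_shift - 9) to turn namespace blocks into 512-byte sectors before being handed to __blkdev_issue_discard(). A sketch, with the gfp flags and the error handling assumed:

static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
                struct nvme_dsm_range *range, struct bio **bio)
{
        struct nvmet_ns *ns = req->ns;
        int ret;

        ret = __blkdev_issue_discard(ns->bdev,
                        nvmet_lba_to_sect(ns, range->slba),
                        le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
                        GFP_KERNEL, bio);               /* assumed gfp flags */
        if (ret && ret != -EOPNOTSUPP)                  /* assumed error handling */
                return errno_to_nvme_status(req, ret);
        return NVME_SC_SUCCESS;
}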
437 sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba); in nvmet_bdev_execute_write_zeroes()
439 (req->ns->blksize_shift - 9)); in nvmet_bdev_execute_write_zeroes()
441 ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector, in nvmet_bdev_execute_write_zeroes()
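
Write Zeroes uses the same conversion pattern before delegating to __blkdev_issue_zeroout(). A fragment sketching it; the +1 for NVMe's zero-based block count, the gfp/flags arguments and the completion handling are assumptions, not part of the matched lines.

        sector_t sector, nr_sector;
        struct bio *bio = NULL;
        int ret;

        sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
        nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
                        (req->ns->blksize_shift - 9));  /* +1 assumed: NLB is zero-based */

        ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
                        GFP_KERNEL, &bio, 0);           /* assumed gfp/flags */
        if (bio) {
                /* Completion path assumed: finish the request from the bio callback. */
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                submit_bio(bio);
        } else {
                nvmet_req_complete(req, errno_to_nvme_status(req, ret));
        }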
458 if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) in nvmet_bdev_parse_io_cmd()
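
Line 458 is from nvmet_bdev_parse_io_cmd(): protection information is only plumbed through when the controller advertises PI support and the namespace actually carries PI metadata. A sketch of the read/write branch of the opcode switch, with the metadata-length helper and the fallback assumed:

        switch (req->cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->execute = nvmet_bdev_execute_rw;
                /* Only carry PI metadata when controller and namespace both support it. */
                if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
                        req->metadata_len = nvmet_rw_metadata_len(req); /* assumed helper */
                return 0;
        default:
                /* Flush, DSM and Write Zeroes dispatch omitted from this sketch. */
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;    /* assumed fallback */
        }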