// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include "nvmet.h"

void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
	/* Logical blocks per physical block, 0's based. */
	const __le16 lpp0b = to0based(bdev_physical_block_size(bdev) /
				      bdev_logical_block_size(bdev));

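	/*
	 * Worked example: a device with 4096-byte physical and 512-byte
	 * logical blocks has 8 logical blocks per physical block, which
	 * to0based() reports as 7.
	 */
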
	/*
	 * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
	 * NAWUPF, and NACWU are defined for this namespace and should be
	 * used by the host for this namespace instead of the AWUN, AWUPF,
	 * and ACWU fields in the Identify Controller data structure. If
	 * any of these fields are zero that means that the corresponding
	 * field from the identify controller data structure should be used.
	 */
	id->nsfeat |= 1 << 1;
	id->nawun = lpp0b;
	id->nawupf = lpp0b;
	id->nacwu = lpp0b;

	/*
	 * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
	 * NOWS are defined for this namespace and should be used by
	 * the host for I/O optimization.
	 */
	id->nsfeat |= 1 << 4;
	/* NPWG = Namespace Preferred Write Granularity. 0's based */
	id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev));
	/* NPWA = Namespace Preferred Write Alignment. 0's based */
	id->npwa = id->npwg;
	/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
	id->npdg = to0based(bdev_discard_granularity(bdev) /
			    bdev_logical_block_size(bdev));
	/* NPDA = Namespace Preferred Deallocate Alignment */
	id->npda = id->npdg;
	/* NOWS = Namespace Optimal Write Size */
	id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
}

void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
	if (ns->bdev_file) {
		fput(ns->bdev_file);
		ns->bdev = NULL;
		ns->bdev_file = NULL;
	}
}

static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
{
	struct blk_integrity *bi = bdev_get_integrity(ns->bdev);

	if (!bi)
		return;

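	/*
	 * Map the block layer integrity profile to an NVMe protection
	 * information type: with a reference tag the namespace is exposed
	 * as PI Type 1 (ref tag checked against the LBA), without one as
	 * PI Type 3 (ref tag not checked).
	 */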
	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC) {
		ns->metadata_size = bi->tuple_size;
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			ns->pi_type = NVME_NS_DPS_PI_TYPE1;
		else
			ns->pi_type = NVME_NS_DPS_PI_TYPE3;
	} else {
		ns->metadata_size = 0;
	}
}

int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
	int ret;

	/*
	 * When the buffered_io namespace attribute is enabled, the user wants
	 * this block device to be accessed as a file so that it can take
	 * advantage of the page cache.
	 */
	if (ns->buffered_io)
		return -ENOTBLK;

	ns->bdev_file = bdev_file_open_by_path(ns->device_path,
				BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
	if (IS_ERR(ns->bdev_file)) {
		ret = PTR_ERR(ns->bdev_file);
		if (ret != -ENOTBLK) {
			pr_err("failed to open block device %s: (%d)\n",
					ns->device_path, ret);
		}
		ns->bdev_file = NULL;
		return ret;
	}
	ns->bdev = file_bdev(ns->bdev_file);
	ns->size = bdev_nr_bytes(ns->bdev);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ns->pi_type = 0;
	ns->metadata_size = 0;
	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		nvmet_bdev_ns_enable_integrity(ns);

	if (bdev_is_zoned(ns->bdev)) {
		if (!nvmet_bdev_zns_enable(ns)) {
			nvmet_bdev_ns_disable(ns);
			return -EINVAL;
		}
		ns->csi = NVME_CSI_ZNS;
	}

	return 0;
}

void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
{
	ns->size = bdev_nr_bytes(ns->bdev);
}

u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
	u16 status = NVME_SC_SUCCESS;

	if (likely(blk_sts == BLK_STS_OK))
		return status;
	/*
	 * Right now there exists an M : 1 mapping from block layer errors
	 * to NVMe status codes (see nvme_error_status()). For consistency,
	 * when we reverse map we use the most appropriate NVMe status code
	 * from the group of NVMe status codes used in nvme_error_status().
	 */
	switch (blk_sts) {
	case BLK_STS_NOSPC:
		status = NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, length);
		break;
	case BLK_STS_TARGET:
		status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		break;
	case BLK_STS_NOTSUPP:
		status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		break;
	case BLK_STS_MEDIUM:
		status = NVME_SC_ACCESS_DENIED;
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		break;
	case BLK_STS_IOERR:
	default:
		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
		req->error_loc = offsetof(struct nvme_common_command, opcode);
	}

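	/* Record the starting LBA of the failed command for error reporting. */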
	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->error_slba = le64_to_cpu(req->cmd->rw.slba);
		break;
	case nvme_cmd_write_zeroes:
		req->error_slba =
			le64_to_cpu(req->cmd->write_zeroes.slba);
		break;
	default:
		req->error_slba = 0;
	}
	return status;
}

static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	nvmet_req_bio_put(req, bio);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
				struct sg_mapping_iter *miter)
{
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(req->ns->bdev);
	if (unlikely(!bi)) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO,
					bio_max_segs(req->metadata_sg_cnt));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
		     (bi->interval_exp - SECTOR_SHIFT));

	resid = bio_integrity_bytes(bi, bio_sectors(bio));
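	/*
	 * Walk the metadata scatterlist and attach enough protection
	 * information pages to cover every data sector in this bio; the
	 * iterator keeps its position so chained bios continue where the
	 * previous one stopped.
	 */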
	while (resid > 0 && sg_miter_next(miter)) {
		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (unlikely(rc != len)) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;
	}
	sg_miter_stop(miter);

	return 0;
}
#else
static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
				struct sg_mapping_iter *miter)
{
	return -EINVAL;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
	unsigned int sg_cnt = req->sg_cnt;
	struct bio *bio;
	struct scatterlist *sg;
	struct blk_plug plug;
	sector_t sector;
	blk_opf_t opf;
	int i, rc;
	struct sg_mapping_iter prot_miter;
	unsigned int iter_flags;
	unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;

	if (!nvmet_check_transfer_len(req, total_len))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			opf |= REQ_FUA;
		iter_flags = SG_MITER_TO_SG;
	} else {
		opf = REQ_OP_READ;
		iter_flags = SG_MITER_FROM_SG;
	}

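	/* Requests backed by PCI peer-to-peer memory must not be merged. */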
	if (is_pci_p2pdma_page(sg_page(req->sg)))
		opf |= REQ_NOMERGE;

	sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->b.inline_bio;
		bio_init(bio, req->ns->bdev, req->inline_bvec,
			 ARRAY_SIZE(req->inline_bvec), opf);
	} else {
		bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,
				GFP_KERNEL);
	}
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;

	blk_start_plug(&plug);
	if (req->metadata_len)
		sg_miter_start(&prot_miter, req->metadata_sg,
			       req->metadata_sg_cnt, iter_flags);

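	/*
	 * Map the data scatterlist into one or more bios. When the current
	 * bio is full, attach its protection information (if any), allocate
	 * a fresh bio for the remaining pages, chain it to the previous one,
	 * and submit the previous bio.
	 */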
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			if (req->metadata_len) {
				rc = nvmet_bdev_alloc_bip(req, bio,
							  &prot_miter);
				if (unlikely(rc)) {
					bio_io_error(bio);
					return;
				}
			}

			bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
					opf, GFP_KERNEL);
			bio->bi_iter.bi_sector = sector;

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

	if (req->metadata_len) {
		rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
		if (unlikely(rc)) {
			bio_io_error(bio);
			return;
		}
	}

	submit_bio(bio);
	blk_finish_plug(&plug);
}

static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
	struct bio *bio = &req->b.inline_bio;

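	/* A flush is a no-op for a device without a volatile write cache. */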
	if (!bdev_write_cache(req->ns->bdev)) {
		nvmet_req_complete(req, NVME_SC_SUCCESS);
		return;
	}

	if (!nvmet_check_transfer_len(req, 0))
		return;

	bio_init(bio, req->ns->bdev, req->inline_bvec,
		 ARRAY_SIZE(req->inline_bvec), REQ_OP_WRITE | REQ_PREFLUSH);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;

	submit_bio(bio);
}

u16 nvmet_bdev_flush(struct nvmet_req *req)
{
	if (!bdev_write_cache(req->ns->bdev))
		return 0;

	if (blkdev_issue_flush(req->ns->bdev))
		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
	return 0;
}

static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
		struct nvme_dsm_range *range, struct bio **bio)
{
	struct nvmet_ns *ns = req->ns;
	int ret;

	ret = __blkdev_issue_discard(ns->bdev,
			nvmet_lba_to_sect(ns, range->slba),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, bio);
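	/*
	 * Deallocate is advisory, so an unsupported discard is not an
	 * error; any other failure is reported with the failing LBA.
	 */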
	if (ret && ret != -EOPNOTSUPP) {
		req->error_slba = le64_to_cpu(range->slba);
		return errno_to_nvme_status(req, ret);
	}
	return NVME_SC_SUCCESS;
}

static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

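	/* dsm.nr is a 0's based count, hence the inclusive loop bound. */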
	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_bdev_discard_range(req, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status)
			bio_io_error(bio);
		else
			submit_bio(bio);
	} else {
		nvmet_req_complete(req, status);
	}
}

static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
		return;

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_bdev_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	sector_t sector;
	sector_t nr_sector;
	int ret;

	if (!nvmet_check_transfer_len(req, 0))
		return;

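	/*
	 * The NLB field is a 0's based count of logical blocks; convert it
	 * to a count of 512-byte sectors.
	 */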
	sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9));

	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
			GFP_KERNEL, &bio, 0);
	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, errno_to_nvme_status(req, ret));
	}
}

u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_bdev_execute_rw;
		if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
			req->metadata_len = nvmet_rw_metadata_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_bdev_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_bdev_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_bdev_execute_write_zeroes;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}