/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

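/*
 * Completion handler for every bio submitted by this file: translate the
 * block layer status into an NVMe status code, complete the nvmet request,
 * and drop the bio reference unless it is the inline bio embedded in the
 * request (which is freed along with the request itself).
 */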
static void nvmet_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	nvmet_req_complete(req,
		bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);

	if (bio != &req->inline_bio)
		bio_put(bio);
}

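/*
 * The NVMe read/write "number of logical blocks" field is zero's based
 * (0 means one block), hence the +1; shifting by blksize_shift converts
 * logical blocks to bytes.
 */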
static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

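/*
 * Set up the bio embedded in the request with its inline bio_vec array,
 * so that small I/Os do not need a separate bio allocation.
 */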
static void nvmet_inline_bio_init(struct nvmet_req *req)
{
	struct bio *bio = &req->inline_bio;

	bio_init(bio, req->inline_bvec, NVMET_MAX_INLINE_BIOVEC);
}

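/*
 * Build and submit a bio chain for a read or write command: convert the
 * starting LBA from namespace logical blocks to 512-byte sectors, add the
 * pages of the request scatterlist to the inline bio, and allocate a new
 * chained bio whenever the current one is full.  After the final
 * submit_bio() the backing queue is polled for completion.
 */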
static void nvmet_execute_rw(struct nvmet_req *req)
{
	int sg_cnt = req->sg_cnt;
	struct scatterlist *sg;
	struct bio *bio;
	sector_t sector;
	blk_qc_t cookie;
	int op, op_flags = 0, i;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		op = REQ_OP_WRITE;
		op_flags = REQ_SYNC | REQ_IDLE;
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			op_flags |= REQ_FUA;
	} else {
		op = REQ_OP_READ;
	}

	sector = le64_to_cpu(req->cmd->rw.slba);
	sector <<= (req->ns->blksize_shift - 9);

	nvmet_inline_bio_init(req);
	bio = &req->inline_bio;
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio_set_op_attrs(bio, op, op_flags);

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			struct bio *prev = bio;

			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
			bio_set_dev(bio, req->ns->bdev);
			bio->bi_iter.bi_sector = sector;
			bio_set_op_attrs(bio, op, op_flags);

			bio_chain(bio, prev);
			submit_bio(prev);
		}

		sector += sg->length >> 9;
		sg_cnt--;
	}

	cookie = submit_bio(bio);

	blk_mq_poll(bdev_get_queue(req->ns->bdev), cookie);
}

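/*
 * A flush is implemented as an empty preflush write bio against the
 * backing block device.
 */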
static void nvmet_execute_flush(struct nvmet_req *req)
{
	struct bio *bio;

	nvmet_inline_bio_init(req);
	bio = &req->inline_bio;

	bio_set_dev(bio, req->ns->bdev);
	bio->bi_private = req;
	bio->bi_end_io = nvmet_bio_done;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	submit_bio(bio);
}

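/*
 * Translate a single DSM range descriptor from namespace logical blocks
 * to 512-byte sectors and queue it as a discard, accumulating the
 * resulting bios in *bio so that all ranges complete through one chain.
 */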
static u16 nvmet_discard_range(struct nvmet_ns *ns,
		struct nvme_dsm_range *range, struct bio **bio)
{
	if (__blkdev_issue_discard(ns->bdev,
			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
			GFP_KERNEL, 0, bio))
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}

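/*
 * The DSM "number of ranges" field is zero's based, hence the <= in the
 * loop below.  Each range descriptor is copied out of the command SGL and
 * turned into a discard on the backing device.
 */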
static void nvmet_execute_discard(struct nvmet_req *req)
{
	struct nvme_dsm_range range;
	struct bio *bio = NULL;
	int i;
	u16 status;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
				sizeof(range));
		if (status)
			break;

		status = nvmet_discard_range(req->ns, &range, &bio);
		if (status)
			break;
	}

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		if (status) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}
	} else {
		nvmet_req_complete(req, status);
	}
}

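/*
 * Only the Deallocate attribute is implemented.  The Integral Dataset for
 * Read/Write attributes are advisory hints, so they (and any unknown
 * attributes) are completed successfully without doing any work.
 */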
static void nvmet_execute_dsm(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

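/*
 * Like the rw length field, write_zeroes->length is zero's based, so the
 * sector count is (length + 1) logical blocks converted to 512-byte
 * sectors.
 */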
static void nvmet_execute_write_zeroes(struct nvmet_req *req)
{
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	struct bio *bio = NULL;
	u16 status = NVME_SC_SUCCESS;
	sector_t sector;
	sector_t nr_sector;

	sector = le64_to_cpu(write_zeroes->slba) <<
		(req->ns->blksize_shift - 9);
	nr_sector = ((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
		(req->ns->blksize_shift - 9);

	if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
				GFP_KERNEL, &bio, 0))
		status = NVME_SC_INTERNAL | NVME_SC_DNR;

	if (bio) {
		bio->bi_private = req;
		bio->bi_end_io = nvmet_bio_done;
		submit_bio(bio);
	} else {
		nvmet_req_complete(req, status);
	}
}

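/*
 * Validate the controller state and namespace, then set up the execute
 * handler and expected data length for the given I/O command.  Commands
 * that are not implemented fail with Invalid Opcode.
 */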
u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret)) {
		req->ns = NULL;
		return ret;
	}

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (unlikely(!req->ns))
		return NVME_SC_INVALID_NS | NVME_SC_DNR;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_execute_write_zeroes;
		req->data_len = 0;
		return 0;
	default:
		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		       req->sq->qid);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}