// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/blk-integrity.h>
#include <linux/ptrace.h>	/* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include <linux/io_uring/cmd.h>
#include "nvme.h"

enum {
	NVME_IOCTL_VEC		= (1 << 0),
	NVME_IOCTL_PARTITION	= (1 << 1),
};

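/*
 * Gate unprivileged passthrough: anything not explicitly allowed below
 * falls through to the admin label and requires CAP_SYS_ADMIN.
 */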
static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
		unsigned int flags, bool open_for_write)
{
	u32 effects;

	/*
	 * Do not allow unprivileged passthrough on partitions, as that allows an
	 * escape from the containment of the partition.
	 */
	if (flags & NVME_IOCTL_PARTITION)
		goto admin;

	/*
	 * Do not allow unprivileged processes to send vendor specific or fabrics
	 * commands as we can't be sure about their effects.
	 */
	if (c->common.opcode >= nvme_cmd_vendor_start ||
	    c->common.opcode == nvme_fabrics_command)
		goto admin;

	/*
	 * Do not allow unprivileged passthrough of admin commands except
	 * for a subset of identify commands that contain information required
	 * to form proper I/O commands in userspace and do not expose any
	 * potentially sensitive information.
	 */
	if (!ns) {
		if (c->common.opcode == nvme_admin_identify) {
			switch (c->identify.cns) {
			case NVME_ID_CNS_NS:
			case NVME_ID_CNS_CS_NS:
			case NVME_ID_CNS_NS_CS_INDEP:
			case NVME_ID_CNS_CS_CTRL:
			case NVME_ID_CNS_CTRL:
				return true;
			}
		}
		goto admin;
	}

	/*
	 * Check if the controller provides a Commands Supported and Effects log
	 * and marks this command as supported.  If not, reject unprivileged
	 * passthrough.
	 */
	effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
	if (!(effects & NVME_CMD_EFFECTS_CSUPP))
		goto admin;

	/*
	 * Don't allow passthrough for commands that have intrusive (or unknown)
	 * effects.
	 */
	if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
			NVME_CMD_EFFECTS_UUID_SEL |
			NVME_CMD_EFFECTS_SCOPE_MASK))
		goto admin;

	/*
	 * Only allow I/O commands that transfer data to the controller or that
	 * change the logical block contents if the file descriptor is open for
	 * writing.
	 */
	if ((nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC)) &&
	    !open_for_write)
		goto admin;

	return true;
admin:
	return capable(CAP_SYS_ADMIN);
}

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}

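/*
 * Allocate and initialize a passthrough request, marking it with
 * NVME_REQ_USERCMD so the completion path treats it as user-submitted.
 */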
static struct request *nvme_alloc_user_request(struct request_queue *q,
		struct nvme_command *cmd, blk_opf_t rq_flags,
		blk_mq_req_flags_t blk_flags)
{
	struct request *req;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
	if (IS_ERR(req))
		return req;
	nvme_init_request(req, cmd);
	nvme_req(req)->flags |= NVME_REQ_USERCMD;
	return req;
}

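/*
 * Map the user data buffer (and optional metadata buffer) into @req.
 * Note: the request is freed on failure, so callers must not clean it
 * up again in their error paths.
 */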
static int nvme_map_user_request(struct request *req, u64 ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
{
	struct request_queue *q = req->q;
	struct nvme_ns *ns = q->queuedata;
	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
	bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
	bool has_metadata = meta_buffer && meta_len;
	struct bio *bio = NULL;
	int ret;

	if (!nvme_ctrl_sgl_supported(ctrl))
		dev_warn_once(ctrl->device, "using unchecked data buffer\n");
	if (has_metadata) {
		if (!supports_metadata) {
			ret = -EINVAL;
			goto out;
		}
		if (!nvme_ctrl_meta_sgl_supported(ctrl))
			dev_warn_once(ctrl->device,
				      "using unchecked metadata buffer\n");
	}

	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
		struct iov_iter iter;

		/* fixedbufs is only for non-vectored io */
		if (flags & NVME_IOCTL_VEC) {
			ret = -EINVAL;
			goto out;
		}
		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
				rq_data_dir(req), &iter, ioucmd);
		if (ret < 0)
			goto out;
		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
	} else {
		ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
				bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
				0, rq_data_dir(req));
	}

	if (ret)
		goto out;

	bio = req->bio;
	if (bdev)
		bio_set_dev(bio, bdev);

	if (has_metadata) {
		ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len,
						meta_seed);
		if (ret)
			goto out_unmap;
	}

	return ret;

out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
out:
	blk_mq_free_request(req);
	return ret;
}

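/*
 * Synchronously execute a user passthrough command and report the 64-bit
 * completion result back through @result.
 */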
static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u64 *result, unsigned timeout, unsigned int flags)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl;
	struct request *req;
	struct bio *bio;
	u32 effects;
	int ret;

	req = nvme_alloc_user_request(q, cmd, 0, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout;
	if (ubuffer && bufflen) {
		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
				meta_len, meta_seed, NULL, flags);
		if (ret)
			return ret;
	}

	bio = req->bio;
	ctrl = nvme_req(req)->ctrl;

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	ret = nvme_execute_rq(req, false);
	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (bio)
		blk_rq_unmap_user(bio);
	blk_mq_free_request(req);

	if (effects)
		nvme_passthru_end(ctrl, ns, effects, cmd, ret);

	return ret;
}

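/* Handler for NVME_IOCTL_SUBMIT_IO: the fixed-layout read/write/compare path. */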
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->head->lba_shift;

	if ((io.control & NVME_RW_PRINFO_PRACT) &&
	    (ns->head->ms == ns->head->pi_size)) {
		/*
		 * Protection information is stripped/inserted by the
		 * controller.
		 */
		if (nvme_to_user_ptr(io.metadata))
			return -EINVAL;
		meta_len = 0;
		metadata = NULL;
	} else {
		meta_len = (io.nblocks + 1) * ns->head->ms;
		metadata = nvme_to_user_ptr(io.metadata);
	}

	if (ns->head->features & NVME_NS_EXT_LBAS) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.lbat = cpu_to_le16(io.apptag);
	c.rw.lbatm = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
			meta_len, lower_32_bits(io.slba), NULL, 0, 0);
}

static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
					struct nvme_ns *ns, __u32 nsid)
{
	if (ns && nsid != ns->head->ns_id) {
		dev_err(ctrl->device,
			"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
			current->comm, nsid, ns->head->ns_id);
		return false;
	}

	return true;
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
		bool open_for_write)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u64 result;
	int status;

	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (!nvme_cmd_allowed(ns, &c, 0, open_for_write))
		return -EACCES;

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
			cmd.metadata_len, 0, &result, timeout, 0);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
		bool open_for_write)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (!nvme_cmd_allowed(ns, &c, flags, open_for_write))
		return -EACCES;

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
			cmd.metadata_len, 0, &cmd.result, timeout, flags);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

struct nvme_uring_data {
	__u64	metadata;
	__u64	addr;
	__u32	data_len;
	__u32	metadata_len;
	__u32	timeout_ms;
};

/*
 * This overlays struct io_uring_cmd pdu.
 * Expect build errors if this grows larger than that.
 */
struct nvme_uring_cmd_pdu {
	struct request *req;
	struct bio *bio;
	u64 result;
	int status;
};

static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}

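/* Task-work callback: unmap the data buffer and post the completion. */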
static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
			       unsigned issue_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

	if (pdu->bio)
		blk_rq_unmap_user(pdu->bio);
	io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
}

static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
						blk_status_t err)
{
	struct io_uring_cmd *ioucmd = req->end_io_data;
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

	if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
		pdu->status = -EINTR;
	} else {
		pdu->status = nvme_req(req)->status;
		if (!pdu->status)
			pdu->status = blk_status_to_errno(err);
	}
	pdu->result = le64_to_cpu(nvme_req(req)->result.u64);

	/*
	 * IOPOLL could potentially complete this request directly, but
	 * if multiple rings are polling on the same queue, then it's possible
	 * for one ring to find completions for another ring. Punting the
	 * completion via task_work will always direct it to the right
	 * location, rather than potentially complete requests for ringA
	 * under iopoll invocations from ringB.
	 */
	io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
	return RQ_END_IO_FREE;
}

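/*
 * Build and issue one NVMe passthrough command from an io_uring SQE.
 * Returns -EIOCBQUEUED once the request has been queued asynchronously.
 */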
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
	struct nvme_uring_data d;
	struct nvme_command c;
	struct request *req;
	blk_opf_t rq_flags = REQ_ALLOC_CACHE;
	blk_mq_req_flags_t blk_flags = 0;
	int ret;

	c.common.opcode = READ_ONCE(cmd->opcode);
	c.common.flags = READ_ONCE(cmd->flags);
	if (c.common.flags)
		return -EINVAL;

	c.common.command_id = 0;
	c.common.nsid = cpu_to_le32(cmd->nsid);
	if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
		return -EINVAL;

	c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
	c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
	c.common.metadata = 0;
	c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
	c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
	c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
	c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
	c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
	c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
	c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));

	if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode & FMODE_WRITE))
		return -EACCES;

	d.metadata = READ_ONCE(cmd->metadata);
	d.addr = READ_ONCE(cmd->addr);
	d.data_len = READ_ONCE(cmd->data_len);
	d.metadata_len = READ_ONCE(cmd->metadata_len);
	d.timeout_ms = READ_ONCE(cmd->timeout_ms);

	if (issue_flags & IO_URING_F_NONBLOCK) {
		rq_flags |= REQ_NOWAIT;
		blk_flags = BLK_MQ_REQ_NOWAIT;
	}
	if (issue_flags & IO_URING_F_IOPOLL)
		rq_flags |= REQ_POLLED;

	req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;

	if (d.addr && d.data_len) {
		ret = nvme_map_user_request(req, d.addr,
			d.data_len, nvme_to_user_ptr(d.metadata),
			d.metadata_len, 0, ioucmd, vec);
		if (ret)
			return ret;
	}

	/* to free bio on completion, as req->bio will be null at that time */
	pdu->bio = req->bio;
	pdu->req = req;
	req->end_io_data = ioucmd;
	req->end_io = nvme_uring_cmd_end_io;
	blk_execute_rq_nowait(req, false);
	return -EIOCBQUEUED;
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
		void __user *argp, bool open_for_write)
{
	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
	default:
		return sed_ioctl(ctrl->opal_dev, cmd, argp);
	}
}

#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */

static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, unsigned int flags, bool open_for_write)
{
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->head->ns_id;
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, argp, flags, open_for_write);
	/*
	 * struct nvme_user_io can have different padding on some 32-bit ABIs.
	 * Just accept the compat version as all fields that are used are the
	 * same size and at the same offset.
	 */
#ifdef COMPAT_FOR_U64_ALIGNMENT
	case NVME_IOCTL_SUBMIT_IO32:
#endif
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, argp);
	case NVME_IOCTL_IO64_CMD_VEC:
		flags |= NVME_IOCTL_VEC;
		fallthrough;
	case NVME_IOCTL_IO64_CMD:
		return nvme_user_cmd64(ns->ctrl, ns, argp, flags,
				       open_for_write);
	default:
		return -ENOTTY;
	}
}

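/*
 * Illustrative userspace usage (a sketch, not part of this file): an admin
 * passthrough command reaches this entry point roughly as follows.
 *
 *	struct nvme_passthru_cmd cmd = {
 *		.opcode = 0x06,			// Identify
 *		.cdw10 = 1,			// CNS 1: Identify Controller
 *		.addr = (__u64)(uintptr_t)buf,	// 4096-byte buffer
 *		.data_len = 4096,
 *	};
 *	ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
 */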
int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	bool open_for_write = mode & BLK_OPEN_WRITE;
	void __user *argp = (void __user *)arg;
	unsigned int flags = 0;

	if (bdev_is_partition(bdev))
		flags |= NVME_IOCTL_PARTITION;

	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
	return nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns =
		container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
	bool open_for_write = file->f_mode & FMODE_WRITE;
	void __user *argp = (void __user *)arg;

	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);
	return nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
}

static int nvme_uring_cmd_checks(unsigned int issue_flags)
{
	/* NVMe passthrough requires big SQE/CQE support */
	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
	    (IO_URING_F_SQE128|IO_URING_F_CQE32))
		return -EOPNOTSUPP;
	return 0;
}

static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
			     unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_IO:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_IO_VEC:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
			struct nvme_ns, cdev);

	return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}

int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
				 struct io_comp_batch *iob,
				 unsigned int poll_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	struct request *req = pdu->req;

	if (req && blk_rq_is_poll(req))
		return blk_rq_poll(req, iob, poll_flags);
	return 0;
}
#ifdef CONFIG_NVME_MULTIPATH
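/*
 * Controller ioctls on a multipath node: grab a controller reference and
 * drop the head SRCU lock before dispatching, hence the __releases
 * annotation below.
 */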
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, struct nvme_ns_head *head, int srcu_idx,
		bool open_for_write)
	__releases(&head->srcu)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	srcu_read_unlock(&head->srcu, srcu_idx);
	ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, open_for_write);

	nvme_put_ctrl(ctrl);
	return ret;
}

int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	bool open_for_write = mode & BLK_OPEN_WRITE;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;
	unsigned int flags = 0;

	if (bdev_is_partition(bdev))
		flags |= NVME_IOCTL_PARTITION;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
					       open_for_write);

	ret = nvme_ns_ioctl(ns, cmd, argp, flags, open_for_write);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	bool open_for_write = file->f_mode & FMODE_WRITE;
	struct cdev *cdev = file_inode(file)->i_cdev;
	struct nvme_ns_head *head =
		container_of(cdev, struct nvme_ns_head, cdev);
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
				open_for_write);

	ret = nvme_ns_ioctl(ns, cmd, argp, 0, open_for_write);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags)
{
	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EINVAL;

	if (ns)
		ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */

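/* Admin passthrough via io_uring on the controller character device. */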
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ioucmd->file->private_data;
	int ret;

	/* IOPOLL not supported yet */
	if (issue_flags & IO_URING_F_IOPOLL)
		return -EOPNOTSUPP;

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_ADMIN:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_ADMIN_VEC:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

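/*
 * NVME_IOCTL_IO_CMD on the controller character device: deprecated, and
 * only usable while the controller exposes exactly one namespace.
 */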
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
		bool open_for_write)
{
	struct nvme_ns *ns;
	int ret, srcu_idx;

	srcu_idx = srcu_read_lock(&ctrl->srcu);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_or_null_rcu(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	if (!nvme_get_ns(ns)) {
		ret = -ENXIO;
		goto out_unlock;
	}
	srcu_read_unlock(&ctrl->srcu, srcu_idx);

	ret = nvme_user_cmd(ctrl, ns, argp, 0, open_for_write);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
	return ret;
}

long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	bool open_for_write = file->f_mode & FMODE_WRITE;
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, 0, open_for_write);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp, open_for_write);
	case NVME_IOCTL_RESET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}