// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/ptrace.h>	/* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include <linux/io_uring.h>
#include "nvme.h"

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}

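/*
 * Bounce user metadata into a kernel buffer and attach it to the request as a
 * single-segment bio integrity payload.  For REQ_OP_DRV_OUT (host-to-device)
 * commands the buffer is filled from userspace; otherwise it is zeroed here
 * and copied back to userspace by nvme_finish_user_metadata().
 */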
static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
		unsigned len, u32 seed)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;
	struct bio *bio = req->bio;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	if (req_op(req) == REQ_OP_DRV_OUT) {
		ret = -EFAULT;
		if (copy_from_user(buf, ubuf, len))
			goto out_free_meta;
	} else {
		memset(buf, 0, len);
	}

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret != len) {
		ret = -ENOMEM;
		goto out_free_meta;
	}

	req->cmd_flags |= REQ_INTEGRITY;
	return buf;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

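/*
 * Copy the metadata bounce buffer back to userspace for device-to-host
 * (REQ_OP_DRV_IN) commands that completed successfully, then free it.
 */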
static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
		void *meta, unsigned len, int ret)
{
	if (!ret && req_op(req) == REQ_OP_DRV_IN &&
	    copy_to_user(ubuf, meta, len))
		ret = -EFAULT;
	kfree(meta);
	return ret;
}

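/*
 * Allocate a passthrough request for the given command and mark it with
 * NVME_REQ_USERCMD so the rest of the driver can tell it apart from
 * kernel-internal requests.
 */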
static struct request *nvme_alloc_user_request(struct request_queue *q,
		struct nvme_command *cmd, blk_opf_t rq_flags,
		blk_mq_req_flags_t blk_flags)
{
	struct request *req;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
	if (IS_ERR(req))
		return req;
	nvme_init_request(req, cmd);
	nvme_req(req)->flags |= NVME_REQ_USERCMD;
	return req;
}

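/*
 * Map the user data (and optional metadata) buffer to the request.  For
 * io_uring commands with a registered (fixed) buffer the pages are imported
 * from the io_uring context; otherwise the plain or vectored user address is
 * mapped with blk_rq_map_user_io().  On failure the request is freed here,
 * so callers must not touch it after an error return.
 */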
static int nvme_map_user_request(struct request *req, u64 ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
		bool vec)
{
	struct request_queue *q = req->q;
	struct nvme_ns *ns = q->queuedata;
	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
		struct iov_iter iter;

		/* fixedbufs is only for non-vectored io */
		if (WARN_ON_ONCE(vec))
			return -EINVAL;
		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
				rq_data_dir(req), &iter, ioucmd);
		if (ret < 0)
			goto out;
		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
	} else {
		ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
				bufflen, GFP_KERNEL, vec, 0, 0,
				rq_data_dir(req));
	}

	if (ret)
		goto out;
	bio = req->bio;
	if (bdev)
		bio_set_dev(bio, bdev);

	if (bdev && meta_buffer && meta_len) {
		meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
				meta_seed);
		if (IS_ERR(meta)) {
			ret = PTR_ERR(meta);
			goto out_unmap;
		}
		*metap = meta;
	}

	return ret;

out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
out:
	blk_mq_free_request(req);
	return ret;
}

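/*
 * Synchronous passthrough path used by the ioctl handlers: allocate and map
 * the request, execute it, copy back metadata and the completion result, and
 * tear everything down before returning.
 */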
static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, u64 ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u64 *result, unsigned timeout, bool vec)
{
	struct nvme_ctrl *ctrl;
	struct request *req;
	void *meta = NULL;
	struct bio *bio;
	u32 effects;
	int ret;

	req = nvme_alloc_user_request(q, cmd, 0, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout;
	if (ubuffer && bufflen) {
		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
				meta_len, meta_seed, &meta, NULL, vec);
		if (ret)
			return ret;
	}

	bio = req->bio;
	ctrl = nvme_req(req)->ctrl;

	ret = nvme_execute_passthru_rq(req, &effects);

	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (meta)
		ret = nvme_finish_user_metadata(req, meta_buffer, meta,
						meta_len, ret);
	if (bio)
		blk_rq_unmap_user(bio);
	blk_mq_free_request(req);

	if (effects)
		nvme_passthru_end(ctrl, effects, cmd, ret);

	return ret;
}

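/*
 * Handler for NVME_IOCTL_SUBMIT_IO: translate the legacy struct nvme_user_io
 * into a read/write/compare NVMe command.  Data and metadata lengths are
 * derived from the namespace LBA format; for extended-LBA namespaces the
 * metadata is carried inline with the data buffer.
 */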
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;

	if ((io.control & NVME_RW_PRINFO_PRACT) &&
	    ns->ms == sizeof(struct t10_pi_tuple)) {
		/*
		 * Protection information is stripped/inserted by the
		 * controller.
		 */
		if (nvme_to_user_ptr(io.metadata))
			return -EINVAL;
		meta_len = 0;
		metadata = NULL;
	} else {
		meta_len = (io.nblocks + 1) * ns->ms;
		metadata = nvme_to_user_ptr(io.metadata);
	}

	if (ns->features & NVME_NS_EXT_LBAS) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c,
			io.addr, length,
			metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
			false);
}

static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
		struct nvme_ns *ns, __u32 nsid)
{
	if (ns && nsid != ns->head->ns_id) {
		dev_err(ctrl->device,
			"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
			current->comm, nsid, ns->head->ns_id);
		return false;
	}

	return true;
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u64 result;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			cmd.addr, cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &result, timeout, false);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd64 __user *ucmd, bool vec)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			cmd.addr, cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &cmd.result, timeout, vec);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

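/*
 * Snapshot of the buffer-describing fields of the uring passthrough SQE,
 * copied with READ_ONCE() in nvme_uring_cmd_io() so each field is read from
 * the submission queue entry exactly once.
 */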
struct nvme_uring_data {
	__u64	metadata;
	__u64	addr;
	__u32	data_len;
	__u32	metadata_len;
	__u32	timeout_ms;
};

/*
 * This overlays struct io_uring_cmd pdu.
 * Expect build errors if this grows larger than that.
 */
struct nvme_uring_cmd_pdu {
	union {
		struct bio *bio;
		struct request *req;
	};
	u32 meta_len;
	u32 nvme_status;
	union {
		struct {
			void *meta; /* kernel-resident buffer */
			void __user *meta_buffer;
		};
		u64 result;
	} u;
};

static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}

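/*
 * Completion callbacks, run from task context (or called directly for polled
 * I/O): the _meta_cb variant additionally copies back the metadata bounce
 * buffer before posting the CQE via io_uring_cmd_done().
 */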
static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
		unsigned issue_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	struct request *req = pdu->req;
	int status;
	u64 result;

	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		status = -EINTR;
	else
		status = nvme_req(req)->status;

	result = le64_to_cpu(nvme_req(req)->result.u64);

	if (pdu->meta_len)
		status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
					pdu->u.meta, pdu->meta_len, status);
	if (req->bio)
		blk_rq_unmap_user(req->bio);
	blk_mq_free_request(req);

	io_uring_cmd_done(ioucmd, status, result, issue_flags);
}

static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
		unsigned issue_flags)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

	if (pdu->bio)
		blk_rq_unmap_user(pdu->bio);

	io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
}

static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
		blk_status_t err)
{
	struct io_uring_cmd *ioucmd = req->end_io_data;
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	void *cookie = READ_ONCE(ioucmd->cookie);

	req->bio = pdu->bio;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
		pdu->nvme_status = -EINTR;
	} else {
		pdu->nvme_status = nvme_req(req)->status;
		if (!pdu->nvme_status)
			pdu->nvme_status = blk_status_to_errno(err);
	}
	pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);

	/*
	 * For iopoll, complete it directly.
	 * Otherwise, move the completion to task work.
	 */
	if (cookie != NULL && blk_rq_is_poll(req))
		nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
	else
		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);

	return RQ_END_IO_FREE;
}

static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
		blk_status_t err)
{
	struct io_uring_cmd *ioucmd = req->end_io_data;
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	void *cookie = READ_ONCE(ioucmd->cookie);

	req->bio = pdu->bio;
	pdu->req = req;

	/*
	 * For iopoll, complete it directly.
	 * Otherwise, move the completion to task work.
	 */
	if (cookie != NULL && blk_rq_is_poll(req))
		nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
	else
		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);

	return RQ_END_IO_NONE;
}

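/*
 * Build and queue an asynchronous passthrough command from a uring_cmd SQE.
 * The command fields are read from the big (128 byte) SQE with READ_ONCE(),
 * the request is mapped and then started with blk_execute_rq_nowait(); the
 * completion is posted from the end_io handlers above.  Returns -EIOCBQUEUED
 * on successful submission.
 */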
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	const struct nvme_uring_cmd *cmd = ioucmd->cmd;
	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
	struct nvme_uring_data d;
	struct nvme_command c;
	struct request *req;
	blk_opf_t rq_flags = 0;
	blk_mq_req_flags_t blk_flags = 0;
	void *meta = NULL;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	c.common.opcode = READ_ONCE(cmd->opcode);
	c.common.flags = READ_ONCE(cmd->flags);
	if (c.common.flags)
		return -EINVAL;

	c.common.command_id = 0;
	c.common.nsid = cpu_to_le32(cmd->nsid);
	if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
		return -EINVAL;

	c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
	c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
	c.common.metadata = 0;
	c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
	c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
	c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
	c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
	c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
	c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
	c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));

	d.metadata = READ_ONCE(cmd->metadata);
	d.addr = READ_ONCE(cmd->addr);
	d.data_len = READ_ONCE(cmd->data_len);
	d.metadata_len = READ_ONCE(cmd->metadata_len);
	d.timeout_ms = READ_ONCE(cmd->timeout_ms);

	if (issue_flags & IO_URING_F_NONBLOCK) {
		rq_flags = REQ_NOWAIT;
		blk_flags = BLK_MQ_REQ_NOWAIT;
	}
	if (issue_flags & IO_URING_F_IOPOLL)
		rq_flags |= REQ_POLLED;

retry:
	req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;

	if (d.addr && d.data_len) {
		ret = nvme_map_user_request(req, d.addr,
			d.data_len, nvme_to_user_ptr(d.metadata),
			d.metadata_len, 0, &meta, ioucmd, vec);
		if (ret)
			return ret;
	}

	if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
		if (unlikely(!req->bio)) {
			/* we can't poll this, so alloc regular req instead */
			blk_mq_free_request(req);
			rq_flags &= ~REQ_POLLED;
			goto retry;
		} else {
			WRITE_ONCE(ioucmd->cookie, req->bio);
			req->bio->bi_opf |= REQ_POLLED;
		}
	}
	/* to free bio on completion, as req->bio will be null at that time */
	pdu->bio = req->bio;
	pdu->meta_len = d.metadata_len;
	req->end_io_data = ioucmd;
	if (pdu->meta_len) {
		pdu->u.meta = meta;
		pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
		req->end_io = nvme_uring_cmd_end_io_meta;
	} else {
		req->end_io = nvme_uring_cmd_end_io;
	}
	blk_execute_rq_nowait(req, false);
	return -EIOCBQUEUED;
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
		void __user *argp)
{
	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, false);
	default:
		return sed_ioctl(ctrl->opal_dev, cmd, argp);
	}
}

#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */

static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp)
{
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->head->ns_id;
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, argp);
	/*
	 * struct nvme_user_io can have different padding on some 32-bit ABIs.
	 * Just accept the compat version as all fields that are used are the
	 * same size and at the same offset.
	 */
#ifdef COMPAT_FOR_U64_ALIGNMENT
	case NVME_IOCTL_SUBMIT_IO32:
#endif
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, argp);
	case NVME_IOCTL_IO64_CMD:
		return nvme_user_cmd64(ns->ctrl, ns, argp, false);
	case NVME_IOCTL_IO64_CMD_VEC:
		return nvme_user_cmd64(ns->ctrl, ns, argp, true);
	default:
		return -ENOTTY;
	}
}

static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg)
{
	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, arg);
	return nvme_ns_ioctl(ns, cmd, arg);
}

int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	return __nvme_ioctl(ns, cmd, (void __user *)arg);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns =
		container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);

	return __nvme_ioctl(ns, cmd, (void __user *)arg);
}

static int nvme_uring_cmd_checks(unsigned int issue_flags)
{
	/* NVMe passthrough requires big SQE/CQE support */
	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
	    (IO_URING_F_SQE128|IO_URING_F_CQE32))
		return -EOPNOTSUPP;
	return 0;
}

static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
		unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_IO:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_IO_VEC:
		ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
			struct nvme_ns, cdev);

	return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}

int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
				 struct io_comp_batch *iob,
				 unsigned int poll_flags)
{
	struct bio *bio;
	int ret = 0;
	struct nvme_ns *ns;
	struct request_queue *q;

	rcu_read_lock();
	bio = READ_ONCE(ioucmd->cookie);
	ns = container_of(file_inode(ioucmd->file)->i_cdev,
			struct nvme_ns, cdev);
	q = ns->queue;
	if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
		ret = bio_poll(bio, iob, poll_flags);
	rcu_read_unlock();
	return ret;
}
#ifdef CONFIG_NVME_MULTIPATH
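/*
 * Controller ioctls on a multipath node: take a reference on the controller
 * and drop the head SRCU lock before issuing the command, for the reasons
 * documented in nvme_ns_head_ioctl() below.
 */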
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, struct nvme_ns_head *head, int srcu_idx)
	__releases(&head->srcu)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	srcu_read_unlock(&head->srcu, srcu_idx);
	ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp);

	nvme_put_ctrl(ctrl);
	return ret;
}

int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct cdev *cdev = file_inode(file)->i_cdev;
	struct nvme_ns_head *head =
		container_of(cdev, struct nvme_ns_head, cdev);
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	ret = nvme_ns_ioctl(ns, cmd, argp);
out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags)
{
	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EINVAL;

	if (ns)
		ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
		struct io_comp_batch *iob,
		unsigned int poll_flags)
{
	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	struct bio *bio;
	int ret = 0;
	struct request_queue *q;

	if (ns) {
		rcu_read_lock();
		bio = READ_ONCE(ioucmd->cookie);
		q = ns->queue;
		if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio
				&& bio->bi_bdev)
			ret = bio_poll(bio, iob, poll_flags);
		rcu_read_unlock();
	}
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	struct nvme_ctrl *ctrl = ioucmd->file->private_data;
	int ret;

	/* IOPOLL not supported yet */
	if (issue_flags & IO_URING_F_IOPOLL)
		return -EOPNOTSUPP;

	ret = nvme_uring_cmd_checks(issue_flags);
	if (ret)
		return ret;

	switch (ioucmd->cmd_op) {
	case NVME_URING_CMD_ADMIN:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
		break;
	case NVME_URING_CMD_ADMIN_VEC:
		ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

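/*
 * Deprecated path for NVME_IOCTL_IO_CMD on the controller character device:
 * it is only honoured when the controller has exactly one namespace; with
 * multiple namespaces present it fails with -EINVAL and a warning.
 */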
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	down_read(&ctrl->namespaces_rwsem);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	up_read(&ctrl->namespaces_rwsem);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}

long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_ADMIN64_CMD:
		return nvme_user_cmd64(ctrl, NULL, argp, false);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}