1 /*
2  * NVM Express device driver
3  * Copyright (c) 2011-2014, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14 
15 #include <linux/blkdev.h>
16 #include <linux/blk-mq.h>
17 #include <linux/delay.h>
18 #include <linux/errno.h>
19 #include <linux/hdreg.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/list_sort.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/pr.h>
26 #include <linux/ptrace.h>
27 #include <linux/nvme_ioctl.h>
28 #include <linux/t10-pi.h>
29 #include <scsi/sg.h>
30 #include <asm/unaligned.h>
31 
32 #include "nvme.h"
33 #include "fabrics.h"
34 
35 #define NVME_MINORS		(1U << MINORBITS)
36 
37 unsigned char admin_timeout = 60;
38 module_param(admin_timeout, byte, 0644);
39 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
40 EXPORT_SYMBOL_GPL(admin_timeout);
41 
42 unsigned char nvme_io_timeout = 30;
43 module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
44 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
45 EXPORT_SYMBOL_GPL(nvme_io_timeout);
46 
47 unsigned char shutdown_timeout = 5;
48 module_param(shutdown_timeout, byte, 0644);
49 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
50 
51 unsigned int nvme_max_retries = 5;
52 module_param_named(max_retries, nvme_max_retries, uint, 0644);
53 MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
54 EXPORT_SYMBOL_GPL(nvme_max_retries);
55 
56 static int nvme_char_major;
57 module_param(nvme_char_major, int, 0);
58 
59 static LIST_HEAD(nvme_ctrl_list);
60 static DEFINE_SPINLOCK(dev_list_lock);
61 
62 static struct class *nvme_class;
63 
64 void nvme_cancel_request(struct request *req, void *data, bool reserved)
65 {
66 	int status;
67 
68 	if (!blk_mq_request_started(req))
69 		return;
70 
71 	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
72 				"Cancelling I/O %d", req->tag);
73 
74 	status = NVME_SC_ABORT_REQ;
75 	if (blk_queue_dying(req->q))
76 		status |= NVME_SC_DNR;
77 	blk_mq_complete_request(req, status);
78 }
79 EXPORT_SYMBOL_GPL(nvme_cancel_request);
80 
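/*
 * Controller state machine.  A transition is applied only if it appears in
 * the table below; any other combination leaves ctrl->state untouched and
 * returns false to the caller:
 *
 *	NEW, RESETTING, RECONNECTING	-> LIVE
 *	NEW, LIVE, RECONNECTING		-> RESETTING
 *	LIVE				-> RECONNECTING
 *	LIVE, RESETTING, RECONNECTING	-> DELETING
 *	DELETING			-> DEAD
 */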
81 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
82 		enum nvme_ctrl_state new_state)
83 {
84 	enum nvme_ctrl_state old_state;
85 	bool changed = false;
86 
87 	spin_lock_irq(&ctrl->lock);
88 
89 	old_state = ctrl->state;
90 	switch (new_state) {
91 	case NVME_CTRL_LIVE:
92 		switch (old_state) {
93 		case NVME_CTRL_NEW:
94 		case NVME_CTRL_RESETTING:
95 		case NVME_CTRL_RECONNECTING:
96 			changed = true;
97 			/* FALLTHRU */
98 		default:
99 			break;
100 		}
101 		break;
102 	case NVME_CTRL_RESETTING:
103 		switch (old_state) {
104 		case NVME_CTRL_NEW:
105 		case NVME_CTRL_LIVE:
106 		case NVME_CTRL_RECONNECTING:
107 			changed = true;
108 			/* FALLTHRU */
109 		default:
110 			break;
111 		}
112 		break;
113 	case NVME_CTRL_RECONNECTING:
114 		switch (old_state) {
115 		case NVME_CTRL_LIVE:
116 			changed = true;
117 			/* FALLTHRU */
118 		default:
119 			break;
120 		}
121 		break;
122 	case NVME_CTRL_DELETING:
123 		switch (old_state) {
124 		case NVME_CTRL_LIVE:
125 		case NVME_CTRL_RESETTING:
126 		case NVME_CTRL_RECONNECTING:
127 			changed = true;
128 			/* FALLTHRU */
129 		default:
130 			break;
131 		}
132 		break;
133 	case NVME_CTRL_DEAD:
134 		switch (old_state) {
135 		case NVME_CTRL_DELETING:
136 			changed = true;
137 			/* FALLTHRU */
138 		default:
139 			break;
140 		}
141 		break;
142 	default:
143 		break;
144 	}
145 
146 	if (changed)
147 		ctrl->state = new_state;
148 
149 	spin_unlock_irq(&ctrl->lock);
150 
151 	return changed;
152 }
153 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
154 
155 static void nvme_free_ns(struct kref *kref)
156 {
157 	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
158 
159 	if (ns->ndev)
160 		nvme_nvm_unregister(ns);
161 
162 	if (ns->disk) {
163 		spin_lock(&dev_list_lock);
164 		ns->disk->private_data = NULL;
165 		spin_unlock(&dev_list_lock);
166 	}
167 
168 	put_disk(ns->disk);
169 	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
170 	nvme_put_ctrl(ns->ctrl);
171 	kfree(ns);
172 }
173 
174 static void nvme_put_ns(struct nvme_ns *ns)
175 {
176 	kref_put(&ns->kref, nvme_free_ns);
177 }
178 
179 static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
180 {
181 	struct nvme_ns *ns;
182 
183 	spin_lock(&dev_list_lock);
184 	ns = disk->private_data;
185 	if (ns) {
186 		if (!kref_get_unless_zero(&ns->kref))
187 			goto fail;
188 		if (!try_module_get(ns->ctrl->ops->module))
189 			goto fail_put_ns;
190 	}
191 	spin_unlock(&dev_list_lock);
192 
193 	return ns;
194 
195 fail_put_ns:
196 	kref_put(&ns->kref, nvme_free_ns);
197 fail:
198 	spin_unlock(&dev_list_lock);
199 	return NULL;
200 }
201 
202 void nvme_requeue_req(struct request *req)
203 {
204 	unsigned long flags;
205 
206 	blk_mq_requeue_request(req);
207 	spin_lock_irqsave(req->q->queue_lock, flags);
208 	if (!blk_queue_stopped(req->q))
209 		blk_mq_kick_requeue_list(req->q);
210 	spin_unlock_irqrestore(req->q->queue_lock, flags);
211 }
212 EXPORT_SYMBOL_GPL(nvme_requeue_req);
213 
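/*
 * Allocate a block layer request carrying an NVMe command.  With
 * NVME_QID_ANY blk-mq may pick any hardware context; otherwise the request
 * is bound to the hardware queue matching (qid - 1) so the command is
 * issued on a specific I/O queue.
 */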
214 struct request *nvme_alloc_request(struct request_queue *q,
215 		struct nvme_command *cmd, unsigned int flags, int qid)
216 {
217 	struct request *req;
218 
219 	if (qid == NVME_QID_ANY) {
220 		req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
221 	} else {
222 		req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
223 				qid ? qid - 1 : 0);
224 	}
225 	if (IS_ERR(req))
226 		return req;
227 
228 	req->cmd_type = REQ_TYPE_DRV_PRIV;
229 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
230 	req->cmd = (unsigned char *)cmd;
231 	req->cmd_len = sizeof(struct nvme_command);
232 
233 	return req;
234 }
235 EXPORT_SYMBOL_GPL(nvme_alloc_request);
236 
237 static inline void nvme_setup_flush(struct nvme_ns *ns,
238 		struct nvme_command *cmnd)
239 {
240 	memset(cmnd, 0, sizeof(*cmnd));
241 	cmnd->common.opcode = nvme_cmd_flush;
242 	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
243 }
244 
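/*
 * Translate a REQ_OP_DISCARD request into a Dataset Management (deallocate)
 * command.  A single range descriptor is allocated here, attached to the
 * request as its payload page, and must be freed by the completion path
 * once the command finishes.
 */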
245 static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
246 		struct nvme_command *cmnd)
247 {
248 	struct nvme_dsm_range *range;
249 	struct page *page;
250 	int offset;
251 	unsigned int nr_bytes = blk_rq_bytes(req);
252 
253 	range = kmalloc(sizeof(*range), GFP_ATOMIC);
254 	if (!range)
255 		return BLK_MQ_RQ_QUEUE_BUSY;
256 
257 	range->cattr = cpu_to_le32(0);
258 	range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
259 	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
260 
261 	memset(cmnd, 0, sizeof(*cmnd));
262 	cmnd->dsm.opcode = nvme_cmd_dsm;
263 	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
264 	cmnd->dsm.nr = 0;
265 	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
266 
267 	req->completion_data = range;
268 	page = virt_to_page(range);
269 	offset = offset_in_page(range);
270 	blk_add_request_payload(req, page, offset, sizeof(*range));
271 
272 	/*
273 	 * we set __data_len back to the size of the area to be discarded
274 	 * on disk. This allows us to report completion on the full amount
275 	 * of blocks described by the request.
276 	 */
277 	req->__data_len = nr_bytes;
278 
279 	return 0;
280 }
281 
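/*
 * Build a read/write command from the request.  The starting LBA comes from
 * the request's sector position, the length field is a 0's based block
 * count, and protection information checks are enabled according to the
 * namespace's PI type when metadata is in use.
 */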
282 static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
283 		struct nvme_command *cmnd)
284 {
285 	u16 control = 0;
286 	u32 dsmgmt = 0;
287 
288 	if (req->cmd_flags & REQ_FUA)
289 		control |= NVME_RW_FUA;
290 	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
291 		control |= NVME_RW_LR;
292 
293 	if (req->cmd_flags & REQ_RAHEAD)
294 		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
295 
296 	memset(cmnd, 0, sizeof(*cmnd));
297 	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
298 	cmnd->rw.command_id = req->tag;
299 	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
300 	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
301 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
302 
303 	if (ns->ms) {
304 		switch (ns->pi_type) {
305 		case NVME_NS_DPS_PI_TYPE3:
306 			control |= NVME_RW_PRINFO_PRCHK_GUARD;
307 			break;
308 		case NVME_NS_DPS_PI_TYPE1:
309 		case NVME_NS_DPS_PI_TYPE2:
310 			control |= NVME_RW_PRINFO_PRCHK_GUARD |
311 					NVME_RW_PRINFO_PRCHK_REF;
312 			cmnd->rw.reftag = cpu_to_le32(
313 					nvme_block_nr(ns, blk_rq_pos(req)));
314 			break;
315 		}
316 		if (!blk_integrity_rq(req))
317 			control |= NVME_RW_PRINFO_PRACT;
318 	}
319 
320 	cmnd->rw.control = cpu_to_le16(control);
321 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
322 }
323 
324 int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
325 		struct nvme_command *cmd)
326 {
327 	int ret = 0;
328 
329 	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
330 		memcpy(cmd, req->cmd, sizeof(*cmd));
331 	else if (req_op(req) == REQ_OP_FLUSH)
332 		nvme_setup_flush(ns, cmd);
333 	else if (req_op(req) == REQ_OP_DISCARD)
334 		ret = nvme_setup_discard(ns, req, cmd);
335 	else
336 		nvme_setup_rw(ns, req, cmd);
337 
338 	return ret;
339 }
340 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
341 
342 /*
343  * Returns 0 on success.  If the result is negative, it's a Linux error code;
344  * if the result is positive, it's an NVM Express status code
345  */
346 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
347 		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
348 		unsigned timeout, int qid, int at_head, int flags)
349 {
350 	struct request *req;
351 	int ret;
352 
353 	req = nvme_alloc_request(q, cmd, flags, qid);
354 	if (IS_ERR(req))
355 		return PTR_ERR(req);
356 
357 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
358 	req->special = cqe;
359 
360 	if (buffer && bufflen) {
361 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
362 		if (ret)
363 			goto out;
364 	}
365 
366 	blk_execute_rq(req->q, NULL, req, at_head);
367 	ret = req->errors;
368  out:
369 	blk_mq_free_request(req);
370 	return ret;
371 }
372 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
373 
374 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
375 		void *buffer, unsigned bufflen)
376 {
377 	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
378 			NVME_QID_ANY, 0, 0);
379 }
380 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
381 
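/*
 * Synchronously execute a passthrough command on behalf of user space.  The
 * data buffer is mapped straight from user memory; a separate metadata
 * buffer, if supplied, is bounced through a kernel allocation and attached
 * to the bio as an integrity payload.
 */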
382 int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
383 		void __user *ubuffer, unsigned bufflen,
384 		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
385 		u32 *result, unsigned timeout)
386 {
387 	bool write = nvme_is_write(cmd);
388 	struct nvme_completion cqe;
389 	struct nvme_ns *ns = q->queuedata;
390 	struct gendisk *disk = ns ? ns->disk : NULL;
391 	struct request *req;
392 	struct bio *bio = NULL;
393 	void *meta = NULL;
394 	int ret;
395 
396 	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
397 	if (IS_ERR(req))
398 		return PTR_ERR(req);
399 
400 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
401 	req->special = &cqe;
402 
403 	if (ubuffer && bufflen) {
404 		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
405 				GFP_KERNEL);
406 		if (ret)
407 			goto out;
408 		bio = req->bio;
409 
410 		if (!disk)
411 			goto submit;
412 		bio->bi_bdev = bdget_disk(disk, 0);
413 		if (!bio->bi_bdev) {
414 			ret = -ENODEV;
415 			goto out_unmap;
416 		}
417 
418 		if (meta_buffer && meta_len) {
419 			struct bio_integrity_payload *bip;
420 
421 			meta = kmalloc(meta_len, GFP_KERNEL);
422 			if (!meta) {
423 				ret = -ENOMEM;
424 				goto out_unmap;
425 			}
426 
427 			if (write) {
428 				if (copy_from_user(meta, meta_buffer,
429 						meta_len)) {
430 					ret = -EFAULT;
431 					goto out_free_meta;
432 				}
433 			}
434 
435 			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
436 			if (IS_ERR(bip)) {
437 				ret = PTR_ERR(bip);
438 				goto out_free_meta;
439 			}
440 
441 			bip->bip_iter.bi_size = meta_len;
442 			bip->bip_iter.bi_sector = meta_seed;
443 
444 			ret = bio_integrity_add_page(bio, virt_to_page(meta),
445 					meta_len, offset_in_page(meta));
446 			if (ret != meta_len) {
447 				ret = -ENOMEM;
448 				goto out_free_meta;
449 			}
450 		}
451 	}
452  submit:
453 	blk_execute_rq(req->q, disk, req, 0);
454 	ret = req->errors;
455 	if (result)
456 		*result = le32_to_cpu(cqe.result);
457 	if (meta && !ret && !write) {
458 		if (copy_to_user(meta_buffer, meta, meta_len))
459 			ret = -EFAULT;
460 	}
461  out_free_meta:
462 	kfree(meta);
463  out_unmap:
464 	if (bio) {
465 		if (disk && bio->bi_bdev)
466 			bdput(bio->bi_bdev);
467 		blk_rq_unmap_user(bio);
468 	}
469  out:
470 	blk_mq_free_request(req);
471 	return ret;
472 }
473 
474 int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
475 		void __user *ubuffer, unsigned bufflen, u32 *result,
476 		unsigned timeout)
477 {
478 	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
479 			result, timeout);
480 }
481 
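/*
 * Keep Alive handling: the command is re-armed from its own completion
 * handler every ctrl->kato seconds.  If a Keep Alive completes with an
 * error the timer is simply not re-armed; if one cannot even be allocated
 * the controller is reset from the work handler below.
 */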
482 static void nvme_keep_alive_end_io(struct request *rq, int error)
483 {
484 	struct nvme_ctrl *ctrl = rq->end_io_data;
485 
486 	blk_mq_free_request(rq);
487 
488 	if (error) {
489 		dev_err(ctrl->device,
490 			"failed nvme_keep_alive_end_io error=%d\n", error);
491 		return;
492 	}
493 
494 	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
495 }
496 
497 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
498 {
499 	struct nvme_command c;
500 	struct request *rq;
501 
502 	memset(&c, 0, sizeof(c));
503 	c.common.opcode = nvme_admin_keep_alive;
504 
505 	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
506 			NVME_QID_ANY);
507 	if (IS_ERR(rq))
508 		return PTR_ERR(rq);
509 
510 	rq->timeout = ctrl->kato * HZ;
511 	rq->end_io_data = ctrl;
512 
513 	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
514 
515 	return 0;
516 }
517 
518 static void nvme_keep_alive_work(struct work_struct *work)
519 {
520 	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
521 			struct nvme_ctrl, ka_work);
522 
523 	if (nvme_keep_alive(ctrl)) {
524 		/* allocation failure, reset the controller */
525 		dev_err(ctrl->device, "keep-alive failed\n");
526 		ctrl->ops->reset_ctrl(ctrl);
527 		return;
528 	}
529 }
530 
531 void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
532 {
533 	if (unlikely(ctrl->kato == 0))
534 		return;
535 
536 	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
537 	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
538 }
539 EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
540 
541 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
542 {
543 	if (unlikely(ctrl->kato == 0))
544 		return;
545 
546 	cancel_delayed_work_sync(&ctrl->ka_work);
547 }
548 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
549 
550 int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
551 {
552 	struct nvme_command c = { };
553 	int error;
554 
555 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
556 	c.identify.opcode = nvme_admin_identify;
557 	c.identify.cns = cpu_to_le32(NVME_ID_CNS_CTRL);
558 
559 	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
560 	if (!*id)
561 		return -ENOMEM;
562 
563 	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
564 			sizeof(struct nvme_id_ctrl));
565 	if (error)
566 		kfree(*id);
567 	return error;
568 }
569 
570 static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
571 {
572 	struct nvme_command c = { };
573 
574 	c.identify.opcode = nvme_admin_identify;
575 	c.identify.cns = cpu_to_le32(NVME_ID_CNS_NS_ACTIVE_LIST);
576 	c.identify.nsid = cpu_to_le32(nsid);
577 	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
578 }
579 
580 int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
581 		struct nvme_id_ns **id)
582 {
583 	struct nvme_command c = { };
584 	int error;
585 
586 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
587 	c.identify.opcode = nvme_admin_identify,
588 	c.identify.nsid = cpu_to_le32(nsid),
589 
590 	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
591 	if (!*id)
592 		return -ENOMEM;
593 
594 	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
595 			sizeof(struct nvme_id_ns));
596 	if (error)
597 		kfree(*id);
598 	return error;
599 }
600 
601 int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
602 		      void *buffer, size_t buflen, u32 *result)
603 {
604 	struct nvme_command c;
605 	struct nvme_completion cqe;
606 	int ret;
607 
608 	memset(&c, 0, sizeof(c));
609 	c.features.opcode = nvme_admin_get_features;
610 	c.features.nsid = cpu_to_le32(nsid);
611 	c.features.fid = cpu_to_le32(fid);
612 
613 	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, buffer, buflen, 0,
614 			NVME_QID_ANY, 0, 0);
615 	if (ret >= 0 && result)
616 		*result = le32_to_cpu(cqe.result);
617 	return ret;
618 }
619 
620 int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
621 		      void *buffer, size_t buflen, u32 *result)
622 {
623 	struct nvme_command c;
624 	struct nvme_completion cqe;
625 	int ret;
626 
627 	memset(&c, 0, sizeof(c));
628 	c.features.opcode = nvme_admin_set_features;
629 	c.features.fid = cpu_to_le32(fid);
630 	c.features.dword11 = cpu_to_le32(dword11);
631 
632 	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe,
633 			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
634 	if (ret >= 0 && result)
635 		*result = le32_to_cpu(cqe.result);
636 	return ret;
637 }
638 
639 int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
640 {
641 	struct nvme_command c = { };
642 	int error;
643 
644 	c.common.opcode = nvme_admin_get_log_page,
645 	c.common.nsid = cpu_to_le32(0xFFFFFFFF),
646 	c.common.cdw10[0] = cpu_to_le32(
647 			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
648 			 NVME_LOG_SMART),
649 
650 	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
651 	if (!*log)
652 		return -ENOMEM;
653 
654 	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
655 			sizeof(struct nvme_smart_log));
656 	if (error)
657 		kfree(*log);
658 	return error;
659 }
660 
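/*
 * Request *count I/O queues via the Number of Queues feature.  Both the
 * submission and completion queue counts are passed as 0's based values in
 * the lower and upper halves of the feature dword, and *count is clamped to
 * what the controller actually granted.
 */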
661 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
662 {
663 	u32 q_count = (*count - 1) | ((*count - 1) << 16);
664 	u32 result;
665 	int status, nr_io_queues;
666 
667 	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
668 			&result);
669 	if (status < 0)
670 		return status;
671 
672 	/*
673 	 * Degraded controllers might return an error when setting the queue
674 	 * count.  We still want to be able to bring them online and offer
675 	 * access to the admin queue, as that might be the only way to fix them up.
676 	 */
677 	if (status > 0) {
678 		dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
679 		*count = 0;
680 	} else {
681 		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
682 		*count = min(*count, nr_io_queues);
683 	}
684 
685 	return 0;
686 }
687 EXPORT_SYMBOL_GPL(nvme_set_queue_count);
688 
689 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
690 {
691 	struct nvme_user_io io;
692 	struct nvme_command c;
693 	unsigned length, meta_len;
694 	void __user *metadata;
695 
696 	if (copy_from_user(&io, uio, sizeof(io)))
697 		return -EFAULT;
698 	if (io.flags)
699 		return -EINVAL;
700 
701 	switch (io.opcode) {
702 	case nvme_cmd_write:
703 	case nvme_cmd_read:
704 	case nvme_cmd_compare:
705 		break;
706 	default:
707 		return -EINVAL;
708 	}
709 
710 	length = (io.nblocks + 1) << ns->lba_shift;
711 	meta_len = (io.nblocks + 1) * ns->ms;
712 	metadata = (void __user *)(uintptr_t)io.metadata;
713 
714 	if (ns->ext) {
715 		length += meta_len;
716 		meta_len = 0;
717 	} else if (meta_len) {
718 		if ((io.metadata & 3) || !io.metadata)
719 			return -EINVAL;
720 	}
721 
722 	memset(&c, 0, sizeof(c));
723 	c.rw.opcode = io.opcode;
724 	c.rw.flags = io.flags;
725 	c.rw.nsid = cpu_to_le32(ns->ns_id);
726 	c.rw.slba = cpu_to_le64(io.slba);
727 	c.rw.length = cpu_to_le16(io.nblocks);
728 	c.rw.control = cpu_to_le16(io.control);
729 	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
730 	c.rw.reftag = cpu_to_le32(io.reftag);
731 	c.rw.apptag = cpu_to_le16(io.apptag);
732 	c.rw.appmask = cpu_to_le16(io.appmask);
733 
734 	return __nvme_submit_user_cmd(ns->queue, &c,
735 			(void __user *)(uintptr_t)io.addr, length,
736 			metadata, meta_len, io.slba, NULL, 0);
737 }
738 
739 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
740 			struct nvme_passthru_cmd __user *ucmd)
741 {
742 	struct nvme_passthru_cmd cmd;
743 	struct nvme_command c;
744 	unsigned timeout = 0;
745 	int status;
746 
747 	if (!capable(CAP_SYS_ADMIN))
748 		return -EACCES;
749 	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
750 		return -EFAULT;
751 	if (cmd.flags)
752 		return -EINVAL;
753 
754 	memset(&c, 0, sizeof(c));
755 	c.common.opcode = cmd.opcode;
756 	c.common.flags = cmd.flags;
757 	c.common.nsid = cpu_to_le32(cmd.nsid);
758 	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
759 	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
760 	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
761 	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
762 	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
763 	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
764 	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
765 	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
766 
767 	if (cmd.timeout_ms)
768 		timeout = msecs_to_jiffies(cmd.timeout_ms);
769 
770 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
771 			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
772 			&cmd.result, timeout);
773 	if (status >= 0) {
774 		if (put_user(cmd.result, &ucmd->result))
775 			return -EFAULT;
776 	}
777 
778 	return status;
779 }
780 
781 static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
782 		unsigned int cmd, unsigned long arg)
783 {
784 	struct nvme_ns *ns = bdev->bd_disk->private_data;
785 
786 	switch (cmd) {
787 	case NVME_IOCTL_ID:
788 		force_successful_syscall_return();
789 		return ns->ns_id;
790 	case NVME_IOCTL_ADMIN_CMD:
791 		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
792 	case NVME_IOCTL_IO_CMD:
793 		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
794 	case NVME_IOCTL_SUBMIT_IO:
795 		return nvme_submit_io(ns, (void __user *)arg);
796 #ifdef CONFIG_BLK_DEV_NVME_SCSI
797 	case SG_GET_VERSION_NUM:
798 		return nvme_sg_get_version_num((void __user *)arg);
799 	case SG_IO:
800 		return nvme_sg_io(ns, (void __user *)arg);
801 #endif
802 	default:
803 		return -ENOTTY;
804 	}
805 }
806 
807 #ifdef CONFIG_COMPAT
808 static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
809 			unsigned int cmd, unsigned long arg)
810 {
811 	switch (cmd) {
812 	case SG_IO:
813 		return -ENOIOCTLCMD;
814 	}
815 	return nvme_ioctl(bdev, mode, cmd, arg);
816 }
817 #else
818 #define nvme_compat_ioctl	NULL
819 #endif
820 
821 static int nvme_open(struct block_device *bdev, fmode_t mode)
822 {
823 	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
824 }
825 
826 static void nvme_release(struct gendisk *disk, fmode_t mode)
827 {
828 	struct nvme_ns *ns = disk->private_data;
829 
830 	module_put(ns->ctrl->ops->module);
831 	nvme_put_ns(ns);
832 }
833 
834 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
835 {
836 	/* some standard values */
837 	geo->heads = 1 << 6;
838 	geo->sectors = 1 << 5;
839 	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
840 	return 0;
841 }
842 
843 #ifdef CONFIG_BLK_DEV_INTEGRITY
844 static void nvme_init_integrity(struct nvme_ns *ns)
845 {
846 	struct blk_integrity integrity;
847 
848 	memset(&integrity, 0, sizeof(integrity));
849 	switch (ns->pi_type) {
850 	case NVME_NS_DPS_PI_TYPE3:
851 		integrity.profile = &t10_pi_type3_crc;
852 		integrity.tag_size = sizeof(u16) + sizeof(u32);
853 		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
854 		break;
855 	case NVME_NS_DPS_PI_TYPE1:
856 	case NVME_NS_DPS_PI_TYPE2:
857 		integrity.profile = &t10_pi_type1_crc;
858 		integrity.tag_size = sizeof(u16);
859 		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
860 		break;
861 	default:
862 		integrity.profile = NULL;
863 		break;
864 	}
865 	integrity.tuple_size = ns->ms;
866 	blk_integrity_register(ns->disk, &integrity);
867 	blk_queue_max_integrity_segments(ns->queue, 1);
868 }
869 #else
870 static void nvme_init_integrity(struct nvme_ns *ns)
871 {
872 }
873 #endif /* CONFIG_BLK_DEV_INTEGRITY */
874 
875 static void nvme_config_discard(struct nvme_ns *ns)
876 {
877 	struct nvme_ctrl *ctrl = ns->ctrl;
878 	u32 logical_block_size = queue_logical_block_size(ns->queue);
879 
880 	if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
881 		ns->queue->limits.discard_zeroes_data = 1;
882 	else
883 		ns->queue->limits.discard_zeroes_data = 0;
884 
885 	ns->queue->limits.discard_alignment = logical_block_size;
886 	ns->queue->limits.discard_granularity = logical_block_size;
887 	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
888 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
889 }
890 
891 static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
892 {
893 	if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) {
894 		dev_warn(ns->ctrl->dev, "%s: Identify failure\n", __func__);
895 		return -ENODEV;
896 	}
897 
898 	if ((*id)->ncap == 0) {
899 		kfree(*id);
900 		return -ENODEV;
901 	}
902 
903 	if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
904 		memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
905 	if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
906 		memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
907 
908 	return 0;
909 }
910 
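/*
 * Apply already-fetched Identify Namespace data to the gendisk: logical
 * block size, metadata and protection information settings, capacity and
 * discard support are all refreshed while the queue is frozen.
 */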
911 static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
912 {
913 	struct nvme_ns *ns = disk->private_data;
914 	u8 lbaf, pi_type;
915 	u16 old_ms;
916 	unsigned short bs;
917 
918 	old_ms = ns->ms;
919 	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
920 	ns->lba_shift = id->lbaf[lbaf].ds;
921 	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
922 	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
923 
924 	/*
925 	 * If Identify Namespace failed, fall back to a default 512 byte block
926 	 * size so the block layer can still be used before reads/writes fail on the zero-capacity device.
927 	 */
928 	if (ns->lba_shift == 0)
929 		ns->lba_shift = 9;
930 	bs = 1 << ns->lba_shift;
931 	/* XXX: PI implementation requires metadata size equal to the T10 PI tuple size */
932 	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
933 					id->dps & NVME_NS_DPS_PI_MASK : 0;
934 
935 	blk_mq_freeze_queue(disk->queue);
936 	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
937 				ns->ms != old_ms ||
938 				bs != queue_logical_block_size(disk->queue) ||
939 				(ns->ms && ns->ext)))
940 		blk_integrity_unregister(disk);
941 
942 	ns->pi_type = pi_type;
943 	blk_queue_logical_block_size(ns->queue, bs);
944 
945 	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
946 		nvme_init_integrity(ns);
947 	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
948 		set_capacity(disk, 0);
949 	else
950 		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
951 
952 	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
953 		nvme_config_discard(ns);
954 	blk_mq_unfreeze_queue(disk->queue);
955 }
956 
957 static int nvme_revalidate_disk(struct gendisk *disk)
958 {
959 	struct nvme_ns *ns = disk->private_data;
960 	struct nvme_id_ns *id = NULL;
961 	int ret;
962 
963 	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
964 		set_capacity(disk, 0);
965 		return -ENODEV;
966 	}
967 
968 	ret = nvme_revalidate_ns(ns, &id);
969 	if (ret)
970 		return ret;
971 
972 	__nvme_revalidate_disk(disk, id);
973 	kfree(id);
974 
975 	return 0;
976 }
977 
978 static char nvme_pr_type(enum pr_type type)
979 {
980 	switch (type) {
981 	case PR_WRITE_EXCLUSIVE:
982 		return 1;
983 	case PR_EXCLUSIVE_ACCESS:
984 		return 2;
985 	case PR_WRITE_EXCLUSIVE_REG_ONLY:
986 		return 3;
987 	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
988 		return 4;
989 	case PR_WRITE_EXCLUSIVE_ALL_REGS:
990 		return 5;
991 	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
992 		return 6;
993 	default:
994 		return 0;
995 	}
996 };
997 
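/*
 * Persistent reservation support: the block layer pr_ops are translated
 * into NVMe reservation commands.  The 16-byte data buffer carries the
 * current reservation key followed by the service-action specific key
 * (new or preempt key), where one applies.
 */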
998 static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
999 				u64 key, u64 sa_key, u8 op)
1000 {
1001 	struct nvme_ns *ns = bdev->bd_disk->private_data;
1002 	struct nvme_command c;
1003 	u8 data[16] = { 0, };
1004 
1005 	put_unaligned_le64(key, &data[0]);
1006 	put_unaligned_le64(sa_key, &data[8]);
1007 
1008 	memset(&c, 0, sizeof(c));
1009 	c.common.opcode = op;
1010 	c.common.nsid = cpu_to_le32(ns->ns_id);
1011 	c.common.cdw10[0] = cpu_to_le32(cdw10);
1012 
1013 	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
1014 }
1015 
1016 static int nvme_pr_register(struct block_device *bdev, u64 old,
1017 		u64 new, unsigned flags)
1018 {
1019 	u32 cdw10;
1020 
1021 	if (flags & ~PR_FL_IGNORE_KEY)
1022 		return -EOPNOTSUPP;
1023 
1024 	cdw10 = old ? 2 : 0;
1025 	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
1026 	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
1027 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
1028 }
1029 
1030 static int nvme_pr_reserve(struct block_device *bdev, u64 key,
1031 		enum pr_type type, unsigned flags)
1032 {
1033 	u32 cdw10;
1034 
1035 	if (flags & ~PR_FL_IGNORE_KEY)
1036 		return -EOPNOTSUPP;
1037 
1038 	cdw10 = nvme_pr_type(type) << 8;
1039 	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
1040 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
1041 }
1042 
1043 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
1044 		enum pr_type type, bool abort)
1045 {
1046 	u32 cdw10 = (nvme_pr_type(type) << 8) | (abort ? 2 : 1);
1047 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
1048 }
1049 
1050 static int nvme_pr_clear(struct block_device *bdev, u64 key)
1051 {
1052 	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
1053 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
1054 }
1055 
1056 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
1057 {
1058 	u32 cdw10 = (nvme_pr_type(type) << 8) | (key ? 1 << 3 : 0);
1059 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
1060 }
1061 
1062 static const struct pr_ops nvme_pr_ops = {
1063 	.pr_register	= nvme_pr_register,
1064 	.pr_reserve	= nvme_pr_reserve,
1065 	.pr_release	= nvme_pr_release,
1066 	.pr_preempt	= nvme_pr_preempt,
1067 	.pr_clear	= nvme_pr_clear,
1068 };
1069 
1070 static const struct block_device_operations nvme_fops = {
1071 	.owner		= THIS_MODULE,
1072 	.ioctl		= nvme_ioctl,
1073 	.compat_ioctl	= nvme_compat_ioctl,
1074 	.open		= nvme_open,
1075 	.release	= nvme_release,
1076 	.getgeo		= nvme_getgeo,
1077 	.revalidate_disk= nvme_revalidate_disk,
1078 	.pr_ops		= &nvme_pr_ops,
1079 };
1080 
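/*
 * Poll CSTS until the controller's ready bit matches the requested state,
 * giving up after the timeout advertised in CAP.TO (units of 500ms), when a
 * fatal signal is pending, or when the register reads back as all ones
 * (device gone).
 */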
1081 static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
1082 {
1083 	unsigned long timeout =
1084 		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
1085 	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
1086 	int ret;
1087 
1088 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
1089 		if (csts == ~0)
1090 			return -ENODEV;
1091 		if ((csts & NVME_CSTS_RDY) == bit)
1092 			break;
1093 
1094 		msleep(100);
1095 		if (fatal_signal_pending(current))
1096 			return -EINTR;
1097 		if (time_after(jiffies, timeout)) {
1098 			dev_err(ctrl->device,
1099 				"Device not ready; aborting %s\n", enabled ?
1100 						"initialisation" : "reset");
1101 			return -ENODEV;
1102 		}
1103 	}
1104 
1105 	return ret;
1106 }
1107 
1108 /*
1109  * If the device has been passed off to us in an enabled state, just clear
1110  * the enabled bit.  The spec says we should set the 'shutdown notification
1111  * bits', but doing so may cause the device to complete commands to the
1112  * admin queue ... and we don't know what memory that might be pointing at!
1113  */
1114 int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
1115 {
1116 	int ret;
1117 
1118 	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
1119 	ctrl->ctrl_config &= ~NVME_CC_ENABLE;
1120 
1121 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
1122 	if (ret)
1123 		return ret;
1124 
1125 	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
1126 		msleep(NVME_QUIRK_DELAY_AMOUNT);
1127 
1128 	return nvme_wait_ready(ctrl, cap, false);
1129 }
1130 EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
1131 
1132 int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
1133 {
1134 	/*
1135 	 * Default to a 4K page size, with the intention to update this
1136 	 * path in the future to accommodate architectures with differing
1137 	 * kernel and IO page sizes.
1138 	 */
1139 	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
1140 	int ret;
1141 
1142 	if (page_shift < dev_page_min) {
1143 		dev_err(ctrl->device,
1144 			"Minimum device page size %u too large for host (%u)\n",
1145 			1 << dev_page_min, 1 << page_shift);
1146 		return -ENODEV;
1147 	}
1148 
1149 	ctrl->page_size = 1 << page_shift;
1150 
1151 	ctrl->ctrl_config = NVME_CC_CSS_NVM;
1152 	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
1153 	ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
1154 	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
1155 	ctrl->ctrl_config |= NVME_CC_ENABLE;
1156 
1157 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
1158 	if (ret)
1159 		return ret;
1160 	return nvme_wait_ready(ctrl, cap, true);
1161 }
1162 EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
1163 
1164 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
1165 {
1166 	unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
1167 	u32 csts;
1168 	int ret;
1169 
1170 	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
1171 	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
1172 
1173 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
1174 	if (ret)
1175 		return ret;
1176 
1177 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
1178 		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
1179 			break;
1180 
1181 		msleep(100);
1182 		if (fatal_signal_pending(current))
1183 			return -EINTR;
1184 		if (time_after(jiffies, timeout)) {
1185 			dev_err(ctrl->device,
1186 				"Device shutdown incomplete; abort shutdown\n");
1187 			return -ENODEV;
1188 		}
1189 	}
1190 
1191 	return ret;
1192 }
1193 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
1194 
1195 static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
1196 		struct request_queue *q)
1197 {
1198 	bool vwc = false;
1199 
1200 	if (ctrl->max_hw_sectors) {
1201 		u32 max_segments =
1202 			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
1203 
1204 		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
1205 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
1206 	}
1207 	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
1208 	    is_power_of_2(ctrl->max_hw_sectors))
1209 		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
1210 	blk_queue_virt_boundary(q, ctrl->page_size - 1);
1211 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
1212 		vwc = true;
1213 	blk_queue_write_cache(q, vwc, vwc);
1214 }
1215 
1216 /*
1217  * Initialize the cached copies of the Identify data and various controller
1218  * register in our nvme_ctrl structure.  This should be called as soon as
1219  * the admin queue is fully up and running.
1220  */
1221 int nvme_init_identify(struct nvme_ctrl *ctrl)
1222 {
1223 	struct nvme_id_ctrl *id;
1224 	u64 cap;
1225 	int ret, page_shift;
1226 	u32 max_hw_sectors;
1227 
1228 	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
1229 	if (ret) {
1230 		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
1231 		return ret;
1232 	}
1233 
1234 	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
1235 	if (ret) {
1236 		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
1237 		return ret;
1238 	}
1239 	page_shift = NVME_CAP_MPSMIN(cap) + 12;
1240 
1241 	if (ctrl->vs >= NVME_VS(1, 1, 0))
1242 		ctrl->subsystem = NVME_CAP_NSSRC(cap);
1243 
1244 	ret = nvme_identify_ctrl(ctrl, &id);
1245 	if (ret) {
1246 		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
1247 		return -EIO;
1248 	}
1249 
1250 	ctrl->vid = le16_to_cpu(id->vid);
1251 	ctrl->oncs = le16_to_cpup(&id->oncs);
1252 	atomic_set(&ctrl->abort_limit, id->acl + 1);
1253 	ctrl->vwc = id->vwc;
1254 	ctrl->cntlid = le16_to_cpup(&id->cntlid);
1255 	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
1256 	memcpy(ctrl->model, id->mn, sizeof(id->mn));
1257 	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
1258 	if (id->mdts)
1259 		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
1260 	else
1261 		max_hw_sectors = UINT_MAX;
1262 	ctrl->max_hw_sectors =
1263 		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
1264 
1265 	nvme_set_queue_limits(ctrl, ctrl->admin_q);
1266 	ctrl->sgls = le32_to_cpu(id->sgls);
1267 	ctrl->kas = le16_to_cpu(id->kas);
1268 
1269 	if (ctrl->ops->is_fabrics) {
1270 		ctrl->icdoff = le16_to_cpu(id->icdoff);
1271 		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
1272 		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
1273 		ctrl->maxcmd = le16_to_cpu(id->maxcmd);
1274 
1275 		/*
1276 		 * In fabrics we need to verify the cntlid matches the
1277 		 * admin connect
1278 		 */
1279 		if (ctrl->cntlid != le16_to_cpu(id->cntlid))
1280 			ret = -EINVAL;
1281 
1282 		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
1283 			dev_err(ctrl->dev,
1284 				"keep-alive support is mandatory for fabrics\n");
1285 			ret = -EINVAL;
1286 		}
1287 	} else {
1288 		ctrl->cntlid = le16_to_cpu(id->cntlid);
1289 	}
1290 
1291 	kfree(id);
1292 	return ret;
1293 }
1294 EXPORT_SYMBOL_GPL(nvme_init_identify);
1295 
1296 static int nvme_dev_open(struct inode *inode, struct file *file)
1297 {
1298 	struct nvme_ctrl *ctrl;
1299 	int instance = iminor(inode);
1300 	int ret = -ENODEV;
1301 
1302 	spin_lock(&dev_list_lock);
1303 	list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
1304 		if (ctrl->instance != instance)
1305 			continue;
1306 
1307 		if (!ctrl->admin_q) {
1308 			ret = -EWOULDBLOCK;
1309 			break;
1310 		}
1311 		if (!kref_get_unless_zero(&ctrl->kref))
1312 			break;
1313 		file->private_data = ctrl;
1314 		ret = 0;
1315 		break;
1316 	}
1317 	spin_unlock(&dev_list_lock);
1318 
1319 	return ret;
1320 }
1321 
1322 static int nvme_dev_release(struct inode *inode, struct file *file)
1323 {
1324 	nvme_put_ctrl(file->private_data);
1325 	return 0;
1326 }
1327 
1328 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
1329 {
1330 	struct nvme_ns *ns;
1331 	int ret;
1332 
1333 	mutex_lock(&ctrl->namespaces_mutex);
1334 	if (list_empty(&ctrl->namespaces)) {
1335 		ret = -ENOTTY;
1336 		goto out_unlock;
1337 	}
1338 
1339 	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
1340 	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
1341 		dev_warn(ctrl->device,
1342 			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
1343 		ret = -EINVAL;
1344 		goto out_unlock;
1345 	}
1346 
1347 	dev_warn(ctrl->device,
1348 		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
1349 	kref_get(&ns->kref);
1350 	mutex_unlock(&ctrl->namespaces_mutex);
1351 
1352 	ret = nvme_user_cmd(ctrl, ns, argp);
1353 	nvme_put_ns(ns);
1354 	return ret;
1355 
1356 out_unlock:
1357 	mutex_unlock(&ctrl->namespaces_mutex);
1358 	return ret;
1359 }
1360 
1361 static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
1362 		unsigned long arg)
1363 {
1364 	struct nvme_ctrl *ctrl = file->private_data;
1365 	void __user *argp = (void __user *)arg;
1366 
1367 	switch (cmd) {
1368 	case NVME_IOCTL_ADMIN_CMD:
1369 		return nvme_user_cmd(ctrl, NULL, argp);
1370 	case NVME_IOCTL_IO_CMD:
1371 		return nvme_dev_user_cmd(ctrl, argp);
1372 	case NVME_IOCTL_RESET:
1373 		dev_warn(ctrl->device, "resetting controller\n");
1374 		return ctrl->ops->reset_ctrl(ctrl);
1375 	case NVME_IOCTL_SUBSYS_RESET:
1376 		return nvme_reset_subsystem(ctrl);
1377 	case NVME_IOCTL_RESCAN:
1378 		nvme_queue_scan(ctrl);
1379 		return 0;
1380 	default:
1381 		return -ENOTTY;
1382 	}
1383 }
1384 
1385 static const struct file_operations nvme_dev_fops = {
1386 	.owner		= THIS_MODULE,
1387 	.open		= nvme_dev_open,
1388 	.release	= nvme_dev_release,
1389 	.unlocked_ioctl	= nvme_dev_ioctl,
1390 	.compat_ioctl	= nvme_dev_ioctl,
1391 };
1392 
1393 static ssize_t nvme_sysfs_reset(struct device *dev,
1394 				struct device_attribute *attr, const char *buf,
1395 				size_t count)
1396 {
1397 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1398 	int ret;
1399 
1400 	ret = ctrl->ops->reset_ctrl(ctrl);
1401 	if (ret < 0)
1402 		return ret;
1403 	return count;
1404 }
1405 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
1406 
1407 static ssize_t nvme_sysfs_rescan(struct device *dev,
1408 				struct device_attribute *attr, const char *buf,
1409 				size_t count)
1410 {
1411 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1412 
1413 	nvme_queue_scan(ctrl);
1414 	return count;
1415 }
1416 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
1417 
1418 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
1419 								char *buf)
1420 {
1421 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
1422 	struct nvme_ctrl *ctrl = ns->ctrl;
1423 	int serial_len = sizeof(ctrl->serial);
1424 	int model_len = sizeof(ctrl->model);
1425 
1426 	if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
1427 		return sprintf(buf, "eui.%16phN\n", ns->uuid);
1428 
1429 	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
1430 		return sprintf(buf, "eui.%8phN\n", ns->eui);
1431 
1432 	while (ctrl->serial[serial_len - 1] == ' ')
1433 		serial_len--;
1434 	while (ctrl->model[model_len - 1] == ' ')
1435 		model_len--;
1436 
1437 	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
1438 		serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
1439 }
1440 static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
1441 
1442 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
1443 								char *buf)
1444 {
1445 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
1446 	return sprintf(buf, "%pU\n", ns->uuid);
1447 }
1448 static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
1449 
1450 static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
1451 								char *buf)
1452 {
1453 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
1454 	return sprintf(buf, "%8phd\n", ns->eui);
1455 }
1456 static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
1457 
1458 static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
1459 								char *buf)
1460 {
1461 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
1462 	return sprintf(buf, "%d\n", ns->ns_id);
1463 }
1464 static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
1465 
1466 static struct attribute *nvme_ns_attrs[] = {
1467 	&dev_attr_wwid.attr,
1468 	&dev_attr_uuid.attr,
1469 	&dev_attr_eui.attr,
1470 	&dev_attr_nsid.attr,
1471 	NULL,
1472 };
1473 
1474 static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
1475 		struct attribute *a, int n)
1476 {
1477 	struct device *dev = container_of(kobj, struct device, kobj);
1478 	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
1479 
1480 	if (a == &dev_attr_uuid.attr) {
1481 		if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
1482 			return 0;
1483 	}
1484 	if (a == &dev_attr_eui.attr) {
1485 		if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
1486 			return 0;
1487 	}
1488 	return a->mode;
1489 }
1490 
1491 static const struct attribute_group nvme_ns_attr_group = {
1492 	.attrs		= nvme_ns_attrs,
1493 	.is_visible	= nvme_ns_attrs_are_visible,
1494 };
1495 
1496 #define nvme_show_str_function(field)						\
1497 static ssize_t  field##_show(struct device *dev,				\
1498 			    struct device_attribute *attr, char *buf)		\
1499 {										\
1500         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
1501         return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field);	\
1502 }										\
1503 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
1504 
1505 #define nvme_show_int_function(field)						\
1506 static ssize_t  field##_show(struct device *dev,				\
1507 			    struct device_attribute *attr, char *buf)		\
1508 {										\
1509         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
1510         return sprintf(buf, "%d\n", ctrl->field);	\
1511 }										\
1512 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
1513 
1514 nvme_show_str_function(model);
1515 nvme_show_str_function(serial);
1516 nvme_show_str_function(firmware_rev);
1517 nvme_show_int_function(cntlid);
1518 
1519 static ssize_t nvme_sysfs_delete(struct device *dev,
1520 				struct device_attribute *attr, const char *buf,
1521 				size_t count)
1522 {
1523 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1524 
1525 	if (device_remove_file_self(dev, attr))
1526 		ctrl->ops->delete_ctrl(ctrl);
1527 	return count;
1528 }
1529 static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
1530 
1531 static ssize_t nvme_sysfs_show_transport(struct device *dev,
1532 					 struct device_attribute *attr,
1533 					 char *buf)
1534 {
1535 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1536 
1537 	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
1538 }
1539 static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
1540 
1541 static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
1542 					 struct device_attribute *attr,
1543 					 char *buf)
1544 {
1545 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1546 
1547 	return snprintf(buf, PAGE_SIZE, "%s\n",
1548 			ctrl->ops->get_subsysnqn(ctrl));
1549 }
1550 static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
1551 
1552 static ssize_t nvme_sysfs_show_address(struct device *dev,
1553 					 struct device_attribute *attr,
1554 					 char *buf)
1555 {
1556 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1557 
1558 	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
1559 }
1560 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
1561 
1562 static struct attribute *nvme_dev_attrs[] = {
1563 	&dev_attr_reset_controller.attr,
1564 	&dev_attr_rescan_controller.attr,
1565 	&dev_attr_model.attr,
1566 	&dev_attr_serial.attr,
1567 	&dev_attr_firmware_rev.attr,
1568 	&dev_attr_cntlid.attr,
1569 	&dev_attr_delete_controller.attr,
1570 	&dev_attr_transport.attr,
1571 	&dev_attr_subsysnqn.attr,
1572 	&dev_attr_address.attr,
1573 	NULL
1574 };
1575 
1576 #define CHECK_ATTR(ctrl, a, name)		\
1577 	if ((a) == &dev_attr_##name.attr &&	\
1578 	    !(ctrl)->ops->get_##name)		\
1579 		return 0
1580 
1581 static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
1582 		struct attribute *a, int n)
1583 {
1584 	struct device *dev = container_of(kobj, struct device, kobj);
1585 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
1586 
1587 	if (a == &dev_attr_delete_controller.attr) {
1588 		if (!ctrl->ops->delete_ctrl)
1589 			return 0;
1590 	}
1591 
1592 	CHECK_ATTR(ctrl, a, subsysnqn);
1593 	CHECK_ATTR(ctrl, a, address);
1594 
1595 	return a->mode;
1596 }
1597 
1598 static struct attribute_group nvme_dev_attrs_group = {
1599 	.attrs		= nvme_dev_attrs,
1600 	.is_visible	= nvme_dev_attrs_are_visible,
1601 };
1602 
1603 static const struct attribute_group *nvme_dev_attr_groups[] = {
1604 	&nvme_dev_attrs_group,
1605 	NULL,
1606 };
1607 
1608 static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
1609 {
1610 	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
1611 	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
1612 
1613 	return nsa->ns_id - nsb->ns_id;
1614 }
1615 
1616 static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1617 {
1618 	struct nvme_ns *ns, *ret = NULL;
1619 
1620 	mutex_lock(&ctrl->namespaces_mutex);
1621 	list_for_each_entry(ns, &ctrl->namespaces, list) {
1622 		if (ns->ns_id == nsid) {
1623 			if (!kref_get_unless_zero(&ns->kref))
1624 				continue;
1625 			ret = ns;
1626 			break;
1627 		}
1628 		if (ns->ns_id > nsid)
1629 			break;
1630 	}
1631 	mutex_unlock(&ctrl->namespaces_mutex);
1632 	return ret;
1633 }
1634 
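/*
 * Create the block device for a newly discovered namespace: allocate a
 * blk-mq queue off the controller's tagset, read Identify Namespace, and
 * either hand the namespace to LightNVM or expose it as a regular gendisk
 * named nvme<ctrl>n<instance>.
 */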
1635 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1636 {
1637 	struct nvme_ns *ns;
1638 	struct gendisk *disk;
1639 	struct nvme_id_ns *id;
1640 	char disk_name[DISK_NAME_LEN];
1641 	int node = dev_to_node(ctrl->dev);
1642 
1643 	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
1644 	if (!ns)
1645 		return;
1646 
1647 	ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
1648 	if (ns->instance < 0)
1649 		goto out_free_ns;
1650 
1651 	ns->queue = blk_mq_init_queue(ctrl->tagset);
1652 	if (IS_ERR(ns->queue))
1653 		goto out_release_instance;
1654 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
1655 	ns->queue->queuedata = ns;
1656 	ns->ctrl = ctrl;
1657 
1658 	kref_init(&ns->kref);
1659 	ns->ns_id = nsid;
1660 	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
1661 
1662 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
1663 	nvme_set_queue_limits(ctrl, ns->queue);
1664 
1665 	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
1666 
1667 	if (nvme_revalidate_ns(ns, &id))
1668 		goto out_free_queue;
1669 
1670 	if (nvme_nvm_ns_supported(ns, id)) {
1671 		if (nvme_nvm_register(ns, disk_name, node,
1672 							&nvme_ns_attr_group)) {
1673 			dev_warn(ctrl->dev, "%s: LightNVM init failure\n",
1674 								__func__);
1675 			goto out_free_id;
1676 		}
1677 	} else {
1678 		disk = alloc_disk_node(0, node);
1679 		if (!disk)
1680 			goto out_free_id;
1681 
1682 		disk->fops = &nvme_fops;
1683 		disk->private_data = ns;
1684 		disk->queue = ns->queue;
1685 		disk->flags = GENHD_FL_EXT_DEVT;
1686 		memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
1687 		ns->disk = disk;
1688 
1689 		__nvme_revalidate_disk(disk, id);
1690 	}
1691 
1692 	mutex_lock(&ctrl->namespaces_mutex);
1693 	list_add_tail(&ns->list, &ctrl->namespaces);
1694 	mutex_unlock(&ctrl->namespaces_mutex);
1695 
1696 	kref_get(&ctrl->kref);
1697 
1698 	kfree(id);
1699 
1700 	if (ns->ndev)
1701 		return;
1702 
1703 	device_add_disk(ctrl->device, ns->disk);
1704 	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
1705 					&nvme_ns_attr_group))
1706 		pr_warn("%s: failed to create sysfs group for identification\n",
1707 			ns->disk->disk_name);
1708 	return;
1709  out_free_id:
1710 	kfree(id);
1711  out_free_queue:
1712 	blk_cleanup_queue(ns->queue);
1713  out_release_instance:
1714 	ida_simple_remove(&ctrl->ns_ida, ns->instance);
1715  out_free_ns:
1716 	kfree(ns);
1717 }
1718 
1719 static void nvme_ns_remove(struct nvme_ns *ns)
1720 {
1721 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
1722 		return;
1723 
1724 	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
1725 		if (blk_get_integrity(ns->disk))
1726 			blk_integrity_unregister(ns->disk);
1727 		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
1728 					&nvme_ns_attr_group);
1729 		del_gendisk(ns->disk);
1730 		blk_cleanup_queue(ns->queue);
1731 	}
1732 
1733 	mutex_lock(&ns->ctrl->namespaces_mutex);
1734 	list_del_init(&ns->list);
1735 	mutex_unlock(&ns->ctrl->namespaces_mutex);
1736 
1737 	nvme_put_ns(ns);
1738 }
1739 
1740 static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1741 {
1742 	struct nvme_ns *ns;
1743 
1744 	ns = nvme_find_get_ns(ctrl, nsid);
1745 	if (ns) {
1746 		if (ns->disk && revalidate_disk(ns->disk))
1747 			nvme_ns_remove(ns);
1748 		nvme_put_ns(ns);
1749 	} else
1750 		nvme_alloc_ns(ctrl, nsid);
1751 }
1752 
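/*
 * Remove every namespace whose ID is greater than @nsid, the highest ID
 * still considered valid after a scan.
 */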
1753 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
1754 					unsigned nsid)
1755 {
1756 	struct nvme_ns *ns, *next;
1757 
1758 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
1759 		if (ns->ns_id > nsid)
1760 			nvme_ns_remove(ns);
1761 	}
1762 }
1763 
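/*
 * Scan namespaces using the Identify active namespace ID list supported
 * since NVMe 1.1.  Each 4KB page holds up to 1024 IDs; IDs present in the
 * list are (re)validated, IDs we hold that are missing from it are
 * removed.  Returns 0 on success so the caller can skip the sequential
 * fallback scan.
 */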
1764 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
1765 {
1766 	struct nvme_ns *ns;
1767 	__le32 *ns_list;
1768 	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
1769 	int ret = 0;
1770 
1771 	ns_list = kzalloc(0x1000, GFP_KERNEL);
1772 	if (!ns_list)
1773 		return -ENOMEM;
1774 
1775 	for (i = 0; i < num_lists; i++) {
1776 		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
1777 		if (ret)
1778 			goto free;
1779 
1780 		for (j = 0; j < min(nn, 1024U); j++) {
1781 			nsid = le32_to_cpu(ns_list[j]);
1782 			if (!nsid)
1783 				goto out;
1784 
1785 			nvme_validate_ns(ctrl, nsid);
1786 
1787 			while (++prev < nsid) {
1788 				ns = nvme_find_get_ns(ctrl, prev);
1789 				if (ns) {
1790 					nvme_ns_remove(ns);
1791 					nvme_put_ns(ns);
1792 				}
1793 			}
1794 		}
1795 		nn -= j;
1796 	}
1797  out:
1798 	nvme_remove_invalid_namespaces(ctrl, prev);
1799  free:
1800 	kfree(ns_list);
1801 	return ret;
1802 }
1803 
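/*
 * Fallback scan for controllers without namespace list support: probe
 * every namespace ID from 1 up to the reported namespace count, then drop
 * anything above that count.
 */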
1804 static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
1805 {
1806 	unsigned i;
1807 
1808 	for (i = 1; i <= nn; i++)
1809 		nvme_validate_ns(ctrl, i);
1810 
1811 	nvme_remove_invalid_namespaces(ctrl, nn);
1812 }
1813 
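/*
 * Worker for ctrl->scan_work: bails out unless the controller is live,
 * re-reads the Identify Controller data and rescans the namespace
 * inventory, preferring the namespace-list scan on NVMe 1.1+ controllers
 * without the IDENTIFY_CNS quirk.  The namespace list is re-sorted
 * (ns_cmp) when the scan is done.
 */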
1814 static void nvme_scan_work(struct work_struct *work)
1815 {
1816 	struct nvme_ctrl *ctrl =
1817 		container_of(work, struct nvme_ctrl, scan_work);
1818 	struct nvme_id_ctrl *id;
1819 	unsigned nn;
1820 
1821 	if (ctrl->state != NVME_CTRL_LIVE)
1822 		return;
1823 
1824 	if (nvme_identify_ctrl(ctrl, &id))
1825 		return;
1826 
1827 	nn = le32_to_cpu(id->nn);
1828 	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1829 	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
1830 		if (!nvme_scan_ns_list(ctrl, nn))
1831 			goto done;
1832 	}
1833 	nvme_scan_ns_sequential(ctrl, nn);
1834  done:
1835 	mutex_lock(&ctrl->namespaces_mutex);
1836 	list_sort(NULL, &ctrl->namespaces, ns_cmp);
1837 	mutex_unlock(&ctrl->namespaces_mutex);
1838 	kfree(id);
1839 }
1840 
1841 void nvme_queue_scan(struct nvme_ctrl *ctrl)
1842 {
1843 	/*
1844 	 * Do not queue new scan work when a controller is reset during
1845 	 * removal.
1846 	 */
1847 	if (ctrl->state == NVME_CTRL_LIVE)
1848 		schedule_work(&ctrl->scan_work);
1849 }
1850 EXPORT_SYMBOL_GPL(nvme_queue_scan);
1851 
1852 /*
1853  * This function iterates the namespace list unlocked to allow recovery from
1854  * controller failure. It is up to the caller to ensure the namespace list is
1855  * not modified by scan work while this function is executing.
1856  */
1857 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
1858 {
1859 	struct nvme_ns *ns, *next;
1860 
1861 	/*
1862 	 * The dead state indicates the controller was not gracefully
1863 	 * disconnected. In that case, we won't be able to flush any data while
1864 	 * removing the namespaces' disks; fail all the queues now to avoid
1865 	 * potentially having to clean up the failed sync later.
1866 	 */
1867 	if (ctrl->state == NVME_CTRL_DEAD)
1868 		nvme_kill_queues(ctrl);
1869 
1870 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
1871 		nvme_ns_remove(ns);
1872 }
1873 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
1874 
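/*
 * Worker for ctrl->async_event_work: re-issues Asynchronous Event Request
 * commands until event_limit is used up.  ctrl->lock is dropped around the
 * ops->submit_async_event() call and re-taken to check the remaining
 * limit.
 */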
1875 static void nvme_async_event_work(struct work_struct *work)
1876 {
1877 	struct nvme_ctrl *ctrl =
1878 		container_of(work, struct nvme_ctrl, async_event_work);
1879 
1880 	spin_lock_irq(&ctrl->lock);
1881 	while (ctrl->event_limit > 0) {
1882 		int aer_idx = --ctrl->event_limit;
1883 
1884 		spin_unlock_irq(&ctrl->lock);
1885 		ctrl->ops->submit_async_event(ctrl, aer_idx);
1886 		spin_lock_irq(&ctrl->lock);
1887 	}
1888 	spin_unlock_irq(&ctrl->lock);
1889 }
1890 
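/*
 * Completion handler for AER commands: a successful (or aborted) event
 * replenishes event_limit and reschedules the worker.  Only successful
 * completions are decoded further; a namespace-changed notice kicks off a
 * rescan, anything else is merely logged.
 */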
1891 void nvme_complete_async_event(struct nvme_ctrl *ctrl,
1892 		struct nvme_completion *cqe)
1893 {
1894 	u16 status = le16_to_cpu(cqe->status) >> 1;
1895 	u32 result = le32_to_cpu(cqe->result);
1896 
1897 	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
1898 		++ctrl->event_limit;
1899 		schedule_work(&ctrl->async_event_work);
1900 	}
1901 
1902 	if (status != NVME_SC_SUCCESS)
1903 		return;
1904 
1905 	switch (result & 0xff07) {
1906 	case NVME_AER_NOTICE_NS_CHANGED:
1907 		dev_info(ctrl->device, "rescanning\n");
1908 		nvme_queue_scan(ctrl);
1909 		break;
1910 	default:
1911 		dev_warn(ctrl->device, "async event result %08x\n", result);
1912 	}
1913 }
1914 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
1915 
1916 void nvme_queue_async_events(struct nvme_ctrl *ctrl)
1917 {
1918 	ctrl->event_limit = NVME_NR_AERS;
1919 	schedule_work(&ctrl->async_event_work);
1920 }
1921 EXPORT_SYMBOL_GPL(nvme_queue_async_events);
1922 
1923 static DEFINE_IDA(nvme_instance_ida);
1924 
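/*
 * Allocate a unique controller instance number (the %d in the "nvme%d"
 * character device name) from nvme_instance_ida, retrying while the IDA
 * asks for more preallocated memory (-EAGAIN).
 */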
1925 static int nvme_set_instance(struct nvme_ctrl *ctrl)
1926 {
1927 	int instance, error;
1928 
1929 	do {
1930 		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
1931 			return -ENODEV;
1932 
1933 		spin_lock(&dev_list_lock);
1934 		error = ida_get_new(&nvme_instance_ida, &instance);
1935 		spin_unlock(&dev_list_lock);
1936 	} while (error == -EAGAIN);
1937 
1938 	if (error)
1939 		return -ENODEV;
1940 
1941 	ctrl->instance = instance;
1942 	return 0;
1943 }
1944 
1945 static void nvme_release_instance(struct nvme_ctrl *ctrl)
1946 {
1947 	spin_lock(&dev_list_lock);
1948 	ida_remove(&nvme_instance_ida, ctrl->instance);
1949 	spin_unlock(&dev_list_lock);
1950 }
1951 
1952 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
1953 {
1954 	flush_work(&ctrl->async_event_work);
1955 	flush_work(&ctrl->scan_work);
1956 	nvme_remove_namespaces(ctrl);
1957 
1958 	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
1959 
1960 	spin_lock(&dev_list_lock);
1961 	list_del(&ctrl->node);
1962 	spin_unlock(&dev_list_lock);
1963 }
1964 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
1965 
1966 static void nvme_free_ctrl(struct kref *kref)
1967 {
1968 	struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);
1969 
1970 	put_device(ctrl->device);
1971 	nvme_release_instance(ctrl);
1972 	ida_destroy(&ctrl->ns_ida);
1973 
1974 	ctrl->ops->free_ctrl(ctrl);
1975 }
1976 
1977 void nvme_put_ctrl(struct nvme_ctrl *ctrl)
1978 {
1979 	kref_put(&ctrl->kref, nvme_free_ctrl);
1980 }
1981 EXPORT_SYMBOL_GPL(nvme_put_ctrl);
1982 
1983 /*
1984  * Initialize an NVMe controller structure.  This needs to be called during
1985  * the earliest initialization so that we have the initialized structure
1986  * around during probing.
1987  */
1988 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
1989 		const struct nvme_ctrl_ops *ops, unsigned long quirks)
1990 {
1991 	int ret;
1992 
1993 	ctrl->state = NVME_CTRL_NEW;
1994 	spin_lock_init(&ctrl->lock);
1995 	INIT_LIST_HEAD(&ctrl->namespaces);
1996 	mutex_init(&ctrl->namespaces_mutex);
1997 	kref_init(&ctrl->kref);
1998 	ctrl->dev = dev;
1999 	ctrl->ops = ops;
2000 	ctrl->quirks = quirks;
2001 	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
2002 	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
2003 
2004 	ret = nvme_set_instance(ctrl);
2005 	if (ret)
2006 		goto out;
2007 
2008 	ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
2009 				MKDEV(nvme_char_major, ctrl->instance),
2010 				ctrl, nvme_dev_attr_groups,
2011 				"nvme%d", ctrl->instance);
2012 	if (IS_ERR(ctrl->device)) {
2013 		ret = PTR_ERR(ctrl->device);
2014 		goto out_release_instance;
2015 	}
2016 	get_device(ctrl->device);
2017 	ida_init(&ctrl->ns_ida);
2018 
2019 	spin_lock(&dev_list_lock);
2020 	list_add_tail(&ctrl->node, &nvme_ctrl_list);
2021 	spin_unlock(&dev_list_lock);
2022 
2023 	return 0;
2024 out_release_instance:
2025 	nvme_release_instance(ctrl);
2026 out:
2027 	return ret;
2028 }
2029 EXPORT_SYMBOL_GPL(nvme_init_ctrl);
2030 
2031 /**
2032  * nvme_kill_queues(): Ends all namespace queues
2033  * @ctrl: the dead controller whose namespace queues need to be ended
2034  *
2035  * Call this function when the driver determines it is unable to get the
2036  * controller in a state capable of servicing IO.
2037  */
2038 void nvme_kill_queues(struct nvme_ctrl *ctrl)
2039 {
2040 	struct nvme_ns *ns;
2041 
2042 	mutex_lock(&ctrl->namespaces_mutex);
2043 
2044 	/* Forcibly start all queues to avoid having stuck requests */
2045 	blk_mq_start_hw_queues(ctrl->admin_q);
2046 
2047 	list_for_each_entry(ns, &ctrl->namespaces, list) {
2048 		/*
2049 		 * Revalidating a dead namespace sets capacity to 0. This will
2050 		 * end buffered writers dirtying pages that can't be synced.
2051 		 */
2052 		if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
2053 			continue;
2054 		revalidate_disk(ns->disk);
2055 		blk_set_queue_dying(ns->queue);
2056 
2057 		/*
2058 		 * Forcibly start all queues to avoid having stuck requests.
2059 		 * Note that we must ensure the queues are not stopped
2060 		 * when the final removal happens.
2061 		 */
2062 		blk_mq_start_hw_queues(ns->queue);
2063 
2064 		/* draining requests in requeue list */
2065 		blk_mq_kick_requeue_list(ns->queue);
2066 	}
2067 	mutex_unlock(&ctrl->namespaces_mutex);
2068 }
2069 EXPORT_SYMBOL_GPL(nvme_kill_queues);
2070 
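/*
 * Quiesce I/O on all namespaces: mark each request queue stopped, cancel
 * pending requeue work and stop the blk-mq hardware queues so no new
 * commands are dispatched to the transport.
 */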
2071 void nvme_stop_queues(struct nvme_ctrl *ctrl)
2072 {
2073 	struct nvme_ns *ns;
2074 
2075 	mutex_lock(&ctrl->namespaces_mutex);
2076 	list_for_each_entry(ns, &ctrl->namespaces, list) {
2077 		spin_lock_irq(ns->queue->queue_lock);
2078 		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
2079 		spin_unlock_irq(ns->queue->queue_lock);
2080 
2081 		blk_mq_cancel_requeue_work(ns->queue);
2082 		blk_mq_stop_hw_queues(ns->queue);
2083 	}
2084 	mutex_unlock(&ctrl->namespaces_mutex);
2085 }
2086 EXPORT_SYMBOL_GPL(nvme_stop_queues);
2087 
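/*
 * Counterpart to nvme_stop_queues(): clear the stopped flag, restart the
 * blk-mq hardware queues and kick the requeue list so held-back requests
 * are dispatched again.
 */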
2088 void nvme_start_queues(struct nvme_ctrl *ctrl)
2089 {
2090 	struct nvme_ns *ns;
2091 
2092 	mutex_lock(&ctrl->namespaces_mutex);
2093 	list_for_each_entry(ns, &ctrl->namespaces, list) {
2094 		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
2095 		blk_mq_start_stopped_hw_queues(ns->queue, true);
2096 		blk_mq_kick_requeue_list(ns->queue);
2097 	}
2098 	mutex_unlock(&ctrl->namespaces_mutex);
2099 }
2100 EXPORT_SYMBOL_GPL(nvme_start_queues);
2101 
2102 int __init nvme_core_init(void)
2103 {
2104 	int result;
2105 
2106 	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
2107 							&nvme_dev_fops);
2108 	if (result < 0)
2109 		return result;
2110 	else if (result > 0)
2111 		nvme_char_major = result;
2112 
2113 	nvme_class = class_create(THIS_MODULE, "nvme");
2114 	if (IS_ERR(nvme_class)) {
2115 		result = PTR_ERR(nvme_class);
2116 		goto unregister_chrdev;
2117 	}
2118 
2119 	return 0;
2120 
2121  unregister_chrdev:
2122 	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
2123 	return result;
2124 }
2125 
2126 void nvme_core_exit(void)
2127 {
2128 	class_destroy(nvme_class);
2129 	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
2130 }
2131 
2132 MODULE_LICENSE("GPL");
2133 MODULE_VERSION("1.0");
2134 module_init(nvme_core_init);
2135 module_exit(nvme_core_exit);
2136