/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/mutex.h>
#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

/* I/O timeout: the module parameter is in seconds, converted here to jiffies */
extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * The PCI_DEVICE(0x1c58, 0x0003) controller needs a delay before its
 * readiness (CSTS.RDY) can be checked, and carries the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk for that purpose.  The delay
 * below, in milliseconds, was found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2000
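
/*
 * Hedged sketch (not part of this interface): one way an init path could
 * honour the quirk above before polling controller readiness.  The helper
 * name and the 'quirky' flag are illustrative assumptions, so the block is
 * compiled out.
 */
#if 0
static int nvme_wait_ready_sketch(struct nvme_dev *dev, bool quirky)
{
	/* Quirky parts report stale CSTS until the fixed delay elapses. */
	if (quirky)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	/* Poll CSTS.RDY; a real implementation would also time out. */
	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY))
		msleep(100);
	return 0;
}
#endif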

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	struct request_queue *admin_q;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;		/* doorbell registers */
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	unsigned queue_count;
	unsigned online_queues;
	unsigned max_qid;
	int q_depth;
	u32 db_stride;			/* doorbell stride, in u32 units */
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	struct kref kref;
	struct device *device;
	struct work_struct reset_work;
	struct work_struct probe_work;
	struct work_struct scan_work;
	struct mutex shutdown_lock;
	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	bool subsystem;
	u32 max_hw_sectors;
	u32 stripe_size;
	u32 page_size;
	void __iomem *cmb;		/* controller memory buffer, if present */
	dma_addr_t cmb_dma_addr;
	u64 cmb_size;
	u32 cmbsz;
	u16 oncs;			/* optional NVM command support bits */
	u16 abort_limit;
	u8 event_limit;
	u8 vwc;				/* volatile write cache flags */
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;
	struct kref kref;

	unsigned ns_id;
	int lba_shift;			/* log2 of the LBA size, in bytes */
	u16 ms;				/* metadata size per LBA, in bytes */
	bool ext;			/* metadata is extended (inline with data) */
	u8 pi_type;			/* end-to-end protection information type */
	int type;			/* NVME_NS_LBA or NVME_NS_LIGHTNVM */
	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  The PRP list itself is not visible in this declaration: it is
 * stored in extra space past the end of the structure, which C cannot
 * express.  Use nvme_alloc_iod() to ensure enough space is allocated to
 * store the PRP list.
 */
struct nvme_iod {
	unsigned long private;	/* For the use of the submitter of the I/O */
	int npages;		/* In the PRP list. 0 means small pool in use */
	int offset;		/* Of PRP list */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
	struct scatterlist sg[0];
};

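/*
 * Hedged sketch: one way a caller could size an nvme_iod allocation so
 * that both the per-page PRP list pointers and the data scatterlist fit
 * in the space past the end of the structure.  The helper name and its
 * parameters are illustrative assumptions (the driver's real allocator is
 * nvme_alloc_iod() in the core), so the block is compiled out.
 */
#if 0
static size_t nvme_iod_size_sketch(unsigned nseg, unsigned nprp_pages)
{
	return sizeof(struct nvme_iod) +
		nprp_pages * sizeof(__le64 *) +		/* PRP list pages */
		nseg * sizeof(struct scatterlist);	/* sg[] entries */
}
#endif
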
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}
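
/*
 * Worked example for nvme_block_nr(): sectors are in 512-byte units, so a
 * namespace formatted with 4096-byte LBAs (lba_shift == 12) maps 512-byte
 * sector 80 to namespace block 80 >> (12 - 9) == 10.
 */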

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, void __user *ubuffer, unsigned bufflen,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		dma_addr_t dma_addr, u32 *result);
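
/*
 * Hedged sketch: how a caller might read the current temperature threshold
 * feature through nvme_get_features() above.  The helper name and the bare
 * -EIO mapping are illustrative assumptions, so the block is compiled out.
 */
#if 0
static int nvme_read_temp_thresh_sketch(struct nvme_dev *dev, u32 *thresh)
{
	/* No data buffer is needed; the value comes back in the result. */
	int status = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
				       thresh);

	return status ? -EIO : 0;
}
#endif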

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct request_queue *q, char *disk_name);
void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
#else
static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}

static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
#endif /* CONFIG_NVM */

#endif /* _NVME_H */