// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Intel Corporation
 *	Keith Busch <kbusch@kernel.org>
 */
#include <linux/blkdev.h>
#include <linux/pr.h>
#include <linux/unaligned.h>

#include "nvme.h"

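/*
 * Block layer persistent reservation (pr_ops) support for NVMe, mapping the
 * generic PR_* operations onto NVMe Reservation commands.
 */

/*
 * Map a block layer reservation type to the NVMe RTYPE encoding (0 when
 * there is no NVMe equivalent).
 */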
static enum nvme_pr_type nvme_pr_type_from_blk(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return NVME_PR_WRITE_EXCLUSIVE;
	case PR_EXCLUSIVE_ACCESS:
		return NVME_PR_EXCLUSIVE_ACCESS;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return NVME_PR_WRITE_EXCLUSIVE_REG_ONLY;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return NVME_PR_WRITE_EXCLUSIVE_ALL_REGS;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS;
	}

	return 0;
}

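/* Map an NVMe RTYPE value back to the block layer reservation type. */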
static enum pr_type block_pr_type_from_nvme(enum nvme_pr_type type)
{
	switch (type) {
	case NVME_PR_WRITE_EXCLUSIVE:
		return PR_WRITE_EXCLUSIVE;
	case NVME_PR_EXCLUSIVE_ACCESS:
		return PR_EXCLUSIVE_ACCESS;
	case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
		return PR_WRITE_EXCLUSIVE_REG_ONLY;
	case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return PR_EXCLUSIVE_ACCESS_REG_ONLY;
	case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
		return PR_WRITE_EXCLUSIVE_ALL_REGS;
	case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return PR_EXCLUSIVE_ACCESS_ALL_REGS;
	}

	return 0;
}

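/*
 * Multipath (ns_head) case: pick a live path under head->srcu and issue the
 * command on it; -EWOULDBLOCK is returned when no usable path is available.
 */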
static int nvme_send_ns_head_pr_command(struct block_device *bdev,
		struct nvme_command *c, void *data, unsigned int data_len)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EWOULDBLOCK;

	if (ns) {
		c->common.nsid = cpu_to_le32(ns->head->ns_id);
		ret = nvme_submit_sync_cmd(ns->queue, c, data, data_len);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
		void *data, unsigned int data_len)
{
	c->common.nsid = cpu_to_le32(ns->head->ns_id);
	return nvme_submit_sync_cmd(ns->queue, c, data, data_len);
}

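/*
 * Convert an NVMe completion status into the block layer PR_STS_* codes, or
 * a negative errno for obviously malformed requests.  Transport and path
 * errors become PR_STS_PATH_FAILED so multipath-aware callers can retry on
 * another path.
 */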
static int nvme_status_to_pr_err(int status)
{
	if (nvme_is_path_error(status))
		return PR_STS_PATH_FAILED;

	switch (status & NVME_SCT_SC_MASK) {
	case NVME_SC_SUCCESS:
		return PR_STS_SUCCESS;
	case NVME_SC_RESERVATION_CONFLICT:
		return PR_STS_RESERVATION_CONFLICT;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return -EINVAL;
	default:
		return PR_STS_IOERR;
	}
}

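/* Dispatch through the multipath head or directly to the single namespace. */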
static int nvme_send_pr_command(struct block_device *bdev,
		struct nvme_command *c, void *data, unsigned int data_len)
{
	if (nvme_disk_is_ns_head(bdev->bd_disk))
		return nvme_send_ns_head_pr_command(bdev, c, data, data_len);

	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,
				       data_len);
}

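/*
 * Build the 16-byte key payload (current key in bytes 0-7, new/preempt key
 * in bytes 8-15, little-endian), issue the reservation command and convert
 * the completion status into a PR_STS_* result.
 */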
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_command c = { };
	u8 data[16] = { 0, };
	int ret;

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	c.common.opcode = op;
	c.common.cdw10 = cpu_to_le32(cdw10);

	ret = nvme_send_pr_command(bdev, &c, data, sizeof(data));
	if (ret < 0)
		return ret;

	return nvme_status_to_pr_err(ret);
}

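/*
 * Reservation Register: RREGA in cdw10 bits 2:0 (0 = register a new key,
 * 2 = replace an existing one), IEKEY in bit 3, and CPTPL in bits 31:30 set
 * to 11b so the registration persists through power loss.
 */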
static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

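/* Reservation Acquire with RACQA = 0 (acquire); RTYPE in cdw10 bits 15:8. */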
static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type_from_blk(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

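/* Reservation Acquire with RACQA = 1 (preempt) or 2 (preempt and abort). */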
static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (abort ? 2 : 1);

	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

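/* Reservation Release with RRELA = 1 (clear); IEKEY when no key is given. */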
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 0 : 1 << 3);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

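/* Reservation Release with RRELA = 0 (release); RTYPE in cdw10 bits 15:8. */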
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (key ? 0 : 1 << 3);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

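/*
 * Issue a Reservation Report, first requesting the extended data structure
 * (128-bit host IDs).  A controller using the short host ID format fails
 * that with NVME_SC_HOST_ID_INCONSIST, so retry without the EDS bit and
 * tell the caller via *eds which layout the buffer holds.
 */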
static int nvme_pr_resv_report(struct block_device *bdev, void *data,
		u32 data_len, bool *eds)
{
	struct nvme_command c = { };
	int ret;

	c.common.opcode = nvme_cmd_resv_report;
	c.common.cdw10 = cpu_to_le32(nvme_bytes_to_numd(data_len));
	c.common.cdw11 = cpu_to_le32(NVME_EXTENDED_DATA_STRUCT);
	*eds = true;

retry:
	ret = nvme_send_pr_command(bdev, &c, data, data_len);
	if (ret == NVME_SC_HOST_ID_INCONSIST &&
	    c.common.cdw11 == cpu_to_le32(NVME_EXTENDED_DATA_STRUCT)) {
		c.common.cdw11 = 0;
		*eds = false;
		goto retry;
	}

	if (ret < 0)
		return ret;

	return nvme_status_to_pr_err(ret);
}

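/*
 * Report the reservation generation and up to keys_info->num_keys
 * registered keys.  keys_info->num_keys is updated to the total registrant
 * count, so a caller can detect truncation and retry with a larger buffer.
 */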
static int nvme_pr_read_keys(struct block_device *bdev,
		struct pr_keys *keys_info)
{
	u32 rse_len, num_keys = keys_info->num_keys;
	struct nvme_reservation_status_ext *rse;
	int ret, i;
	bool eds;

	/*
	 * Assume we are using 128-bit host IDs and allocate a buffer large
	 * enough to get enough keys to fill the return keys buffer.
	 */
	rse_len = struct_size(rse, regctl_eds, num_keys);
	rse = kzalloc(rse_len, GFP_KERNEL);
	if (!rse)
		return -ENOMEM;

	ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
	if (ret)
		goto free_rse;

	keys_info->generation = le32_to_cpu(rse->gen);
	keys_info->num_keys = get_unaligned_le16(&rse->regctl);

	num_keys = min(num_keys, keys_info->num_keys);
	for (i = 0; i < num_keys; i++) {
		if (eds) {
			keys_info->keys[i] =
					le64_to_cpu(rse->regctl_eds[i].rkey);
		} else {
			struct nvme_reservation_status *rs;

			rs = (struct nvme_reservation_status *)rse;
			keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey);
		}
	}

free_rse:
	kfree(rse);
	return ret;
}

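/*
 * Read the current reservation holder.  A small probe report first fetches
 * the registrant count so a right-sized buffer can be allocated; if the
 * count changed by the time the full report returns, start over.  The
 * holder is the first registrant with its RCSTS flag set.
 */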
static int nvme_pr_read_reservation(struct block_device *bdev,
		struct pr_held_reservation *resv)
{
	struct nvme_reservation_status_ext tmp_rse, *rse;
	int ret, i, num_regs;
	u32 rse_len;
	bool eds;

get_num_regs:
	/*
	 * Get the number of registrations so we know how big to allocate
	 * the response buffer.
	 */
	ret = nvme_pr_resv_report(bdev, &tmp_rse, sizeof(tmp_rse), &eds);
	if (ret)
		return ret;

	num_regs = get_unaligned_le16(&tmp_rse.regctl);
	if (!num_regs) {
		resv->generation = le32_to_cpu(tmp_rse.gen);
		return 0;
	}

	rse_len = struct_size(rse, regctl_eds, num_regs);
	rse = kzalloc(rse_len, GFP_KERNEL);
	if (!rse)
		return -ENOMEM;

	ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
	if (ret)
		goto free_rse;

	if (num_regs != get_unaligned_le16(&rse->regctl)) {
		kfree(rse);
		goto get_num_regs;
	}

	resv->generation = le32_to_cpu(rse->gen);
	resv->type = block_pr_type_from_nvme(rse->rtype);

	for (i = 0; i < num_regs; i++) {
		if (eds) {
			if (rse->regctl_eds[i].rcsts) {
				resv->key = le64_to_cpu(rse->regctl_eds[i].rkey);
				break;
			}
		} else {
			struct nvme_reservation_status *rs;

			rs = (struct nvme_reservation_status *)rse;
			if (rs->regctl_ds[i].rcsts) {
				resv->key = le64_to_cpu(rs->regctl_ds[i].rkey);
				break;
			}
		}
	}

free_rse:
	kfree(rse);
	return ret;
}

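/*
 * The register/reserve/release/preempt/clear callbacks below back the
 * generic persistent reservation ioctls from the uapi <linux/pr.h>.  As a
 * rough, illustrative-only userspace sketch (0x123 is an arbitrary key):
 *
 *	struct pr_registration reg = { .old_key = 0, .new_key = 0x123 };
 *	struct pr_reservation rsv = { .key = 0x123,
 *				      .type = PR_WRITE_EXCLUSIVE };
 *
 *	ioctl(fd, IOCTL_PR_REGISTER, &reg);	// -> nvme_pr_register()
 *	ioctl(fd, IOCTL_PR_RESERVE, &rsv);	// -> nvme_pr_reserve()
 *
 * pr_read_keys and pr_read_reservation serve in-kernel pr_ops users.
 */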
const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
	.pr_read_keys	= nvme_pr_read_keys,
	.pr_read_reservation = nvme_pr_read_reservation,
};