/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

/*
 * Refer to the SCSI-NVMe Translation spec for details on how
 * each command is translated.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/unaligned.h>
#include <scsi/sg.h>
#include <scsi/scsi.h>

#include "nvme.h"

static int sg_version_num = 30534;	/* 2 digits for each component */

/* VPD Page Codes */
#define VPD_SUPPORTED_PAGES				0x00
#define VPD_SERIAL_NUMBER				0x80
#define VPD_DEVICE_IDENTIFIERS				0x83
#define VPD_EXTENDED_INQUIRY				0x86
#define VPD_BLOCK_LIMITS				0xB0
#define VPD_BLOCK_DEV_CHARACTERISTICS			0xB1

/* format unit parameter list offsets */
#define FORMAT_UNIT_SHORT_PARM_LIST_LEN			4
#define FORMAT_UNIT_LONG_PARM_LIST_LEN			8
#define FORMAT_UNIT_PROT_INT_OFFSET			3
#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET		0
#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK		0x07

/* Misc. defines */
#define FIXED_SENSE_DATA				0x70
#define DESC_FORMAT_SENSE_DATA				0x72
#define FIXED_SENSE_DATA_ADD_LENGTH			10
#define LUN_ENTRY_SIZE					8
#define LUN_DATA_HEADER_SIZE				8
#define ALL_LUNS_RETURNED				0x02
#define ALL_WELL_KNOWN_LUNS_RETURNED			0x01
#define RESTRICTED_LUNS_RETURNED			0x00
#define NVME_POWER_STATE_START_VALID			0x00
#define NVME_POWER_STATE_ACTIVE				0x01
#define NVME_POWER_STATE_IDLE				0x02
#define NVME_POWER_STATE_STANDBY			0x03
#define NVME_POWER_STATE_LU_CONTROL			0x07
#define POWER_STATE_0					0
#define POWER_STATE_1					1
#define POWER_STATE_2					2
#define POWER_STATE_3					3
#define DOWNLOAD_SAVE_ACTIVATE				0x05
#define DOWNLOAD_SAVE_DEFER_ACTIVATE			0x0E
#define ACTIVATE_DEFERRED_MICROCODE			0x0F
#define FORMAT_UNIT_IMMED_MASK				0x2
#define FORMAT_UNIT_IMMED_OFFSET			1
#define KELVIN_TEMP_FACTOR				273
#define FIXED_FMT_SENSE_DATA_SIZE			18
#define DESC_FMT_SENSE_DATA_SIZE			8

/* SCSI/NVMe defines and bit masks */
#define INQ_STANDARD_INQUIRY_PAGE			0x00
#define INQ_SUPPORTED_VPD_PAGES_PAGE			0x00
#define INQ_UNIT_SERIAL_NUMBER_PAGE			0x80
#define INQ_DEVICE_IDENTIFICATION_PAGE			0x83
#define INQ_EXTENDED_INQUIRY_DATA_PAGE			0x86
#define INQ_BDEV_LIMITS_PAGE				0xB0
#define INQ_BDEV_CHARACTERISTICS_PAGE			0xB1
#define INQ_SERIAL_NUMBER_LENGTH			0x14
#define INQ_NUM_SUPPORTED_VPD_PAGES			6
#define VERSION_SPC_4					0x06
#define ACA_UNSUPPORTED					0
#define STANDARD_INQUIRY_LENGTH				36
#define ADDITIONAL_STD_INQ_LENGTH			31
#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH		0x3C
#define RESERVED_FIELD					0

/* Mode Sense/Select defines */
#define MODE_PAGE_INFO_EXCEP				0x1C
#define MODE_PAGE_CACHING				0x08
#define MODE_PAGE_CONTROL				0x0A
#define MODE_PAGE_POWER_CONDITION			0x1A
#define MODE_PAGE_RETURN_ALL				0x3F
#define MODE_PAGE_BLK_DES_LEN				0x08
#define MODE_PAGE_LLBAA_BLK_DES_LEN			0x10
#define MODE_PAGE_CACHING_LEN				0x14
#define MODE_PAGE_CONTROL_LEN				0x0C
#define MODE_PAGE_POW_CND_LEN				0x28
#define MODE_PAGE_INF_EXC_LEN				0x0C
#define MODE_PAGE_ALL_LEN				0x54
#define MODE_SENSE6_MPH_SIZE				4
#define MODE_SENSE_PAGE_CONTROL_MASK			0xC0
#define MODE_SENSE_PAGE_CODE_OFFSET			2
#define MODE_SENSE_PAGE_CODE_MASK			0x3F
#define MODE_SENSE_LLBAA_MASK				0x10
#define MODE_SENSE_LLBAA_SHIFT				4
#define MODE_SENSE_DBD_MASK				8
#define MODE_SENSE_DBD_SHIFT				3
#define MODE_SENSE10_MPH_SIZE				8
#define MODE_SELECT_CDB_PAGE_FORMAT_MASK		0x10
#define MODE_SELECT_CDB_SAVE_PAGES_MASK			0x1
#define MODE_SELECT_6_BD_OFFSET				3
#define MODE_SELECT_10_BD_OFFSET			6
#define MODE_SELECT_10_LLBAA_OFFSET			4
#define MODE_SELECT_10_LLBAA_MASK			1
#define MODE_SELECT_6_MPH_SIZE				4
#define MODE_SELECT_10_MPH_SIZE				8
#define CACHING_MODE_PAGE_WCE_MASK			0x04
#define MODE_SENSE_BLK_DESC_ENABLED			0
#define MODE_SENSE_BLK_DESC_COUNT			1
#define MODE_SELECT_PAGE_CODE_MASK			0x3F
#define SHORT_DESC_BLOCK				8
#define LONG_DESC_BLOCK					16
#define MODE_PAGE_POW_CND_LEN_FIELD			0x26
#define MODE_PAGE_INF_EXC_LEN_FIELD			0x0A
#define MODE_PAGE_CACHING_LEN_FIELD			0x12
#define MODE_PAGE_CONTROL_LEN_FIELD			0x0A
#define MODE_SENSE_PC_CURRENT_VALUES			0

/* Log Sense defines */
#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE		0x00
#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH		0x07
#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE		0x2F
#define LOG_PAGE_TEMPERATURE_PAGE			0x0D
#define LOG_SENSE_CDB_SP_NOT_ENABLED			0
#define LOG_SENSE_CDB_PC_MASK				0xC0
#define LOG_SENSE_CDB_PC_SHIFT				6
#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES		1
#define LOG_SENSE_CDB_PAGE_CODE_MASK			0x3F
#define REMAINING_INFO_EXCP_PAGE_LENGTH			0x8
#define LOG_INFO_EXCP_PAGE_LENGTH			0xC
#define REMAINING_TEMP_PAGE_LENGTH			0xC
#define LOG_TEMP_PAGE_LENGTH				0x10
#define LOG_TEMP_UNKNOWN				0xFF
#define SUPPORTED_LOG_PAGES_PAGE_LENGTH			0x3

/* Read Capacity defines */
#define READ_CAP_10_RESP_SIZE				8
#define READ_CAP_16_RESP_SIZE				32

/* NVMe Namespace and Command Defines */
#define BYTES_TO_DWORDS					4
#define NVME_MAX_FIRMWARE_SLOT				7

/* Report LUNs defines */
#define REPORT_LUNS_FIRST_LUN_OFFSET			8

/* SCSI ADDITIONAL SENSE Codes */

#define SCSI_ASC_NO_SENSE				0x00
#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT		0x03
#define SCSI_ASC_LUN_NOT_READY				0x04
#define SCSI_ASC_WARNING				0x0B
#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED		0x10
#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED		0x10
#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED		0x10
#define SCSI_ASC_UNRECOVERED_READ_ERROR			0x11
#define SCSI_ASC_MISCOMPARE_DURING_VERIFY		0x1D
#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID		0x20
#define SCSI_ASC_ILLEGAL_COMMAND			0x20
#define SCSI_ASC_ILLEGAL_BLOCK				0x21
#define SCSI_ASC_INVALID_CDB				0x24
#define SCSI_ASC_INVALID_LUN				0x25
#define SCSI_ASC_INVALID_PARAMETER			0x26
#define SCSI_ASC_FORMAT_COMMAND_FAILED			0x31
#define SCSI_ASC_INTERNAL_TARGET_FAILURE		0x44

/* SCSI ADDITIONAL SENSE Code Qualifiers */

#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE			0x00
#define SCSI_ASCQ_FORMAT_COMMAND_FAILED			0x01
#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED		0x01
#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED		0x02
#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED		0x03
#define SCSI_ASCQ_FORMAT_IN_PROGRESS			0x04
#define SCSI_ASCQ_POWER_LOSS_EXPECTED			0x08
#define SCSI_ASCQ_INVALID_LUN_ID			0x09

/* copied from drivers/usb/gadget/function/storage_common.h */
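/*
 * Reads four bytes starting one byte before @buf and masks the result down
 * to 24 bits, so the byte preceding @buf must be addressable -- true for
 * the CDB fields this is called on.
 */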
static inline u32 get_unaligned_be24(u8 *buf)
{
	return 0xffffff & (u32) get_unaligned_be32(buf - 1);
}

/* Struct to gather data that needs to be extracted from a SCSI CDB.
   Not conforming to any particular CDB variant, but compatible with all. */

struct nvme_trans_io_cdb {
	u8 fua;
	u8 prot_info;
	u64 lba;
	u32 xfer_len;
};


/* Internal Helper Functions */


/* Copy data to userspace memory */
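/*
 * Note: when hdr->iovec_count is non-zero, hdr->dxferp points to a user
 * array of struct sg_iovec describing the transfer, not to the data itself.
 */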

static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
								unsigned long n)
{
	int i;
	void *index = from;
	size_t remaining = n;
	size_t xfer_len;

	if (hdr->iovec_count > 0) {
		struct sg_iovec sgl;

		for (i = 0; i < hdr->iovec_count; i++) {
			if (copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec)))
				return -EFAULT;
			xfer_len = min(remaining, sgl.iov_len);
			if (copy_to_user(sgl.iov_base, index, xfer_len))
				return -EFAULT;

			index += xfer_len;
			remaining -= xfer_len;
			if (remaining == 0)
				break;
		}
		return 0;
	}

	if (copy_to_user(hdr->dxferp, from, n))
		return -EFAULT;
	return 0;
}

/* Copy data from userspace memory */

static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
								unsigned long n)
{
	int i;
	void *index = to;
	size_t remaining = n;
	size_t xfer_len;

	if (hdr->iovec_count > 0) {
		struct sg_iovec sgl;

		for (i = 0; i < hdr->iovec_count; i++) {
			if (copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec)))
				return -EFAULT;
			xfer_len = min(remaining, sgl.iov_len);
			if (copy_from_user(index, sgl.iov_base, xfer_len))
				return -EFAULT;
			index += xfer_len;
			remaining -= xfer_len;
			if (remaining == 0)
				break;
		}
		return 0;
	}

	if (copy_from_user(to, hdr->dxferp, n))
		return -EFAULT;
	return 0;
}

/* Status/Sense Buffer Writeback */
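/*
 * On a non-GOOD status this builds descriptor-format sense data (SPC-4):
 * byte 0 = response code 0x72, byte 1 = sense key, bytes 2/3 = ASC/ASCQ.
 */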

static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
				 u8 asc, u8 ascq)
{
	u8 xfer_len;
	u8 resp[DESC_FMT_SENSE_DATA_SIZE];

	if (scsi_status_is_good(status)) {
		hdr->status = SAM_STAT_GOOD;
		hdr->masked_status = GOOD;
		hdr->host_status = DID_OK;
		hdr->driver_status = DRIVER_OK;
		hdr->sb_len_wr = 0;
	} else {
		hdr->status = status;
		hdr->masked_status = status >> 1;
		hdr->host_status = DID_OK;
		hdr->driver_status = DRIVER_OK;

		memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
		resp[0] = DESC_FORMAT_SENSE_DATA;
		resp[1] = sense_key;
		resp[2] = asc;
		resp[3] = ascq;

		xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
		hdr->sb_len_wr = xfer_len;
		if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
			return -EFAULT;
	}

	return 0;
}

/*
 * Take a status code from a lowlevel routine, and if it was a positive NVMe
 * error code update the sense data based on it.  In either case the passed
 * in value is returned again, unless an -EFAULT from copy_to_user overrides
 * it.
 */
static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
{
	u8 status, sense_key, asc, ascq;
	int res;

	/* For non-nvme (Linux) errors, simply return the error code */
	if (nvme_sc < 0)
		return nvme_sc;

	/* Mask DNR, More, and reserved fields */
	switch (nvme_sc & 0x7FF) {
	/* Generic Command Status */
	case NVME_SC_SUCCESS:
		status = SAM_STAT_GOOD;
		sense_key = NO_SENSE;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_OPCODE:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ILLEGAL_COMMAND;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_FIELD:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_INVALID_CDB;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_DATA_XFER_ERROR:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_POWER_LOSS:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_WARNING;
		ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
		break;
	case NVME_SC_INTERNAL:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = HARDWARE_ERROR;
		asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ABORT_REQ:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ABORT_QUEUE:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_FUSED_FAIL:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_FUSED_MISSING:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_NS:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
		ascq = SCSI_ASCQ_INVALID_LUN_ID;
		break;
	case NVME_SC_LBA_RANGE:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ILLEGAL_BLOCK;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_CAP_EXCEEDED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_NS_NOT_READY:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = NOT_READY;
		asc = SCSI_ASC_LUN_NOT_READY;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;

	/* Command Specific Status */
	case NVME_SC_INVALID_FORMAT:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
		ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
		break;
	case NVME_SC_BAD_ATTRIBUTES:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_INVALID_CDB;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;

	/* Media Errors */
	case NVME_SC_WRITE_FAULT:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_READ_ERROR:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_GUARD_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
		break;
	case NVME_SC_APPTAG_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
		break;
	case NVME_SC_REFTAG_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
		break;
	case NVME_SC_COMPARE_FAILED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MISCOMPARE;
		asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ACCESS_DENIED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
		ascq = SCSI_ASCQ_INVALID_LUN_ID;
		break;

	/* Unspecified/Default */
	case NVME_SC_CMDID_CONFLICT:
	case NVME_SC_CMD_SEQ_ERROR:
	case NVME_SC_CQ_INVALID:
	case NVME_SC_QID_INVALID:
	case NVME_SC_QUEUE_SIZE:
	case NVME_SC_ABORT_LIMIT:
	case NVME_SC_ABORT_MISSING:
	case NVME_SC_ASYNC_LIMIT:
	case NVME_SC_FIRMWARE_SLOT:
	case NVME_SC_FIRMWARE_IMAGE:
	case NVME_SC_INVALID_VECTOR:
	case NVME_SC_INVALID_LOG_PAGE:
	default:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	}

	res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);
	return res ? res : nvme_sc;
}

/* INQUIRY Helper Functions */

static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_id_ns *id_ns;
	int res;
	int nvme_sc;
	int xfer_len;
	u8 resp_data_format = 0x02;
	u8 protect;
	u8 cmdque = 0x01 << 1;
	u8 fw_offset = sizeof(dev->firmware_rev);

	/* nvme ns identify - use DPS value for PROTECT field */
	nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		return res;

	if (id_ns->dps)
		protect = 0x01;
	else
		protect = 0;
	kfree(id_ns);

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[2] = VERSION_SPC_4;
	inq_response[3] = resp_data_format;	/*normaca=0 | hisup=0 */
	inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
	inq_response[5] = protect;	/* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
	inq_response[7] = cmdque;	/* wbus16=0 | sync=0 | vs=0 */
	strncpy(&inq_response[8], "NVMe    ", 8);
	strncpy(&inq_response[16], dev->model, 16);

	while (dev->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
		fw_offset--;
	fw_offset -= 4;
	strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4);

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
}

static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	int xfer_len;

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE;   /* Page Code */
	inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES;    /* Page Length */
	inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
	inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
	inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
	inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
	inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
	inq_response[9] = INQ_BDEV_LIMITS_PAGE;

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
}

static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	int xfer_len;

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
	inq_response[3] = INQ_SERIAL_NUMBER_LENGTH;    /* Page Length */
	strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
}

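/*
 * For NVMe 1.1+ controllers, return an EUI-64 based designator (preferring
 * NGUID on 1.2+ when the EUI-64 field is empty); otherwise fall back to a
 * SCSI name string built from the PCI vendor ID, model, namespace ID and
 * serial number.
 */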
static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *inq_response, int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	int res;
	int nvme_sc;
	int xfer_len;
	__be32 tmp_id = cpu_to_be32(ns->ns_id);

	memset(inq_response, 0, alloc_len);
	inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;    /* Page Code */
	if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) {
		struct nvme_id_ns *id_ns;
		void *eui;
		int len;

		nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
		res = nvme_trans_status_code(hdr, nvme_sc);
		if (res)
			return res;

		eui = id_ns->eui64;
		len = sizeof(id_ns->eui64);
		if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) {
			if (bitmap_empty(eui, len * 8)) {
				eui = id_ns->nguid;
				len = sizeof(id_ns->nguid);
			}
		}
		if (bitmap_empty(eui, len * 8)) {
			kfree(id_ns);
			goto scsi_string;
		}

		inq_response[3] = 4 + len; /* Page Length */
		/* Designation Descriptor start */
		inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
		inq_response[5] = 0x02;    /* PIV=0b | Asso=00b | Designator Type=2h */
		inq_response[6] = 0x00;    /* Rsvd */
		inq_response[7] = len;     /* Designator Length */
		memcpy(&inq_response[8], eui, len);
		kfree(id_ns);
	} else {
 scsi_string:
		if (alloc_len < 72) {
			return nvme_trans_completion(hdr,
					SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		}
		inq_response[3] = 0x48;    /* Page Length */
		/* Designation Descriptor start */
		inq_response[4] = 0x03;    /* Proto ID=0h | Code set=3h */
		inq_response[5] = 0x08;    /* PIV=0b | Asso=00b | Designator Type=8h */
		inq_response[6] = 0x00;    /* Rsvd */
		inq_response[7] = 0x44;    /* Designator Length */

		sprintf(&inq_response[8], "%04x", to_pci_dev(dev->dev)->vendor);
		memcpy(&inq_response[12], dev->model, sizeof(dev->model));
		sprintf(&inq_response[52], "%04x", tmp_id);
		memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
	}
	xfer_len = alloc_len;
	return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
}

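/*
 * Extended INQUIRY Data VPD page: spt_lut maps the namespace DPC bits
 * (supported protection types) onto the SPC-4 SPT field encoding, and the
 * GRD/APP/REF_CHK bits are set when protection information is enabled.
 */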
static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	u8 *inq_response;
	int res;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	struct nvme_id_ctrl *id_ctrl;
	struct nvme_id_ns *id_ns;
	int xfer_len;
	u8 microcode = 0x80;
	u8 spt;
	u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
	u8 grd_chk, app_chk, ref_chk, protect;
	u8 uask_sup = 0x20;
	u8 v_sup;
	u8 luiclr = 0x01;

	inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
	if (inq_response == NULL)
		return -ENOMEM;

	nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_free_inq;

	spt = spt_lut[id_ns->dpc & 0x07] << 3;
	if (id_ns->dps)
		protect = 0x01;
	else
		protect = 0;
	kfree(id_ns);

	grd_chk = protect << 2;
	app_chk = protect << 1;
	ref_chk = protect;

	nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_free_inq;

	v_sup = id_ctrl->vwc;
	kfree(id_ctrl);

	memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE;    /* Page Code */
	inq_response[2] = 0x00;    /* Page Length MSB */
	inq_response[3] = 0x3C;    /* Page Length LSB */
	inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
	inq_response[5] = uask_sup;
	inq_response[6] = v_sup;
	inq_response[7] = luiclr;
	inq_response[8] = 0;
	inq_response[9] = 0;

	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

 out_free_inq:
	kfree(inq_response);
	return res;
}

static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *inq_response, int alloc_len)
{
	__be32 max_sectors = cpu_to_be32(
		nvme_block_nr(ns, queue_max_hw_sectors(ns->queue)));
	__be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
	__be32 discard_desc_count = cpu_to_be32(0x100);

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = VPD_BLOCK_LIMITS;
	inq_response[3] = 0x3c; /* Page Length */
	memcpy(&inq_response[8], &max_sectors, sizeof(u32));
	memcpy(&inq_response[20], &max_discard, sizeof(u32));

	if (max_discard)
		memcpy(&inq_response[24], &discard_desc_count, sizeof(u32));

	return nvme_trans_copy_to_user(hdr, inq_response, 0x3c);
}

static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	u8 *inq_response;
	int res;
	int xfer_len;

	inq_response = kzalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
	if (inq_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE;    /* Page Code */
	inq_response[2] = 0x00;    /* Page Length MSB */
	inq_response[3] = 0x3C;    /* Page Length LSB */
	inq_response[4] = 0x00;    /* Medium Rotation Rate MSB */
	inq_response[5] = 0x01;    /* Medium Rotation Rate LSB */
	inq_response[6] = 0x00;    /* Form Factor */

	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	kfree(inq_response);
 out_mem:
	return res;
}

/* LOG SENSE Helper Functions */

static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	int res;
	int xfer_len;
	u8 *log_response;

	log_response = kzalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
	log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
	log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
	log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;

	xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	kfree(log_response);
 out_mem:
	return res;
}

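/*
 * The SMART / health log reports a composite temperature in Kelvin as two
 * little-endian bytes; the log pages below convert it to Celsius by
 * subtracting KELVIN_TEMP_FACTOR (273).
 */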
static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, int alloc_len)
{
	int res;
	int xfer_len;
	u8 *log_response;
	struct nvme_dev *dev = ns->dev;
	struct nvme_smart_log *smart_log;
	u8 temp_c;
	u16 temp_k;

	log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
	if (log_response == NULL)
		return -ENOMEM;

	res = nvme_get_log_page(dev, &smart_log);
	if (res < 0)
		goto out_free_response;

	if (res != NVME_SC_SUCCESS) {
		temp_c = LOG_TEMP_UNKNOWN;
	} else {
		temp_k = (smart_log->temperature[1] << 8) +
				(smart_log->temperature[0]);
		temp_c = temp_k - KELVIN_TEMP_FACTOR;
	}
	kfree(smart_log);

	log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
	/* Informational Exceptions Log Parameter 1 Start */
	/* Parameter Code=0x0000 bytes 4,5 */
	log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */
	log_response[7] = 0x04; /* PARAMETER LENGTH */
	/* Additional sense code and qualifier = 0x00 each */
	/* Use Temperature from NVMe Get Log Page, convert to C from K */
	log_response[10] = temp_c;

	xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

 out_free_response:
	kfree(log_response);
	return res;
}

static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	int res;
	int xfer_len;
	u8 *log_response;
	struct nvme_dev *dev = ns->dev;
	struct nvme_smart_log *smart_log;
	u32 feature_resp;
	u8 temp_c_cur, temp_c_thresh;
	u16 temp_k;

	log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
	if (log_response == NULL)
		return -ENOMEM;

	res = nvme_get_log_page(dev, &smart_log);
	if (res < 0)
		goto out_free_response;

	if (res != NVME_SC_SUCCESS) {
		temp_c_cur = LOG_TEMP_UNKNOWN;
	} else {
		temp_k = (smart_log->temperature[1] << 8) +
				(smart_log->temperature[0]);
		temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
	}
	kfree(smart_log);

	/* Get Features for Temp Threshold */
	res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
								&feature_resp);
	if (res != NVME_SC_SUCCESS)
		temp_c_thresh = LOG_TEMP_UNKNOWN;
	else
		temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;

	log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
	/* Temperature Log Parameter 1 (Temperature) Start */
	/* Parameter Code = 0x0000 */
	log_response[6] = 0x01;		/* Format and Linking = 01b */
	log_response[7] = 0x02;		/* Parameter Length */
	/* Use Temperature from NVMe Get Log Page, convert to C from K */
	log_response[9] = temp_c_cur;
	/* Temperature Log Parameter 2 (Reference Temperature) Start */
	log_response[11] = 0x01;	/* Parameter Code = 0x0001 */
	log_response[12] = 0x01;	/* Format and Linking = 01b */
	log_response[13] = 0x02;	/* Parameter Length */
	/* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */
	log_response[15] = temp_c_thresh;

	xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

 out_free_response:
	kfree(log_response);
	return res;
}

/* MODE SENSE Helper Functions */
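/*
 * Per SPC-4 the MODE DATA LENGTH field does not count its own bytes: with a
 * 4-byte header and 1-byte length field a 6-byte CDB reports 3 + trailing
 * bytes, and with an 8-byte header and 2-byte field a 10-byte CDB reports
 * 6 + trailing bytes (see mode_data_length in nvme_trans_mode_page_create).
 */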

static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa,
					u16 mode_data_length, u16 blk_desc_len)
{
	/* Quick check to make sure I don't stomp on my own memory... */
	if ((cdb10 && len < 8) || (!cdb10 && len < 4))
		return -EINVAL;

	if (cdb10) {
		resp[0] = (mode_data_length & 0xFF00) >> 8;
		resp[1] = (mode_data_length & 0x00FF);
		resp[3] = 0x10 /* DPOFUA */;
		resp[4] = llbaa;
		resp[5] = RESERVED_FIELD;
		resp[6] = (blk_desc_len & 0xFF00) >> 8;
		resp[7] = (blk_desc_len & 0x00FF);
	} else {
		resp[0] = (mode_data_length & 0x00FF);
		resp[2] = 0x10 /* DPOFUA */;
		resp[3] = (blk_desc_len & 0x00FF);
	}

	return 0;
}

static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
				    u8 *resp, int len, u8 llbaa)
{
	int res;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	struct nvme_id_ns *id_ns;
	u8 flbas;
	u32 lba_length;

	if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN)
		return -EINVAL;
	else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
		return -EINVAL;

	nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		return res;

	flbas = (id_ns->flbas) & 0x0F;
	lba_length = (1 << (id_ns->lbaf[flbas].ds));

	if (llbaa == 0) {
		__be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap));
		/* Byte 4 is reserved */
		__be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF);

		memcpy(resp, &tmp_cap, sizeof(u32));
		memcpy(&resp[4], &tmp_len, sizeof(u32));
	} else {
		__be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap));
		__be32 tmp_len = cpu_to_be32(lba_length);

		memcpy(resp, &tmp_cap, sizeof(u64));
		/* Bytes 8, 9, 10, 11 are reserved */
		memcpy(&resp[12], &tmp_len, sizeof(u32));
	}

	kfree(id_ns);
	return res;
}

static int nvme_trans_fill_control_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	if (len < MODE_PAGE_CONTROL_LEN)
		return -EINVAL;

	resp[0] = MODE_PAGE_CONTROL;
	resp[1] = MODE_PAGE_CONTROL_LEN_FIELD;
	resp[2] = 0x0E;		/* TST=000b, TMF_ONLY=0, DPICZ=1,
				 * D_SENSE=1, GLTSD=1, RLEC=0 */
	resp[3] = 0x12;		/* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */
	/* Byte 4:  VS=0, RAC=0, UA_INT=0, SWP=0 */
	resp[5] = 0x40;		/* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */
	/* resp[6] and [7] are obsolete, thus zero */
	resp[8] = 0xFF;		/* Busy timeout period = 0xffff */
	resp[9] = 0xFF;
	/* Bytes 10,11: Extended selftest completion time = 0x0000 */

	return 0;
}

static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *resp, int len)
{
	int res = 0;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	u32 feature_resp;
	u8 vwc;

	if (len < MODE_PAGE_CACHING_LEN)
		return -EINVAL;

	nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0,
								&feature_resp);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		return res;

	vwc = feature_resp & 0x00000001;

	resp[0] = MODE_PAGE_CACHING;
	resp[1] = MODE_PAGE_CACHING_LEN_FIELD;
	resp[2] = vwc << 2;
	return 0;
}

static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	if (len < MODE_PAGE_POW_CND_LEN)
		return -EINVAL;

	resp[0] = MODE_PAGE_POWER_CONDITION;
	resp[1] = MODE_PAGE_POW_CND_LEN_FIELD;
	/* All other bytes are zero */

	return 0;
}

static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	if (len < MODE_PAGE_INF_EXC_LEN)
		return -EINVAL;

	resp[0] = MODE_PAGE_INFO_EXCEP;
	resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD;
	resp[2] = 0x88;
	/* All other bytes are zero */

	return 0;
}

static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
				     u8 *resp, int len)
{
	int res;
	u16 mode_pages_offset_1 = 0;
	u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4;

	mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN;
	mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN;
	mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN;

	res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1],
					MODE_PAGE_CACHING_LEN);
	if (res)
		return res;
	res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2],
					MODE_PAGE_CONTROL_LEN);
	if (res)
		return res;
	res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3],
					MODE_PAGE_POW_CND_LEN);
	if (res)
		return res;
	return nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4],
					MODE_PAGE_INF_EXC_LEN);
}

static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa)
{
	if (dbd == MODE_SENSE_BLK_DESC_ENABLED) {
		/* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */
		return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT;
	} else {
		return 0;
	}
}

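/*
 * Assembles a complete MODE SENSE response: mode parameter header, optional
 * block descriptor, then the mode page(s) written by mode_page_fill_func.
 */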
static int nvme_trans_mode_page_create(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *cmd,
					u16 alloc_len, u8 cdb10,
					int (*mode_page_fill_func)
					(struct nvme_ns *,
					struct sg_io_hdr *hdr, u8 *, int),
					u16 mode_pages_tot_len)
{
	int res;
	int xfer_len;
	u8 *response;
	u8 dbd, llbaa;
	u16 resp_size;
	int mph_size;
	u16 mode_pages_offset_1;
	u16 blk_desc_len, blk_desc_offset, mode_data_length;

	dbd = (cmd[1] & MODE_SENSE_DBD_MASK) >> MODE_SENSE_DBD_SHIFT;
	llbaa = (cmd[1] & MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT;
	mph_size = cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE;

	blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa);

	resp_size = mph_size + blk_desc_len + mode_pages_tot_len;
	/* Refer spc4r34 Table 440 for calculation of Mode data Length field */
	mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len;

	blk_desc_offset = mph_size;
	mode_pages_offset_1 = blk_desc_offset + blk_desc_len;

	response = kzalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
					llbaa, mode_data_length, blk_desc_len);
	if (res)
		goto out_free;
	if (blk_desc_len > 0) {
		res = nvme_trans_fill_blk_desc(ns, hdr,
					       &response[blk_desc_offset],
					       blk_desc_len, llbaa);
		if (res)
			goto out_free;
	}
	res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1],
					mode_pages_tot_len);
	if (res)
		goto out_free;

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

 out_free:
	kfree(response);
 out_mem:
	return res;
}

/* Read Capacity Helper Functions */

static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
								u8 cdb16)
{
	u8 flbas;
	u32 lba_length;
	u64 rlba;
	u8 prot_en;
	u8 p_type_lut[4] = {0, 0, 1, 2};
	__be64 tmp_rlba;
	__be32 tmp_rlba_32;
	__be32 tmp_len;

	flbas = (id_ns->flbas) & 0x0F;
	lba_length = (1 << (id_ns->lbaf[flbas].ds));
	rlba = le64_to_cpup(&id_ns->nsze) - 1;
	(id_ns->dps) ? (prot_en = 0x01) : (prot_en = 0);

	if (!cdb16) {
		if (rlba > 0xFFFFFFFF)
			rlba = 0xFFFFFFFF;
		tmp_rlba_32 = cpu_to_be32(rlba);
		tmp_len = cpu_to_be32(lba_length);
		memcpy(response, &tmp_rlba_32, sizeof(u32));
		memcpy(&response[4], &tmp_len, sizeof(u32));
	} else {
		tmp_rlba = cpu_to_be64(rlba);
		tmp_len = cpu_to_be32(lba_length);
		memcpy(response, &tmp_rlba, sizeof(u64));
		memcpy(&response[8], &tmp_len, sizeof(u32));
		response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en;
		/* P_I_Exponent = 0x0 | LBPPBE = 0x0 */
		/* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */
		/* Bytes 16-31 - Reserved */
	}
}

/* Start Stop Unit Helper Functions */

static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
						u8 pc, u8 pcmod, u8 start)
{
	int res;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	struct nvme_id_ctrl *id_ctrl;
	int lowest_pow_st;	/* max npss = lowest power consumption */
	unsigned ps_desired = 0;

	nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		return res;

	lowest_pow_st = max(POWER_STATE_0, (int)(id_ctrl->npss - 1));
	kfree(id_ctrl);

	switch (pc) {
	case NVME_POWER_STATE_START_VALID:
		/* Action unspecified if POWER CONDITION MODIFIER != 0 */
		if (pcmod == 0 && start == 0x1)
			ps_desired = POWER_STATE_0;
		if (pcmod == 0 && start == 0x0)
			ps_desired = lowest_pow_st;
		break;
	case NVME_POWER_STATE_ACTIVE:
		/* Action unspecified if POWER CONDITION MODIFIER != 0 */
		if (pcmod == 0)
			ps_desired = POWER_STATE_0;
		break;
	case NVME_POWER_STATE_IDLE:
		/* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
		if (pcmod == 0x0)
			ps_desired = POWER_STATE_1;
		else if (pcmod == 0x1)
			ps_desired = POWER_STATE_2;
		else if (pcmod == 0x2)
			ps_desired = POWER_STATE_3;
		break;
	case NVME_POWER_STATE_STANDBY:
		/* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
		if (pcmod == 0x0)
			ps_desired = max(POWER_STATE_0, (lowest_pow_st - 2));
		else if (pcmod == 0x1)
			ps_desired = max(POWER_STATE_0, (lowest_pow_st - 1));
		break;
	case NVME_POWER_STATE_LU_CONTROL:
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}
	nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
				    NULL);
	return nvme_trans_status_code(hdr, nvme_sc);
}

static int nvme_trans_send_activate_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 buffer_id)
{
	struct nvme_command c;
	int nvme_sc;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_activate_fw;
	c.common.cdw10[0] = cpu_to_le32(buffer_id | NVME_FWACT_REPL_ACTV);

	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
	return nvme_trans_status_code(hdr, nvme_sc);
}

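/*
 * Firmware Image Download takes zero-based dword counts: NUMD is
 * (bytes / 4) - 1 and OFST is a dword offset, hence the BYTES_TO_DWORDS
 * conversions below.
 */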
static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 opcode, u32 tot_len, u32 offset,
					u8 buffer_id)
{
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;

	if (hdr->iovec_count > 0) {
		/* Assuming SGL is not allowed for this command */
		return nvme_trans_completion(hdr,
					SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST,
					SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	}

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_download_fw;
	c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
	c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);

	nvme_sc = __nvme_submit_sync_cmd(dev->admin_q, &c, NULL,
			hdr->dxferp, tot_len, NULL, 0);
	return nvme_trans_status_code(hdr, nvme_sc);
}

/* Mode Select Helper Functions */

static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
						u16 *bd_len, u8 *llbaa)
{
	if (cdb10) {
		/* 10 Byte CDB */
		*bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
			parm_list[MODE_SELECT_10_BD_OFFSET + 1];
		*llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
				MODE_SELECT_10_LLBAA_MASK;
	} else {
		/* 6 Byte CDB */
		*bd_len = parm_list[MODE_SELECT_6_BD_OFFSET];
	}
}

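/*
 * Block descriptor fields arrive big-endian on the wire; the byte shifts
 * below reassemble them into host-order values.
 */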
static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
					u16 idx, u16 bd_len, u8 llbaa)
{
	u16 bd_num;

	bd_num = bd_len / ((llbaa == 0) ?
			SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
	/* Store block descriptor info if a FORMAT UNIT comes later */
	/* TODO Saving 1st BD info; what to do if multiple BD received? */
	if (llbaa == 0) {
		/* Standard Block Descriptor - spc4r34 7.5.5.1 */
		ns->mode_select_num_blocks =
				(parm_list[idx + 1] << 16) +
				(parm_list[idx + 2] << 8) +
				(parm_list[idx + 3]);

		ns->mode_select_block_len =
				(parm_list[idx + 5] << 16) +
				(parm_list[idx + 6] << 8) +
				(parm_list[idx + 7]);
	} else {
		/* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */
		ns->mode_select_num_blocks =
				(((u64)parm_list[idx + 0]) << 56) +
				(((u64)parm_list[idx + 1]) << 48) +
				(((u64)parm_list[idx + 2]) << 40) +
				(((u64)parm_list[idx + 3]) << 32) +
				(((u64)parm_list[idx + 4]) << 24) +
				(((u64)parm_list[idx + 5]) << 16) +
				(((u64)parm_list[idx + 6]) << 8) +
				((u64)parm_list[idx + 7]);

		ns->mode_select_block_len =
				(parm_list[idx + 12] << 24) +
				(parm_list[idx + 13] << 16) +
				(parm_list[idx + 14] << 8) +
				(parm_list[idx + 15]);
	}
}

static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *mode_page, u8 page_code)
{
	int res = 0;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	unsigned dword11;

	switch (page_code) {
	case MODE_PAGE_CACHING:
		dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
		nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11,
					    0, NULL);
		res = nvme_trans_status_code(hdr, nvme_sc);
		break;
	case MODE_PAGE_CONTROL:
		break;
	case MODE_PAGE_POWER_CONDITION:
		/* Verify the OS is not trying to set timers */
		if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) {
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_PARAMETER,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			break;
		}
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

	return res;
}

static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *cmd, u16 parm_list_len, u8 pf,
					u8 sp, u8 cdb10)
{
	int res;
	u8 *parm_list;
	u16 bd_len;
	u8 llbaa = 0;
	u16 index, saved_index;
	u8 page_code;
	u16 mp_size;

	/* Get parm list from data-in/out buffer */
	parm_list = kmalloc(parm_list_len, GFP_KERNEL);
	if (parm_list == NULL) {
		res = -ENOMEM;
		goto out;
	}

	res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len);
	if (res)
		goto out_mem;

	nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa);
	index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE);

	if (bd_len != 0) {
		/* Block Descriptors present, parse */
		nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa);
		index += bd_len;
	}
	saved_index = index;

	/* Multiple mode pages may be present; iterate through all */
	/* In 1st Iteration, don't do NVME Command, only check for CDB errors */
	do {
		page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
		mp_size = parm_list[index + 1] + 2;
		if ((page_code != MODE_PAGE_CACHING) &&
		    (page_code != MODE_PAGE_CONTROL) &&
		    (page_code != MODE_PAGE_POWER_CONDITION)) {
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_CDB,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out_mem;
		}
		index += mp_size;
	} while (index < parm_list_len);

	/* In 2nd Iteration, do the NVME Commands */
	index = saved_index;
	do {
		page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
		mp_size = parm_list[index + 1] + 2;
		res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index],
								page_code);
		if (res)
			break;
		index += mp_size;
	} while (index < parm_list_len);

 out_mem:
	kfree(parm_list);
 out:
	return res;
}

/* Format Unit Helper Functions */

static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
					     struct sg_io_hdr *hdr)
{
	int res = 0;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	u8 flbas;

	/*
	 * SCSI Expects a MODE SELECT would have been issued prior to
	 * a FORMAT UNIT, and the block size and number would be used
	 * from the block descriptor in it. If a MODE SELECT had not
	 * been issued, FORMAT shall use the current values for both.
	 */

	if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
		struct nvme_id_ns *id_ns;

		nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
		res = nvme_trans_status_code(hdr, nvme_sc);
		if (res)
			return res;

		if (ns->mode_select_num_blocks == 0)
			ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap);
		if (ns->mode_select_block_len == 0) {
			flbas = (id_ns->flbas) & 0x0F;
			ns->mode_select_block_len =
						(1 << (id_ns->lbaf[flbas].ds));
		}

		kfree(id_ns);
	}

	return 0;
}

static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len,
					u8 format_prot_info, u8 *nvme_pf_code)
{
	int res;
	u8 *parm_list;
	u8 pf_usage, pf_code;

	parm_list = kmalloc(len, GFP_KERNEL);
	if (parm_list == NULL) {
		res = -ENOMEM;
		goto out;
	}
	res = nvme_trans_copy_from_user(hdr, parm_list, len);
	if (res)
		goto out_mem;

	if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] &
				FORMAT_UNIT_IMMED_MASK) != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out_mem;
	}

	if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN &&
	    (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out_mem;
	}
	pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] &
			FORMAT_UNIT_PROT_FIELD_USAGE_MASK;
	pf_code = (pf_usage << 2) | format_prot_info;
	switch (pf_code) {
	case 0:
		*nvme_pf_code = 0;
		break;
	case 2:
		*nvme_pf_code = 1;
		break;
	case 3:
		*nvme_pf_code = 2;
		break;
	case 7:
		*nvme_pf_code = 3;
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out_mem:
	kfree(parm_list);
 out:
	return res;
}

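/*
 * Format NVM cdw10 layout used here: protection information setting in
 * bits 7:5, selected LBA format index in bits 3:0.
 */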
static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
				   u8 prot_info)
{
	int res;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	struct nvme_id_ns *id_ns;
	u8 i;
	u8 flbas, nlbaf;
	u8 selected_lbaf = 0xFF;
	u32 cdw10 = 0;
	struct nvme_command c;

	/* Loop through the LBAFs in id_ns to match the required LBAF; put it in cdw10 */
	nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		return res;

	flbas = (id_ns->flbas) & 0x0F;
	nlbaf = id_ns->nlbaf;

	for (i = 0; i < nlbaf; i++) {
		if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) {
			selected_lbaf = i;
			break;
		}
	}
	if (selected_lbaf > 0x0F) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	}
	if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	}

	cdw10 |= prot_info << 5;
	cdw10 |= selected_lbaf & 0x0F;
	memset(&c, 0, sizeof(c));
	c.format.opcode = nvme_admin_format_nvm;
	c.format.nsid = cpu_to_le32(ns->ns_id);
	c.format.cdw10 = cpu_to_le32(cdw10);

	nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
	res = nvme_trans_status_code(hdr, nvme_sc);

	kfree(id_ns);
	return res;
}

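/*
 * Example: xfer_len = 1000 blocks with max_blocks = 256 yields
 * ((1000 - 1) / 256) + 1 = 4 commands.
 */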
nvme_trans_io_get_num_cmds(struct sg_io_hdr * hdr,struct nvme_trans_io_cdb * cdb_info,u32 max_blocks)1621 static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr,
1622 					struct nvme_trans_io_cdb *cdb_info,
1623 					u32 max_blocks)
1624 {
1625 	/* If using iovecs, send one nvme command per vector */
1626 	if (hdr->iovec_count > 0)
1627 		return hdr->iovec_count;
1628 	else if (cdb_info->xfer_len > max_blocks)
1629 		return ((cdb_info->xfer_len - 1) / max_blocks) + 1;
1630 	else
1631 		return 1;
1632 }
1633 
nvme_trans_io_get_control(struct nvme_ns * ns,struct nvme_trans_io_cdb * cdb_info)1634 static u16 nvme_trans_io_get_control(struct nvme_ns *ns,
1635 					struct nvme_trans_io_cdb *cdb_info)
1636 {
1637 	u16 control = 0;
1638 
1639 	/* When Protection information support is added, implement here */
1640 
1641 	if (cdb_info->fua > 0)
1642 		control |= NVME_RW_FUA;
1643 
1644 	return control;
1645 }
1646 
nvme_trans_do_nvme_io(struct nvme_ns * ns,struct sg_io_hdr * hdr,struct nvme_trans_io_cdb * cdb_info,u8 is_write)1647 static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1648 				struct nvme_trans_io_cdb *cdb_info, u8 is_write)
1649 {
1650 	int nvme_sc = NVME_SC_SUCCESS;
1651 	u32 num_cmds;
1652 	u64 unit_len;
1653 	u64 unit_num_blocks;	/* Number of blocks to xfer in each nvme cmd */
1654 	u32 retcode;
1655 	u32 i = 0;
1656 	u64 nvme_offset = 0;
1657 	void __user *next_mapping_addr;
1658 	struct nvme_command c;
1659 	u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
1660 	u16 control;
1661 	u32 max_blocks = queue_max_hw_sectors(ns->queue);
1662 
1663 	num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
1664 
1665 	/*
1666 	 * This loop handles two cases.
1667 	 * First, when an SGL is used in the form of an iovec list:
1668 	 *   - Use iov_base as the next mapping address for the nvme command.
1669 	 *   - Use iov_len as the data transfer length for the command.
1670 	 * Second, when we have a single buffer:
1671 	 *   - If it is larger than max_blocks, split it into chunks and
1672 	 *        offset each nvme command accordingly.
1673 	 */
1674 	for (i = 0; i < num_cmds; i++) {
1675 		memset(&c, 0, sizeof(c));
1676 		if (hdr->iovec_count > 0) {
1677 			struct sg_iovec sgl;
1678 
1679 			retcode = copy_from_user(&sgl, hdr->dxferp +
1680 					i * sizeof(struct sg_iovec),
1681 					sizeof(struct sg_iovec));
1682 			if (retcode)
1683 				return -EFAULT;
1684 			unit_len = sgl.iov_len;
1685 			unit_num_blocks = unit_len >> ns->lba_shift;
1686 			next_mapping_addr = sgl.iov_base;
1687 		} else {
1688 			unit_num_blocks = min((u64)max_blocks,
1689 					(cdb_info->xfer_len - nvme_offset));
1690 			unit_len = unit_num_blocks << ns->lba_shift;
1691 			next_mapping_addr = hdr->dxferp +
1692 					((1 << ns->lba_shift) * nvme_offset);
1693 		}
1694 
1695 		c.rw.opcode = opcode;
1696 		c.rw.nsid = cpu_to_le32(ns->ns_id);
1697 		c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset);
1698 		c.rw.length = cpu_to_le16(unit_num_blocks - 1);
1699 		control = nvme_trans_io_get_control(ns, cdb_info);
1700 		c.rw.control = cpu_to_le16(control);
1701 
1702 		if (get_capacity(ns->disk) - unit_num_blocks <
1703 				cdb_info->lba + nvme_offset) {
1704 			nvme_sc = NVME_SC_LBA_RANGE;
1705 			break;
1706 		}
1707 		nvme_sc = __nvme_submit_sync_cmd(ns->queue, &c, NULL,
1708 				next_mapping_addr, unit_len, NULL, 0);
1709 		if (nvme_sc)
1710 			break;
1711 
1712 		nvme_offset += unit_num_blocks;
1713 	}
1714 
1715 	return nvme_trans_status_code(hdr, nvme_sc);
1716 }
1717 
1718 
1719 /* SCSI Command Translation Functions */
1720 
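/*
 * Translate READ(6)/(10)/(12)/(16) and WRITE(6)/(10)/(12)/(16): extract
 * the LBA and transfer length from the size-specific CDB layout (a
 * transfer length of 0 in a 6-byte CDB means 256 blocks per SBC),
 * validate the byte counts against the data buffer, and hand off to
 * nvme_trans_do_nvme_io().
 */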
1721 static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write,
1722 							u8 *cmd)
1723 {
1724 	int res = 0;
1725 	struct nvme_trans_io_cdb cdb_info = { 0, };
1726 	u8 opcode = cmd[0];
1727 	u64 xfer_bytes;
1728 	u64 sum_iov_len = 0;
1729 	struct sg_iovec sgl;
1730 	int i;
1731 	size_t not_copied;
1732 
1733 	/*
1734 	 * The FUA and RDPROTECT/WRPROTECT fields do not exist in 6-byte
1735 	 * CDBs but are always in the same place in all larger CDBs.
1736 	 */
1737 	switch (opcode) {
1738 	case WRITE_6:
1739 	case READ_6:
1740 		break;
1741 	default:
1742 		cdb_info.fua = cmd[1] & 0x8;
1743 		cdb_info.prot_info = (cmd[1] & 0xe0) >> 5;
1744 		if (cdb_info.prot_info && !ns->pi_type) {
1745 			return nvme_trans_completion(hdr,
1746 					SAM_STAT_CHECK_CONDITION,
1747 					ILLEGAL_REQUEST,
1748 					SCSI_ASC_INVALID_CDB,
1749 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1750 		}
1751 	}
1752 
1753 	switch (opcode) {
1754 	case WRITE_6:
1755 	case READ_6:
1756 		cdb_info.lba = get_unaligned_be24(&cmd[1]);
1757 		cdb_info.xfer_len = cmd[4];
1758 		if (cdb_info.xfer_len == 0)
1759 			cdb_info.xfer_len = 256;
1760 		break;
1761 	case WRITE_10:
1762 	case READ_10:
1763 		cdb_info.lba = get_unaligned_be32(&cmd[2]);
1764 		cdb_info.xfer_len = get_unaligned_be16(&cmd[7]);
1765 		break;
1766 	case WRITE_12:
1767 	case READ_12:
1768 		cdb_info.lba = get_unaligned_be32(&cmd[2]);
1769 		cdb_info.xfer_len = get_unaligned_be32(&cmd[6]);
1770 		break;
1771 	case WRITE_16:
1772 	case READ_16:
1773 		cdb_info.lba = get_unaligned_be64(&cmd[2]);
1774 		cdb_info.xfer_len = get_unaligned_be32(&cmd[10]);
1775 		break;
1776 	default:
1777 		/* Will never really reach here */
1778 		res = -EIO;
1779 		goto out;
1780 	}
1781 
1782 	/* Calculate total length of transfer (in bytes) */
1783 	if (hdr->iovec_count > 0) {
1784 		for (i = 0; i < hdr->iovec_count; i++) {
1785 			not_copied = copy_from_user(&sgl, hdr->dxferp +
1786 						i * sizeof(struct sg_iovec),
1787 						sizeof(struct sg_iovec));
1788 			if (not_copied)
1789 				return -EFAULT;
1790 			sum_iov_len += sgl.iov_len;
1791 			/* IO vector sizes should be multiples of block size */
1792 			if (sgl.iov_len % (1 << ns->lba_shift) != 0) {
1793 				res = nvme_trans_completion(hdr,
1794 						SAM_STAT_CHECK_CONDITION,
1795 						ILLEGAL_REQUEST,
1796 						SCSI_ASC_INVALID_PARAMETER,
1797 						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1798 				goto out;
1799 			}
1800 		}
1801 	} else {
1802 		sum_iov_len = hdr->dxfer_len;
1803 	}
1804 
1805 	/* As per the sg ioctl HOWTO, if the lengths differ, use the lower one */
1806 	xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len);
1807 
1808 	/* If the block count and actual data buffer size don't match, error out */
1809 	if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) {
1810 		res = -EINVAL;
1811 		goto out;
1812 	}
1813 
1814 	/* Check for 0 length transfer - it is not illegal */
1815 	if (cdb_info.xfer_len == 0)
1816 		goto out;
1817 
1818 	/* Send NVMe IO Command(s) */
1819 	res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write);
1820 	if (res)
1821 		goto out;
1822 
1823  out:
1824 	return res;
1825 }
1826 
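/*
 * Translate INQUIRY.  A standard INQUIRY is served directly; with the
 * EVPD bit set, the requested VPD page is dispatched to its page
 * builder.  Unsupported pages get ILLEGAL REQUEST sense data.
 */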
1827 static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1828 							u8 *cmd)
1829 {
1830 	int res = 0;
1831 	u8 evpd;
1832 	u8 page_code;
1833 	int alloc_len;
1834 	u8 *inq_response;
1835 
1836 	evpd = cmd[1] & 0x01;
1837 	page_code = cmd[2];
1838 	alloc_len = get_unaligned_be16(&cmd[3]);
1839 
1840 	inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH),
1841 				GFP_KERNEL);
1842 	if (inq_response == NULL) {
1843 		res = -ENOMEM;
1844 		goto out_mem;
1845 	}
1846 
1847 	if (evpd == 0) {
1848 		if (page_code == INQ_STANDARD_INQUIRY_PAGE) {
1849 			res = nvme_trans_standard_inquiry_page(ns, hdr,
1850 						inq_response, alloc_len);
1851 		} else {
1852 			res = nvme_trans_completion(hdr,
1853 						SAM_STAT_CHECK_CONDITION,
1854 						ILLEGAL_REQUEST,
1855 						SCSI_ASC_INVALID_CDB,
1856 						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1857 		}
1858 	} else {
1859 		switch (page_code) {
1860 		case VPD_SUPPORTED_PAGES:
1861 			res = nvme_trans_supported_vpd_pages(ns, hdr,
1862 						inq_response, alloc_len);
1863 			break;
1864 		case VPD_SERIAL_NUMBER:
1865 			res = nvme_trans_unit_serial_page(ns, hdr, inq_response,
1866 								alloc_len);
1867 			break;
1868 		case VPD_DEVICE_IDENTIFIERS:
1869 			res = nvme_trans_device_id_page(ns, hdr, inq_response,
1870 								alloc_len);
1871 			break;
1872 		case VPD_EXTENDED_INQUIRY:
1873 			res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
1874 			break;
1875 		case VPD_BLOCK_LIMITS:
1876 			res = nvme_trans_bdev_limits_page(ns, hdr, inq_response,
1877 								alloc_len);
1878 			break;
1879 		case VPD_BLOCK_DEV_CHARACTERISTICS:
1880 			res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
1881 			break;
1882 		default:
1883 			res = nvme_trans_completion(hdr,
1884 						SAM_STAT_CHECK_CONDITION,
1885 						ILLEGAL_REQUEST,
1886 						SCSI_ASC_INVALID_CDB,
1887 						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1888 			break;
1889 		}
1890 	}
1891 	kfree(inq_response);
1892  out_mem:
1893 	return res;
1894 }
1895 
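/*
 * Translate LOG SENSE.  Only cumulative-values requests with the SP bit
 * clear are accepted, and only the supported-pages, informational
 * exceptions, and temperature pages are implemented.
 */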
1896 static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1897 							u8 *cmd)
1898 {
1899 	int res;
1900 	u16 alloc_len;
1901 	u8 pc;
1902 	u8 page_code;
1903 
1904 	if (cmd[1] != LOG_SENSE_CDB_SP_NOT_ENABLED) {
1905 		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1906 					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1907 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1908 		goto out;
1909 	}
1910 
1911 	page_code = cmd[2] & LOG_SENSE_CDB_PAGE_CODE_MASK;
1912 	pc = (cmd[2] & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT;
1913 	if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) {
1914 		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1915 					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1916 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1917 		goto out;
1918 	}
1919 	alloc_len = get_unaligned_be16(&cmd[7]);
1920 	switch (page_code) {
1921 	case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE:
1922 		res = nvme_trans_log_supp_pages(ns, hdr, alloc_len);
1923 		break;
1924 	case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE:
1925 		res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len);
1926 		break;
1927 	case LOG_PAGE_TEMPERATURE_PAGE:
1928 		res = nvme_trans_log_temperature(ns, hdr, alloc_len);
1929 		break;
1930 	default:
1931 		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1932 					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1933 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1934 		break;
1935 	}
1936 
1937  out:
1938 	return res;
1939 }
1940 
1941 static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1942 							u8 *cmd)
1943 {
1944 	u8 cdb10 = 0;
1945 	u16 parm_list_len;
1946 	u8 page_format;
1947 	u8 save_pages;
1948 
1949 	page_format = cmd[1] & MODE_SELECT_CDB_PAGE_FORMAT_MASK;
1950 	save_pages = cmd[1] & MODE_SELECT_CDB_SAVE_PAGES_MASK;
1951 
1952 	if (cmd[0] == MODE_SELECT) {
1953 		parm_list_len = cmd[4];
1954 	} else {
1955 		parm_list_len = cmd[7];
1956 		cdb10 = 1;
1957 	}
1958 
1959 	if (parm_list_len != 0) {
1960 		/*
1961 		 * According to SPC-4 r24, a parameter list length field of 0
1962 		 * shall not be considered an error
1963 		 */
1964 		return nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len,
1965 						page_format, save_pages, cdb10);
1966 	}
1967 
1968 	return 0;
1969 }
1970 
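/*
 * Translate MODE SENSE(6)/(10).  Only current values are reported; the
 * requested mode page (caching, control, power condition, informational
 * exceptions, or all pages) is built by the matching fill helper.
 */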
1971 static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1972 							u8 *cmd)
1973 {
1974 	int res = 0;
1975 	u16 alloc_len;
1976 	u8 cdb10 = 0;
1977 
1978 	if (cmd[0] == MODE_SENSE) {
1979 		alloc_len = cmd[4];
1980 	} else {
1981 		alloc_len = get_unaligned_be16(&cmd[7]);
1982 		cdb10 = 1;
1983 	}
1984 
1985 	if ((cmd[2] & MODE_SENSE_PAGE_CONTROL_MASK) !=
1986 			MODE_SENSE_PC_CURRENT_VALUES) {
1987 		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1988 					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1989 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1990 		goto out;
1991 	}
1992 
1993 	switch (cmd[2] & MODE_SENSE_PAGE_CODE_MASK) {
1994 	case MODE_PAGE_CACHING:
1995 		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
1996 						cdb10,
1997 						&nvme_trans_fill_caching_page,
1998 						MODE_PAGE_CACHING_LEN);
1999 		break;
2000 	case MODE_PAGE_CONTROL:
2001 		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2002 						cdb10,
2003 						&nvme_trans_fill_control_page,
2004 						MODE_PAGE_CONTROL_LEN);
2005 		break;
2006 	case MODE_PAGE_POWER_CONDITION:
2007 		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2008 						cdb10,
2009 						&nvme_trans_fill_pow_cnd_page,
2010 						MODE_PAGE_POW_CND_LEN);
2011 		break;
2012 	case MODE_PAGE_INFO_EXCEP:
2013 		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2014 						cdb10,
2015 						&nvme_trans_fill_inf_exc_page,
2016 						MODE_PAGE_INF_EXC_LEN);
2017 		break;
2018 	case MODE_PAGE_RETURN_ALL:
2019 		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2020 						cdb10,
2021 						&nvme_trans_fill_all_pages,
2022 						MODE_PAGE_ALL_LEN);
2023 		break;
2024 	default:
2025 		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2026 					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2027 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2028 		break;
2029 	}
2030 
2031  out:
2032 	return res;
2033 }
2034 
2035 static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2036 							u8 *cmd, u8 cdb16)
2037 {
2038 	int res;
2039 	int nvme_sc;
2040 	u32 alloc_len;
2041 	u32 resp_size;
2042 	u32 xfer_len;
2043 	struct nvme_dev *dev = ns->dev;
2044 	struct nvme_id_ns *id_ns;
2045 	u8 *response;
2046 
2047 	if (cdb16) {
2048 		alloc_len = get_unaligned_be32(&cmd[10]);
2049 		resp_size = READ_CAP_16_RESP_SIZE;
2050 	} else {
2051 		alloc_len = READ_CAP_10_RESP_SIZE;
2052 		resp_size = READ_CAP_10_RESP_SIZE;
2053 	}
2054 
2055 	nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
2056 	res = nvme_trans_status_code(hdr, nvme_sc);
2057 	if (res)
2058 		return res;
2059 
2060 	response = kzalloc(resp_size, GFP_KERNEL);
2061 	if (response == NULL) {
2062 		res = -ENOMEM;
2063 		goto out_free_id;
2064 	}
2065 	nvme_trans_fill_read_cap(response, id_ns, cdb16);
2066 
2067 	xfer_len = min(alloc_len, resp_size);
2068 	res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2069 
2070 	kfree(response);
2071  out_free_id:
2072 	kfree(id_ns);
2073 	return res;
2074 }
2075 
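/*
 * Translate REPORT LUNS by emitting one 8-byte LUN entry per namespace,
 * using the namespace count (NN) from Identify Controller.  The first
 * four bytes of the header carry the LUN list length in bytes.
 */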
2076 static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2077 							u8 *cmd)
2078 {
2079 	int res;
2080 	int nvme_sc;
2081 	u32 alloc_len, xfer_len, resp_size;
2082 	u8 *response;
2083 	struct nvme_dev *dev = ns->dev;
2084 	struct nvme_id_ctrl *id_ctrl;
2085 	u32 ll_length, lun_id;
2086 	u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
2087 	__be32 tmp_len;
2088 
2089 	switch (cmd[2]) {
2090 	default:
2091 		return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2092 					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2093 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2094 	case ALL_LUNS_RETURNED:
2095 	case ALL_WELL_KNOWN_LUNS_RETURNED:
2096 	case RESTRICTED_LUNS_RETURNED:
2097 		nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
2098 		res = nvme_trans_status_code(hdr, nvme_sc);
2099 		if (res)
2100 			return res;
2101 
2102 		ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
2103 		resp_size = ll_length + LUN_DATA_HEADER_SIZE;
2104 
2105 		alloc_len = get_unaligned_be32(&cmd[6]);
2106 		if (alloc_len < resp_size) {
2107 			res = nvme_trans_completion(hdr,
2108 					SAM_STAT_CHECK_CONDITION,
2109 					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2110 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2111 			goto out_free_id;
2112 		}
2113 
2114 		response = kzalloc(resp_size, GFP_KERNEL);
2115 		if (response == NULL) {
2116 			res = -ENOMEM;
2117 			goto out_free_id;
2118 		}
2119 
2120 		/* The first LUN ID will always be 0 per the SAM spec */
2121 		for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
2122 			/*
2123 			 * Set the LUN Id and then increment to the next LUN
2124 			 * location in the parameter data.
2125 			 */
2126 			__be64 tmp_id = cpu_to_be64(lun_id);
2127 			memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
2128 			lun_id_offset += LUN_ENTRY_SIZE;
2129 		}
2130 		tmp_len = cpu_to_be32(ll_length);
2131 		memcpy(response, &tmp_len, sizeof(u32));
2132 	}
2133 
2134 	xfer_len = min(alloc_len, resp_size);
2135 	res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2136 
2137 	kfree(response);
2138  out_free_id:
2139 	kfree(id_ctrl);
2140 	return res;
2141 }
2142 
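/*
 * Translate REQUEST SENSE.  No sense data is outstanding, so a NO SENSE
 * response is synthesized in either descriptor or fixed format, as
 * selected by the DESC bit in the CDB.
 */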
2143 static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2144 							u8 *cmd)
2145 {
2146 	int res;
2147 	u8 alloc_len, xfer_len, resp_size;
2148 	u8 desc_format;
2149 	u8 *response;
2150 
2151 	desc_format = cmd[1] & 0x01;
2152 	alloc_len = cmd[4];
2153 
2154 	resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
2155 					(FIXED_FMT_SENSE_DATA_SIZE));
2156 	response = kzalloc(resp_size, GFP_KERNEL);
2157 	if (response == NULL) {
2158 		res = -ENOMEM;
2159 		goto out;
2160 	}
2161 
2162 	if (desc_format) {
2163 		/* Descriptor Format Sense Data */
2164 		response[0] = DESC_FORMAT_SENSE_DATA;
2165 		response[1] = NO_SENSE;
2166 		/* TODO How is LOW POWER CONDITION ON handled? (byte 2) */
2167 		response[2] = SCSI_ASC_NO_SENSE;
2168 		response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2169 		/* SDAT_OVFL = 0 | Additional Sense Length = 0 */
2170 	} else {
2171 		/* Fixed Format Sense Data */
2172 		response[0] = FIXED_SENSE_DATA;
2173 		/* Byte 1 = Obsolete */
2174 		response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */
2175 		/* Bytes 3-6 - Information - set to zero */
2176 		response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
2177 		/* Bytes 8-11 - Cmd Specific Information - set to zero */
2178 		response[12] = SCSI_ASC_NO_SENSE;
2179 		response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2180 		/* Byte 14 = Field Replaceable Unit Code = 0 */
2181 		/* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */
2182 	}
2183 
2184 	xfer_len = min(alloc_len, resp_size);
2185 	res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2186 
2187 	kfree(response);
2188  out:
2189 	return res;
2190 }
2191 
2192 static int nvme_trans_security_protocol(struct nvme_ns *ns,
2193 					struct sg_io_hdr *hdr,
2194 					u8 *cmd)
2195 {
2196 	return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2197 				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
2198 				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2199 }
2200 
2201 static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
2202 					struct sg_io_hdr *hdr)
2203 {
2204 	int nvme_sc;
2205 	struct nvme_command c;
2206 
2207 	memset(&c, 0, sizeof(c));
2208 	c.common.opcode = nvme_cmd_flush;
2209 	c.common.nsid = cpu_to_le32(ns->ns_id);
2210 
2211 	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
2212 	return nvme_trans_status_code(hdr, nvme_sc);
2213 }
2214 
2215 static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2216 							u8 *cmd)
2217 {
2218 	u8 immed, pcmod, pc, no_flush, start;
2219 
2220 	immed = cmd[1] & 0x01;
2221 	pcmod = cmd[3] & 0x0f;
2222 	pc = (cmd[4] & 0xf0) >> 4;
2223 	no_flush = cmd[4] & 0x04;
2224 	start = cmd[4] & 0x01;
2225 
2226 	if (immed != 0) {
2227 		return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2228 					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2229 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2230 	} else {
2231 		if (no_flush == 0) {
2232 			/* Issue NVME FLUSH command prior to START STOP UNIT */
2233 			int res = nvme_trans_synchronize_cache(ns, hdr);
2234 			if (res)
2235 				return res;
2236 		}
2237 		/* Setup the expected power state transition */
2238 		return nvme_trans_power_state(ns, hdr, pc, pcmod, start);
2239 	}
2240 }
2241 
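/*
 * Translate FORMAT UNIT.  The protection information request is taken
 * from the parameter list header when one is supplied, any previously
 * downloaded firmware image is activated, and the block size and count
 * are validated before Format NVM is sent.
 */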
2242 static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2243 							u8 *cmd)
2244 {
2245 	int res;
2246 	u8 parm_hdr_len = 0;
2247 	u8 nvme_pf_code = 0;
2248 	u8 format_prot_info, long_list, format_data;
2249 
2250 	format_prot_info = (cmd[1] & 0xc0) >> 6;
2251 	long_list = cmd[1] & 0x20;
2252 	format_data = cmd[1] & 0x10;
2253 
2254 	if (format_data != 0) {
2255 		if (format_prot_info != 0) {
2256 			if (long_list == 0)
2257 				parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
2258 			else
2259 				parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
2260 		}
2261 	} else if (format_data == 0 && format_prot_info != 0) {
2262 		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2263 					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2264 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2265 		goto out;
2266 	}
2267 
2268 	/* Get parm header from data-in/out buffer */
2269 	/*
2270 	 * According to the translation spec, the only fields in the parameter
2271 	 * list we are concerned with are in the header. So allocate only that.
2272 	 */
2273 	if (parm_hdr_len > 0) {
2274 		res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
2275 					format_prot_info, &nvme_pf_code);
2276 		if (res)
2277 			goto out;
2278 	}
2279 
2280 	/* Attempt to activate any previously downloaded firmware image */
2281 	res = nvme_trans_send_activate_fw_cmd(ns, hdr, 0);
2282 
2283 	/* Determine Block size and count and send format command */
2284 	res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
2285 	if (res)
2286 		goto out;
2287 
2288 	res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);
2289 
2290  out:
2291 	return res;
2292 }
2293 
2294 static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
2295 					struct sg_io_hdr *hdr,
2296 					u8 *cmd)
2297 {
2298 	struct nvme_dev *dev = ns->dev;
2299 
2300 	if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY))
2301 		return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2302 					    NOT_READY, SCSI_ASC_LUN_NOT_READY,
2303 					    SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2304 	else
2305 		return nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);
2306 }
2307 
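/*
 * Translate WRITE BUFFER into firmware management: the download and
 * activate modes map onto NVMe Firmware Image Download and Firmware
 * Activate commands, with the buffer ID selecting the firmware slot.
 */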
2308 static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2309 							u8 *cmd)
2310 {
2311 	int res = 0;
2312 	u32 buffer_offset, parm_list_length;
2313 	u8 buffer_id, mode;
2314 
2315 	parm_list_length = get_unaligned_be24(&cmd[6]);
2316 	if (parm_list_length % BYTES_TO_DWORDS != 0) {
2317 		/* NVMe expects the firmware file to be a whole number of dwords */
2318 		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2319 					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2320 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2321 		goto out;
2322 	}
2323 	buffer_id = cmd[2];
2324 	if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
2325 		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2326 					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2327 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2328 		goto out;
2329 	}
2330 	mode = cmd[1] & 0x1f;
2331 	buffer_offset = get_unaligned_be24(&cmd[3]);
2332 
2333 	switch (mode) {
2334 	case DOWNLOAD_SAVE_ACTIVATE:
2335 		res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw,
2336 						parm_list_length, buffer_offset,
2337 						buffer_id);
2338 		if (res)
2339 			goto out;
2340 		res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id);
2341 		break;
2342 	case DOWNLOAD_SAVE_DEFER_ACTIVATE:
2343 		res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw,
2344 						parm_list_length, buffer_offset,
2345 						buffer_id);
2346 		break;
2347 	case ACTIVATE_DEFERRED_MICROCODE:
2348 		res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id);
2349 		break;
2350 	default:
2351 		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2352 					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2353 					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2354 		break;
2355 	}
2356 
2357  out:
2358 	return res;
2359 }
2360 
2361 struct scsi_unmap_blk_desc {
2362 	__be64	slba;
2363 	__be32	nlb;
2364 	u32	resv;
2365 };
2366 
2367 struct scsi_unmap_parm_list {
2368 	__be16	unmap_data_len;
2369 	__be16	unmap_blk_desc_data_len;
2370 	u32	resv;
2371 	struct scsi_unmap_blk_desc desc[0];
2372 };
2373 
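/*
 * Translate UNMAP into Dataset Management with the deallocate
 * attribute.  Each 16-byte block descriptor in the parameter list
 * (hence the shift by 4 when computing ndesc) becomes one DSM range,
 * capped at the 256 ranges a single command can carry.
 */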
2374 static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2375 							u8 *cmd)
2376 {
2377 	struct scsi_unmap_parm_list *plist;
2378 	struct nvme_dsm_range *range;
2379 	struct nvme_command c;
2380 	int i, nvme_sc, res;
2381 	u16 ndesc, list_len;
2382 
2383 	list_len = get_unaligned_be16(&cmd[7]);
2384 	if (!list_len)
2385 		return -EINVAL;
2386 
2387 	plist = kmalloc(list_len, GFP_KERNEL);
2388 	if (!plist)
2389 		return -ENOMEM;
2390 
2391 	res = nvme_trans_copy_from_user(hdr, plist, list_len);
2392 	if (res)
2393 		goto out;
2394 
2395 	ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4;
2396 	if (!ndesc || ndesc > 256) {
2397 		res = -EINVAL;
2398 		goto out;
2399 	}
2400 
2401 	range = kcalloc(ndesc, sizeof(*range), GFP_KERNEL);
2402 	if (!range) {
2403 		res = -ENOMEM;
2404 		goto out;
2405 	}
2406 
2407 	for (i = 0; i < ndesc; i++) {
2408 		range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb));
2409 		range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba));
2410 		range[i].cattr = 0;
2411 	}
2412 
2413 	memset(&c, 0, sizeof(c));
2414 	c.dsm.opcode = nvme_cmd_dsm;
2415 	c.dsm.nsid = cpu_to_le32(ns->ns_id);
2416 	c.dsm.nr = cpu_to_le32(ndesc - 1);
2417 	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
2418 
2419 	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, range,
2420 			ndesc * sizeof(*range));
2421 	res = nvme_trans_status_code(hdr, nvme_sc);
2422 
2423 	kfree(range);
2424  out:
2425 	kfree(plist);
2426 	return res;
2427 }
2428 
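/*
 * Central dispatch for the translation layer: copy the CDB in from user
 * space, prime the header with GOOD status, and route the opcode to its
 * handler.  Unrecognized opcodes fail with ILLEGAL REQUEST sense data.
 */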
2429 static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
2430 {
2431 	u8 cmd[BLK_MAX_CDB];
2432 	int retcode;
2433 	unsigned int opcode;
2434 
2435 	if (hdr->cmdp == NULL)
2436 		return -EMSGSIZE;
2437 	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
2438 		return -EFAULT;
2439 
2440 	/*
2441 	 * Prime the hdr with good status for scsi commands that don't require
2442 	 * an nvme command for translation.
2443 	 */
2444 	retcode = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
2445 	if (retcode)
2446 		return retcode;
2447 
2448 	opcode = cmd[0];
2449 
2450 	switch (opcode) {
2451 	case READ_6:
2452 	case READ_10:
2453 	case READ_12:
2454 	case READ_16:
2455 		retcode = nvme_trans_io(ns, hdr, 0, cmd);
2456 		break;
2457 	case WRITE_6:
2458 	case WRITE_10:
2459 	case WRITE_12:
2460 	case WRITE_16:
2461 		retcode = nvme_trans_io(ns, hdr, 1, cmd);
2462 		break;
2463 	case INQUIRY:
2464 		retcode = nvme_trans_inquiry(ns, hdr, cmd);
2465 		break;
2466 	case LOG_SENSE:
2467 		retcode = nvme_trans_log_sense(ns, hdr, cmd);
2468 		break;
2469 	case MODE_SELECT:
2470 	case MODE_SELECT_10:
2471 		retcode = nvme_trans_mode_select(ns, hdr, cmd);
2472 		break;
2473 	case MODE_SENSE:
2474 	case MODE_SENSE_10:
2475 		retcode = nvme_trans_mode_sense(ns, hdr, cmd);
2476 		break;
2477 	case READ_CAPACITY:
2478 		retcode = nvme_trans_read_capacity(ns, hdr, cmd, 0);
2479 		break;
2480 	case SERVICE_ACTION_IN_16:
2481 		switch (cmd[1]) {
2482 		case SAI_READ_CAPACITY_16:
2483 			retcode = nvme_trans_read_capacity(ns, hdr, cmd, 1);
2484 			break;
2485 		default:
2486 			goto out;
2487 		}
2488 		break;
2489 	case REPORT_LUNS:
2490 		retcode = nvme_trans_report_luns(ns, hdr, cmd);
2491 		break;
2492 	case REQUEST_SENSE:
2493 		retcode = nvme_trans_request_sense(ns, hdr, cmd);
2494 		break;
2495 	case SECURITY_PROTOCOL_IN:
2496 	case SECURITY_PROTOCOL_OUT:
2497 		retcode = nvme_trans_security_protocol(ns, hdr, cmd);
2498 		break;
2499 	case START_STOP:
2500 		retcode = nvme_trans_start_stop(ns, hdr, cmd);
2501 		break;
2502 	case SYNCHRONIZE_CACHE:
2503 		retcode = nvme_trans_synchronize_cache(ns, hdr);
2504 		break;
2505 	case FORMAT_UNIT:
2506 		retcode = nvme_trans_format_unit(ns, hdr, cmd);
2507 		break;
2508 	case TEST_UNIT_READY:
2509 		retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
2510 		break;
2511 	case WRITE_BUFFER:
2512 		retcode = nvme_trans_write_buffer(ns, hdr, cmd);
2513 		break;
2514 	case UNMAP:
2515 		retcode = nvme_trans_unmap(ns, hdr, cmd);
2516 		break;
2517 	default:
2518  out:
2519 		retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2520 				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
2521 				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2522 		break;
2523 	}
2524 	return retcode;
2525 }
2526 
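/*
 * SG_IO ioctl entry point.  The sg_io_hdr is copied in and validated
 * (CAP_SYS_ADMIN, interface 'S', CDB length within BLK_MAX_CDB) before
 * translation; a positive translation result is an NVMe status already
 * reflected in the header's sense data.
 */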
2527 int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
2528 {
2529 	struct sg_io_hdr hdr;
2530 	int retcode;
2531 
2532 	if (!capable(CAP_SYS_ADMIN))
2533 		return -EACCES;
2534 	if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
2535 		return -EFAULT;
2536 	if (hdr.interface_id != 'S')
2537 		return -EINVAL;
2538 	if (hdr.cmd_len > BLK_MAX_CDB)
2539 		return -EINVAL;
2540 
2541 	/*
2542 	 * A positive return code means an NVMe status, which has been
2543 	 * translated to sense data.
2544 	 */
2545 	retcode = nvme_scsi_translate(ns, &hdr);
2546 	if (retcode < 0)
2547 		return retcode;
2548 	if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0)
2549 		return -EFAULT;
2550 	return 0;
2551 }
2552 
2553 int nvme_sg_get_version_num(int __user *ip)
2554 {
2555 	return put_user(sg_version_num, ip);
2556 }
2557