/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"

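/*
 * Emulate READ CAPACITY (10): return the last LBA and the logical block
 * size in an 8-byte big-endian parameter buffer.  Devices larger than
 * 0xffffffff blocks report 0xffffffff, directing the initiator to issue
 * READ CAPACITY (16) instead.
 */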
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

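/*
 * Emulate READ CAPACITY (16): return the full 64-bit last LBA and block
 * size, and advertise thin provisioning via the TPE bit when UNMAP
 * (emulate_tpu) or WRITE SAME w/ UNMAP (emulate_tpws) emulation is
 * enabled.
 */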
static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] = 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

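/*
 * Return the number of blocks a WRITE_SAME variant covers, taken from the
 * NUMBER OF LOGICAL BLOCKS field of the WRITE_SAME, WRITE_SAME_16 or
 * WRITE_SAME_32 CDB.  A value of zero means "from the starting LBA through
 * the last block on the device".
 */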
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when non zero is supplied, otherwise calculate
	 * the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

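/* Complete the command with GOOD status without doing any work. */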
static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

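/* Convert a sector count into a byte count for this device's block size. */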
static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

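/*
 * Verify that the range implied by the command's LBA and data length does
 * not run past the last block of the backend device.
 */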
static int sbc_check_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long long end_lba;
	u32 sectors;

	sectors = cmd->data_length / dev->dev_attrib.block_size;
	end_lba = dev->transport->get_blocks(dev) + 1;

	if (cmd->t_task_lba + sectors > end_lba) {
		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
			cmd->t_task_lba, sectors, end_lba);
		return -EINVAL;
	}

	return 0;
}

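/*
 * Helpers for decoding the big-endian TRANSFER LENGTH and LOGICAL BLOCK
 * ADDRESS fields from the various fixed- and variable-length CDB formats.
 */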
static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

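/*
 * Common WRITE_SAME setup: reject unsupported PBDATA/LBDATA bits, enforce
 * max_write_same_len, and select the backend's WRITE SAME or UNMAP-based
 * discard callback depending on the CDB's UNMAP bit (byte 1, bit 3).
 */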
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

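/*
 * XDWRITEREAD completion callback: XOR the data-out payload with the
 * blocks read from the media, placing the result in the BIDI data-in
 * buffer per sbc3r22 section 5.48.
 */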
static void xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	int i;
	int count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr)
			goto out;

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
}

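/*
 * Main SBC CDB parsing entry point for backend drivers: decode the LBA and
 * transfer length from the CDB, wire cmd->execute_cmd to the appropriate
 * callback from the backend-provided struct sbc_ops, validate the request
 * against device limits, and hand the expected data size to
 * target_cmd_size_check().
 *
 * Typical backend usage (a minimal sketch; the "foo" names are
 * hypothetical, but backends such as IBLOCK and FILEIO follow this
 * pattern):
 *
 *	static struct sbc_ops foo_sbc_ops = {
 *		.execute_rw		= foo_execute_rw,
 *		.execute_sync_cache	= foo_execute_sync_cache,
 *	};
 *
 *	static sense_reason_t foo_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return sbc_parse_cdb(cmd, &foo_sbc_ops);
 *	}
 */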
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = ops->execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_cmd = ops->execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_cmd = ops->execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (!ops->execute_sync_cache) {
			size = 0;
			cmd->execute_cmd = sbc_emulate_noop;
			break;
		}

		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}

		size = sbc_get_size(cmd, sectors);

		/*
		 * Check to ensure that LBA + Range does not extend past the
		 * end of device for IBLOCK and FILEIO ->do_sync_cache()
		 * backend calls
		 */
		if (cmd->t_task_lba || sectors) {
			if (sbc_check_valid_sectors(cmd) < 0)
				return TCM_ADDRESS_OUT_OF_RANGE;
		}
		cmd->execute_cmd = ops->execute_sync_cache;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		if (sectors > dev->dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.fabric_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sectors > dev->dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				dev->dev_attrib.hw_max_sectors);
			return TCM_INVALID_CDB_FIELD;
		}

		end_lba = dev->transport->get_blocks(dev) + 1;
		if (cmd->t_task_lba + sectors > end_lba) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

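/* SBC backends always present themselves as TYPE_DISK. */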
u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

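/*
 * Shared UNMAP parameter list parsing: validate the block descriptor list
 * against max_unmap_block_desc_count and max_unmap_lba_count, then call
 * the backend-supplied do_unmap_fn() for each LBA/range descriptor.
 *
 * Typical usage from a backend's execute_unmap callback (a minimal
 * sketch; the "foo" names are hypothetical):
 *
 *	static sense_reason_t foo_do_unmap(struct se_cmd *cmd, void *priv,
 *					   sector_t lba, sector_t nolb)
 *	{
 *		// discard blocks [lba, lba + nolb) on the backing store
 *		return 0;
 *	}
 *
 *	static sense_reason_t foo_execute_unmap(struct se_cmd *cmd)
 *	{
 *		return sbc_execute_unmap(cmd, foo_do_unmap, NULL);
 *	}
 */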
sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
				 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);