1 /*
2  * osd_initiator - Main body of the osd initiator library.
3  *
4  * Note: The file does not contain the advanced security functionality which
5  * is only needed by the security_manager's initiators.
6  *
7  * Copyright (C) 2008 Panasas Inc.  All rights reserved.
8  *
9  * Authors:
10  *   Boaz Harrosh <bharrosh@panasas.com>
11  *   Benny Halevy <bhalevy@panasas.com>
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License version 2
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  *
20  *  1. Redistributions of source code must retain the above copyright
21  *     notice, this list of conditions and the following disclaimer.
22  *  2. Redistributions in binary form must reproduce the above copyright
23  *     notice, this list of conditions and the following disclaimer in the
24  *     documentation and/or other materials provided with the distribution.
25  *  3. Neither the name of the Panasas company nor the names of its
26  *     contributors may be used to endorse or promote products derived
27  *     from this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
30  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
31  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
32  * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
37  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
38  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
39  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  */
41 
42 #include <linux/slab.h>
43 #include <linux/module.h>
44 
45 #include <scsi/osd_initiator.h>
46 #include <scsi/osd_sec.h>
47 #include <scsi/osd_attributes.h>
48 #include <scsi/osd_sense.h>
49 
50 #include <scsi/scsi_device.h>
51 
52 #include "osd_debug.h"
53 
54 #ifndef __unused
55 #    define __unused			__attribute__((unused))
56 #endif
57 
58 enum { OSD_REQ_RETRIES = 1 };
59 
60 MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
61 MODULE_DESCRIPTION("open-osd initiator library libosd.ko");
62 MODULE_LICENSE("GPL");
63 
64 static inline void build_test(void)
65 {
66 	/* structures were not packed */
67 	BUILD_BUG_ON(sizeof(struct osd_capability) != OSD_CAP_LEN);
68 	BUILD_BUG_ON(sizeof(struct osdv2_cdb) != OSD_TOTAL_CDB_LEN);
69 	BUILD_BUG_ON(sizeof(struct osdv1_cdb) != OSDv1_TOTAL_CDB_LEN);
70 }
71 
72 static const char *_osd_ver_desc(struct osd_request *or)
73 {
74 	return osd_req_is_ver1(or) ? "OSD1" : "OSD2";
75 }
76 
77 #define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len)
78 
79 static int _osd_get_print_system_info(struct osd_dev *od,
80 	void *caps, struct osd_dev_info *odi)
81 {
82 	struct osd_request *or;
83 	struct osd_attr get_attrs[] = {
84 		ATTR_DEF_RI(OSD_ATTR_RI_VENDOR_IDENTIFICATION, 8),
85 		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_IDENTIFICATION, 16),
86 		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_MODEL, 32),
87 		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_REVISION_LEVEL, 4),
88 		ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER, 64 /*variable*/),
89 		ATTR_DEF_RI(OSD_ATTR_RI_OSD_NAME, 64 /*variable*/),
90 		ATTR_DEF_RI(OSD_ATTR_RI_TOTAL_CAPACITY, 8),
91 		ATTR_DEF_RI(OSD_ATTR_RI_USED_CAPACITY, 8),
92 		ATTR_DEF_RI(OSD_ATTR_RI_NUMBER_OF_PARTITIONS, 8),
93 		ATTR_DEF_RI(OSD_ATTR_RI_CLOCK, 6),
94 		/* IBM-OSD-SIM has a bug with this one; put it last */
95 		ATTR_DEF_RI(OSD_ATTR_RI_OSD_SYSTEM_ID, 20),
96 	};
97 	void *iter = NULL, *pFirst;
98 	int nelem = ARRAY_SIZE(get_attrs), a = 0;
99 	int ret;
100 
101 	or = osd_start_request(od, GFP_KERNEL);
102 	if (!or)
103 		return -ENOMEM;
104 
105 	/* get attrs */
106 	osd_req_get_attributes(or, &osd_root_object);
107 	osd_req_add_get_attr_list(or, get_attrs, ARRAY_SIZE(get_attrs));
108 
109 	ret = osd_finalize_request(or, 0, caps, NULL);
110 	if (ret)
111 		goto out;
112 
113 	ret = osd_execute_request(or);
114 	if (ret) {
115 		OSD_ERR("Failed to detect %s => %d\n", _osd_ver_desc(or), ret);
116 		goto out;
117 	}
118 
119 	osd_req_decode_get_attr_list(or, get_attrs, &nelem, &iter);
120 
121 	OSD_INFO("Detected %s device\n",
122 		_osd_ver_desc(or));
123 
124 	pFirst = get_attrs[a++].val_ptr;
125 	OSD_INFO("VENDOR_IDENTIFICATION  [%s]\n",
126 		(char *)pFirst);
127 
128 	pFirst = get_attrs[a++].val_ptr;
129 	OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
130 		(char *)pFirst);
131 
132 	pFirst = get_attrs[a++].val_ptr;
133 	OSD_INFO("PRODUCT_MODEL          [%s]\n",
134 		(char *)pFirst);
135 
136 	pFirst = get_attrs[a++].val_ptr;
137 	OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
138 		pFirst ? get_unaligned_be32(pFirst) : ~0U);
139 
140 	pFirst = get_attrs[a++].val_ptr;
141 	OSD_INFO("PRODUCT_SERIAL_NUMBER  [%s]\n",
142 		(char *)pFirst);
143 
144 	odi->osdname_len = get_attrs[a].len;
145 	/* Avoid NULL for memcmp optimization; 0-length is good enough */
146 	odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
147 	if (!odi->osdname) {
148 		ret = -ENOMEM;
149 		goto out;
150 	}
151 	if (odi->osdname_len)
152 		memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
153 	OSD_INFO("OSD_NAME               [%s]\n", odi->osdname);
154 	a++;
155 
156 	pFirst = get_attrs[a++].val_ptr;
157 	OSD_INFO("TOTAL_CAPACITY         [0x%llx]\n",
158 		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
159 
160 	pFirst = get_attrs[a++].val_ptr;
161 	OSD_INFO("USED_CAPACITY          [0x%llx]\n",
162 		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
163 
164 	pFirst = get_attrs[a++].val_ptr;
165 	OSD_INFO("NUMBER_OF_PARTITIONS   [%llu]\n",
166 		pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
167 
168 	if (a >= nelem)
169 		goto out;
170 
171 	/* FIXME: Where are the time utilities */
172 	pFirst = get_attrs[a++].val_ptr;
173 	OSD_INFO("CLOCK                  [0x%02x%02x%02x%02x%02x%02x]\n",
174 		((char *)pFirst)[0], ((char *)pFirst)[1],
175 		((char *)pFirst)[2], ((char *)pFirst)[3],
176 		((char *)pFirst)[4], ((char *)pFirst)[5]);
177 
178 	if (a < nelem) { /* IBM-OSD-SIM bug, Might not have it */
179 		unsigned len = get_attrs[a].len;
180 		char sid_dump[32*4 + 2]; /* 2nibbles+space+ASCII */
181 
182 		hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
183 				   sid_dump, sizeof(sid_dump), true);
184 		OSD_INFO("OSD_SYSTEM_ID(%d)\n"
185 			 "        [%s]\n", len, sid_dump);
186 
187 		if (unlikely(len > sizeof(odi->systemid))) {
188 			OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). "
189 				"device identification might not work\n", len);
190 			len = sizeof(odi->systemid);
191 		}
192 		odi->systemid_len = len;
193 		memcpy(odi->systemid, get_attrs[a].val_ptr, len);
194 		a++;
195 	}
196 out:
197 	osd_end_request(or);
198 	return ret;
199 }
200 
201 int osd_auto_detect_ver(struct osd_dev *od,
202 	void *caps, struct osd_dev_info *odi)
203 {
204 	int ret;
205 
206 	/* Auto-detect the osd version */
207 	ret = _osd_get_print_system_info(od, caps, odi);
208 	if (ret) {
209 		osd_dev_set_ver(od, OSD_VER1);
210 		OSD_DEBUG("converting to OSD1\n");
211 		ret = _osd_get_print_system_info(od, caps, odi);
212 	}
213 
214 	return ret;
215 }
216 EXPORT_SYMBOL(osd_auto_detect_ver);
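/* Caller-side sketch (an assumed usage pattern, not part of this file): a
 * typical initiator brings a device up roughly like this, with @caps prepared
 * by the caller (e.g. with the security helpers declared in osd_sec.h):
 *
 *	struct osd_dev od;
 *	struct osd_dev_info odi;
 *	int err;
 *
 *	osd_dev_init(&od, scsi_device);
 *	err = osd_auto_detect_ver(&od, caps, &odi);
 *	if (err)
 *		return err;
 *	...issue commands via osd_start_request()...
 *	osd_dev_fini(&od);
 */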
217 
218 static unsigned _osd_req_cdb_len(struct osd_request *or)
219 {
220 	return osd_req_is_ver1(or) ? OSDv1_TOTAL_CDB_LEN : OSD_TOTAL_CDB_LEN;
221 }
222 
223 static unsigned _osd_req_alist_elem_size(struct osd_request *or, unsigned len)
224 {
225 	return osd_req_is_ver1(or) ?
226 		osdv1_attr_list_elem_size(len) :
227 		osdv2_attr_list_elem_size(len);
228 }
229 
230 static void _osd_req_alist_elem_encode(struct osd_request *or,
231 	void *attr_last, const struct osd_attr *oa)
232 {
233 	if (osd_req_is_ver1(or)) {
234 		struct osdv1_attributes_list_element *attr = attr_last;
235 
236 		attr->attr_page = cpu_to_be32(oa->attr_page);
237 		attr->attr_id = cpu_to_be32(oa->attr_id);
238 		attr->attr_bytes = cpu_to_be16(oa->len);
239 		memcpy(attr->attr_val, oa->val_ptr, oa->len);
240 	} else {
241 		struct osdv2_attributes_list_element *attr = attr_last;
242 
243 		attr->attr_page = cpu_to_be32(oa->attr_page);
244 		attr->attr_id = cpu_to_be32(oa->attr_id);
245 		attr->attr_bytes = cpu_to_be16(oa->len);
246 		memcpy(attr->attr_val, oa->val_ptr, oa->len);
247 	}
248 }
249 
250 static int _osd_req_alist_elem_decode(struct osd_request *or,
251 	void *cur_p, struct osd_attr *oa, unsigned max_bytes)
252 {
253 	unsigned inc;
254 	if (osd_req_is_ver1(or)) {
255 		struct osdv1_attributes_list_element *attr = cur_p;
256 
257 		if (max_bytes < sizeof(*attr))
258 			return -1;
259 
260 		oa->len = be16_to_cpu(attr->attr_bytes);
261 		inc = _osd_req_alist_elem_size(or, oa->len);
262 		if (inc > max_bytes)
263 			return -1;
264 
265 		oa->attr_page = be32_to_cpu(attr->attr_page);
266 		oa->attr_id = be32_to_cpu(attr->attr_id);
267 
268 		/* OSD1: On empty attributes we return a pointer to 2 bytes
269 		 * of zeros. This keeps the behaviour similar to OSD2.
270 		 * (See below)
271 		 */
272 		oa->val_ptr = likely(oa->len) ? attr->attr_val :
273 						(u8 *)&attr->attr_bytes;
274 	} else {
275 		struct osdv2_attributes_list_element *attr = cur_p;
276 
277 		if (max_bytes < sizeof(*attr))
278 			return -1;
279 
280 		oa->len = be16_to_cpu(attr->attr_bytes);
281 		inc = _osd_req_alist_elem_size(or, oa->len);
282 		if (inc > max_bytes)
283 			return -1;
284 
285 		oa->attr_page = be32_to_cpu(attr->attr_page);
286 		oa->attr_id = be32_to_cpu(attr->attr_id);
287 
288 		/* OSD2: For convenience, on empty attributes, we return 8 bytes
289 		 * of zeros here. This keeps the same behaviour as OSD2r04,
290 		 * and is nice with null-terminated ASCII fields.
291 		 * oa->val_ptr == NULL marks the end-of-list, or error.
292 		 */
293 		oa->val_ptr = likely(oa->len) ? attr->attr_val : attr->reserved;
294 	}
295 	return inc;
296 }
297 
298 static unsigned _osd_req_alist_size(struct osd_request *or, void *list_head)
299 {
300 	return osd_req_is_ver1(or) ?
301 		osdv1_list_size(list_head) :
302 		osdv2_list_size(list_head);
303 }
304 
305 static unsigned _osd_req_sizeof_alist_header(struct osd_request *or)
306 {
307 	return osd_req_is_ver1(or) ?
308 		sizeof(struct osdv1_attributes_list_header) :
309 		sizeof(struct osdv2_attributes_list_header);
310 }
311 
312 static void _osd_req_set_alist_type(struct osd_request *or,
313 	void *list, int list_type)
314 {
315 	if (osd_req_is_ver1(or)) {
316 		struct osdv1_attributes_list_header *attr_list = list;
317 
318 		memset(attr_list, 0, sizeof(*attr_list));
319 		attr_list->type = list_type;
320 	} else {
321 		struct osdv2_attributes_list_header *attr_list = list;
322 
323 		memset(attr_list, 0, sizeof(*attr_list));
324 		attr_list->type = list_type;
325 	}
326 }
327 
328 static bool _osd_req_is_alist_type(struct osd_request *or,
329 	void *list, int list_type)
330 {
331 	if (!list)
332 		return false;
333 
334 	if (osd_req_is_ver1(or)) {
335 		struct osdv1_attributes_list_header *attr_list = list;
336 
337 		return attr_list->type == list_type;
338 	} else {
339 		struct osdv2_attributes_list_header *attr_list = list;
340 
341 		return attr_list->type == list_type;
342 	}
343 }
344 
345 /* This is for List-objects not Attributes-Lists */
346 static void _osd_req_encode_olist(struct osd_request *or,
347 	struct osd_obj_id_list *list)
348 {
349 	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
350 
351 	if (osd_req_is_ver1(or)) {
352 		cdbh->v1.list_identifier = list->list_identifier;
353 		cdbh->v1.start_address = list->continuation_id;
354 	} else {
355 		cdbh->v2.list_identifier = list->list_identifier;
356 		cdbh->v2.start_address = list->continuation_id;
357 	}
358 }
359 
360 static osd_cdb_offset osd_req_encode_offset(struct osd_request *or,
361 	u64 offset, unsigned *padding)
362 {
363 	return __osd_encode_offset(offset, padding,
364 			osd_req_is_ver1(or) ?
365 				OSDv1_OFFSET_MIN_SHIFT : OSD_OFFSET_MIN_SHIFT,
366 			OSD_OFFSET_MAX_SHIFT);
367 }
368 
369 static struct osd_security_parameters *
370 _osd_req_sec_params(struct osd_request *or)
371 {
372 	struct osd_cdb *ocdb = &or->cdb;
373 
374 	if (osd_req_is_ver1(or))
375 		return (struct osd_security_parameters *)&ocdb->v1.sec_params;
376 	else
377 		return (struct osd_security_parameters *)&ocdb->v2.sec_params;
378 }
379 
380 void osd_dev_init(struct osd_dev *osdd, struct scsi_device *scsi_device)
381 {
382 	memset(osdd, 0, sizeof(*osdd));
383 	osdd->scsi_device = scsi_device;
384 	osdd->def_timeout = BLK_DEFAULT_SG_TIMEOUT;
385 #ifdef OSD_VER1_SUPPORT
386 	osdd->version = OSD_VER2;
387 #endif
388 	/* TODO: Allocate pools for osd_request attributes ... */
389 }
390 EXPORT_SYMBOL(osd_dev_init);
391 
392 void osd_dev_fini(struct osd_dev *osdd)
393 {
394 	/* TODO: De-allocate pools */
395 
396 	osdd->scsi_device = NULL;
397 }
398 EXPORT_SYMBOL(osd_dev_fini);
399 
400 static struct osd_request *_osd_request_alloc(gfp_t gfp)
401 {
402 	struct osd_request *or;
403 
404 	/* TODO: Use mempool with one saved request */
405 	or = kzalloc(sizeof(*or), gfp);
406 	return or;
407 }
408 
409 static void _osd_request_free(struct osd_request *or)
410 {
411 	kfree(or);
412 }
413 
414 struct osd_request *osd_start_request(struct osd_dev *dev, gfp_t gfp)
415 {
416 	struct osd_request *or;
417 
418 	or = _osd_request_alloc(gfp);
419 	if (!or)
420 		return NULL;
421 
422 	or->osd_dev = dev;
423 	or->alloc_flags = gfp;
424 	or->timeout = dev->def_timeout;
425 	or->retries = OSD_REQ_RETRIES;
426 
427 	return or;
428 }
429 EXPORT_SYMBOL(osd_start_request);
430 
431 static void _osd_free_seg(struct osd_request *or __unused,
432 	struct _osd_req_data_segment *seg)
433 {
434 	if (!seg->buff || !seg->alloc_size)
435 		return;
436 
437 	kfree(seg->buff);
438 	seg->buff = NULL;
439 	seg->alloc_size = 0;
440 }
441 
442 static void _put_request(struct request *rq)
443 {
444 	/*
445 	 * If osd_finalize_request() was called but the request was not
446 	 * executed through the block layer, then we must release BIOs.
447 	 * TODO: Keep error code in or->async_error. Need to audit all
448 	 *       code paths.
449 	 */
450 	if (unlikely(rq->bio))
451 		blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
452 	else
453 		blk_put_request(rq);
454 }
455 
456 void osd_end_request(struct osd_request *or)
457 {
458 	struct request *rq = or->request;
459 
460 	if (rq) {
461 		if (rq->next_rq) {
462 			_put_request(rq->next_rq);
463 			rq->next_rq = NULL;
464 		}
465 
466 		_put_request(rq);
467 	}
468 
469 	_osd_free_seg(or, &or->get_attr);
470 	_osd_free_seg(or, &or->enc_get_attr);
471 	_osd_free_seg(or, &or->set_attr);
472 	_osd_free_seg(or, &or->cdb_cont);
473 
474 	_osd_request_free(or);
475 }
476 EXPORT_SYMBOL(osd_end_request);
477 
478 static void _set_error_resid(struct osd_request *or, struct request *req,
479 			     int error)
480 {
481 	or->async_error = error;
482 	or->req_errors = req->errors ? : error;
483 	or->sense_len = req->sense_len;
484 	if (or->out.req)
485 		or->out.residual = or->out.req->resid_len;
486 	if (or->in.req)
487 		or->in.residual = or->in.req->resid_len;
488 }
489 
490 int osd_execute_request(struct osd_request *or)
491 {
492 	int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
493 
494 	_set_error_resid(or, or->request, error);
495 	return error;
496 }
497 EXPORT_SYMBOL(osd_execute_request);
498 
499 static void osd_request_async_done(struct request *req, int error)
500 {
501 	struct osd_request *or = req->end_io_data;
502 
503 	_set_error_resid(or, req, error);
504 	if (req->next_rq) {
505 		__blk_put_request(req->q, req->next_rq);
506 		req->next_rq = NULL;
507 	}
508 
509 	__blk_put_request(req->q, req);
510 	or->request = NULL;
511 	or->in.req = NULL;
512 	or->out.req = NULL;
513 
514 	if (or->async_done)
515 		or->async_done(or, or->async_private);
516 	else
517 		osd_end_request(or);
518 }
519 
520 int osd_execute_request_async(struct osd_request *or,
521 	osd_req_done_fn *done, void *private)
522 {
523 	or->request->end_io_data = or;
524 	or->async_private = private;
525 	or->async_done = done;
526 
527 	blk_execute_rq_nowait(or->request->q, NULL, or->request, 0,
528 			      osd_request_async_done);
529 	return 0;
530 }
531 EXPORT_SYMBOL(osd_execute_request_async);
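/* Synchronous request life cycle, as exercised by _osd_get_print_system_info()
 * above (a caller-side sketch, not an additional API):
 *
 *	or = osd_start_request(od, GFP_KERNEL);
 *	...osd_req_*() encoding calls...
 *	ret = osd_finalize_request(or, 0, caps, cap_key);
 *	if (!ret)
 *		ret = osd_execute_request(or);
 *	...decode results, e.g. osd_req_decode_get_attr_list()...
 *	osd_end_request(or);
 *
 * With osd_execute_request_async() the call returns immediately; the done
 * callback (or osd_end_request(), when no callback was set) runs from
 * osd_request_async_done() once the command completes.
 */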
532 
533 u8 sg_out_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
534 u8 sg_in_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
535 
536 static int _osd_realloc_seg(struct osd_request *or,
537 	struct _osd_req_data_segment *seg, unsigned max_bytes)
538 {
539 	void *buff;
540 
541 	if (seg->alloc_size >= max_bytes)
542 		return 0;
543 
544 	buff = krealloc(seg->buff, max_bytes, or->alloc_flags);
545 	if (!buff) {
546 		OSD_ERR("Failed to Realloc %d-bytes was-%d\n", max_bytes,
547 			seg->alloc_size);
548 		return -ENOMEM;
549 	}
550 
551 	memset(buff + seg->alloc_size, 0, max_bytes - seg->alloc_size);
552 	seg->buff = buff;
553 	seg->alloc_size = max_bytes;
554 	return 0;
555 }
556 
557 static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
558 {
559 	OSD_DEBUG("total_bytes=%d\n", total_bytes);
560 	return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
561 }
562 
563 static int _alloc_set_attr_list(struct osd_request *or,
564 	const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
565 {
566 	unsigned total_bytes = add_bytes;
567 
568 	for (; nelem; --nelem, ++oa)
569 		total_bytes += _osd_req_alist_elem_size(or, oa->len);
570 
571 	OSD_DEBUG("total_bytes=%d\n", total_bytes);
572 	return _osd_realloc_seg(or, &or->set_attr, total_bytes);
573 }
574 
575 static int _alloc_get_attr_desc(struct osd_request *or, unsigned max_bytes)
576 {
577 	OSD_DEBUG("total_bytes=%d\n", max_bytes);
578 	return _osd_realloc_seg(or, &or->enc_get_attr, max_bytes);
579 }
580 
581 static int _alloc_get_attr_list(struct osd_request *or)
582 {
583 	OSD_DEBUG("total_bytes=%d\n", or->get_attr.total_bytes);
584 	return _osd_realloc_seg(or, &or->get_attr, or->get_attr.total_bytes);
585 }
586 
587 /*
588  * Common to all OSD commands
589  */
590 
591 static void _osdv1_req_encode_common(struct osd_request *or,
592 	__be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
593 {
594 	struct osdv1_cdb *ocdb = &or->cdb.v1;
595 
596 	/*
597 	 * For speed, the commands
598 	 *	OSD_ACT_PERFORM_SCSI_COMMAND	, V1 0x8F7E, V2 0x8F7C
599 	 *	OSD_ACT_SCSI_TASK_MANAGEMENT	, V1 0x8F7F, V2 0x8F7D
600 	 * are not supported here. Pass zero and set the action code after the call.
601 	 */
602 	act &= cpu_to_be16(~0x0080); /* V1 action code */
603 
604 	OSD_DEBUG("OSDv1 execute opcode 0x%x\n", be16_to_cpu(act));
605 
606 	ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
607 	ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
608 	ocdb->h.varlen_cdb.service_action = act;
609 
610 	ocdb->h.partition = cpu_to_be64(obj->partition);
611 	ocdb->h.object = cpu_to_be64(obj->id);
612 	ocdb->h.v1.length = cpu_to_be64(len);
613 	ocdb->h.v1.start_address = cpu_to_be64(offset);
614 }
615 
616 static void _osdv2_req_encode_common(struct osd_request *or,
617 	 __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
618 {
619 	struct osdv2_cdb *ocdb = &or->cdb.v2;
620 
621 	OSD_DEBUG("OSDv2 execute opcode 0x%x\n", be16_to_cpu(act));
622 
623 	ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
624 	ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
625 	ocdb->h.varlen_cdb.service_action = act;
626 
627 	ocdb->h.partition = cpu_to_be64(obj->partition);
628 	ocdb->h.object = cpu_to_be64(obj->id);
629 	ocdb->h.v2.length = cpu_to_be64(len);
630 	ocdb->h.v2.start_address = cpu_to_be64(offset);
631 }
632 
633 static void _osd_req_encode_common(struct osd_request *or,
634 	__be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
635 {
636 	if (osd_req_is_ver1(or))
637 		_osdv1_req_encode_common(or, act, obj, offset, len);
638 	else
639 		_osdv2_req_encode_common(or, act, obj, offset, len);
640 }
641 
642 /*
643  * Device commands
644  */
645 /*TODO: void osd_req_set_master_seed_xchg(struct osd_request *, ...); */
646 /*TODO: void osd_req_set_master_key(struct osd_request *, ...); */
647 
648 void osd_req_format(struct osd_request *or, u64 tot_capacity)
649 {
650 	_osd_req_encode_common(or, OSD_ACT_FORMAT_OSD, &osd_root_object, 0,
651 				tot_capacity);
652 }
653 EXPORT_SYMBOL(osd_req_format);
654 
655 int osd_req_list_dev_partitions(struct osd_request *or,
656 	osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem)
657 {
658 	return osd_req_list_partition_objects(or, 0, initial_id, list, nelem);
659 }
660 EXPORT_SYMBOL(osd_req_list_dev_partitions);
661 
662 static void _osd_req_encode_flush(struct osd_request *or,
663 	enum osd_options_flush_scope_values op)
664 {
665 	struct osd_cdb_head *ocdb = osd_cdb_head(&or->cdb);
666 
667 	ocdb->command_specific_options = op;
668 }
669 
670 void osd_req_flush_obsd(struct osd_request *or,
671 	enum osd_options_flush_scope_values op)
672 {
673 	_osd_req_encode_common(or, OSD_ACT_FLUSH_OSD, &osd_root_object, 0, 0);
674 	_osd_req_encode_flush(or, op);
675 }
676 EXPORT_SYMBOL(osd_req_flush_obsd);
677 
678 /*TODO: void osd_req_perform_scsi_command(struct osd_request *,
679 	const u8 *cdb, ...); */
680 /*TODO: void osd_req_task_management(struct osd_request *, ...); */
681 
682 /*
683  * Partition commands
684  */
685 static void _osd_req_encode_partition(struct osd_request *or,
686 	__be16 act, osd_id partition)
687 {
688 	struct osd_obj_id par = {
689 		.partition = partition,
690 		.id = 0,
691 	};
692 
693 	_osd_req_encode_common(or, act, &par, 0, 0);
694 }
695 
696 void osd_req_create_partition(struct osd_request *or, osd_id partition)
697 {
698 	_osd_req_encode_partition(or, OSD_ACT_CREATE_PARTITION, partition);
699 }
700 EXPORT_SYMBOL(osd_req_create_partition);
701 
702 void osd_req_remove_partition(struct osd_request *or, osd_id partition)
703 {
704 	_osd_req_encode_partition(or, OSD_ACT_REMOVE_PARTITION, partition);
705 }
706 EXPORT_SYMBOL(osd_req_remove_partition);
707 
708 /*TODO: void osd_req_set_partition_key(struct osd_request *,
709 	osd_id partition, u8 new_key_id[OSD_CRYPTO_KEYID_SIZE],
710 	u8 seed[OSD_CRYPTO_SEED_SIZE]); */
711 
712 static int _osd_req_list_objects(struct osd_request *or,
713 	__be16 action, const struct osd_obj_id *obj, osd_id initial_id,
714 	struct osd_obj_id_list *list, unsigned nelem)
715 {
716 	struct request_queue *q = osd_request_queue(or->osd_dev);
717 	u64 len = nelem * sizeof(osd_id) + sizeof(*list);
718 	struct bio *bio;
719 
720 	_osd_req_encode_common(or, action, obj, (u64)initial_id, len);
721 
722 	if (list->list_identifier)
723 		_osd_req_encode_olist(or, list);
724 
725 	WARN_ON(or->in.bio);
726 	bio = bio_map_kern(q, list, len, or->alloc_flags);
727 	if (IS_ERR(bio)) {
728 		OSD_ERR("!!! Failed to allocate list_objects BIO\n");
729 		return PTR_ERR(bio);
730 	}
731 
732 	bio->bi_rw &= ~REQ_WRITE;
733 	or->in.bio = bio;
734 	or->in.total_bytes = bio->bi_size;
735 	return 0;
736 }
737 
738 int osd_req_list_partition_collections(struct osd_request *or,
739 	osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
740 	unsigned nelem)
741 {
742 	struct osd_obj_id par = {
743 		.partition = partition,
744 		.id = 0,
745 	};
746 
747 	return osd_req_list_collection_objects(or, &par, initial_id, list,
748 					       nelem);
749 }
750 EXPORT_SYMBOL(osd_req_list_partition_collections);
751 
752 int osd_req_list_partition_objects(struct osd_request *or,
753 	osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
754 	unsigned nelem)
755 {
756 	struct osd_obj_id par = {
757 		.partition = partition,
758 		.id = 0,
759 	};
760 
761 	return _osd_req_list_objects(or, OSD_ACT_LIST, &par, initial_id, list,
762 				     nelem);
763 }
764 EXPORT_SYMBOL(osd_req_list_partition_objects);
765 
766 void osd_req_flush_partition(struct osd_request *or,
767 	osd_id partition, enum osd_options_flush_scope_values op)
768 {
769 	_osd_req_encode_partition(or, OSD_ACT_FLUSH_PARTITION, partition);
770 	_osd_req_encode_flush(or, op);
771 }
772 EXPORT_SYMBOL(osd_req_flush_partition);
773 
774 /*
775  * Collection commands
776  */
777 /*TODO: void osd_req_create_collection(struct osd_request *,
778 	const struct osd_obj_id *); */
779 /*TODO: void osd_req_remove_collection(struct osd_request *,
780 	const struct osd_obj_id *); */
781 
782 int osd_req_list_collection_objects(struct osd_request *or,
783 	const struct osd_obj_id *obj, osd_id initial_id,
784 	struct osd_obj_id_list *list, unsigned nelem)
785 {
786 	return _osd_req_list_objects(or, OSD_ACT_LIST_COLLECTION, obj,
787 				     initial_id, list, nelem);
788 }
789 EXPORT_SYMBOL(osd_req_list_collection_objects);
790 
791 /*TODO: void query(struct osd_request *, ...); V2 */
792 
793 void osd_req_flush_collection(struct osd_request *or,
794 	const struct osd_obj_id *obj, enum osd_options_flush_scope_values op)
795 {
796 	_osd_req_encode_common(or, OSD_ACT_FLUSH_PARTITION, obj, 0, 0);
797 	_osd_req_encode_flush(or, op);
798 }
799 EXPORT_SYMBOL(osd_req_flush_collection);
800 
801 /*TODO: void get_member_attrs(struct osd_request *, ...); V2 */
802 /*TODO: void set_member_attrs(struct osd_request *, ...); V2 */
803 
804 /*
805  * Object commands
806  */
807 void osd_req_create_object(struct osd_request *or, struct osd_obj_id *obj)
808 {
809 	_osd_req_encode_common(or, OSD_ACT_CREATE, obj, 0, 0);
810 }
811 EXPORT_SYMBOL(osd_req_create_object);
812 
813 void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *obj)
814 {
815 	_osd_req_encode_common(or, OSD_ACT_REMOVE, obj, 0, 0);
816 }
817 EXPORT_SYMBOL(osd_req_remove_object);
818 
819 
820 /*TODO: void osd_req_create_multi(struct osd_request *or,
821 	struct osd_obj_id *first, struct osd_obj_id_list *list, unsigned nelem);
822 */
823 
824 void osd_req_write(struct osd_request *or,
825 	const struct osd_obj_id *obj, u64 offset,
826 	struct bio *bio, u64 len)
827 {
828 	_osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
829 	WARN_ON(or->out.bio || or->out.total_bytes);
830 	WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
831 	or->out.bio = bio;
832 	or->out.total_bytes = len;
833 }
834 EXPORT_SYMBOL(osd_req_write);
835 
836 int osd_req_write_kern(struct osd_request *or,
837 	const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
838 {
839 	struct request_queue *req_q = osd_request_queue(or->osd_dev);
840 	struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
841 
842 	if (IS_ERR(bio))
843 		return PTR_ERR(bio);
844 
845 	bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
846 	osd_req_write(or, obj, offset, bio, len);
847 	return 0;
848 }
849 EXPORT_SYMBOL(osd_req_write_kern);
850 
851 /*TODO: void osd_req_append(struct osd_request *,
852 	const struct osd_obj_id *, struct bio *data_out); */
853 /*TODO: void osd_req_create_write(struct osd_request *,
854 	const struct osd_obj_id *, struct bio *data_out, u64 offset); */
855 /*TODO: void osd_req_clear(struct osd_request *,
856 	const struct osd_obj_id *, u64 offset, u64 len); */
857 /*TODO: void osd_req_punch(struct osd_request *,
858 	const struct osd_obj_id *, u64 offset, u64 len); V2 */
859 
860 void osd_req_flush_object(struct osd_request *or,
861 	const struct osd_obj_id *obj, enum osd_options_flush_scope_values op,
862 	/*V2*/ u64 offset, /*V2*/ u64 len)
863 {
864 	if (unlikely(osd_req_is_ver1(or) && (offset || len))) {
865 		OSD_DEBUG("OSD Ver1 flush on specific range ignored\n");
866 		offset = 0;
867 		len = 0;
868 	}
869 
870 	_osd_req_encode_common(or, OSD_ACT_FLUSH, obj, offset, len);
871 	_osd_req_encode_flush(or, op);
872 }
873 EXPORT_SYMBOL(osd_req_flush_object);
874 
875 void osd_req_read(struct osd_request *or,
876 	const struct osd_obj_id *obj, u64 offset,
877 	struct bio *bio, u64 len)
878 {
879 	_osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
880 	WARN_ON(or->in.bio || or->in.total_bytes);
881 	WARN_ON(bio->bi_rw & REQ_WRITE);
882 	or->in.bio = bio;
883 	or->in.total_bytes = len;
884 }
885 EXPORT_SYMBOL(osd_req_read);
886 
887 int osd_req_read_kern(struct osd_request *or,
888 	const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
889 {
890 	struct request_queue *req_q = osd_request_queue(or->osd_dev);
891 	struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
892 
893 	if (IS_ERR(bio))
894 		return PTR_ERR(bio);
895 
896 	osd_req_read(or, obj, offset, bio, len);
897 	return 0;
898 }
899 EXPORT_SYMBOL(osd_req_read_kern);
900 
901 static int _add_sg_continuation_descriptor(struct osd_request *or,
902 	const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
903 {
904 	struct osd_sg_continuation_descriptor *oscd;
905 	u32 oscd_size;
906 	unsigned i;
907 	int ret;
908 
909 	oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);
910 
911 	if (!or->cdb_cont.total_bytes) {
912 		/* First time, jump over the header, we will write to:
913 		 *	cdb_cont.buff + cdb_cont.total_bytes
914 		 */
915 		or->cdb_cont.total_bytes =
916 				sizeof(struct osd_continuation_segment_header);
917 	}
918 
919 	ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
920 	if (unlikely(ret))
921 		return ret;
922 
923 	oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
924 	oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
925 	oscd->hdr.pad_length = 0;
926 	oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));
927 
928 	*len = 0;
929 	/* copy the sg entries and convert to network byte order */
930 	for (i = 0; i < numentries; i++) {
931 		oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
932 		oscd->entries[i].len    = cpu_to_be64(sglist[i].len);
933 		*len += sglist[i].len;
934 	}
935 
936 	or->cdb_cont.total_bytes += oscd_size;
937 	OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
938 		  or->cdb_cont.total_bytes, oscd_size, numentries);
939 	return 0;
940 }
941 
942 static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
943 {
944 	struct request_queue *req_q = osd_request_queue(or->osd_dev);
945 	struct bio *bio;
946 	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
947 	struct osd_continuation_segment_header *cont_seg_hdr;
948 
949 	if (!or->cdb_cont.total_bytes)
950 		return 0;
951 
952 	cont_seg_hdr = or->cdb_cont.buff;
953 	cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
954 	cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;
955 
956 	/* create a bio for continuation segment */
957 	bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
958 			   GFP_KERNEL);
959 	if (IS_ERR(bio))
960 		return PTR_ERR(bio);
961 
962 	bio->bi_rw |= REQ_WRITE;
963 
964 	/* integrity check the continuation before the bio is linked
965 	 * with the other data segments since the continuation
966 	 * integrity is separate from the other data segments.
967 	 */
968 	osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);
969 
970 	cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);
971 
972 	/* we can't use _req_append_segment, because we need to link in the
973 	 * continuation bio to the head of the bio list - the
974 	 * continuation segment (if it exists) is always the first segment in
975 	 * the out data buffer.
976 	 */
977 	bio->bi_next = or->out.bio;
978 	or->out.bio = bio;
979 	or->out.total_bytes += or->cdb_cont.total_bytes;
980 
981 	return 0;
982 }
983 
984 /* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
985  * @sglist that has the scatter gather entries. Scatter-gather enables a write
986  * of multiple non-contiguous areas of an object in a single call. The extents
987  * may overlap and/or be in any order. The only constraint is that:
988  *	total_bytes(sglist) >= total_bytes(bio)
989  */
990 int osd_req_write_sg(struct osd_request *or,
991 	const struct osd_obj_id *obj, struct bio *bio,
992 	const struct osd_sg_entry *sglist, unsigned numentries)
993 {
994 	u64 len;
995 	int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
996 
997 	if (ret)
998 		return ret;
999 	osd_req_write(or, obj, 0, bio, len);
1000 
1001 	return 0;
1002 }
1003 EXPORT_SYMBOL(osd_req_write_sg);
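/* Usage sketch (assumed caller pattern; object offsets and lengths are only
 * illustrative): write one contiguous @bio into two separate extents of @obj.
 * Per the constraint above, total_bytes(sge) must cover the bio's payload.
 *
 *	struct osd_sg_entry sge[] = {
 *		{.offset = 0,           .len = PAGE_SIZE},
 *		{.offset = 1024 * 1024, .len = PAGE_SIZE},
 *	};
 *
 *	ret = osd_req_write_sg(or, &obj, bio, sge, ARRAY_SIZE(sge));
 */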
1004 
1005 /* osd_req_read_sg: Read multiple extents of an object into @bio
1006  * See osd_req_write_sg
1007  */
1008 int osd_req_read_sg(struct osd_request *or,
1009 	const struct osd_obj_id *obj, struct bio *bio,
1010 	const struct osd_sg_entry *sglist, unsigned numentries)
1011 {
1012 	u64 len;
1013 	u64 off;
1014 	int ret;
1015 
1016 	if (numentries > 1) {
1017 		off = 0;
1018 		ret = _add_sg_continuation_descriptor(or, sglist, numentries,
1019 						      &len);
1020 		if (ret)
1021 			return ret;
1022 	} else {
1023 		/* Optimize the case of single segment, read_sg is a
1024 		 * bidi operation.
1025 		 */
1026 		len = sglist->len;
1027 		off = sglist->offset;
1028 	}
1029 	osd_req_read(or, obj, off, bio, len);
1030 
1031 	return 0;
1032 }
1033 EXPORT_SYMBOL(osd_req_read_sg);
1034 
1035 /* SG-list write/read Kern API
1036  *
1037  * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array
1038  * of sg_entries. @numentries indicates how many pointers and sg_entries there
1039  * are. By requiring an array of buff pointers, this allows a caller to do a
1040  * single write/read and scatter into multiple buffers.
1041  * NOTE: Each buffer + len should not cross a page boundary.
1042  */
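/* Usage sketch for the kern variants (assumed caller pattern; the buffers and
 * object offsets are illustrative). Note that each buff[i] plus sglist[i].len
 * must not cross a page boundary, as stated above:
 *
 *	void *buff[2] = { page_address(p0), page_address(p1) };
 *	struct osd_sg_entry sge[2] = {
 *		{.offset = obj_off0, .len = PAGE_SIZE},
 *		{.offset = obj_off1, .len = PAGE_SIZE},
 *	};
 *
 *	ret = osd_req_read_sg_kern(or, &obj, buff, sge, 2);
 */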
1043 static struct bio *_create_sg_bios(struct osd_request *or,
1044 	void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
1045 {
1046 	struct request_queue *q = osd_request_queue(or->osd_dev);
1047 	struct bio *bio;
1048 	unsigned i;
1049 
1050 	bio = bio_kmalloc(GFP_KERNEL, numentries);
1051 	if (unlikely(!bio)) {
1052 		OSD_DEBUG("Failed to allocate BIO size=%u\n", numentries);
1053 		return ERR_PTR(-ENOMEM);
1054 	}
1055 
1056 	for (i = 0; i < numentries; i++) {
1057 		unsigned offset = offset_in_page(buff[i]);
1058 		struct page *page = virt_to_page(buff[i]);
1059 		unsigned len = sglist[i].len;
1060 		unsigned added_len;
1061 
1062 		BUG_ON(offset + len > PAGE_SIZE);
1063 		added_len = bio_add_pc_page(q, bio, page, len, offset);
1064 		if (unlikely(len != added_len)) {
1065 			OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n",
1066 				  len, added_len);
1067 			bio_put(bio);
1068 			return ERR_PTR(-ENOMEM);
1069 		}
1070 	}
1071 
1072 	return bio;
1073 }
1074 
1075 int osd_req_write_sg_kern(struct osd_request *or,
1076 	const struct osd_obj_id *obj, void **buff,
1077 	const struct osd_sg_entry *sglist, unsigned numentries)
1078 {
1079 	struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
1080 	if (IS_ERR(bio))
1081 		return PTR_ERR(bio);
1082 
1083 	bio->bi_rw |= REQ_WRITE;
1084 	osd_req_write_sg(or, obj, bio, sglist, numentries);
1085 
1086 	return 0;
1087 }
1088 EXPORT_SYMBOL(osd_req_write_sg_kern);
1089 
1090 int osd_req_read_sg_kern(struct osd_request *or,
1091 	const struct osd_obj_id *obj, void **buff,
1092 	const struct osd_sg_entry *sglist, unsigned numentries)
1093 {
1094 	struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
1095 	if (IS_ERR(bio))
1096 		return PTR_ERR(bio);
1097 
1098 	osd_req_read_sg(or, obj, bio, sglist, numentries);
1099 
1100 	return 0;
1101 }
1102 EXPORT_SYMBOL(osd_req_read_sg_kern);
1103 
1104 
1105 
1106 void osd_req_get_attributes(struct osd_request *or,
1107 	const struct osd_obj_id *obj)
1108 {
1109 	_osd_req_encode_common(or, OSD_ACT_GET_ATTRIBUTES, obj, 0, 0);
1110 }
1111 EXPORT_SYMBOL(osd_req_get_attributes);
1112 
1113 void osd_req_set_attributes(struct osd_request *or,
1114 	const struct osd_obj_id *obj)
1115 {
1116 	_osd_req_encode_common(or, OSD_ACT_SET_ATTRIBUTES, obj, 0, 0);
1117 }
1118 EXPORT_SYMBOL(osd_req_set_attributes);
1119 
1120 /*
1121  * Attributes List-mode
1122  */
1123 
1124 int osd_req_add_set_attr_list(struct osd_request *or,
1125 	const struct osd_attr *oa, unsigned nelem)
1126 {
1127 	unsigned total_bytes = or->set_attr.total_bytes;
1128 	void *attr_last;
1129 	int ret;
1130 
1131 	if (or->attributes_mode &&
1132 	    or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
1133 		WARN_ON(1);
1134 		return -EINVAL;
1135 	}
1136 	or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1137 
1138 	if (!total_bytes) { /* first-time: allocate and put list header */
1139 		total_bytes = _osd_req_sizeof_alist_header(or);
1140 		ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
1141 		if (ret)
1142 			return ret;
1143 		_osd_req_set_alist_type(or, or->set_attr.buff,
1144 					OSD_ATTR_LIST_SET_RETRIEVE);
1145 	}
1146 	attr_last = or->set_attr.buff + total_bytes;
1147 
1148 	for (; nelem; --nelem) {
1149 		unsigned elem_size = _osd_req_alist_elem_size(or, oa->len);
1150 
1151 		total_bytes += elem_size;
1152 		if (unlikely(or->set_attr.alloc_size < total_bytes)) {
1153 			or->set_attr.total_bytes = total_bytes - elem_size;
1154 			ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
1155 			if (ret)
1156 				return ret;
1157 			attr_last =
1158 				or->set_attr.buff + or->set_attr.total_bytes;
1159 		}
1160 
1161 		_osd_req_alist_elem_encode(or, attr_last, oa);
1162 
1163 		attr_last += elem_size;
1164 		++oa;
1165 	}
1166 
1167 	or->set_attr.total_bytes = total_bytes;
1168 	return 0;
1169 }
1170 EXPORT_SYMBOL(osd_req_add_set_attr_list);
1171 
1172 static int _req_append_segment(struct osd_request *or,
1173 	unsigned padding, struct _osd_req_data_segment *seg,
1174 	struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
1175 {
1176 	void *pad_buff;
1177 	int ret;
1178 
1179 	if (padding) {
1180 		/* check if we can just add it to last buffer */
1181 		if (last_seg &&
1182 		    (padding <= last_seg->alloc_size - last_seg->total_bytes))
1183 			pad_buff = last_seg->buff + last_seg->total_bytes;
1184 		else
1185 			pad_buff = io->pad_buff;
1186 
1187 		ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
1188 				       or->alloc_flags);
1189 		if (ret)
1190 			return ret;
1191 		io->total_bytes += padding;
1192 	}
1193 
1194 	ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
1195 			       or->alloc_flags);
1196 	if (ret)
1197 		return ret;
1198 
1199 	io->total_bytes += seg->total_bytes;
1200 	OSD_DEBUG("padding=%d buff=%p total_bytes=%d\n", padding, seg->buff,
1201 		  seg->total_bytes);
1202 	return 0;
1203 }
1204 
1205 static int _osd_req_finalize_set_attr_list(struct osd_request *or)
1206 {
1207 	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1208 	unsigned padding;
1209 	int ret;
1210 
1211 	if (!or->set_attr.total_bytes) {
1212 		cdbh->attrs_list.set_attr_offset = OSD_OFFSET_UNUSED;
1213 		return 0;
1214 	}
1215 
1216 	cdbh->attrs_list.set_attr_bytes = cpu_to_be32(or->set_attr.total_bytes);
1217 	cdbh->attrs_list.set_attr_offset =
1218 		osd_req_encode_offset(or, or->out.total_bytes, &padding);
1219 
1220 	ret = _req_append_segment(or, padding, &or->set_attr,
1221 				  or->out.last_seg, &or->out);
1222 	if (ret)
1223 		return ret;
1224 
1225 	or->out.last_seg = &or->set_attr;
1226 	return 0;
1227 }
1228 
1229 int osd_req_add_get_attr_list(struct osd_request *or,
1230 	const struct osd_attr *oa, unsigned nelem)
1231 {
1232 	unsigned total_bytes = or->enc_get_attr.total_bytes;
1233 	void *attr_last;
1234 	int ret;
1235 
1236 	if (or->attributes_mode &&
1237 	    or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
1238 		WARN_ON(1);
1239 		return -EINVAL;
1240 	}
1241 	or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1242 
1243 	/* first time calc data-in list header size */
1244 	if (!or->get_attr.total_bytes)
1245 		or->get_attr.total_bytes = _osd_req_sizeof_alist_header(or);
1246 
1247 	/* calc data-out info */
1248 	if (!total_bytes) { /* first-time: allocate and put list header */
1249 		unsigned max_bytes;
1250 
1251 		total_bytes = _osd_req_sizeof_alist_header(or);
1252 		max_bytes = total_bytes +
1253 			nelem * sizeof(struct osd_attributes_list_attrid);
1254 		ret = _alloc_get_attr_desc(or, max_bytes);
1255 		if (ret)
1256 			return ret;
1257 
1258 		_osd_req_set_alist_type(or, or->enc_get_attr.buff,
1259 					OSD_ATTR_LIST_GET);
1260 	}
1261 	attr_last = or->enc_get_attr.buff + total_bytes;
1262 
1263 	for (; nelem; --nelem) {
1264 		struct osd_attributes_list_attrid *attrid;
1265 		const unsigned cur_size = sizeof(*attrid);
1266 
1267 		total_bytes += cur_size;
1268 		if (unlikely(or->enc_get_attr.alloc_size < total_bytes)) {
1269 			or->enc_get_attr.total_bytes = total_bytes - cur_size;
1270 			ret = _alloc_get_attr_desc(or,
1271 					total_bytes + nelem * sizeof(*attrid));
1272 			if (ret)
1273 				return ret;
1274 			attr_last = or->enc_get_attr.buff +
1275 				or->enc_get_attr.total_bytes;
1276 		}
1277 
1278 		attrid = attr_last;
1279 		attrid->attr_page = cpu_to_be32(oa->attr_page);
1280 		attrid->attr_id = cpu_to_be32(oa->attr_id);
1281 
1282 		attr_last += cur_size;
1283 
1284 		/* calc data-in size */
1285 		or->get_attr.total_bytes +=
1286 			_osd_req_alist_elem_size(or, oa->len);
1287 		++oa;
1288 	}
1289 
1290 	or->enc_get_attr.total_bytes = total_bytes;
1291 
1292 	OSD_DEBUG(
1293 	       "get_attr.total_bytes=%u(%u) enc_get_attr.total_bytes=%u(%Zu)\n",
1294 	       or->get_attr.total_bytes,
1295 	       or->get_attr.total_bytes - _osd_req_sizeof_alist_header(or),
1296 	       or->enc_get_attr.total_bytes,
1297 	       (or->enc_get_attr.total_bytes - _osd_req_sizeof_alist_header(or))
1298 			/ sizeof(struct osd_attributes_list_attrid));
1299 
1300 	return 0;
1301 }
1302 EXPORT_SYMBOL(osd_req_add_get_attr_list);
1303 
1304 static int _osd_req_finalize_get_attr_list(struct osd_request *or)
1305 {
1306 	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1307 	unsigned out_padding;
1308 	unsigned in_padding;
1309 	int ret;
1310 
1311 	if (!or->enc_get_attr.total_bytes) {
1312 		cdbh->attrs_list.get_attr_desc_offset = OSD_OFFSET_UNUSED;
1313 		cdbh->attrs_list.get_attr_offset = OSD_OFFSET_UNUSED;
1314 		return 0;
1315 	}
1316 
1317 	ret = _alloc_get_attr_list(or);
1318 	if (ret)
1319 		return ret;
1320 
1321 	/* The out-going buffer info update */
1322 	OSD_DEBUG("out-going\n");
1323 	cdbh->attrs_list.get_attr_desc_bytes =
1324 		cpu_to_be32(or->enc_get_attr.total_bytes);
1325 
1326 	cdbh->attrs_list.get_attr_desc_offset =
1327 		osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
1328 
1329 	ret = _req_append_segment(or, out_padding, &or->enc_get_attr,
1330 				  or->out.last_seg, &or->out);
1331 	if (ret)
1332 		return ret;
1333 	or->out.last_seg = &or->enc_get_attr;
1334 
1335 	/* The incoming buffer info update */
1336 	OSD_DEBUG("in-coming\n");
1337 	cdbh->attrs_list.get_attr_alloc_length =
1338 		cpu_to_be32(or->get_attr.total_bytes);
1339 
1340 	cdbh->attrs_list.get_attr_offset =
1341 		osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
1342 
1343 	ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
1344 				  &or->in);
1345 	if (ret)
1346 		return ret;
1347 	or->in.last_seg = &or->get_attr;
1348 
1349 	return 0;
1350 }
1351 
1352 int osd_req_decode_get_attr_list(struct osd_request *or,
1353 	struct osd_attr *oa, int *nelem, void **iterator)
1354 {
1355 	unsigned cur_bytes, returned_bytes;
1356 	int n;
1357 	const unsigned sizeof_attr_list = _osd_req_sizeof_alist_header(or);
1358 	void *cur_p;
1359 
1360 	if (!_osd_req_is_alist_type(or, or->get_attr.buff,
1361 				    OSD_ATTR_LIST_SET_RETRIEVE)) {
1362 		oa->attr_page = 0;
1363 		oa->attr_id = 0;
1364 		oa->val_ptr = NULL;
1365 		oa->len = 0;
1366 		*iterator = NULL;
1367 		return 0;
1368 	}
1369 
1370 	if (*iterator) {
1371 		BUG_ON((*iterator < or->get_attr.buff) ||
1372 		     (or->get_attr.buff + or->get_attr.alloc_size < *iterator));
1373 		cur_p = *iterator;
1374 		cur_bytes = (*iterator - or->get_attr.buff) - sizeof_attr_list;
1375 		returned_bytes = or->get_attr.total_bytes;
1376 	} else { /* first time decode the list header */
1377 		cur_bytes = sizeof_attr_list;
1378 		returned_bytes = _osd_req_alist_size(or, or->get_attr.buff) +
1379 					sizeof_attr_list;
1380 
1381 		cur_p = or->get_attr.buff + sizeof_attr_list;
1382 
1383 		if (returned_bytes > or->get_attr.alloc_size) {
1384 			OSD_DEBUG("target report: space was not big enough! "
1385 				  "Allocate=%u Needed=%u\n",
1386 				  or->get_attr.alloc_size,
1387 				  returned_bytes + sizeof_attr_list);
1388 
1389 			returned_bytes =
1390 				or->get_attr.alloc_size - sizeof_attr_list;
1391 		}
1392 		or->get_attr.total_bytes = returned_bytes;
1393 	}
1394 
1395 	for (n = 0; (n < *nelem) && (cur_bytes < returned_bytes); ++n) {
1396 		int inc = _osd_req_alist_elem_decode(or, cur_p, oa,
1397 						 returned_bytes - cur_bytes);
1398 
1399 		if (inc < 0) {
1400 			OSD_ERR("BAD FOOD from target. list not valid!"
1401 				"c=%d r=%d n=%d\n",
1402 				cur_bytes, returned_bytes, n);
1403 			oa->val_ptr = NULL;
1404 			cur_bytes = returned_bytes; /* break the caller loop */
1405 			break;
1406 		}
1407 
1408 		cur_bytes += inc;
1409 		cur_p += inc;
1410 		++oa;
1411 	}
1412 
1413 	*iterator = (returned_bytes - cur_bytes) ? cur_p : NULL;
1414 	*nelem = n;
1415 	return returned_bytes - cur_bytes;
1416 }
1417 EXPORT_SYMBOL(osd_req_decode_get_attr_list);
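/* Decode-loop sketch (assumed caller pattern): pass the size of the attr
 * array in *nelem on every call; on return it holds the number of entries
 * actually decoded, and the iterator comes back NULL when the list is done:
 *
 *	void *iter = NULL;
 *
 *	do {
 *		int n = ARRAY_SIZE(attrs);
 *
 *		osd_req_decode_get_attr_list(or, attrs, &n, &iter);
 *		...consume attrs[0..n-1]...
 *	} while (iter);
 */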
1418 
1419 /*
1420  * Attributes Page-mode
1421  */
1422 
1423 int osd_req_add_get_attr_page(struct osd_request *or,
1424 	u32 page_id, void *attar_page, unsigned max_page_len,
1425 	const struct osd_attr *set_one_attr)
1426 {
1427 	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1428 
1429 	if (or->attributes_mode &&
1430 	    or->attributes_mode != OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
1431 		WARN_ON(1);
1432 		return -EINVAL;
1433 	}
1434 	or->attributes_mode = OSD_CDB_GET_ATTR_PAGE_SET_ONE;
1435 
1436 	or->get_attr.buff = attar_page;
1437 	or->get_attr.total_bytes = max_page_len;
1438 
1439 	cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
1440 	cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);
1441 
1442 	if (!set_one_attr || !set_one_attr->attr_page)
1443 		return 0; /* The set is optional */
1444 
1445 	or->set_attr.buff = set_one_attr->val_ptr;
1446 	or->set_attr.total_bytes = set_one_attr->len;
1447 
1448 	cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
1449 	cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
1450 	cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
1451 	return 0;
1452 }
1453 EXPORT_SYMBOL(osd_req_add_get_attr_page);
1454 
1455 static int _osd_req_finalize_attr_page(struct osd_request *or)
1456 {
1457 	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1458 	unsigned in_padding, out_padding;
1459 	int ret;
1460 
1461 	/* returned page */
1462 	cdbh->attrs_page.get_attr_offset =
1463 		osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
1464 
1465 	ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
1466 				  &or->in);
1467 	if (ret)
1468 		return ret;
1469 
1470 	if (or->set_attr.total_bytes == 0)
1471 		return 0;
1472 
1473 	/* set one value */
1474 	cdbh->attrs_page.set_attr_offset =
1475 		osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
1476 
1477 	ret = _req_append_segment(or, out_padding, &or->set_attr, NULL,
1478 				  &or->out);
1479 	return ret;
1480 }
1481 
1482 static inline void osd_sec_parms_set_out_offset(bool is_v1,
1483 	struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
1484 {
1485 	if (is_v1)
1486 		sec_parms->v1.data_out_integrity_check_offset = offset;
1487 	else
1488 		sec_parms->v2.data_out_integrity_check_offset = offset;
1489 }
1490 
1491 static inline void osd_sec_parms_set_in_offset(bool is_v1,
1492 	struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
1493 {
1494 	if (is_v1)
1495 		sec_parms->v1.data_in_integrity_check_offset = offset;
1496 	else
1497 		sec_parms->v2.data_in_integrity_check_offset = offset;
1498 }
1499 
1500 static int _osd_req_finalize_data_integrity(struct osd_request *or,
1501 	bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
1502 	const u8 *cap_key)
1503 {
1504 	struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
1505 	int ret;
1506 
1507 	if (!osd_is_sec_alldata(sec_parms))
1508 		return 0;
1509 
1510 	if (has_out) {
1511 		struct _osd_req_data_segment seg = {
1512 			.buff = &or->out_data_integ,
1513 			.total_bytes = sizeof(or->out_data_integ),
1514 		};
1515 		unsigned pad;
1516 
1517 		or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
1518 		or->out_data_integ.set_attributes_bytes = cpu_to_be64(
1519 			or->set_attr.total_bytes);
1520 		or->out_data_integ.get_attributes_bytes = cpu_to_be64(
1521 			or->enc_get_attr.total_bytes);
1522 
1523 		osd_sec_parms_set_out_offset(osd_req_is_ver1(or), sec_parms,
1524 			osd_req_encode_offset(or, or->out.total_bytes, &pad));
1525 
1526 		ret = _req_append_segment(or, pad, &seg, or->out.last_seg,
1527 					  &or->out);
1528 		if (ret)
1529 			return ret;
1530 		or->out.last_seg = NULL;
1531 
1532 		/* they are now all chained to request sign them all together */
1533 		osd_sec_sign_data(&or->out_data_integ, out_data_bio,
1534 				  cap_key);
1535 	}
1536 
1537 	if (has_in) {
1538 		struct _osd_req_data_segment seg = {
1539 			.buff = &or->in_data_integ,
1540 			.total_bytes = sizeof(or->in_data_integ),
1541 		};
1542 		unsigned pad;
1543 
1544 		osd_sec_parms_set_in_offset(osd_req_is_ver1(or), sec_parms,
1545 			osd_req_encode_offset(or, or->in.total_bytes, &pad));
1546 
1547 		ret = _req_append_segment(or, pad, &seg, or->in.last_seg,
1548 					  &or->in);
1549 		if (ret)
1550 			return ret;
1551 
1552 		or->in.last_seg = NULL;
1553 	}
1554 
1555 	return 0;
1556 }
1557 
1558 /*
1559  * osd_finalize_request and helpers
1560  */
1561 static struct request *_make_request(struct request_queue *q, bool has_write,
1562 			      struct _osd_io_info *oii, gfp_t flags)
1563 {
1564 	if (oii->bio)
1565 		return blk_make_request(q, oii->bio, flags);
1566 	else {
1567 		struct request *req;
1568 
1569 		req = blk_get_request(q, has_write ? WRITE : READ, flags);
1570 		if (unlikely(!req))
1571 			return ERR_PTR(-ENOMEM);
1572 
1573 		return req;
1574 	}
1575 }
1576 
1577 static int _init_blk_request(struct osd_request *or,
1578 	bool has_in, bool has_out)
1579 {
1580 	gfp_t flags = or->alloc_flags;
1581 	struct scsi_device *scsi_device = or->osd_dev->scsi_device;
1582 	struct request_queue *q = scsi_device->request_queue;
1583 	struct request *req;
1584 	int ret;
1585 
1586 	req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
1587 	if (IS_ERR(req)) {
1588 		ret = PTR_ERR(req);
1589 		goto out;
1590 	}
1591 
1592 	or->request = req;
1593 	req->cmd_type = REQ_TYPE_BLOCK_PC;
1594 	req->cmd_flags |= REQ_QUIET;
1595 
1596 	req->timeout = or->timeout;
1597 	req->retries = or->retries;
1598 	req->sense = or->sense;
1599 	req->sense_len = 0;
1600 
1601 	if (has_out) {
1602 		or->out.req = req;
1603 		if (has_in) {
1604 			/* allocate bidi request */
1605 			req = _make_request(q, false, &or->in, flags);
1606 			if (IS_ERR(req)) {
1607 				OSD_DEBUG("blk_get_request for bidi failed\n");
1608 				ret = PTR_ERR(req);
1609 				goto out;
1610 			}
1611 			req->cmd_type = REQ_TYPE_BLOCK_PC;
1612 			or->in.req = or->request->next_rq = req;
1613 		}
1614 	} else if (has_in)
1615 		or->in.req = req;
1616 
1617 	ret = 0;
1618 out:
1619 	OSD_DEBUG("or=%p has_in=%d has_out=%d => %d, %p\n",
1620 			or, has_in, has_out, ret, or->request);
1621 	return ret;
1622 }
1623 
1624 int osd_finalize_request(struct osd_request *or,
1625 	u8 options, const void *cap, const u8 *cap_key)
1626 {
1627 	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1628 	bool has_in, has_out;
1629 	 /* Save for data_integrity without the cdb_continuation */
1630 	struct bio *out_data_bio = or->out.bio;
1631 	u64 out_data_bytes = or->out.total_bytes;
1632 	int ret;
1633 
1634 	if (options & OSD_REQ_FUA)
1635 		cdbh->options |= OSD_CDB_FUA;
1636 
1637 	if (options & OSD_REQ_DPO)
1638 		cdbh->options |= OSD_CDB_DPO;
1639 
1640 	if (options & OSD_REQ_BYPASS_TIMESTAMPS)
1641 		cdbh->timestamp_control = OSD_CDB_BYPASS_TIMESTAMPS;
1642 
1643 	osd_set_caps(&or->cdb, cap);
1644 
1645 	has_in = or->in.bio || or->get_attr.total_bytes;
1646 	has_out = or->out.bio || or->cdb_cont.total_bytes ||
1647 		or->set_attr.total_bytes || or->enc_get_attr.total_bytes;
1648 
1649 	ret = _osd_req_finalize_cdb_cont(or, cap_key);
1650 	if (ret) {
1651 		OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
1652 		return ret;
1653 	}
1654 	ret = _init_blk_request(or, has_in, has_out);
1655 	if (ret) {
1656 		OSD_DEBUG("_init_blk_request failed\n");
1657 		return ret;
1658 	}
1659 
1660 	or->out.pad_buff = sg_out_pad_buffer;
1661 	or->in.pad_buff = sg_in_pad_buffer;
1662 
1663 	if (!or->attributes_mode)
1664 		or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1665 	cdbh->command_specific_options |= or->attributes_mode;
1666 	if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
1667 		ret = _osd_req_finalize_attr_page(or);
1668 		if (ret) {
1669 			OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
1670 			return ret;
1671 		}
1672 	} else {
1673 		/* TODO: I think that for the GET_ATTR command these 2 should
1674 		 * be reversed to keep them in execution order (for embedded
1675 		 * targets with low memory footprint)
1676 		 */
1677 		ret = _osd_req_finalize_set_attr_list(or);
1678 		if (ret) {
1679 			OSD_DEBUG("_osd_req_finalize_set_attr_list failed\n");
1680 			return ret;
1681 		}
1682 
1683 		ret = _osd_req_finalize_get_attr_list(or);
1684 		if (ret) {
1685 			OSD_DEBUG("_osd_req_finalize_get_attr_list failed\n");
1686 			return ret;
1687 		}
1688 	}
1689 
1690 	ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
1691 					       out_data_bio, out_data_bytes,
1692 					       cap_key);
1693 	if (ret)
1694 		return ret;
1695 
1696 	osd_sec_sign_cdb(&or->cdb, cap_key);
1697 
1698 	or->request->cmd = or->cdb.buff;
1699 	or->request->cmd_len = _osd_req_cdb_len(or);
1700 
1701 	return 0;
1702 }
1703 EXPORT_SYMBOL(osd_finalize_request);
1704 
1705 static bool _is_osd_security_code(int code)
1706 {
1707 	return	(code == osd_security_audit_value_frozen) ||
1708 		(code == osd_security_working_key_frozen) ||
1709 		(code == osd_nonce_not_unique) ||
1710 		(code == osd_nonce_timestamp_out_of_range) ||
1711 		(code == osd_invalid_dataout_buffer_integrity_check_value);
1712 }
1713 
1714 #define OSD_SENSE_PRINT1(fmt, a...) \
1715 	do { \
1716 		if (__cur_sense_need_output) \
1717 			OSD_ERR(fmt, ##a); \
1718 	} while (0)
1719 
1720 #define OSD_SENSE_PRINT2(fmt, a...) OSD_SENSE_PRINT1("    " fmt, ##a)
1721 
1722 int osd_req_decode_sense_full(struct osd_request *or,
1723 	struct osd_sense_info *osi, bool silent,
1724 	struct osd_obj_id *bad_obj_list __unused, int max_obj __unused,
1725 	struct osd_attr *bad_attr_list, int max_attr)
1726 {
1727 	int sense_len, original_sense_len;
1728 	struct osd_sense_info local_osi;
1729 	struct scsi_sense_descriptor_based *ssdb;
1730 	void *cur_descriptor;
1731 #if (CONFIG_SCSI_OSD_DPRINT_SENSE == 0)
1732 	const bool __cur_sense_need_output = false;
1733 #else
1734 	bool __cur_sense_need_output = !silent;
1735 #endif
1736 	int ret;
1737 
1738 	if (likely(!or->req_errors))
1739 		return 0;
1740 
1741 	osi = osi ? : &local_osi;
1742 	memset(osi, 0, sizeof(*osi));
1743 
1744 	ssdb = (typeof(ssdb))or->sense;
1745 	sense_len = or->sense_len;
1746 	if (sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key) {
1747 		OSD_ERR("Block-layer returned error(0x%x) but "
1748 			"sense_len(%u) || key(%d) is empty\n",
1749 			or->req_errors, sense_len, ssdb->sense_key);
1750 		goto analyze;
1751 	}
1752 
1753 	if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) {
1754 		OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n",
1755 			ssdb->response_code, sense_len);
1756 		goto analyze;
1757 	}
1758 
1759 	osi->key = ssdb->sense_key;
1760 	osi->additional_code = be16_to_cpu(ssdb->additional_sense_code);
1761 	original_sense_len = ssdb->additional_sense_length + 8;
1762 
1763 #if (CONFIG_SCSI_OSD_DPRINT_SENSE == 1)
1764 	if (__cur_sense_need_output)
1765 		__cur_sense_need_output = (osi->key > scsi_sk_recovered_error);
1766 #endif
1767 	OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) "
1768 			"additional_code=0x%x async_error=%d errors=0x%x\n",
1769 			osi->key, original_sense_len, sense_len,
1770 			osi->additional_code, or->async_error,
1771 			or->req_errors);
1772 
1773 	if (original_sense_len < sense_len)
1774 		sense_len = original_sense_len;
1775 
1776 	cur_descriptor = ssdb->ssd;
1777 	sense_len -= sizeof(*ssdb);
1778 	while (sense_len > 0) {
1779 		struct scsi_sense_descriptor *ssd = cur_descriptor;
1780 		int cur_len = ssd->additional_length + 2;
1781 
1782 		sense_len -= cur_len;
1783 
1784 		if (sense_len < 0)
1785 			break; /* sense was truncated */
1786 
1787 		switch (ssd->descriptor_type) {
1788 		case scsi_sense_information:
1789 		case scsi_sense_command_specific_information:
1790 		{
1791 			struct scsi_sense_command_specific_data_descriptor
1792 				*sscd = cur_descriptor;
1793 
1794 			osi->command_info =
1795 				get_unaligned_be64(&sscd->information);
1796 			OSD_SENSE_PRINT2(
1797 				"command_specific_information 0x%llx\n",
1798 				_LLU(osi->command_info));
1799 			break;
1800 		}
1801 		case scsi_sense_key_specific:
1802 		{
1803 			struct scsi_sense_key_specific_data_descriptor
1804 				*ssks = cur_descriptor;
1805 
1806 			osi->sense_info = get_unaligned_be16(&ssks->value);
1807 			OSD_SENSE_PRINT2(
1808 				"sense_key_specific_information %u"
1809 				"sksv_cd_bpv_bp (0x%x)\n",
1810 				osi->sense_info, ssks->sksv_cd_bpv_bp);
1811 			break;
1812 		}
1813 		case osd_sense_object_identification:
1814 		{ /* FIXME: Keep first not last, store in array */
1815 			struct osd_sense_identification_data_descriptor
1816 				*osidd = cur_descriptor;
1817 
1818 			osi->not_initiated_command_functions =
1819 				le32_to_cpu(osidd->not_initiated_functions);
1820 			osi->completed_command_functions =
1821 				le32_to_cpu(osidd->completed_functions);
1822 			osi->obj.partition = be64_to_cpu(osidd->partition_id);
1823 			osi->obj.id = be64_to_cpu(osidd->object_id);
1824 			OSD_SENSE_PRINT2(
1825 				"object_identification pid=0x%llx oid=0x%llx\n",
1826 				_LLU(osi->obj.partition), _LLU(osi->obj.id));
1827 			OSD_SENSE_PRINT2(
1828 				"not_initiated_bits(%x) "
1829 				"completed_command_bits(%x)\n",
1830 				osi->not_initiated_command_functions,
1831 				osi->completed_command_functions);
1832 			break;
1833 		}
1834 		case osd_sense_response_integrity_check:
1835 		{
1836 			struct osd_sense_response_integrity_check_descriptor
1837 				*osricd = cur_descriptor;
1838 			const unsigned len =
1839 					  sizeof(osricd->integrity_check_value);
1840 			char key_dump[len*4 + 2]; /* 2nibbles+space+ASCII */
1841 
1842 			hex_dump_to_buffer(osricd->integrity_check_value, len,
1843 				       32, 1, key_dump, sizeof(key_dump), true);
1844 			OSD_SENSE_PRINT2("response_integrity [%s]\n", key_dump);
1845 		}
1846 		case osd_sense_attribute_identification:
1847 		{
1848 			struct osd_sense_attributes_data_descriptor
1849 				*osadd = cur_descriptor;
1850 			unsigned len = min(cur_len, sense_len);
1851 			struct osd_sense_attr *pattr = osadd->sense_attrs;
1852 
1853 			while (len >= sizeof(*pattr)) {
1854 				u32 attr_page = be32_to_cpu(pattr->attr_page);
1855 				u32 attr_id = be32_to_cpu(pattr->attr_id);
1856 
1857 				if (!osi->attr.attr_page) {
1858 					osi->attr.attr_page = attr_page;
1859 					osi->attr.attr_id = attr_id;
1860 				}
1861 
1862 				if (bad_attr_list && max_attr) {
1863 					bad_attr_list->attr_page = attr_page;
1864 					bad_attr_list->attr_id = attr_id;
1865 					bad_attr_list++;
1866 					max_attr--;
1867 				}
1868 
1869 				len -= sizeof(*pattr);
				pattr++;
1870 				OSD_SENSE_PRINT2(
1871 					"osd_sense_attribute_identification "
1872 					"attr_page=0x%x attr_id=0x%x\n",
1873 					attr_page, attr_id);
1874 			}
			break;
1875 		}
1876 		/* These are not legal for OSD */
1877 		case scsi_sense_field_replaceable_unit:
1878 			OSD_SENSE_PRINT2("scsi_sense_field_replaceable_unit\n");
1879 			break;
1880 		case scsi_sense_stream_commands:
1881 			OSD_SENSE_PRINT2("scsi_sense_stream_commands\n");
1882 			break;
1883 		case scsi_sense_block_commands:
1884 			OSD_SENSE_PRINT2("scsi_sense_block_commands\n");
1885 			break;
1886 		case scsi_sense_ata_return:
1887 			OSD_SENSE_PRINT2("scsi_sense_ata_return\n");
1888 			break;
1889 		default:
1890 			if (ssd->descriptor_type <= scsi_sense_Reserved_last)
1891 				OSD_SENSE_PRINT2(
1892 					"scsi_sense Reserved descriptor (0x%x)",
1893 					ssd->descriptor_type);
1894 			else
1895 				OSD_SENSE_PRINT2(
1896 					"scsi_sense Vendor descriptor (0x%x)",
1897 					ssd->descriptor_type);
1898 		}
1899 
1900 		cur_descriptor += cur_len;
1901 	}
1902 
1903 analyze:
1904 	if (!osi->key) {
1905 		/* scsi sense is empty, the request was never issued to the
1906 		 * target; the linux return code might tell us what happened.
1907 		 */
1908 		if (or->async_error == -ENOMEM)
1909 			osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
1910 		else
1911 			osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
1912 		ret = or->async_error;
1913 	} else if (osi->key <= scsi_sk_recovered_error) {
1914 		osi->osd_err_pri = 0;
1915 		ret = 0;
1916 	} else if (osi->additional_code == scsi_invalid_field_in_cdb) {
1917 		if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) {
1918 			osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES;
1919 			ret = -EFAULT; /* caller should recover from this */
1920 		} else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) {
1921 			osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND;
1922 			ret = -ENOENT;
1923 		} else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) {
1924 			osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS;
1925 			ret = -EACCES;
1926 		} else {
1927 			osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
1928 			ret = -EINVAL;
1929 		}
1930 	} else if (osi->additional_code == osd_quota_error) {
1931 		osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE;
1932 		ret = -ENOSPC;
1933 	} else if (_is_osd_security_code(osi->additional_code)) {
1934 		osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
1935 		ret = -EINVAL;
1936 	} else {
1937 		osi->osd_err_pri = OSD_ERR_PRI_EIO;
1938 		ret = -EIO;
1939 	}
1940 
1941 	if (!or->out.residual)
1942 		or->out.residual = or->out.total_bytes;
1943 	if (!or->in.residual)
1944 		or->in.residual = or->in.total_bytes;
1945 
1946 	return ret;
1947 }
1948 EXPORT_SYMBOL(osd_req_decode_sense_full);
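
/*
 * Editor's note -- illustrative sketch only, not part of the original driver.
 * It shows how a libosd user (e.g. a filesystem) typically consumes the sense
 * decoder above after executing a request; the helper name is hypothetical.
 */
static int __unused _osd_decode_sense_example(struct osd_request *or)
{
	struct osd_sense_info osi;
	int ret = osd_req_decode_sense_full(or, &osi, true /* silent */,
					    NULL, 0, NULL, 0);

	/* osd_err_pri classifies the failure; OSD_ERR_PRI_NOT_FOUND pairs
	 * with -ENOENT in the analyze section above */
	if (ret && osi.osd_err_pri == OSD_ERR_PRI_NOT_FOUND)
		OSD_DEBUG("object 0x%llx does not exist\n", _LLU(osi.obj.id));

	return ret;
}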
1949 
1950 /*
1951  * Implementation of osd_sec.h API
1952  * TODO: Move to a separate osd_sec.c file at a later stage.
1953  */
1954 
1955 enum { OSD_SEC_CAP_V1_ALL_CAPS =
1956 	OSD_SEC_CAP_APPEND | OSD_SEC_CAP_OBJ_MGMT | OSD_SEC_CAP_REMOVE   |
1957 	OSD_SEC_CAP_CREATE | OSD_SEC_CAP_SET_ATTR | OSD_SEC_CAP_GET_ATTR |
1958 	OSD_SEC_CAP_WRITE  | OSD_SEC_CAP_READ     | OSD_SEC_CAP_POL_SEC  |
1959 	OSD_SEC_CAP_GLOBAL | OSD_SEC_CAP_DEV_MGMT
1960 };
1961 
1962 enum { OSD_SEC_CAP_V2_ALL_CAPS =
1963 	OSD_SEC_CAP_V1_ALL_CAPS | OSD_SEC_CAP_QUERY | OSD_SEC_CAP_M_OBJECT
1964 };
1965 
1966 void osd_sec_init_nosec_doall_caps(void *caps,
1967 	const struct osd_obj_id *obj, bool is_collection, const bool is_v1)
1968 {
1969 	struct osd_capability *cap = caps;
1970 	u8 type;
1971 	u8 descriptor_type;
1972 
1973 	if (likely(obj->id)) {
1974 		if (unlikely(is_collection)) {
1975 			type = OSD_SEC_OBJ_COLLECTION;
1976 			descriptor_type = is_v1 ? OSD_SEC_OBJ_DESC_OBJ :
1977 						  OSD_SEC_OBJ_DESC_COL;
1978 		} else {
1979 			type = OSD_SEC_OBJ_USER;
1980 			descriptor_type = OSD_SEC_OBJ_DESC_OBJ;
1981 		}
1982 		WARN_ON(!obj->partition);
1983 	} else {
1984 		type = obj->partition ? OSD_SEC_OBJ_PARTITION :
1985 					OSD_SEC_OBJ_ROOT;
1986 		descriptor_type = OSD_SEC_OBJ_DESC_PAR;
1987 	}
1988 
1989 	memset(cap, 0, sizeof(*cap));
1990 
1991 	cap->h.format = OSD_SEC_CAP_FORMAT_VER1;
1992 	cap->h.integrity_algorithm__key_version = 0; /* MAKE_BYTE(0, 0); */
1993 	cap->h.security_method = OSD_SEC_NOSEC;
1994 /*	cap->expiration_time;
1995 	cap->AUDIT[30-10];
1996 	cap->discriminator[42-30];
1997 	cap->object_created_time; */
1998 	cap->h.object_type = type;
1999 	osd_sec_set_caps(&cap->h, OSD_SEC_CAP_V1_ALL_CAPS);
2000 	cap->h.object_descriptor_type = descriptor_type;
2001 	cap->od.obj_desc.policy_access_tag = 0;
2002 	cap->od.obj_desc.allowed_partition_id = cpu_to_be64(obj->partition);
2003 	cap->od.obj_desc.allowed_object_id = cpu_to_be64(obj->id);
2004 }
2005 EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps);
2006 
2007 /* FIXME: Extract version from caps pointer.
2008  *        Also Pete's target only supports caps from OSDv1 for now
2009  */
2010 void osd_set_caps(struct osd_cdb *cdb, const void *caps)
2011 {
2012 	bool is_ver1 = true;
2013 	/* NOTE: They start at same address */
2014 	memcpy(&cdb->v1.caps, caps, is_ver1 ? OSDv1_CAP_LEN : OSD_CAP_LEN);
2015 }
2016 
2017 bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused)
2018 {
2019 	return false;
2020 }
2021 
2022 void osd_sec_sign_cdb(struct osd_cdb *ocdb __unused, const u8 *cap_key __unused)
2023 {
2024 }
2025 
2026 void osd_sec_sign_data(void *data_integ __unused,
2027 		       struct bio *bio __unused, const u8 *cap_key __unused)
2028 {
2029 }
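
/*
 * Editor's note -- illustrative sketch only, not part of the original driver.
 * With OSD_SEC_NOSEC capabilities the cap_key is never used: osd_sec_sign_cdb()
 * and osd_sec_sign_data() above are no-ops and osd_is_sec_alldata() returns
 * false, so NULL can be passed to osd_finalize_request(). The request 'or' is
 * assumed to have been prepared elsewhere (e.g. by osd_start_request() and one
 * of the osd_req_* encoders); the helper name is hypothetical.
 */
static int __unused _osd_nosec_finalize_example(struct osd_request *or,
	const struct osd_obj_id *obj)
{
	u8 caps[OSD_CAP_LEN];

	/* grant all capability bits on @obj, with no security method */
	osd_sec_init_nosec_doall_caps(caps, obj, false, osd_req_is_ver1(or));

	/* signing is a no-op under NOSEC, so no cap_key is needed */
	return osd_finalize_request(or, 0, caps, NULL);
}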
2030 
2031 /*
2032  * Declared in osd_protocol.h
2033  * 4.12.5 Data-In and Data-Out buffer offsets
2034  * byte offset = mantissa * (2^(exponent+8))
2035  * Returns the smallest allowed encoded offset that contains given @offset
2036  * The actual encoded offset returned is @offset + *@padding.
2037  */
2038 osd_cdb_offset __osd_encode_offset(
2039 	u64 offset, unsigned *padding, int min_shift, int max_shift)
2040 {
2041 	u64 try_offset = -1, mod, align;
2042 	osd_cdb_offset be32_offset;
2043 	int shift;
2044 
2045 	*padding = 0;
2046 	if (!offset)
2047 		return 0;
2048 
2049 	for (shift = min_shift; shift < max_shift; ++shift) {
2050 		try_offset = offset >> shift;
2051 		if (try_offset < (1 << OSD_OFFSET_MAX_BITS))
2052 			break;
2053 	}
2054 
2055 	BUG_ON(shift == max_shift);
2056 
2057 	align = 1 << shift;
2058 	mod = offset & (align - 1);
2059 	if (mod) {
2060 		*padding = align - mod;
2061 		try_offset += 1;
2062 	}
2063 
2064 	try_offset |= ((shift - 8) & 0xf) << 28;
2065 	be32_offset = cpu_to_be32((u32)try_offset);
2066 
2067 	OSD_DEBUG("offset=%llu mantissa=%llu exp=%d encoded=%x pad=%d\n",
2068 		 _LLU(offset), _LLU(try_offset & 0x0FFFFFFF), shift,
2069 		 be32_offset, *padding);
2070 	return be32_offset;
2071 }
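
/*
 * Editor's note -- worked example of the offset encoding above, added for
 * illustration only. The shift bounds 8 and 16 are placeholders for the real
 * min/max shift limits used by the callers and are assumptions here.
 * For offset = 1048676 (1 MiB + 100): shift 8 gives mantissa 1048676 >> 8 =
 * 4096 with a remainder of 100, so the mantissa is rounded up to 4097 and
 * *padding = 156. The result decodes to 4097 * 2^(0+8) = 1048832 = offset+pad.
 */
static void __unused _osd_encode_offset_example(void)
{
	unsigned pad;
	osd_cdb_offset enc;

	/* 1 MiB + 100 is not a multiple of 2^8, so it is rounded up */
	enc = __osd_encode_offset(1048676, &pad, 8, 16);

	/* expect pad == 156; enc holds exponent 0 and mantissa 4097 */
	OSD_DEBUG("example encoded=%x pad=%u\n", enc, pad);
}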
2072