/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_plain.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC

#include "../include/obd_support.h"
#include "../include/obd_cksum.h"
#include "../include/obd_class.h"
#include "../include/lustre_net.h"
#include "../include/lustre_sec.h"
#include "ptlrpc_internal.h"

struct plain_sec {
	struct ptlrpc_sec	pls_base;
	rwlock_t		pls_lock;
	struct ptlrpc_cli_ctx	*pls_ctx;
};

static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
	return container_of(sec, struct plain_sec, pls_base);
}

static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops    plain_ctx_ops;
static struct ptlrpc_svc_ctx    plain_svc_ctx;

static unsigned int plain_at_offset;

/*
 * For simplicity, plain policy RPCs use a fixed layout.
 */
#define PLAIN_PACK_SEGMENTS	(4)

#define PLAIN_PACK_HDR_OFF	(0)
#define PLAIN_PACK_MSG_OFF	(1)
#define PLAIN_PACK_USER_OFF	(2)
#define PLAIN_PACK_BULK_OFF	(3)

#define PLAIN_FL_USER		(0x01)
#define PLAIN_FL_BULK		(0x02)

struct plain_header {
	__u8	ph_ver;			/* 0 */
	__u8	ph_flags;
	__u8	ph_sp;			/* source */
	__u8	ph_bulk_hash_alg;	/* complete flavor desc */
	__u8	ph_pad[4];
};

struct plain_bulk_token {
	__u8	pbt_hash[8];
};

#define PLAIN_BSD_SIZE \
	(sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))

/****************************************
 * bulk checksum helpers		*
 ****************************************/

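/*
 * Locate and sanity-check the bulk security descriptor carried in the
 * fixed bulk segment.  The plain policy only ever uses the NULL and
 * INTG bulk services, so anything else is a protocol error.
 */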
static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
{
	struct ptlrpc_bulk_sec_desc *bsd;

	if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
		return -EPROTO;

	bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
	if (bsd == NULL) {
		CERROR("bulk sec desc has short size %d\n",
		       lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
		return -EPROTO;
	}

	if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
	    bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
		CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
		return -EPROTO;
	}

	return 0;
}

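/*
 * Compute the bulk checksum of @desc into @token, or do nothing for the
 * NULL hash algorithm.
 */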
static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
				    __u8 hash_alg,
				    struct plain_bulk_token *token)
{
	if (hash_alg == BULK_HASH_ALG_NULL)
		return 0;

	memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
	return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
					 sizeof(token->pbt_hash));
}

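/*
 * Recompute the bulk checksum and compare it against the received
 * token; returns -EACCES on mismatch.
 */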
static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
				  __u8 hash_alg,
				  struct plain_bulk_token *tokenr)
{
	struct plain_bulk_token tokenv;
	int rc;

	if (hash_alg == BULK_HASH_ALG_NULL)
		return 0;

	memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
	rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
				       sizeof(tokenv.pbt_hash));
	if (rc)
		return rc;

	if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
		return -EACCES;
	return 0;
}

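/*
 * Fault-injection helper: flip one bit in the first non-empty page of
 * the bulk so that checksum verification fails on the other end.
 */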
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
	char *ptr;
	unsigned int off, i;

	for (i = 0; i < desc->bd_iov_count; i++) {
		if (desc->bd_iov[i].kiov_len == 0)
			continue;

		ptr = kmap(desc->bd_iov[i].kiov_page);
		off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
		ptr[off] ^= 0x1;
		kunmap(desc->bd_iov[i].kiov_page);
		return;
	}
}

/****************************************
 * cli_ctx apis				*
 ****************************************/

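/*
 * The plain context is always uptodate and never refreshed, so getting
 * here indicates a bug.
 */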
static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
	/* should never reach here */
	LBUG();
	return 0;
}

static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
	return 0;
}

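/*
 * "Sign" an outgoing request: plain provides no cryptographic
 * signature, only a plain_header describing the flags and the bulk
 * hash algorithm in use.
 */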
static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_reqbuf;
	struct plain_header *phdr;

	msg->lm_secflvr = req->rq_flvr.sf_rpc;

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
	phdr->ph_ver = 0;
	phdr->ph_flags = 0;
	phdr->ph_sp = ctx->cc_sec->ps_part;
	phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

	if (req->rq_pack_udesc)
		phdr->ph_flags |= PLAIN_FL_USER;
	if (req->rq_pack_bulk)
		phdr->ph_flags |= PLAIN_FL_BULK;

	req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
						 msg->lm_buflens);
	return 0;
}

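/*
 * Verify an incoming reply: check the plain_header, validate the CRC32
 * checksum of early replies, and unpack the bulk descriptor when one
 * was expected.
 */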
static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_repdata;
	struct plain_header *phdr;
	__u32 cksum;
	int swabbed;

	if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
		CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
		return -EPROTO;
	}

	swabbed = ptlrpc_rep_need_swab(req);

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
	if (phdr == NULL) {
		CERROR("missing plain header\n");
		return -EPROTO;
	}

	if (phdr->ph_ver != 0) {
		CERROR("Invalid header version\n");
		return -EPROTO;
	}

	/* expect no user desc in reply */
	if (phdr->ph_flags & PLAIN_FL_USER) {
		CERROR("Unexpected udesc flag in reply\n");
		return -EPROTO;
	}

	if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
		CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
		       req->rq_flvr.u_bulk.hash.hash_alg);
		return -EPROTO;
	}

	if (unlikely(req->rq_early)) {
		unsigned int hsize = 4;

		cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
				lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
				lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
				NULL, 0, (unsigned char *)&cksum, &hsize);
		if (cksum != msg->lm_cksum) {
			CDEBUG(D_SEC,
			       "early reply checksum mismatch: %08x != %08x\n",
			       cpu_to_le32(cksum), msg->lm_cksum);
			return -EINVAL;
		}
	} else {
		/* whether we sent with bulk or not, we expect the same
		 * in the reply, except for early replies */
		if (!req->rq_early &&
		    !equi(req->rq_pack_bulk == 1,
			  phdr->ph_flags & PLAIN_FL_BULK)) {
			CERROR("%s bulk checksum in reply\n",
			       req->rq_pack_bulk ? "Missing" : "Unexpected");
			return -EPROTO;
		}

		if (phdr->ph_flags & PLAIN_FL_BULK) {
			if (plain_unpack_bsd(msg, swabbed))
				return -EPROTO;
		}
	}

	req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
	req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
	return 0;
}

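/*
 * Fill in the request's bulk security descriptor and, for bulk writes,
 * compute the checksum that the server will verify.  Bulk reads are
 * checksummed by the server and verified on completion instead.
 */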
static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
			struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_bulk_sec_desc *bsd;
	struct plain_bulk_token *token;
	int rc;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

	bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	token = (struct plain_bulk_token *) bsd->bsd_data;

	bsd->bsd_version = 0;
	bsd->bsd_flags = 0;
	bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

	if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	if (req->rq_bulk_read)
		return 0;

	rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				      token);
	if (rc) {
		CERROR("bulk write: failed to compute checksum: %d\n", rc);
	} else {
		/*
		 * for sending we only compute a wrong checksum instead
		 * of corrupting the data, so it is still correct on a redo
		 */
		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
		    req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
			token->pbt_hash[0] ^= 0x1;
	}

	return rc;
}

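/*
 * Unwrap a bulk transfer on the client: for writes just check the
 * server's error flag; for reads trim the iov to the bytes actually
 * transferred, then verify the server-supplied checksum.
 */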
static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
			  struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_bulk_sec_desc *bsdv;
	struct plain_bulk_token *tokenv;
	int rc;
	int i, nob;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
	LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

	bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
	tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

	if (req->rq_bulk_write) {
		if (bsdv->bsd_flags & BSD_FL_ERR)
			return -EIO;
		return 0;
	}

	/* fix the actual data size */
	for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
		if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
			desc->bd_iov[i].kiov_len =
				desc->bd_nob_transferred - nob;
		}
		nob += desc->bd_iov[i].kiov_len;
	}

	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				    tokenv);
	if (rc)
		CERROR("bulk read: client verify failed: %d\n", rc);

	return rc;
}

/****************************************
 * sec apis				*
 ****************************************/

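/*
 * Install the single client context shared by the whole plain sec.
 * Allocation failure is tolerated if another thread has already
 * installed a context.  Returns the context with a reference held for
 * the caller, or NULL.
 */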
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
	struct ptlrpc_cli_ctx *ctx, *ctx_new;

	ctx_new = kzalloc(sizeof(*ctx_new), GFP_NOFS);

	write_lock(&plsec->pls_lock);

	ctx = plsec->pls_ctx;
	if (ctx) {
		atomic_inc(&ctx->cc_refcount);

		kfree(ctx_new);
	} else if (ctx_new) {
		ctx = ctx_new;

		atomic_set(&ctx->cc_refcount, 1); /* for cache */
		ctx->cc_sec = &plsec->pls_base;
		ctx->cc_ops = &plain_ctx_ops;
		ctx->cc_expire = 0;
		ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
		ctx->cc_vcred.vc_uid = 0;
		spin_lock_init(&ctx->cc_lock);
		INIT_LIST_HEAD(&ctx->cc_req_list);
		INIT_LIST_HEAD(&ctx->cc_gc_chain);

		plsec->pls_ctx = ctx;
		atomic_inc(&plsec->pls_base.ps_nctx);
		atomic_inc(&plsec->pls_base.ps_refcount);

		atomic_inc(&ctx->cc_refcount); /* for caller */
	}

	write_unlock(&plsec->pls_lock);

	return ctx;
}

static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
	struct plain_sec *plsec = sec2plsec(sec);

	LASSERT(sec->ps_policy == &plain_policy);
	LASSERT(sec->ps_import);
	LASSERT(atomic_read(&sec->ps_refcount) == 0);
	LASSERT(atomic_read(&sec->ps_nctx) == 0);
	LASSERT(plsec->pls_ctx == NULL);

	class_import_put(sec->ps_import);

	kfree(plsec);
}

static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
	sec->ps_dying = 1;
}

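/*
 * Create a plain security instance for an import.  For a reverse sec
 * (svc_ctx != NULL) the client context is installed immediately.
 */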
static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
				    struct ptlrpc_svc_ctx *svc_ctx,
				    struct sptlrpc_flavor *sf)
{
	struct plain_sec *plsec;
	struct ptlrpc_sec *sec;
	struct ptlrpc_cli_ctx *ctx;

	LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

	plsec = kzalloc(sizeof(*plsec), GFP_NOFS);
	if (!plsec)
		return NULL;

	/*
	 * initialize plain_sec
	 */
	rwlock_init(&plsec->pls_lock);
	plsec->pls_ctx = NULL;

	sec = &plsec->pls_base;
	sec->ps_policy = &plain_policy;
	atomic_set(&sec->ps_refcount, 0);
	atomic_set(&sec->ps_nctx, 0);
	sec->ps_id = sptlrpc_get_next_secid();
	sec->ps_import = class_import_get(imp);
	sec->ps_flvr = *sf;
	spin_lock_init(&sec->ps_lock);
	INIT_LIST_HEAD(&sec->ps_gc_list);
	sec->ps_gc_interval = 0;
	sec->ps_gc_next = 0;

	/* install ctx immediately if this is a reverse sec */
	if (svc_ctx) {
		ctx = plain_sec_install_ctx(plsec);
		if (ctx == NULL) {
			plain_destroy_sec(sec);
			return NULL;
		}
		sptlrpc_cli_ctx_put(ctx, 1);
	}

	return sec;
}

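/*
 * Context lookup ignores the credentials: every caller shares the one
 * cached plain context, which is created on first use.
 */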
static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
					struct vfs_cred *vcred,
					int create, int remove_dead)
{
	struct plain_sec *plsec = sec2plsec(sec);
	struct ptlrpc_cli_ctx *ctx;

	read_lock(&plsec->pls_lock);
	ctx = plsec->pls_ctx;
	if (ctx)
		atomic_inc(&ctx->cc_refcount);
	read_unlock(&plsec->pls_lock);

	if (unlikely(ctx == NULL))
		ctx = plain_sec_install_ctx(plsec);

	return ctx;
}

static
void plain_release_ctx(struct ptlrpc_sec *sec,
		       struct ptlrpc_cli_ctx *ctx, int sync)
{
	LASSERT(atomic_read(&sec->ps_refcount) > 0);
	LASSERT(atomic_read(&sec->ps_nctx) > 0);
	LASSERT(atomic_read(&ctx->cc_refcount) == 0);
	LASSERT(ctx->cc_sec == sec);

	kfree(ctx);

	atomic_dec(&sec->ps_nctx);
	sptlrpc_sec_put(sec);
}

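/*
 * Flushing only makes sense for "all users" (uid == -1) since there is
 * a single shared context: drop it from the cache and release it.
 */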
static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
			  uid_t uid, int grace, int force)
{
	struct plain_sec *plsec = sec2plsec(sec);
	struct ptlrpc_cli_ctx *ctx;

	/* do nothing unless the caller wants to flush for 'all' */
	if (uid != -1)
		return 0;

	write_lock(&plsec->pls_lock);
	ctx = plsec->pls_ctx;
	plsec->pls_ctx = NULL;
	write_unlock(&plsec->pls_lock);

	if (ctx)
		sptlrpc_cli_ctx_put(ctx, 1);
	return 0;
}

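/*
 * Allocate (or reuse from the pool) the request buffer and lay out the
 * fixed segments: header, message, optional user descriptor and
 * optional bulk descriptor.
 */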
static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req,
		       int msgsize)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int alloc_len;

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_udesc)
		buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

	if (req->rq_pack_bulk) {
		LASSERT(req->rq_bulk_read || req->rq_bulk_write);
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
	}

	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	if (!req->rq_reqbuf) {
		LASSERT(!req->rq_pool);

		alloc_len = size_roundup_power2(alloc_len);
		req->rq_reqbuf = libcfs_kvzalloc(alloc_len, GFP_NOFS);
		if (!req->rq_reqbuf)
			return -ENOMEM;

		req->rq_reqbuf_len = alloc_len;
	} else {
		LASSERT(req->rq_pool);
		LASSERT(req->rq_reqbuf_len >= alloc_len);
		memset(req->rq_reqbuf, 0, alloc_len);
	}

	lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
	req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);

	if (req->rq_pack_udesc)
		sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);

	return 0;
}

static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req)
{
	if (!req->rq_pool) {
		kvfree(req->rq_reqbuf);
		req->rq_reqbuf = NULL;
		req->rq_reqbuf_len = 0;
	}
}

static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req,
		       int msgsize)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int alloc_len;

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_bulk) {
		LASSERT(req->rq_bulk_read || req->rq_bulk_write);
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
	}

	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	/* add space for early reply */
	alloc_len += plain_at_offset;

	alloc_len = size_roundup_power2(alloc_len);

	req->rq_repbuf = libcfs_kvzalloc(alloc_len, GFP_NOFS);
	if (!req->rq_repbuf)
		return -ENOMEM;

	req->rq_repbuf_len = alloc_len;
	return 0;
}

static
void plain_free_repbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req)
{
	kvfree(req->rq_repbuf);
	req->rq_repbuf = NULL;
	req->rq_repbuf_len = 0;
}

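/*
 * Grow one segment of an already-packed request.  Both the embedded
 * message and wrapper sizes are recomputed; if the existing buffer is
 * too small, a bigger one is allocated and the contents copied over.
 */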
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
			 struct ptlrpc_request *req,
			 int segment, int newsize)
{
	struct lustre_msg *newbuf;
	int oldsize;
	int newmsg_size, newbuf_size;

	LASSERT(req->rq_reqbuf);
	LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
	LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
		req->rq_reqmsg);

	/* compute new embedded msg size */
	oldsize = req->rq_reqmsg->lm_buflens[segment];
	req->rq_reqmsg->lm_buflens[segment] = newsize;
	newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
					 req->rq_reqmsg->lm_buflens);
	req->rq_reqmsg->lm_buflens[segment] = oldsize;

	/* compute new wrapper msg size */
	oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
	req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
	newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
					 req->rq_reqbuf->lm_buflens);
	req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

	/* request from pool should always have enough buffer */
	LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

	if (req->rq_reqbuf_len < newbuf_size) {
		newbuf_size = size_roundup_power2(newbuf_size);

		newbuf = libcfs_kvzalloc(newbuf_size, GFP_NOFS);
		if (newbuf == NULL)
			return -ENOMEM;

		/* Must lock this, so that an otherwise unprotected change
		 * of rq_reqmsg does not race with the parallel threads
		 * traversing imp_replay_list.  See LU-3333.
		 * This is a band-aid at best; we really need to deal with
		 * this in the request enlarging code, before the unpacking
		 * that's already there. */
		if (req->rq_import)
			spin_lock(&req->rq_import->imp_lock);

		memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

		kvfree(req->rq_reqbuf);
		req->rq_reqbuf = newbuf;
		req->rq_reqbuf_len = newbuf_size;
		req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
						PLAIN_PACK_MSG_OFF, 0);

		if (req->rq_import)
			spin_unlock(&req->rq_import->imp_lock);
	}

	_sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
				     newmsg_size);
	_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

	req->rq_reqlen = newmsg_size;
	return 0;
}

/****************************************
 * service apis				*
 ****************************************/

static struct ptlrpc_svc_ctx plain_svc_ctx = {
	.sc_refcount    = ATOMIC_INIT(1),
	.sc_policy      = &plain_policy,
};

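/*
 * Accept an incoming plain request on the server: validate the flavor
 * and plain_header, then unpack the optional user and bulk descriptors.
 */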
static
int plain_accept(struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_reqbuf;
	struct plain_header *phdr;
	int swabbed;

	LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
		SPTLRPC_POLICY_PLAIN);

	if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
	    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
	    SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
	    SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
		CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
		return SECSVC_DROP;
	}

	if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
		CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
		return SECSVC_DROP;
	}

	swabbed = ptlrpc_req_need_swab(req);

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
	if (phdr == NULL) {
		CERROR("missing plain header\n");
		return -EPROTO;
	}

	if (phdr->ph_ver != 0) {
		CERROR("Invalid header version\n");
		return -EPROTO;
	}

	if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
		CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
		return -EPROTO;
	}

	req->rq_sp_from = phdr->ph_sp;
	req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;

	if (phdr->ph_flags & PLAIN_FL_USER) {
		if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
					     swabbed)) {
			CERROR("Mal-formed user descriptor\n");
			return SECSVC_DROP;
		}

		req->rq_pack_udesc = 1;
		req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
	}

	if (phdr->ph_flags & PLAIN_FL_BULK) {
		if (plain_unpack_bsd(msg, swabbed))
			return SECSVC_DROP;

		req->rq_pack_bulk = 1;
	}

	req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
	req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

	req->rq_svc_ctx = &plain_svc_ctx;
	atomic_inc(&req->rq_svc_ctx->sc_refcount);

	return SECSVC_OK;
}

static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
	struct ptlrpc_reply_state *rs;
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int rs_size = sizeof(*rs);

	LASSERT(msgsize % 8 == 0);

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;

	rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	rs = req->rq_reply_state;

	if (rs) {
		/* pre-allocated */
		LASSERT(rs->rs_size >= rs_size);
	} else {
		rs = libcfs_kvzalloc(rs_size, GFP_NOFS);
		if (rs == NULL)
			return -ENOMEM;

		rs->rs_size = rs_size;
	}

	rs->rs_svc_ctx = req->rq_svc_ctx;
	atomic_inc(&req->rq_svc_ctx->sc_refcount);
	rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
	rs->rs_repbuf_len = rs_size - sizeof(*rs);

	lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
	rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

	req->rq_reply_state = rs;
	return 0;
}

static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
	LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
	atomic_dec(&rs->rs_svc_ctx->sc_refcount);

	if (!rs->rs_prealloc)
		kvfree(rs);
}

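/*
 * Finalize the reply: shrink the message if it is oversized, fill in
 * the plain header, and checksum the body of early (non-final) replies
 * so the client can verify them.
 */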
static
int plain_authorize(struct ptlrpc_request *req)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct lustre_msg_v2 *msg = rs->rs_repbuf;
	struct plain_header *phdr;
	int len;

	LASSERT(rs);
	LASSERT(msg);

	if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
		len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
					req->rq_replen, 1);
	else
		len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

	msg->lm_secflvr = req->rq_flvr.sf_rpc;

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
	phdr->ph_ver = 0;
	phdr->ph_flags = 0;
	phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

	if (req->rq_pack_bulk)
		phdr->ph_flags |= PLAIN_FL_BULK;

	rs->rs_repdata_len = len;

	if (likely(req->rq_packed_final)) {
		if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
			req->rq_reply_off = plain_at_offset;
		else
			req->rq_reply_off = 0;
	} else {
		unsigned int hsize = 4;

		cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
			lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
			lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
			NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
		req->rq_reply_off = 0;
	}

	return 0;
}

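/*
 * Server side of a bulk write: echo the client's bulk descriptor into
 * the reply and verify the checksum of the received data, flagging an
 * error in the reply descriptor on mismatch.
 */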
static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	struct plain_bulk_token *tokenr;
	int rc;

	LASSERT(req->rq_bulk_write);
	LASSERT(req->rq_pack_bulk);

	bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
	bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);

	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				    tokenr);
	if (rc) {
		bsdv->bsd_flags |= BSD_FL_ERR;
		CERROR("bulk write: server verify failed: %d\n", rc);
	}

	return rc;
}

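/*
 * Server side of a bulk read: compute the checksum of the outgoing data
 * into the reply descriptor so the client can verify it.
 */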
static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	struct plain_bulk_token *tokenv;
	int rc;

	LASSERT(req->rq_bulk_read);
	LASSERT(req->rq_pack_bulk);

	bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
	tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				      tokenv);
	if (rc) {
		CERROR("bulk read: server failed to compute checksum: %d\n",
		       rc);
	} else {
		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
			corrupt_bulk_data(desc);
	}

	return rc;
}

static struct ptlrpc_ctx_ops plain_ctx_ops = {
	.refresh		= plain_ctx_refresh,
	.validate		= plain_ctx_validate,
	.sign			= plain_ctx_sign,
	.verify			= plain_ctx_verify,
	.wrap_bulk		= plain_cli_wrap_bulk,
	.unwrap_bulk		= plain_cli_unwrap_bulk,
};

static struct ptlrpc_sec_cops plain_sec_cops = {
	.create_sec		= plain_create_sec,
	.destroy_sec		= plain_destroy_sec,
	.kill_sec		= plain_kill_sec,
	.lookup_ctx		= plain_lookup_ctx,
	.release_ctx		= plain_release_ctx,
	.flush_ctx_cache	= plain_flush_ctx_cache,
	.alloc_reqbuf		= plain_alloc_reqbuf,
	.free_reqbuf		= plain_free_reqbuf,
	.alloc_repbuf		= plain_alloc_repbuf,
	.free_repbuf		= plain_free_repbuf,
	.enlarge_reqbuf		= plain_enlarge_reqbuf,
};

static struct ptlrpc_sec_sops plain_sec_sops = {
	.accept			= plain_accept,
	.alloc_rs		= plain_alloc_rs,
	.authorize		= plain_authorize,
	.free_rs		= plain_free_rs,
	.unwrap_bulk		= plain_svc_unwrap_bulk,
	.wrap_bulk		= plain_svc_wrap_bulk,
};

static struct ptlrpc_sec_policy plain_policy = {
	.sp_owner		= THIS_MODULE,
	.sp_name		= "plain",
	.sp_policy		= SPTLRPC_POLICY_PLAIN,
	.sp_cops		= &plain_sec_cops,
	.sp_sops		= &plain_sec_sops,
};

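/*
 * Compute the early-reply offset once at init time and register the
 * plain policy with the sptlrpc framework.
 */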
int sptlrpc_plain_init(void)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int rc;

	buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
	plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	rc = sptlrpc_register_policy(&plain_policy);
	if (rc)
		CERROR("failed to register: %d\n", rc);

	return rc;
}

void sptlrpc_plain_fini(void)
{
	int rc;

	rc = sptlrpc_unregister_policy(&plain_policy);
	if (rc)
		CERROR("cannot unregister: %d\n", rc);
}