/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#ifndef _LUSTRE_SEC_H_
#define _LUSTRE_SEC_H_

/** \defgroup sptlrpc sptlrpc
 *
 * @{
 */

/*
 * opaque declarations, to avoid including other headers
 */
struct obd_import;
struct obd_export;
struct ptlrpc_request;
struct ptlrpc_reply_state;
struct ptlrpc_bulk_desc;
struct brw_page;
/* Linux specific */
struct key;
struct seq_file;

/*
 * forward declarations
 */
struct ptlrpc_sec_policy;
struct ptlrpc_sec_cops;
struct ptlrpc_sec_sops;
struct ptlrpc_sec;
struct ptlrpc_svc_ctx;
struct ptlrpc_cli_ctx;
struct ptlrpc_ctx_ops;

/**
 * \addtogroup flavor flavor
 *
 * An RPC flavor is represented by a 32-bit integer.  The high 12 bits are
 * currently unused and must be set to 0, reserved for future expansion.
 * <pre>
 * ------------------------------------------------------------------------
 * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy) |
 * ------------------------------------------------------------------------
 * </pre>
 *
 * @{
 */

/*
 * flavor constants
 */
enum sptlrpc_policy {
        SPTLRPC_POLICY_NULL             = 0,
        SPTLRPC_POLICY_PLAIN            = 1,
        SPTLRPC_POLICY_GSS              = 2,
        SPTLRPC_POLICY_MAX,
};

enum sptlrpc_mech_null {
        SPTLRPC_MECH_NULL               = 0,
        SPTLRPC_MECH_NULL_MAX,
};

enum sptlrpc_mech_plain {
        SPTLRPC_MECH_PLAIN              = 0,
        SPTLRPC_MECH_PLAIN_MAX,
};

enum sptlrpc_mech_gss {
        SPTLRPC_MECH_GSS_NULL           = 0,
        SPTLRPC_MECH_GSS_KRB5           = 1,
        SPTLRPC_MECH_GSS_MAX,
};

enum sptlrpc_service_type {
        SPTLRPC_SVC_NULL                = 0,    /**< no security */
        SPTLRPC_SVC_AUTH                = 1,    /**< authentication only */
        SPTLRPC_SVC_INTG                = 2,    /**< integrity */
        SPTLRPC_SVC_PRIV                = 3,    /**< privacy */
        SPTLRPC_SVC_MAX,
};

enum sptlrpc_bulk_type {
        SPTLRPC_BULK_DEFAULT            = 0,    /**< follow rpc flavor */
        SPTLRPC_BULK_HASH               = 1,    /**< hash integrity */
        SPTLRPC_BULK_MAX,
};

enum sptlrpc_bulk_service {
        SPTLRPC_BULK_SVC_NULL           = 0,    /**< no security */
        SPTLRPC_BULK_SVC_AUTH           = 1,    /**< authentication only */
        SPTLRPC_BULK_SVC_INTG           = 2,    /**< integrity */
        SPTLRPC_BULK_SVC_PRIV           = 3,    /**< privacy */
        SPTLRPC_BULK_SVC_MAX,
};

/*
 * compose/extract macros
 */
#define FLVR_POLICY_OFFSET              (0)
#define FLVR_MECH_OFFSET                (4)
#define FLVR_SVC_OFFSET                 (8)
#define FLVR_BULK_TYPE_OFFSET           (12)
#define FLVR_BULK_SVC_OFFSET            (16)

#define MAKE_FLVR(policy, mech, svc, btype, bsvc)                       \
        (((__u32)(policy) << FLVR_POLICY_OFFSET) |                      \
         ((__u32)(mech) << FLVR_MECH_OFFSET) |                          \
         ((__u32)(svc) << FLVR_SVC_OFFSET) |                            \
         ((__u32)(btype) << FLVR_BULK_TYPE_OFFSET) |                    \
         ((__u32)(bsvc) << FLVR_BULK_SVC_OFFSET))

/*
 * extraction
 */
#define SPTLRPC_FLVR_POLICY(flavor)                                     \
        ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xF)
#define SPTLRPC_FLVR_MECH(flavor)                                       \
        ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xF)
#define SPTLRPC_FLVR_SVC(flavor)                                        \
        ((((__u32)(flavor)) >> FLVR_SVC_OFFSET) & 0xF)
#define SPTLRPC_FLVR_BULK_TYPE(flavor)                                  \
        ((((__u32)(flavor)) >> FLVR_BULK_TYPE_OFFSET) & 0xF)
#define SPTLRPC_FLVR_BULK_SVC(flavor)                                   \
        ((((__u32)(flavor)) >> FLVR_BULK_SVC_OFFSET) & 0xF)

#define SPTLRPC_FLVR_BASE(flavor)                                       \
        ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xFFF)
#define SPTLRPC_FLVR_BASE_SUB(flavor)                                   \
        ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xFF)

/*
 * gss subflavors
 */
#define MAKE_BASE_SUBFLVR(mech, svc)                                    \
        ((__u32)(mech) |                                                \
         ((__u32)(svc) << (FLVR_SVC_OFFSET - FLVR_MECH_OFFSET)))

#define SPTLRPC_SUBFLVR_KRB5N                                           \
        MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_NULL)
#define SPTLRPC_SUBFLVR_KRB5A                                           \
        MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_AUTH)
#define SPTLRPC_SUBFLVR_KRB5I                                           \
        MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_INTG)
#define SPTLRPC_SUBFLVR_KRB5P                                           \
        MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_PRIV)

/*
 * "end user" flavors
 */
#define SPTLRPC_FLVR_NULL                                               \
        MAKE_FLVR(SPTLRPC_POLICY_NULL,                                  \
                  SPTLRPC_MECH_NULL,                                    \
                  SPTLRPC_SVC_NULL,                                     \
                  SPTLRPC_BULK_DEFAULT,                                 \
                  SPTLRPC_BULK_SVC_NULL)
#define SPTLRPC_FLVR_PLAIN                                              \
        MAKE_FLVR(SPTLRPC_POLICY_PLAIN,                                 \
                  SPTLRPC_MECH_PLAIN,                                   \
                  SPTLRPC_SVC_NULL,                                     \
                  SPTLRPC_BULK_HASH,                                    \
                  SPTLRPC_BULK_SVC_INTG)
#define SPTLRPC_FLVR_KRB5N                                              \
        MAKE_FLVR(SPTLRPC_POLICY_GSS,                                   \
                  SPTLRPC_MECH_GSS_KRB5,                                \
                  SPTLRPC_SVC_NULL,                                     \
                  SPTLRPC_BULK_DEFAULT,                                 \
                  SPTLRPC_BULK_SVC_NULL)
#define SPTLRPC_FLVR_KRB5A                                              \
        MAKE_FLVR(SPTLRPC_POLICY_GSS,                                   \
                  SPTLRPC_MECH_GSS_KRB5,                                \
                  SPTLRPC_SVC_AUTH,                                     \
                  SPTLRPC_BULK_DEFAULT,                                 \
                  SPTLRPC_BULK_SVC_NULL)
#define SPTLRPC_FLVR_KRB5I                                              \
        MAKE_FLVR(SPTLRPC_POLICY_GSS,                                   \
                  SPTLRPC_MECH_GSS_KRB5,                                \
                  SPTLRPC_SVC_INTG,                                     \
                  SPTLRPC_BULK_DEFAULT,                                 \
                  SPTLRPC_BULK_SVC_INTG)
#define SPTLRPC_FLVR_KRB5P                                              \
        MAKE_FLVR(SPTLRPC_POLICY_GSS,                                   \
                  SPTLRPC_MECH_GSS_KRB5,                                \
                  SPTLRPC_SVC_PRIV,                                     \
                  SPTLRPC_BULK_DEFAULT,                                 \
                  SPTLRPC_BULK_SVC_PRIV)

#define SPTLRPC_FLVR_DEFAULT            SPTLRPC_FLVR_NULL

#define SPTLRPC_FLVR_INVALID            ((__u32) 0xFFFFFFFF)
#define SPTLRPC_FLVR_ANY                ((__u32) 0xFFF00000)

/**
 * extract the useful part from wire flavor
 */
#define WIRE_FLVR(wflvr)                (((__u32) (wflvr)) & 0x000FFFFF)
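
/**
 * Worked example (illustrative only, derived from the macros above): the
 * end-user flavor SPTLRPC_FLVR_KRB5I has the wire value 0x20212 and
 * decomposes through the extraction macros as
 *
 * \code
 *        SPTLRPC_FLVR_POLICY(SPTLRPC_FLVR_KRB5I)    == SPTLRPC_POLICY_GSS
 *        SPTLRPC_FLVR_MECH(SPTLRPC_FLVR_KRB5I)      == SPTLRPC_MECH_GSS_KRB5
 *        SPTLRPC_FLVR_SVC(SPTLRPC_FLVR_KRB5I)       == SPTLRPC_SVC_INTG
 *        SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_KRB5I) == SPTLRPC_BULK_DEFAULT
 *        SPTLRPC_FLVR_BULK_SVC(SPTLRPC_FLVR_KRB5I)  == SPTLRPC_BULK_SVC_INTG
 * \endcode
 */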

/** @} flavor */

static inline void flvr_set_svc(__u32 *flvr, __u32 svc)
{
        LASSERT(svc < SPTLRPC_SVC_MAX);
        *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
                          SPTLRPC_FLVR_MECH(*flvr),
                          svc,
                          SPTLRPC_FLVR_BULK_TYPE(*flvr),
                          SPTLRPC_FLVR_BULK_SVC(*flvr));
}

static inline void flvr_set_bulk_svc(__u32 *flvr, __u32 svc)
{
        LASSERT(svc < SPTLRPC_BULK_SVC_MAX);
        *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
                          SPTLRPC_FLVR_MECH(*flvr),
                          SPTLRPC_FLVR_SVC(*flvr),
                          SPTLRPC_FLVR_BULK_TYPE(*flvr),
                          svc);
}
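
/**
 * Usage sketch (illustrative only): the setters above rewrite a single
 * field while preserving the rest of the flavor, e.g. forcing integrity
 * protection on bulk transfers of an otherwise unchanged flavor:
 *
 * \code
 *        __u32 flvr = SPTLRPC_FLVR_KRB5N;
 *
 *        flvr_set_bulk_svc(&flvr, SPTLRPC_BULK_SVC_INTG);
 *        LASSERT(SPTLRPC_FLVR_BULK_SVC(flvr) == SPTLRPC_BULK_SVC_INTG);
 *        LASSERT(SPTLRPC_FLVR_BASE(flvr) ==
 *                SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N));
 * \endcode
 */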

struct bulk_spec_hash {
        __u8    hash_alg;
};

/**
 * Full description of the flavors being used on a ptlrpc connection,
 * including both the regular RPC and the bulk transfer parts.
 */
struct sptlrpc_flavor {
        /**
         * wire flavor, should be renamed to sf_wire.
         */
        __u32   sf_rpc;
        /**
         * general flags of PTLRPC_SEC_FL_*
         */
        __u32   sf_flags;
        /**
         * rpc flavor specification
         */
        union {
                /* nothing for now */
        } u_rpc;
        /**
         * bulk flavor specification
         */
        union {
                struct bulk_spec_hash   hash;
        } u_bulk;
};
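
/**
 * Minimal sketch (illustrative, not taken from the implementation) of
 * filling in a sptlrpc_flavor by hand.  SPTLRPC_FLVR_PLAIN carries
 * SPTLRPC_BULK_HASH, so a bulk hash algorithm (see sptlrpc_bulk_hash_alg
 * further below) has to be chosen as well:
 *
 * \code
 *        struct sptlrpc_flavor flvr;
 *
 *        memset(&flvr, 0, sizeof(flvr));
 *        flvr.sf_rpc = SPTLRPC_FLVR_PLAIN;
 *        flvr.u_bulk.hash.hash_alg = BULK_HASH_ALG_ADLER32;
 * \endcode
 *
 * Flavors are more commonly obtained by parsing a configuration string
 * with sptlrpc_parse_flavor() than by being built manually.
 */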

/**
 * Identifies which part of Lustre an RPC was generated from.  It is
 * encoded into RPC requests and checked by the ptlrpc service.
 */
enum lustre_sec_part {
        LUSTRE_SP_CLI           = 0,
        LUSTRE_SP_MDT,
        LUSTRE_SP_OST,
        LUSTRE_SP_MGC,
        LUSTRE_SP_MGS,
        LUSTRE_SP_ANY           = 0xFF
};

enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd);

/**
 * A rule specifies a flavor to be used by a ptlrpc connection between
 * two Lustre parts.
 */
struct sptlrpc_rule {
        __u32                   sr_netid;       /* LNET network ID */
        __u8                    sr_from;        /* sec_part */
        __u8                    sr_to;          /* sec_part */
        __u16                   sr_padding;
        struct sptlrpc_flavor   sr_flvr;
};

/**
 * A set of rules in memory.
 *
 * Rules are generated and stored on the MGS, and propagated to MDT, OST,
 * and client when needed.
 */
struct sptlrpc_rule_set {
        int                     srs_nslot;
        int                     srs_nrule;
        struct sptlrpc_rule    *srs_rules;
};

int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr);
bool sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr);

static inline void sptlrpc_rule_set_init(struct sptlrpc_rule_set *set)
{
        memset(set, 0, sizeof(*set));
}
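
/**
 * Illustrative sketch (not actual configuration code) of a rule saying
 * "clients talking to MDTs use krb5i"; the sr_netid value standing for
 * "any network" is an assumption here, and real rules are normally parsed
 * from MGS configuration logs rather than built by hand:
 *
 * \code
 *        struct sptlrpc_rule rule;
 *
 *        memset(&rule, 0, sizeof(rule));
 *        rule.sr_netid = LNET_NIDNET(LNET_NID_ANY);   // assumed "any net"
 *        rule.sr_from = LUSTRE_SP_CLI;
 *        rule.sr_to = LUSTRE_SP_MDT;
 *        rule.sr_flvr.sf_rpc = SPTLRPC_FLVR_KRB5I;
 * \endcode
 */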

int sptlrpc_process_config(struct lustre_cfg *lcfg);
void sptlrpc_conf_log_start(const char *logname);
void sptlrpc_conf_log_stop(const char *logname);
void sptlrpc_conf_log_update_begin(const char *logname);
void sptlrpc_conf_log_update_end(const char *logname);
void sptlrpc_conf_client_adapt(struct obd_device *obd);

/* The maximum length of the security payload.  1024 is enough for
 * Kerberos 5 and is expected to be enough for future mechanisms as well.
 * Only used by the pre-allocated request/reply pool.
 */
#define SPTLRPC_MAX_PAYLOAD     (1024)

struct vfs_cred {
        uint32_t        vc_uid;
        uint32_t        vc_gid;
};

struct ptlrpc_ctx_ops {
        /**
         * To determine whether it's suitable to use the \a ctx for \a vcred.
         */
        int     (*match)       (struct ptlrpc_cli_ctx *ctx,
                                struct vfs_cred *vcred);

        /**
         * To bring the \a ctx uptodate.
         */
        int     (*refresh)     (struct ptlrpc_cli_ctx *ctx);

        /**
         * Validate the \a ctx.
         */
        int     (*validate)    (struct ptlrpc_cli_ctx *ctx);

        /**
         * Force the \a ctx to die.
         */
        void    (*force_die)   (struct ptlrpc_cli_ctx *ctx,
                                int grace);
        int     (*display)     (struct ptlrpc_cli_ctx *ctx,
                                char *buf, int bufsize);

        /**
         * Sign the request message using \a ctx.
         *
         * \pre req->rq_reqmsg points to the request message.
         * \pre req->rq_reqlen is the request message length.
         * \post req->rq_reqbuf points to the request message with signature.
         * \post req->rq_reqdata_len is set to the final request message size.
         *
         * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign().
         */
        int     (*sign)        (struct ptlrpc_cli_ctx *ctx,
                                struct ptlrpc_request *req);

        /**
         * Verify the reply message using \a ctx.
         *
         * \pre req->rq_repdata points to the reply message with signature.
         * \pre req->rq_repdata_len is the total reply message length.
         * \post req->rq_repmsg points to the reply message without signature.
         * \post req->rq_replen is the reply message length.
         *
         * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify().
         */
        int     (*verify)      (struct ptlrpc_cli_ctx *ctx,
                                struct ptlrpc_request *req);

        /**
         * Encrypt the request message using \a ctx.
         *
         * \pre req->rq_reqmsg points to the request message in clear text.
         * \pre req->rq_reqlen is the request message length.
         * \post req->rq_reqbuf points to the request message.
         * \post req->rq_reqdata_len is set to the final request message size.
         *
         * \see gss_cli_ctx_seal().
         */
        int     (*seal)        (struct ptlrpc_cli_ctx *ctx,
                                struct ptlrpc_request *req);

        /**
         * Decrypt the reply message using \a ctx.
         *
         * \pre req->rq_repdata points to the encrypted reply message.
         * \pre req->rq_repdata_len is the total cipher text length.
         * \post req->rq_repmsg points to the reply message in clear text.
         * \post req->rq_replen is the reply message length in clear text.
         *
         * \see gss_cli_ctx_unseal().
         */
        int     (*unseal)      (struct ptlrpc_cli_ctx *ctx,
                                struct ptlrpc_request *req);

        /**
         * Wrap bulk request data.  This is called before wrapping the RPC
         * request message.
         *
         * \pre the bulk buffer is described by desc->bd_iov and
         * desc->bd_iov_count.  Note that for a read it is just a buffer and
         * no data needs to be sent; for a write it contains data in clear
         * text.
         * \post when necessary, ptlrpc_bulk_sec_desc is properly prepared
         * (usually inside the RPC request message).
         * - encryption: the cipher text bulk buffer is described by
         *   desc->bd_enc_iov and desc->bd_iov_count (currently assumes the
         *   iov count remains the same).
         * - otherwise: the bulk buffer is still desc->bd_iov and
         *   desc->bd_iov_count.
         *
         * \return 0: success.
         * \return -ev: error code.
         *
         * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk().
         */
        int     (*wrap_bulk)   (struct ptlrpc_cli_ctx *ctx,
                                struct ptlrpc_request *req,
                                struct ptlrpc_bulk_desc *desc);

        /**
         * Unwrap bulk reply data.  This is called after unwrapping the RPC
         * reply message.
         *
         * \pre the bulk buffer is described by desc->bd_iov/desc->bd_enc_iov
         * and desc->bd_iov_count, according to wrap_bulk().
         * \post the final bulk data in clear text is placed in the buffer
         * described by desc->bd_iov and desc->bd_iov_count.
         * \return +ve nob of actual bulk data in clear text.
         * \return -ve error code.
         *
         * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk().
         */
        int     (*unwrap_bulk) (struct ptlrpc_cli_ctx *ctx,
                                struct ptlrpc_request *req,
                                struct ptlrpc_bulk_desc *desc);
};
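
/**
 * A minimal sketch of how a policy might populate this vector, loosely
 * modeled on the null flavor (hypothetical stubs, not the in-tree
 * null_ctx_ops): a context that matches every credential and passes
 * messages through untouched.
 *
 * \code
 *        static int noop_ctx_match(struct ptlrpc_cli_ctx *ctx,
 *                                  struct vfs_cred *vcred)
 *        {
 *                return 1;       // every credential maps to one context
 *        }
 *
 *        static int noop_ctx_sign(struct ptlrpc_cli_ctx *ctx,
 *                                 struct ptlrpc_request *req)
 *        {
 *                // no transformation: rq_reqbuf/rq_reqdata_len would be
 *                // derived directly from rq_reqmsg/rq_reqlen here
 *                return 0;
 *        }
 *
 *        static struct ptlrpc_ctx_ops noop_ctx_ops = {
 *                .match = noop_ctx_match,
 *                .sign  = noop_ctx_sign,
 *                // refresh, validate, verify, seal, unseal, wrap_bulk,
 *                // unwrap_bulk as required by the flavor
 *        };
 * \endcode
 */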

#define PTLRPC_CTX_NEW_BIT             (0)  /* newly created */
#define PTLRPC_CTX_UPTODATE_BIT        (1)  /* uptodate */
#define PTLRPC_CTX_DEAD_BIT            (2)  /* mark expired gracefully */
#define PTLRPC_CTX_ERROR_BIT           (3)  /* fatal error (refresh, etc.) */
#define PTLRPC_CTX_CACHED_BIT          (8)  /* in ctx cache (hash etc.) */
#define PTLRPC_CTX_ETERNAL_BIT         (9)  /* always valid */

#define PTLRPC_CTX_NEW                 (1 << PTLRPC_CTX_NEW_BIT)
#define PTLRPC_CTX_UPTODATE            (1 << PTLRPC_CTX_UPTODATE_BIT)
#define PTLRPC_CTX_DEAD                (1 << PTLRPC_CTX_DEAD_BIT)
#define PTLRPC_CTX_ERROR               (1 << PTLRPC_CTX_ERROR_BIT)
#define PTLRPC_CTX_CACHED              (1 << PTLRPC_CTX_CACHED_BIT)
#define PTLRPC_CTX_ETERNAL             (1 << PTLRPC_CTX_ETERNAL_BIT)

#define PTLRPC_CTX_STATUS_MASK         (PTLRPC_CTX_NEW_BIT    |        \
                                        PTLRPC_CTX_UPTODATE   |        \
                                        PTLRPC_CTX_DEAD       |        \
                                        PTLRPC_CTX_ERROR)

struct ptlrpc_cli_ctx {
        struct hlist_node       cc_cache;       /* linked into ctx cache */
        atomic_t                cc_refcount;
        struct ptlrpc_sec      *cc_sec;
        struct ptlrpc_ctx_ops  *cc_ops;
        unsigned long           cc_expire;      /* in seconds */
        unsigned int            cc_early_expire:1;
        unsigned long           cc_flags;
        struct vfs_cred         cc_vcred;
        spinlock_t              cc_lock;
        struct list_head        cc_req_list;    /* waiting reqs linked here */
        struct list_head        cc_gc_chain;    /* linked to gc chain */
};

/**
 * client side policy operation vector.
 */
struct ptlrpc_sec_cops {
        /**
         * Given an \a imp, create and initialize a ptlrpc_sec structure.
         * \param ctx service context:
         * - regular import: \a ctx should be NULL;
         * - reverse import: \a ctx is obtained from the incoming request.
         * \param flavor specify what flavor to use.
         *
         * When necessary, the policy module is responsible for taking a
         * reference on the import.
         *
         * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr().
         */
        struct ptlrpc_sec * (*create_sec)  (struct obd_import *imp,
                                            struct ptlrpc_svc_ctx *ctx,
                                            struct sptlrpc_flavor *flavor);

        /**
         * Destructor of ptlrpc_sec.  When called, the refcount has dropped
         * to 0 and all contexts have been destroyed.
         *
         * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr().
         */
        void                (*destroy_sec) (struct ptlrpc_sec *sec);

        /**
         * Notify that this ptlrpc_sec is going to die.  Optionally, the
         * policy module is supposed to set sec->ps_dying and take whatever
         * actions are necessary.
         *
         * \see plain_kill_sec(), gss_sec_kill().
         */
        void                (*kill_sec)    (struct ptlrpc_sec *sec);

        /**
         * Given \a vcred, look up and/or create its context.  The policy
         * module is supposed to maintain its own context cache.
         * XXX currently \a create and \a remove_dead are always 1, perhaps
         * they should be removed completely.
         *
         * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr().
         */
        struct ptlrpc_cli_ctx * (*lookup_ctx)  (struct ptlrpc_sec *sec,
                                                struct vfs_cred *vcred,
                                                int create,
                                                int remove_dead);

        /**
         * Called when the reference count of \a ctx drops to 0.  The policy
         * module is supposed to destroy this context or whatever else
         * according to its cache maintenance mechanism.
         *
         * \param sync if zero, we shouldn't wait for the context to be
         * destroyed completely.
         *
         * \see plain_release_ctx(), gss_sec_release_ctx_kr().
         */
        void                (*release_ctx) (struct ptlrpc_sec *sec,
                                            struct ptlrpc_cli_ctx *ctx,
                                            int sync);

        /**
         * Flush the context cache.
         *
         * \param uid user whose contexts should be flushed; -1 means all
         * contexts.
         * \param grace if zero, the PTLRPC_CTX_UPTODATE_BIT of affected
         * contexts should be cleared immediately.
         * \param force if zero, only idle contexts will be flushed.
         *
         * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr().
         */
        int                 (*flush_ctx_cache)
                                           (struct ptlrpc_sec *sec,
                                            uid_t uid,
                                            int grace,
                                            int force);

        /**
         * Called periodically by the garbage collector to remove dead
         * contexts from the cache.
         *
         * \see gss_sec_gc_ctx_kr().
         */
        void                (*gc_ctx)      (struct ptlrpc_sec *sec);

        /**
         * Given a context \a ctx, install a corresponding reverse service
         * context on the client side.
         * XXX currently it's only used by the GSS module, maybe we should
         * remove this from the general API.
         */
        int                 (*install_rctx)(struct obd_import *imp,
                                            struct ptlrpc_sec *sec,
                                            struct ptlrpc_cli_ctx *ctx);

        /**
         * To allocate a request buffer for \a req.
         *
         * \pre req->rq_reqmsg == NULL.
         * \pre req->rq_reqbuf == NULL, otherwise it must be pre-allocated
         * and we are not supposed to free it.
         * \post on success, req->rq_reqmsg points to a buffer with size
         * at least \a lustre_msg_size.
         *
         * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf().
         */
        int                 (*alloc_reqbuf)(struct ptlrpc_sec *sec,
                                            struct ptlrpc_request *req,
                                            int lustre_msg_size);

        /**
         * To free the request buffer of \a req.
         *
         * \pre req->rq_reqbuf != NULL.
         *
         * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf().
         */
        void                (*free_reqbuf) (struct ptlrpc_sec *sec,
                                            struct ptlrpc_request *req);

        /**
         * To allocate a reply buffer for \a req.
         *
         * \pre req->rq_repbuf == NULL.
         * \post on success, req->rq_repbuf points to a buffer of size
         * req->rq_repbuf_len, which should be large enough to receive a
         * reply transformed from \a lustre_msg_size bytes of clear text.
         *
         * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf().
         */
        int                 (*alloc_repbuf)(struct ptlrpc_sec *sec,
                                            struct ptlrpc_request *req,
                                            int lustre_msg_size);

        /**
         * To free the reply buffer of \a req.
         *
         * \pre req->rq_repbuf != NULL.
         * \post req->rq_repbuf == NULL.
         * \post req->rq_repbuf_len == 0.
         *
         * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf().
         */
        void                (*free_repbuf) (struct ptlrpc_sec *sec,
                                            struct ptlrpc_request *req);

        /**
         * To expand the request buffer of \a req, so that the \a segment in
         * the request message pointed to by req->rq_reqmsg can accommodate
         * at least \a newsize bytes of data.
         *
         * \pre req->rq_reqmsg->lm_buflens[segment] < newsize.
         *
         * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(),
         * gss_enlarge_reqbuf().
         */
        int                 (*enlarge_reqbuf)
                                           (struct ptlrpc_sec *sec,
                                            struct ptlrpc_request *req,
                                            int segment, int newsize);
        /*
         * misc
         */
        int                 (*display)     (struct ptlrpc_sec *sec,
                                            struct seq_file *seq);
};

/**
 * server side policy operation vector.
 */
struct ptlrpc_sec_sops {
        /**
         * Verify an incoming request.
         *
         * \pre the request message is pointed to by req->rq_reqbuf, its size
         * is req->rq_reqdata_len, and the message has been unpacked to
         * host byte order.
         *
         * \retval SECSVC_OK success, req->rq_reqmsg points to the request
         * message in clear text, its size is req->rq_reqlen;
         * req->rq_svc_ctx is set; req->rq_sp_from is decoded from the
         * request.
         * \retval SECSVC_COMPLETE success, the request has been fully
         * processed and the reply message has been prepared;
         * req->rq_sp_from is decoded from the request.
         * \retval SECSVC_DROP failed, this request should be dropped.
         *
         * \see null_accept(), plain_accept(), gss_svc_accept_kr().
         */
        int             (*accept)      (struct ptlrpc_request *req);

        /**
         * Perform security transformation upon the reply message.
         *
         * \pre the reply message is pointed to by
         * req->rq_reply_state->rs_msg, its size is req->rq_replen.
         * \post req->rs_repdata_len is the final message size.
         * \post req->rq_reply_off is set.
         *
         * \see null_authorize(), plain_authorize(), gss_svc_authorize().
         */
        int             (*authorize)   (struct ptlrpc_request *req);

        /**
         * Invalidate server context \a ctx.
         *
         * \see gss_svc_invalidate_ctx().
         */
        void            (*invalidate_ctx)
                                       (struct ptlrpc_svc_ctx *ctx);

        /**
         * Allocate a ptlrpc_reply_state.
         *
         * \param msgsize size of the reply message in clear text.
         * \pre if req->rq_reply_state != NULL, then it's pre-allocated and
         * we should simply use it; otherwise we are responsible for
         * allocating a new one.
         * \post req->rq_reply_state != NULL;
         * \post req->rq_reply_state->rs_msg != NULL;
         *
         * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs().
         */
        int             (*alloc_rs)    (struct ptlrpc_request *req,
                                        int msgsize);

        /**
         * Free a ptlrpc_reply_state.
         */
        void            (*free_rs)     (struct ptlrpc_reply_state *rs);

        /**
         * Release the server context \a ctx.
         *
         * \see gss_svc_free_ctx().
         */
        void            (*free_ctx)    (struct ptlrpc_svc_ctx *ctx);

        /**
         * Install a reverse context based on the server context \a ctx.
         *
         * \see gss_svc_install_rctx_kr().
         */
        int             (*install_rctx)(struct obd_import *imp,
                                        struct ptlrpc_svc_ctx *ctx);

        /**
         * Prepare buffer for incoming bulk write.
         *
         * \pre desc->bd_iov and desc->bd_iov_count describe the buffer
         * intended to receive the write.
         *
         * \see gss_svc_prep_bulk().
         */
        int             (*prep_bulk)   (struct ptlrpc_request *req,
                                        struct ptlrpc_bulk_desc *desc);

        /**
         * Unwrap the bulk write data.
         *
         * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk().
         */
        int             (*unwrap_bulk) (struct ptlrpc_request *req,
                                        struct ptlrpc_bulk_desc *desc);

        /**
         * Wrap the bulk read data.
         *
         * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk().
         */
        int             (*wrap_bulk)   (struct ptlrpc_request *req,
                                        struct ptlrpc_bulk_desc *desc);
};

struct ptlrpc_sec_policy {
        struct module                  *sp_owner;
        char                           *sp_name;
        __u16                           sp_policy; /* policy number */
        struct ptlrpc_sec_cops         *sp_cops;   /* client ops */
        struct ptlrpc_sec_sops         *sp_sops;   /* server ops */
};
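
/**
 * Sketch of how a policy module ties the pieces together (hypothetical
 * "noop" module, not one of the in-tree policies): fill a
 * ptlrpc_sec_policy with its client/server operation vectors and register
 * it on module load via sptlrpc_register_policy(), declared further below.
 *
 * \code
 *        static struct ptlrpc_sec_policy noop_policy = {
 *                .sp_owner  = THIS_MODULE,
 *                .sp_name   = "noop",
 *                .sp_policy = SPTLRPC_POLICY_NULL,  // a real policy needs
 *                                                   // its own number
 *                .sp_cops   = &noop_cops,
 *                .sp_sops   = &noop_sops,
 *        };
 *
 *        static int __init noop_init(void)
 *        {
 *                return sptlrpc_register_policy(&noop_policy);
 *        }
 *
 *        static void __exit noop_exit(void)
 *        {
 *                sptlrpc_unregister_policy(&noop_policy);
 *        }
 * \endcode
 */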

#define PTLRPC_SEC_FL_REVERSE           0x0001 /* reverse sec */
#define PTLRPC_SEC_FL_ROOTONLY          0x0002 /* treat everyone as root */
#define PTLRPC_SEC_FL_UDESC             0x0004 /* ship udesc */
#define PTLRPC_SEC_FL_BULK              0x0008 /* intensive bulk i/o expected */
#define PTLRPC_SEC_FL_PAG               0x0010 /* PAG mode */

/**
 * The ptlrpc_sec represents the client side ptlrpc security facilities;
 * each obd_import (both regular and reverse import) must be associated
 * with a ptlrpc_sec.
 *
 * \see sptlrpc_import_sec_adapt().
 */
struct ptlrpc_sec {
        struct ptlrpc_sec_policy       *ps_policy;
        atomic_t                        ps_refcount;
        /** statistics only */
        atomic_t                        ps_nctx;
        /** unique identifier */
        int                             ps_id;
        struct sptlrpc_flavor           ps_flvr;
        enum lustre_sec_part            ps_part;
        /** after set, no more new context will be created */
        unsigned int                    ps_dying:1;
        /** owning import */
        struct obd_import              *ps_import;
        spinlock_t                      ps_lock;

        /*
         * garbage collection
         */
        struct list_head                ps_gc_list;
        unsigned long                   ps_gc_interval; /* in seconds */
        time64_t                        ps_gc_next;     /* in seconds */
};

static inline int sec_is_reverse(struct ptlrpc_sec *sec)
{
        return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE);
}

static inline int sec_is_rootonly(struct ptlrpc_sec *sec)
{
        return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_ROOTONLY);
}

struct ptlrpc_svc_ctx {
        atomic_t                        sc_refcount;
        struct ptlrpc_sec_policy       *sc_policy;
};

/*
 * user identity descriptor
 */
#define LUSTRE_MAX_GROUPS               (128)

struct ptlrpc_user_desc {
        __u32           pud_uid;
        __u32           pud_gid;
        __u32           pud_fsuid;
        __u32           pud_fsgid;
        __u32           pud_cap;
        __u32           pud_ngroups;
        __u32           pud_groups[0];
};

/*
 * bulk flavors
 */
enum sptlrpc_bulk_hash_alg {
        BULK_HASH_ALG_NULL      = 0,
        BULK_HASH_ALG_ADLER32,
        BULK_HASH_ALG_CRC32,
        BULK_HASH_ALG_MD5,
        BULK_HASH_ALG_SHA1,
        BULK_HASH_ALG_SHA256,
        BULK_HASH_ALG_SHA384,
        BULK_HASH_ALG_SHA512,
        BULK_HASH_ALG_MAX
};

const char *sptlrpc_get_hash_name(__u8 hash_alg);
__u8 sptlrpc_get_hash_alg(const char *algname);

enum {
        BSD_FL_ERR      = 1,
};

struct ptlrpc_bulk_sec_desc {
        __u8            bsd_version;    /* 0 */
        __u8            bsd_type;       /* SPTLRPC_BULK_XXX */
        __u8            bsd_svc;        /* SPTLRPC_BULK_SVC_XXXX */
        __u8            bsd_flags;      /* flags */
        __u32           bsd_nob;        /* nob of bulk data */
        __u8            bsd_data[0];    /* policy-specific token */
};

/*
 * round size up to the next power of 2, for slab allocation.
 * @size must be sane (can't overflow after round up)
 */
static inline int size_roundup_power2(int size)
{
        size--;
        size |= size >> 1;
        size |= size >> 2;
        size |= size >> 4;
        size |= size >> 8;
        size |= size >> 16;
        size++;
        return size;
}
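
/*
 * For example (illustrative): sizes that are already powers of two are
 * returned unchanged, others are rounded up to the next power of two:
 *
 *        size_roundup_power2(1024) == 1024
 *        size_roundup_power2(1025) == 2048
 *        size_roundup_power2(600)  == 1024
 */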

/*
 * internal support libraries
 */
void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
                                  int segment, int newsize);

/*
 * security policies
 */
int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy);
int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy);

__u32 sptlrpc_name2flavor_base(const char *name);
const char *sptlrpc_flavor2name_base(__u32 flvr);
char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
                               char *buf, int bufsize);
char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize);
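
/*
 * Illustrative round trip between flavor strings and struct sptlrpc_flavor
 * (assuming the usual 0-on-success convention of sptlrpc_parse_flavor() and
 * flavor names matching the end-user flavors above, e.g. "krb5i"):
 *
 *        struct sptlrpc_flavor flvr;
 *        char buf[32];
 *
 *        if (sptlrpc_parse_flavor("krb5i", &flvr) == 0)
 *                sptlrpc_flavor2name(&flvr, buf, sizeof(buf));
 */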

static inline
struct ptlrpc_sec_policy *sptlrpc_policy_get(struct ptlrpc_sec_policy *policy)
{
        __module_get(policy->sp_owner);
        return policy;
}

static inline
void sptlrpc_policy_put(struct ptlrpc_sec_policy *policy)
{
        module_put(policy->sp_owner);
}

/*
 * client credential
 */
static inline
unsigned long cli_ctx_status(struct ptlrpc_cli_ctx *ctx)
{
        return (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK);
}

static inline
int cli_ctx_is_ready(struct ptlrpc_cli_ctx *ctx)
{
        return (cli_ctx_status(ctx) == PTLRPC_CTX_UPTODATE);
}

static inline
int cli_ctx_is_refreshed(struct ptlrpc_cli_ctx *ctx)
{
        return (cli_ctx_status(ctx) != 0);
}

static inline
int cli_ctx_is_uptodate(struct ptlrpc_cli_ctx *ctx)
{
        return ((ctx->cc_flags & PTLRPC_CTX_UPTODATE) != 0);
}

static inline
int cli_ctx_is_error(struct ptlrpc_cli_ctx *ctx)
{
        return ((ctx->cc_flags & PTLRPC_CTX_ERROR) != 0);
}

static inline
int cli_ctx_is_dead(struct ptlrpc_cli_ctx *ctx)
{
        return ((ctx->cc_flags & (PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR)) != 0);
}

static inline
int cli_ctx_is_eternal(struct ptlrpc_cli_ctx *ctx)
{
        return ((ctx->cc_flags & PTLRPC_CTX_ETERNAL) != 0);
}

/*
 * sec get/put
 */
void sptlrpc_sec_put(struct ptlrpc_sec *sec);

/*
 * internal apis which are only used by policy implementations
 */
int sptlrpc_get_next_secid(void);

/*
 * exported client context api
 */
struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx);
void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync);

/*
 * exported client context wrap/buffers
 */
int sptlrpc_cli_wrap_request(struct ptlrpc_request *req);
int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req);
int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize);
void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req);
int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize);
void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req);
int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
                               int segment, int newsize);
int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
                                   struct ptlrpc_request **req_ret);
void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req);

void sptlrpc_request_out_callback(struct ptlrpc_request *req);

/*
 * exported higher interface of import & request
 */
int sptlrpc_import_sec_adapt(struct obd_import *imp,
                             struct ptlrpc_svc_ctx *ctx,
                             struct sptlrpc_flavor *flvr);
struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp);
void sptlrpc_import_sec_put(struct obd_import *imp);

int sptlrpc_import_check_ctx(struct obd_import *imp);
void sptlrpc_import_flush_root_ctx(struct obd_import *imp);
void sptlrpc_import_flush_my_ctx(struct obd_import *imp);
void sptlrpc_import_flush_all_ctx(struct obd_import *imp);
int sptlrpc_req_get_ctx(struct ptlrpc_request *req);
void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync);
int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout);
void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode);

/* gc */
void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec);
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec);

/* misc */
const char *sec2target_str(struct ptlrpc_sec *sec);
/*
 * lprocfs
 */
int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev);

/*
 * server side
 */
enum secsvc_accept_res {
        SECSVC_OK       = 0,
        SECSVC_COMPLETE,
        SECSVC_DROP,
};

int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req);
int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req);
void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs);
void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req);
void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req);

int sptlrpc_target_export_check(struct obd_export *exp,
                                struct ptlrpc_request *req);

/* bulk security api */
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc);

int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc);
int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
                                 struct ptlrpc_bulk_desc *desc,
                                 int nob);
int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
                                  struct ptlrpc_bulk_desc *desc);

/* bulk helpers (internal use only by policies) */
int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
                              void *buf, int buflen);

int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed);

/* user descriptor helpers */
static inline int sptlrpc_user_desc_size(int ngroups)
{
        return sizeof(struct ptlrpc_user_desc) + ngroups * sizeof(__u32);
}

int sptlrpc_current_user_desc_size(void);
int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset);
int sptlrpc_unpack_user_desc(struct lustre_msg *req, int offset, int swabbed);

#define CFS_CAP_CHOWN_MASK (1 << CFS_CAP_CHOWN)
#define CFS_CAP_SYS_RESOURCE_MASK (1 << CFS_CAP_SYS_RESOURCE)

enum {
        LUSTRE_SEC_NONE         = 0,
        LUSTRE_SEC_REMOTE       = 1,
        LUSTRE_SEC_SPECIFY      = 2,
        LUSTRE_SEC_ALL          = 3
};

/** @} sptlrpc */

#endif /* _LUSTRE_SEC_H_ */