1 // SPDX-License-Identifier: BSD-3-Clause
2 /*
3 * linux/net/sunrpc/auth_gss/auth_gss.c
4 *
5 * RPCSEC_GSS client authentication.
6 *
7 * Copyright (c) 2000 The Regents of the University of Michigan.
8 * All rights reserved.
9 *
10 * Dug Song <dugsong@monkey.org>
11 * Andy Adamson <andros@umich.edu>
12 */
13
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/sunrpc/clnt.h>
21 #include <linux/sunrpc/auth.h>
22 #include <linux/sunrpc/auth_gss.h>
23 #include <linux/sunrpc/gss_krb5.h>
24 #include <linux/sunrpc/svcauth_gss.h>
25 #include <linux/sunrpc/gss_err.h>
26 #include <linux/workqueue.h>
27 #include <linux/sunrpc/rpc_pipe_fs.h>
28 #include <linux/sunrpc/gss_api.h>
29 #include <linux/uaccess.h>
30 #include <linux/hashtable.h>
31
32 #include "auth_gss_internal.h"
33 #include "../netns.h"
34
35 #include <trace/events/rpcgss.h>
36
37 static const struct rpc_authops authgss_ops;
38
39 static const struct rpc_credops gss_credops;
40 static const struct rpc_credops gss_nullops;
41
42 #define GSS_RETRY_EXPIRED 5
43 static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;
44
45 #define GSS_KEY_EXPIRE_TIMEO 240
46 static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;
47
48 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
49 # define RPCDBG_FACILITY RPCDBG_AUTH
50 #endif
51
52 #define GSS_CRED_SLACK (RPC_MAX_AUTH_SIZE * 2)
53 /* length of a krb5 verifier (48), plus data added before arguments when
54 * using integrity (two 4-byte integers): */
55 #define GSS_VERF_SLACK 100
56
57 static DEFINE_HASHTABLE(gss_auth_hash_table, 4);
58 static DEFINE_SPINLOCK(gss_auth_hash_lock);
59
60 struct gss_pipe {
61 struct rpc_pipe_dir_object pdo;
62 struct rpc_pipe *pipe;
63 struct rpc_clnt *clnt;
64 const char *name;
65 struct kref kref;
66 };
67
68 struct gss_auth {
69 struct kref kref;
70 struct hlist_node hash;
71 struct rpc_auth rpc_auth;
72 struct gss_api_mech *mech;
73 enum rpc_gss_svc service;
74 struct rpc_clnt *client;
75 struct net *net;
76 /*
77 * There are two upcall pipes; dentry[1], named "gssd", is used
78 * for the new text-based upcall; dentry[0] is named after the
79 * mechanism (for example, "krb5") and exists for
80 * backwards-compatibility with older gssd's.
81 */
82 struct gss_pipe *gss_pipe[2];
83 const char *target_name;
84 };
85
86 /* pipe_version >= 0 if and only if someone has a pipe open. */
87 static DEFINE_SPINLOCK(pipe_version_lock);
88 static struct rpc_wait_queue pipe_version_rpc_waitqueue;
89 static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
90 static void gss_put_auth(struct gss_auth *gss_auth);
91
92 static void gss_free_ctx(struct gss_cl_ctx *);
93 static const struct rpc_pipe_ops gss_upcall_ops_v0;
94 static const struct rpc_pipe_ops gss_upcall_ops_v1;
95
96 static inline struct gss_cl_ctx *
97 gss_get_ctx(struct gss_cl_ctx *ctx)
98 {
99 refcount_inc(&ctx->count);
100 return ctx;
101 }
102
103 static inline void
104 gss_put_ctx(struct gss_cl_ctx *ctx)
105 {
106 if (refcount_dec_and_test(&ctx->count))
107 gss_free_ctx(ctx);
108 }
109
110 /* gss_cred_set_ctx:
111 * called by gss_upcall_callback and gss_create_upcall in order
112 * to set the gss context. The actual exchange of an old context
113 * and a new one is protected by the pipe->lock.
114 */
115 static void
116 gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
117 {
118 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
119
120 if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
121 return;
122 gss_get_ctx(ctx);
123 rcu_assign_pointer(gss_cred->gc_ctx, ctx);
124 set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
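/* Order the ctx store and the UPTODATE bit before clearing NEW, so that
 * anyone who observes !RPCAUTH_CRED_NEW also sees a valid context. */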
125 smp_mb__before_atomic();
126 clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
127 }
128
129 static struct gss_cl_ctx *
130 gss_cred_get_ctx(struct rpc_cred *cred)
131 {
132 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
133 struct gss_cl_ctx *ctx = NULL;
134
135 rcu_read_lock();
136 ctx = rcu_dereference(gss_cred->gc_ctx);
137 if (ctx)
138 gss_get_ctx(ctx);
139 rcu_read_unlock();
140 return ctx;
141 }
142
143 static struct gss_cl_ctx *
144 gss_alloc_context(void)
145 {
146 struct gss_cl_ctx *ctx;
147
148 ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
149 if (ctx != NULL) {
150 ctx->gc_proc = RPC_GSS_PROC_DATA;
151 ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 0 */
152 spin_lock_init(&ctx->gc_seq_lock);
153 refcount_set(&ctx->count,1);
154 }
155 return ctx;
156 }
157
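/* If gssd reports a zero context lifetime, assume the context is good
 * for at least an hour. */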
158 #define GSSD_MIN_TIMEOUT (60 * 60)
159 static const void *
160 gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
161 {
162 const void *q;
163 unsigned int seclen;
164 unsigned int timeout;
165 unsigned long now = jiffies;
166 u32 window_size;
167 int ret;
168
169 /* First unsigned int gives the remaining lifetime in seconds of the
170 * credential - e.g. the remaining TGT lifetime for Kerberos or
171 * the -t value passed to GSSD.
172 */
173 p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
174 if (IS_ERR(p))
175 goto err;
176 if (timeout == 0)
177 timeout = GSSD_MIN_TIMEOUT;
178 ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
179 /* Sequence number window. Determines the maximum number of
180 * simultaneous requests
181 */
182 p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
183 if (IS_ERR(p))
184 goto err;
185 ctx->gc_win = window_size;
186 /* gssd signals an error by passing ctx->gc_win = 0: */
187 if (ctx->gc_win == 0) {
188 /*
189 * in which case, p points to an error code. Anything other
190 * than -EKEYEXPIRED gets converted to -EACCES.
191 */
192 p = simple_get_bytes(p, end, &ret, sizeof(ret));
193 if (!IS_ERR(p))
194 p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
195 ERR_PTR(-EACCES);
196 goto err;
197 }
198 /* copy the opaque wire context */
199 p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
200 if (IS_ERR(p))
201 goto err;
202 /* import the opaque security context */
203 p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
204 if (IS_ERR(p))
205 goto err;
206 q = (const void *)((const char *)p + seclen);
207 if (unlikely(q > end || q < p)) {
208 p = ERR_PTR(-EFAULT);
209 goto err;
210 }
211 ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
212 if (ret < 0) {
213 trace_rpcgss_import_ctx(ret);
214 p = ERR_PTR(ret);
215 goto err;
216 }
217
218 /* is there any trailing data? */
219 if (q == end) {
220 p = q;
221 goto done;
222 }
223
224 /* pull in acceptor name (if there is one) */
225 p = simple_get_netobj(q, end, &ctx->gc_acceptor);
226 if (IS_ERR(p))
227 goto err;
228 done:
229 trace_rpcgss_context(window_size, ctx->gc_expiry, now, timeout,
230 ctx->gc_acceptor.len, ctx->gc_acceptor.data);
231 err:
232 return p;
233 }
234
235 /* XXX: Need some documentation about why UPCALL_BUF_LEN is so small.
236 * Is user space expecting no more than UPCALL_BUF_LEN bytes?
237 * Note that there are now _two_ NI_MAXHOST sized data items
238 * being passed in this string.
239 */
240 #define UPCALL_BUF_LEN 256
241
242 struct gss_upcall_msg {
243 refcount_t count;
244 kuid_t uid;
245 const char *service_name;
246 struct rpc_pipe_msg msg;
247 struct list_head list;
248 struct gss_auth *auth;
249 struct rpc_pipe *pipe;
250 struct rpc_wait_queue rpc_waitqueue;
251 wait_queue_head_t waitqueue;
252 struct gss_cl_ctx *ctx;
253 char databuf[UPCALL_BUF_LEN];
254 };
255
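/* Grab a reference to the upcall pipe (sn->pipe_users) and return the
 * pipe version currently in use, or -EAGAIN if no gssd pipe is open. */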
256 static int get_pipe_version(struct net *net)
257 {
258 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
259 int ret;
260
261 spin_lock(&pipe_version_lock);
262 if (sn->pipe_version >= 0) {
263 atomic_inc(&sn->pipe_users);
264 ret = sn->pipe_version;
265 } else
266 ret = -EAGAIN;
267 spin_unlock(&pipe_version_lock);
268 return ret;
269 }
270
271 static void put_pipe_version(struct net *net)
272 {
273 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
274
275 if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
276 sn->pipe_version = -1;
277 spin_unlock(&pipe_version_lock);
278 }
279 }
280
281 static void
282 gss_release_msg(struct gss_upcall_msg *gss_msg)
283 {
284 struct net *net = gss_msg->auth->net;
285 if (!refcount_dec_and_test(&gss_msg->count))
286 return;
287 put_pipe_version(net);
288 BUG_ON(!list_empty(&gss_msg->list));
289 if (gss_msg->ctx != NULL)
290 gss_put_ctx(gss_msg->ctx);
291 rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
292 gss_put_auth(gss_msg->auth);
293 kfree_const(gss_msg->service_name);
294 kfree(gss_msg);
295 }
296
297 static struct gss_upcall_msg *
298 __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
299 {
300 struct gss_upcall_msg *pos;
301 list_for_each_entry(pos, &pipe->in_downcall, list) {
302 if (!uid_eq(pos->uid, uid))
303 continue;
304 if (pos->auth->service != auth->service)
305 continue;
306 refcount_inc(&pos->count);
307 return pos;
308 }
309 return NULL;
310 }
311
312 /* Try to add an upcall to the pipefs queue.
313 * If an upcall owned by our uid already exists, then we return a reference
314 * to that upcall instead of adding the new upcall.
315 */
316 static inline struct gss_upcall_msg *
317 gss_add_msg(struct gss_upcall_msg *gss_msg)
318 {
319 struct rpc_pipe *pipe = gss_msg->pipe;
320 struct gss_upcall_msg *old;
321
322 spin_lock(&pipe->lock);
323 old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
324 if (old == NULL) {
325 refcount_inc(&gss_msg->count);
326 list_add(&gss_msg->list, &pipe->in_downcall);
327 } else
328 gss_msg = old;
329 spin_unlock(&pipe->lock);
330 return gss_msg;
331 }
332
333 static void
334 __gss_unhash_msg(struct gss_upcall_msg *gss_msg)
335 {
336 list_del_init(&gss_msg->list);
337 rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
338 wake_up_all(&gss_msg->waitqueue);
339 refcount_dec(&gss_msg->count);
340 }
341
342 static void
343 gss_unhash_msg(struct gss_upcall_msg *gss_msg)
344 {
345 struct rpc_pipe *pipe = gss_msg->pipe;
346
347 if (list_empty(&gss_msg->list))
348 return;
349 spin_lock(&pipe->lock);
350 if (!list_empty(&gss_msg->list))
351 __gss_unhash_msg(gss_msg);
352 spin_unlock(&pipe->lock);
353 }
354
355 static void
356 gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
357 {
358 switch (gss_msg->msg.errno) {
359 case 0:
360 if (gss_msg->ctx == NULL)
361 break;
362 clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
363 gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
364 break;
365 case -EKEYEXPIRED:
366 set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
367 }
368 gss_cred->gc_upcall_timestamp = jiffies;
369 gss_cred->gc_upcall = NULL;
370 rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
371 }
372
373 static void
374 gss_upcall_callback(struct rpc_task *task)
375 {
376 struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
377 struct gss_cred, gc_base);
378 struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
379 struct rpc_pipe *pipe = gss_msg->pipe;
380
381 spin_lock(&pipe->lock);
382 gss_handle_downcall_result(gss_cred, gss_msg);
383 spin_unlock(&pipe->lock);
384 task->tk_status = gss_msg->msg.errno;
385 gss_release_msg(gss_msg);
386 }
387
388 static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg,
389 const struct cred *cred)
390 {
391 struct user_namespace *userns = cred->user_ns;
392
393 uid_t uid = from_kuid_munged(userns, gss_msg->uid);
394 memcpy(gss_msg->databuf, &uid, sizeof(uid));
395 gss_msg->msg.data = gss_msg->databuf;
396 gss_msg->msg.len = sizeof(uid);
397
398 BUILD_BUG_ON(sizeof(uid) > sizeof(gss_msg->databuf));
399 }
400
401 static ssize_t
402 gss_v0_upcall(struct file *file, struct rpc_pipe_msg *msg,
403 char __user *buf, size_t buflen)
404 {
405 struct gss_upcall_msg *gss_msg = container_of(msg,
406 struct gss_upcall_msg,
407 msg);
408 if (msg->copied == 0)
409 gss_encode_v0_msg(gss_msg, file->f_cred);
410 return rpc_pipe_generic_upcall(file, msg, buf, buflen);
411 }
412
413 static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
414 const char *service_name,
415 const char *target_name,
416 const struct cred *cred)
417 {
418 struct user_namespace *userns = cred->user_ns;
419 struct gss_api_mech *mech = gss_msg->auth->mech;
420 char *p = gss_msg->databuf;
421 size_t buflen = sizeof(gss_msg->databuf);
422 int len;
423
424 len = scnprintf(p, buflen, "mech=%s uid=%d", mech->gm_name,
425 from_kuid_munged(userns, gss_msg->uid));
426 buflen -= len;
427 p += len;
428 gss_msg->msg.len = len;
429
430 /*
431 * target= is a full service principal that names the remote
432 * identity that we are authenticating to.
433 */
434 if (target_name) {
435 len = scnprintf(p, buflen, " target=%s", target_name);
436 buflen -= len;
437 p += len;
438 gss_msg->msg.len += len;
439 }
440
441 /*
442 * gssd uses service= and srchost= to select a matching key from
443 * the system's keytab to use as the source principal.
444 *
445 * service= is the service name part of the source principal,
446 * or "*" (meaning choose any).
447 *
448 * srchost= is the hostname part of the source principal. When
449 * not provided, gssd uses the local hostname.
450 */
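/* For example, a complete v1 upcall line might look like this
 * (illustrative values only):
 *   mech=krb5 uid=1000 target=nfs@server.example.com service=* enctypes=18,17
 */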
451 if (service_name) {
452 char *c = strchr(service_name, '@');
453
454 if (!c)
455 len = scnprintf(p, buflen, " service=%s",
456 service_name);
457 else
458 len = scnprintf(p, buflen,
459 " service=%.*s srchost=%s",
460 (int)(c - service_name),
461 service_name, c + 1);
462 buflen -= len;
463 p += len;
464 gss_msg->msg.len += len;
465 }
466
467 if (mech->gm_upcall_enctypes) {
468 len = scnprintf(p, buflen, " enctypes=%s",
469 mech->gm_upcall_enctypes);
470 buflen -= len;
471 p += len;
472 gss_msg->msg.len += len;
473 }
474 trace_rpcgss_upcall_msg(gss_msg->databuf);
475 len = scnprintf(p, buflen, "\n");
476 if (len == 0)
477 goto out_overflow;
478 gss_msg->msg.len += len;
479 gss_msg->msg.data = gss_msg->databuf;
480 return 0;
481 out_overflow:
482 WARN_ON_ONCE(1);
483 return -ENOMEM;
484 }
485
486 static ssize_t
487 gss_v1_upcall(struct file *file, struct rpc_pipe_msg *msg,
488 char __user *buf, size_t buflen)
489 {
490 struct gss_upcall_msg *gss_msg = container_of(msg,
491 struct gss_upcall_msg,
492 msg);
493 int err;
494 if (msg->copied == 0) {
495 err = gss_encode_v1_msg(gss_msg,
496 gss_msg->service_name,
497 gss_msg->auth->target_name,
498 file->f_cred);
499 if (err)
500 return err;
501 }
502 return rpc_pipe_generic_upcall(file, msg, buf, buflen);
503 }
504
505 static struct gss_upcall_msg *
506 gss_alloc_msg(struct gss_auth *gss_auth,
507 kuid_t uid, const char *service_name)
508 {
509 struct gss_upcall_msg *gss_msg;
510 int vers;
511 int err = -ENOMEM;
512
513 gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
514 if (gss_msg == NULL)
515 goto err;
516 vers = get_pipe_version(gss_auth->net);
517 err = vers;
518 if (err < 0)
519 goto err_free_msg;
520 gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe;
521 INIT_LIST_HEAD(&gss_msg->list);
522 rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
523 init_waitqueue_head(&gss_msg->waitqueue);
524 refcount_set(&gss_msg->count, 1);
525 gss_msg->uid = uid;
526 gss_msg->auth = gss_auth;
527 kref_get(&gss_auth->kref);
528 if (service_name) {
529 gss_msg->service_name = kstrdup_const(service_name, GFP_NOFS);
530 if (!gss_msg->service_name) {
531 err = -ENOMEM;
532 goto err_put_pipe_version;
533 }
534 }
535 return gss_msg;
536 err_put_pipe_version:
537 put_pipe_version(gss_auth->net);
538 err_free_msg:
539 kfree(gss_msg);
540 err:
541 return ERR_PTR(err);
542 }
543
544 static struct gss_upcall_msg *
545 gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
546 {
547 struct gss_cred *gss_cred = container_of(cred,
548 struct gss_cred, gc_base);
549 struct gss_upcall_msg *gss_new, *gss_msg;
550 kuid_t uid = cred->cr_cred->fsuid;
551
552 gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
553 if (IS_ERR(gss_new))
554 return gss_new;
555 gss_msg = gss_add_msg(gss_new);
556 if (gss_msg == gss_new) {
557 int res;
558 refcount_inc(&gss_msg->count);
559 res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
560 if (res) {
561 gss_unhash_msg(gss_new);
562 refcount_dec(&gss_msg->count);
563 gss_release_msg(gss_new);
564 gss_msg = ERR_PTR(res);
565 }
566 } else
567 gss_release_msg(gss_new);
568 return gss_msg;
569 }
570
571 static void warn_gssd(void)
572 {
573 dprintk("AUTH_GSS upcall failed. Please check user daemon is running.\n");
574 }
575
576 static inline int
577 gss_refresh_upcall(struct rpc_task *task)
578 {
579 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
580 struct gss_auth *gss_auth = container_of(cred->cr_auth,
581 struct gss_auth, rpc_auth);
582 struct gss_cred *gss_cred = container_of(cred,
583 struct gss_cred, gc_base);
584 struct gss_upcall_msg *gss_msg;
585 struct rpc_pipe *pipe;
586 int err = 0;
587
588 gss_msg = gss_setup_upcall(gss_auth, cred);
589 if (PTR_ERR(gss_msg) == -EAGAIN) {
590 /* XXX: warning on the first, under the assumption we
591 * shouldn't normally hit this case on a refresh. */
592 warn_gssd();
593 rpc_sleep_on_timeout(&pipe_version_rpc_waitqueue,
594 task, NULL, jiffies + (15 * HZ));
595 err = -EAGAIN;
596 goto out;
597 }
598 if (IS_ERR(gss_msg)) {
599 err = PTR_ERR(gss_msg);
600 goto out;
601 }
602 pipe = gss_msg->pipe;
603 spin_lock(&pipe->lock);
604 if (gss_cred->gc_upcall != NULL)
605 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
606 else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
607 gss_cred->gc_upcall = gss_msg;
608 /* gss_upcall_callback will release the reference to gss_upcall_msg */
609 refcount_inc(&gss_msg->count);
610 rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
611 } else {
612 gss_handle_downcall_result(gss_cred, gss_msg);
613 err = gss_msg->msg.errno;
614 }
615 spin_unlock(&pipe->lock);
616 gss_release_msg(gss_msg);
617 out:
618 trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
619 cred->cr_cred->fsuid), err);
620 return err;
621 }
622
623 static inline int
624 gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
625 {
626 struct net *net = gss_auth->net;
627 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
628 struct rpc_pipe *pipe;
629 struct rpc_cred *cred = &gss_cred->gc_base;
630 struct gss_upcall_msg *gss_msg;
631 DEFINE_WAIT(wait);
632 int err;
633
634 retry:
635 err = 0;
636 /* if gssd is down, just skip upcalling altogether */
637 if (!gssd_running(net)) {
638 warn_gssd();
639 err = -EACCES;
640 goto out;
641 }
642 gss_msg = gss_setup_upcall(gss_auth, cred);
643 if (PTR_ERR(gss_msg) == -EAGAIN) {
644 err = wait_event_interruptible_timeout(pipe_version_waitqueue,
645 sn->pipe_version >= 0, 15 * HZ);
646 if (sn->pipe_version < 0) {
647 warn_gssd();
648 err = -EACCES;
649 }
650 if (err < 0)
651 goto out;
652 goto retry;
653 }
654 if (IS_ERR(gss_msg)) {
655 err = PTR_ERR(gss_msg);
656 goto out;
657 }
658 pipe = gss_msg->pipe;
659 for (;;) {
660 prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
661 spin_lock(&pipe->lock);
662 if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
663 break;
664 }
665 spin_unlock(&pipe->lock);
666 if (fatal_signal_pending(current)) {
667 err = -ERESTARTSYS;
668 goto out_intr;
669 }
670 schedule();
671 }
672 if (gss_msg->ctx) {
673 trace_rpcgss_ctx_init(gss_cred);
674 gss_cred_set_ctx(cred, gss_msg->ctx);
675 } else {
676 err = gss_msg->msg.errno;
677 }
678 spin_unlock(&pipe->lock);
679 out_intr:
680 finish_wait(&gss_msg->waitqueue, &wait);
681 gss_release_msg(gss_msg);
682 out:
683 trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
684 cred->cr_cred->fsuid), err);
685 return err;
686 }
687
688 static struct gss_upcall_msg *
689 gss_find_downcall(struct rpc_pipe *pipe, kuid_t uid)
690 {
691 struct gss_upcall_msg *pos;
692 list_for_each_entry(pos, &pipe->in_downcall, list) {
693 if (!uid_eq(pos->uid, uid))
694 continue;
695 if (!rpc_msg_is_inflight(&pos->msg))
696 continue;
697 refcount_inc(&pos->count);
698 return pos;
699 }
700 return NULL;
701 }
702
703 #define MSG_BUF_MAXSIZE 1024
704
705 static ssize_t
706 gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
707 {
708 const void *p, *end;
709 void *buf;
710 struct gss_upcall_msg *gss_msg;
711 struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
712 struct gss_cl_ctx *ctx;
713 uid_t id;
714 kuid_t uid;
715 ssize_t err = -EFBIG;
716
717 if (mlen > MSG_BUF_MAXSIZE)
718 goto out;
719 err = -ENOMEM;
720 buf = kmalloc(mlen, GFP_NOFS);
721 if (!buf)
722 goto out;
723
724 err = -EFAULT;
725 if (copy_from_user(buf, src, mlen))
726 goto err;
727
728 end = (const void *)((char *)buf + mlen);
729 p = simple_get_bytes(buf, end, &id, sizeof(id));
730 if (IS_ERR(p)) {
731 err = PTR_ERR(p);
732 goto err;
733 }
734
735 uid = make_kuid(current_user_ns(), id);
736 if (!uid_valid(uid)) {
737 err = -EINVAL;
738 goto err;
739 }
740
741 err = -ENOMEM;
742 ctx = gss_alloc_context();
743 if (ctx == NULL)
744 goto err;
745
746 err = -ENOENT;
747 /* Find a matching upcall */
748 spin_lock(&pipe->lock);
749 gss_msg = gss_find_downcall(pipe, uid);
750 if (gss_msg == NULL) {
751 spin_unlock(&pipe->lock);
752 goto err_put_ctx;
753 }
754 list_del_init(&gss_msg->list);
755 spin_unlock(&pipe->lock);
756
757 p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
758 if (IS_ERR(p)) {
759 err = PTR_ERR(p);
760 switch (err) {
761 case -EACCES:
762 case -EKEYEXPIRED:
763 gss_msg->msg.errno = err;
764 err = mlen;
765 break;
766 case -EFAULT:
767 case -ENOMEM:
768 case -EINVAL:
769 case -ENOSYS:
770 gss_msg->msg.errno = -EAGAIN;
771 break;
772 default:
773 printk(KERN_CRIT "%s: bad return from "
774 "gss_fill_context: %zd\n", __func__, err);
775 gss_msg->msg.errno = -EIO;
776 }
777 goto err_release_msg;
778 }
779 gss_msg->ctx = gss_get_ctx(ctx);
780 err = mlen;
781
782 err_release_msg:
783 spin_lock(&pipe->lock);
784 __gss_unhash_msg(gss_msg);
785 spin_unlock(&pipe->lock);
786 gss_release_msg(gss_msg);
787 err_put_ctx:
788 gss_put_ctx(ctx);
789 err:
790 kfree(buf);
791 out:
792 return err;
793 }
794
795 static int gss_pipe_open(struct inode *inode, int new_version)
796 {
797 struct net *net = inode->i_sb->s_fs_info;
798 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
799 int ret = 0;
800
801 spin_lock(&pipe_version_lock);
802 if (sn->pipe_version < 0) {
803 /* First open of any gss pipe determines the version: */
804 sn->pipe_version = new_version;
805 rpc_wake_up(&pipe_version_rpc_waitqueue);
806 wake_up(&pipe_version_waitqueue);
807 } else if (sn->pipe_version != new_version) {
808 /* Trying to open a pipe of a different version */
809 ret = -EBUSY;
810 goto out;
811 }
812 atomic_inc(&sn->pipe_users);
813 out:
814 spin_unlock(&pipe_version_lock);
815 return ret;
816
817 }
818
819 static int gss_pipe_open_v0(struct inode *inode)
820 {
821 return gss_pipe_open(inode, 0);
822 }
823
824 static int gss_pipe_open_v1(struct inode *inode)
825 {
826 return gss_pipe_open(inode, 1);
827 }
828
829 static void
830 gss_pipe_release(struct inode *inode)
831 {
832 struct net *net = inode->i_sb->s_fs_info;
833 struct rpc_pipe *pipe = RPC_I(inode)->pipe;
834 struct gss_upcall_msg *gss_msg;
835
836 restart:
837 spin_lock(&pipe->lock);
838 list_for_each_entry(gss_msg, &pipe->in_downcall, list) {
839
840 if (!list_empty(&gss_msg->msg.list))
841 continue;
842 gss_msg->msg.errno = -EPIPE;
843 refcount_inc(&gss_msg->count);
844 __gss_unhash_msg(gss_msg);
845 spin_unlock(&pipe->lock);
846 gss_release_msg(gss_msg);
847 goto restart;
848 }
849 spin_unlock(&pipe->lock);
850
851 put_pipe_version(net);
852 }
853
854 static void
855 gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
856 {
857 struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
858
859 if (msg->errno < 0) {
860 refcount_inc(&gss_msg->count);
861 gss_unhash_msg(gss_msg);
862 if (msg->errno == -ETIMEDOUT)
863 warn_gssd();
864 gss_release_msg(gss_msg);
865 }
866 gss_release_msg(gss_msg);
867 }
868
869 static void gss_pipe_dentry_destroy(struct dentry *dir,
870 struct rpc_pipe_dir_object *pdo)
871 {
872 struct gss_pipe *gss_pipe = pdo->pdo_data;
873 struct rpc_pipe *pipe = gss_pipe->pipe;
874
875 if (pipe->dentry != NULL) {
876 rpc_unlink(pipe->dentry);
877 pipe->dentry = NULL;
878 }
879 }
880
881 static int gss_pipe_dentry_create(struct dentry *dir,
882 struct rpc_pipe_dir_object *pdo)
883 {
884 struct gss_pipe *p = pdo->pdo_data;
885 struct dentry *dentry;
886
887 dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
888 if (IS_ERR(dentry))
889 return PTR_ERR(dentry);
890 p->pipe->dentry = dentry;
891 return 0;
892 }
893
894 static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = {
895 .create = gss_pipe_dentry_create,
896 .destroy = gss_pipe_dentry_destroy,
897 };
898
899 static struct gss_pipe *gss_pipe_alloc(struct rpc_clnt *clnt,
900 const char *name,
901 const struct rpc_pipe_ops *upcall_ops)
902 {
903 struct gss_pipe *p;
904 int err = -ENOMEM;
905
906 p = kmalloc(sizeof(*p), GFP_KERNEL);
907 if (p == NULL)
908 goto err;
909 p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
910 if (IS_ERR(p->pipe)) {
911 err = PTR_ERR(p->pipe);
912 goto err_free_gss_pipe;
913 }
914 p->name = name;
915 p->clnt = clnt;
916 kref_init(&p->kref);
917 rpc_init_pipe_dir_object(&p->pdo,
918 &gss_pipe_dir_object_ops,
919 p);
920 return p;
921 err_free_gss_pipe:
922 kfree(p);
923 err:
924 return ERR_PTR(err);
925 }
926
927 struct gss_alloc_pdo {
928 struct rpc_clnt *clnt;
929 const char *name;
930 const struct rpc_pipe_ops *upcall_ops;
931 };
932
933 static int gss_pipe_match_pdo(struct rpc_pipe_dir_object *pdo, void *data)
934 {
935 struct gss_pipe *gss_pipe;
936 struct gss_alloc_pdo *args = data;
937
938 if (pdo->pdo_ops != &gss_pipe_dir_object_ops)
939 return 0;
940 gss_pipe = container_of(pdo, struct gss_pipe, pdo);
941 if (strcmp(gss_pipe->name, args->name) != 0)
942 return 0;
943 if (!kref_get_unless_zero(&gss_pipe->kref))
944 return 0;
945 return 1;
946 }
947
948 static struct rpc_pipe_dir_object *gss_pipe_alloc_pdo(void *data)
949 {
950 struct gss_pipe *gss_pipe;
951 struct gss_alloc_pdo *args = data;
952
953 gss_pipe = gss_pipe_alloc(args->clnt, args->name, args->upcall_ops);
954 if (!IS_ERR(gss_pipe))
955 return &gss_pipe->pdo;
956 return NULL;
957 }
958
959 static struct gss_pipe *gss_pipe_get(struct rpc_clnt *clnt,
960 const char *name,
961 const struct rpc_pipe_ops *upcall_ops)
962 {
963 struct net *net = rpc_net_ns(clnt);
964 struct rpc_pipe_dir_object *pdo;
965 struct gss_alloc_pdo args = {
966 .clnt = clnt,
967 .name = name,
968 .upcall_ops = upcall_ops,
969 };
970
971 pdo = rpc_find_or_alloc_pipe_dir_object(net,
972 &clnt->cl_pipedir_objects,
973 gss_pipe_match_pdo,
974 gss_pipe_alloc_pdo,
975 &args);
976 if (pdo != NULL)
977 return container_of(pdo, struct gss_pipe, pdo);
978 return ERR_PTR(-ENOMEM);
979 }
980
981 static void __gss_pipe_free(struct gss_pipe *p)
982 {
983 struct rpc_clnt *clnt = p->clnt;
984 struct net *net = rpc_net_ns(clnt);
985
986 rpc_remove_pipe_dir_object(net,
987 &clnt->cl_pipedir_objects,
988 &p->pdo);
989 rpc_destroy_pipe_data(p->pipe);
990 kfree(p);
991 }
992
993 static void __gss_pipe_release(struct kref *kref)
994 {
995 struct gss_pipe *p = container_of(kref, struct gss_pipe, kref);
996
997 __gss_pipe_free(p);
998 }
999
1000 static void gss_pipe_free(struct gss_pipe *p)
1001 {
1002 if (p != NULL)
1003 kref_put(&p->kref, __gss_pipe_release);
1004 }
1005
1006 /*
1007 * NOTE: we have the opportunity to use different
1008 * parameters based on the input flavor (which must be a pseudoflavor)
1009 */
1010 static struct gss_auth *
1011 gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
1012 {
1013 rpc_authflavor_t flavor = args->pseudoflavor;
1014 struct gss_auth *gss_auth;
1015 struct gss_pipe *gss_pipe;
1016 struct rpc_auth * auth;
1017 int err = -ENOMEM; /* XXX? */
1018
1019 if (!try_module_get(THIS_MODULE))
1020 return ERR_PTR(err);
1021 if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
1022 goto out_dec;
1023 INIT_HLIST_NODE(&gss_auth->hash);
1024 gss_auth->target_name = NULL;
1025 if (args->target_name) {
1026 gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL);
1027 if (gss_auth->target_name == NULL)
1028 goto err_free;
1029 }
1030 gss_auth->client = clnt;
1031 gss_auth->net = get_net(rpc_net_ns(clnt));
1032 err = -EINVAL;
1033 gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
1034 if (!gss_auth->mech)
1035 goto err_put_net;
1036 gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
1037 if (gss_auth->service == 0)
1038 goto err_put_mech;
1039 if (!gssd_running(gss_auth->net))
1040 goto err_put_mech;
1041 auth = &gss_auth->rpc_auth;
1042 auth->au_cslack = GSS_CRED_SLACK >> 2;
1043 auth->au_rslack = GSS_KRB5_MAX_SLACK_NEEDED >> 2;
1044 auth->au_verfsize = GSS_VERF_SLACK >> 2;
1045 auth->au_ralign = GSS_VERF_SLACK >> 2;
1046 __set_bit(RPCAUTH_AUTH_UPDATE_SLACK, &auth->au_flags);
1047 auth->au_ops = &authgss_ops;
1048 auth->au_flavor = flavor;
1049 if (gss_pseudoflavor_to_datatouch(gss_auth->mech, flavor))
1050 __set_bit(RPCAUTH_AUTH_DATATOUCH, &auth->au_flags);
1051 refcount_set(&auth->au_count, 1);
1052 kref_init(&gss_auth->kref);
1053
1054 err = rpcauth_init_credcache(auth);
1055 if (err)
1056 goto err_put_mech;
1057 /*
1058 * Note: if we created the old pipe first, then someone who
1059 * examined the directory at the right moment might conclude
1060 * that we supported only the old pipe. So we instead create
1061 * the new pipe first.
1062 */
1063 gss_pipe = gss_pipe_get(clnt, "gssd", &gss_upcall_ops_v1);
1064 if (IS_ERR(gss_pipe)) {
1065 err = PTR_ERR(gss_pipe);
1066 goto err_destroy_credcache;
1067 }
1068 gss_auth->gss_pipe[1] = gss_pipe;
1069
1070 gss_pipe = gss_pipe_get(clnt, gss_auth->mech->gm_name,
1071 &gss_upcall_ops_v0);
1072 if (IS_ERR(gss_pipe)) {
1073 err = PTR_ERR(gss_pipe);
1074 goto err_destroy_pipe_1;
1075 }
1076 gss_auth->gss_pipe[0] = gss_pipe;
1077
1078 return gss_auth;
1079 err_destroy_pipe_1:
1080 gss_pipe_free(gss_auth->gss_pipe[1]);
1081 err_destroy_credcache:
1082 rpcauth_destroy_credcache(auth);
1083 err_put_mech:
1084 gss_mech_put(gss_auth->mech);
1085 err_put_net:
1086 put_net(gss_auth->net);
1087 err_free:
1088 kfree(gss_auth->target_name);
1089 kfree(gss_auth);
1090 out_dec:
1091 module_put(THIS_MODULE);
1092 trace_rpcgss_createauth(flavor, err);
1093 return ERR_PTR(err);
1094 }
1095
1096 static void
1097 gss_free(struct gss_auth *gss_auth)
1098 {
1099 gss_pipe_free(gss_auth->gss_pipe[0]);
1100 gss_pipe_free(gss_auth->gss_pipe[1]);
1101 gss_mech_put(gss_auth->mech);
1102 put_net(gss_auth->net);
1103 kfree(gss_auth->target_name);
1104
1105 kfree(gss_auth);
1106 module_put(THIS_MODULE);
1107 }
1108
1109 static void
1110 gss_free_callback(struct kref *kref)
1111 {
1112 struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);
1113
1114 gss_free(gss_auth);
1115 }
1116
1117 static void
1118 gss_put_auth(struct gss_auth *gss_auth)
1119 {
1120 kref_put(&gss_auth->kref, gss_free_callback);
1121 }
1122
1123 static void
1124 gss_destroy(struct rpc_auth *auth)
1125 {
1126 struct gss_auth *gss_auth = container_of(auth,
1127 struct gss_auth, rpc_auth);
1128
1129 if (hash_hashed(&gss_auth->hash)) {
1130 spin_lock(&gss_auth_hash_lock);
1131 hash_del(&gss_auth->hash);
1132 spin_unlock(&gss_auth_hash_lock);
1133 }
1134
1135 gss_pipe_free(gss_auth->gss_pipe[0]);
1136 gss_auth->gss_pipe[0] = NULL;
1137 gss_pipe_free(gss_auth->gss_pipe[1]);
1138 gss_auth->gss_pipe[1] = NULL;
1139 rpcauth_destroy_credcache(auth);
1140
1141 gss_put_auth(gss_auth);
1142 }
1143
1144 /*
1145 * Auths may be shared between rpc clients that were cloned from a
1146 * common client with the same xprt, if they also share the flavor and
1147 * target_name.
1148 *
1149 * The auth is looked up from the oldest parent sharing the same
1150 * cl_xprt, and the auth itself references only that common parent
1151 * (which is guaranteed to last as long as any of its descendants).
1152 */
1153 static struct gss_auth *
1154 gss_auth_find_or_add_hashed(const struct rpc_auth_create_args *args,
1155 struct rpc_clnt *clnt,
1156 struct gss_auth *new)
1157 {
1158 struct gss_auth *gss_auth;
1159 unsigned long hashval = (unsigned long)clnt;
1160
1161 spin_lock(&gss_auth_hash_lock);
1162 hash_for_each_possible(gss_auth_hash_table,
1163 gss_auth,
1164 hash,
1165 hashval) {
1166 if (gss_auth->client != clnt)
1167 continue;
1168 if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
1169 continue;
1170 if (gss_auth->target_name != args->target_name) {
1171 if (gss_auth->target_name == NULL)
1172 continue;
1173 if (args->target_name == NULL)
1174 continue;
1175 if (strcmp(gss_auth->target_name, args->target_name))
1176 continue;
1177 }
1178 if (!refcount_inc_not_zero(&gss_auth->rpc_auth.au_count))
1179 continue;
1180 goto out;
1181 }
1182 if (new)
1183 hash_add(gss_auth_hash_table, &new->hash, hashval);
1184 gss_auth = new;
1185 out:
1186 spin_unlock(&gss_auth_hash_lock);
1187 return gss_auth;
1188 }
1189
1190 static struct gss_auth *
1191 gss_create_hashed(const struct rpc_auth_create_args *args,
1192 struct rpc_clnt *clnt)
1193 {
1194 struct gss_auth *gss_auth;
1195 struct gss_auth *new;
1196
1197 gss_auth = gss_auth_find_or_add_hashed(args, clnt, NULL);
1198 if (gss_auth != NULL)
1199 goto out;
1200 new = gss_create_new(args, clnt);
1201 if (IS_ERR(new))
1202 return new;
1203 gss_auth = gss_auth_find_or_add_hashed(args, clnt, new);
1204 if (gss_auth != new)
1205 gss_destroy(&new->rpc_auth);
1206 out:
1207 return gss_auth;
1208 }
1209
1210 static struct rpc_auth *
1211 gss_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
1212 {
1213 struct gss_auth *gss_auth;
1214 struct rpc_xprt_switch *xps = rcu_access_pointer(clnt->cl_xpi.xpi_xpswitch);
1215
1216 while (clnt != clnt->cl_parent) {
1217 struct rpc_clnt *parent = clnt->cl_parent;
1218 /* Find the original parent for this transport */
1219 if (rcu_access_pointer(parent->cl_xpi.xpi_xpswitch) != xps)
1220 break;
1221 clnt = parent;
1222 }
1223
1224 gss_auth = gss_create_hashed(args, clnt);
1225 if (IS_ERR(gss_auth))
1226 return ERR_CAST(gss_auth);
1227 return &gss_auth->rpc_auth;
1228 }
1229
1230 static struct gss_cred *
1231 gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
1232 {
1233 struct gss_cred *new;
1234
1235 /* Make a copy of the cred so that we can reference count it */
1236 new = kzalloc(sizeof(*gss_cred), GFP_NOFS);
1237 if (new) {
1238 struct auth_cred acred = {
1239 .cred = gss_cred->gc_base.cr_cred,
1240 };
1241 struct gss_cl_ctx *ctx =
1242 rcu_dereference_protected(gss_cred->gc_ctx, 1);
1243
1244 rpcauth_init_cred(&new->gc_base, &acred,
1245 &gss_auth->rpc_auth,
1246 &gss_nullops);
1247 new->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
1248 new->gc_service = gss_cred->gc_service;
1249 new->gc_principal = gss_cred->gc_principal;
1250 kref_get(&gss_auth->kref);
1251 rcu_assign_pointer(new->gc_ctx, ctx);
1252 gss_get_ctx(ctx);
1253 }
1254 return new;
1255 }
1256
1257 /*
1258 * gss_send_destroy_context will cause the RPCSEC_GSS to send a NULL RPC call
1259 * to the server with the GSS control procedure field set to
1260 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
1261 * all RPCSEC_GSS state associated with that context.
1262 */
1263 static void
1264 gss_send_destroy_context(struct rpc_cred *cred)
1265 {
1266 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1267 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1268 struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
1269 struct gss_cred *new;
1270 struct rpc_task *task;
1271
1272 new = gss_dup_cred(gss_auth, gss_cred);
1273 if (new) {
1274 ctx->gc_proc = RPC_GSS_PROC_DESTROY;
1275
1276 trace_rpcgss_ctx_destroy(gss_cred);
1277 task = rpc_call_null(gss_auth->client, &new->gc_base,
1278 RPC_TASK_ASYNC);
1279 if (!IS_ERR(task))
1280 rpc_put_task(task);
1281
1282 put_rpccred(&new->gc_base);
1283 }
1284 }
1285
1286 /* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
1287 * to create a new cred or context, so they check that things have been
1288 * allocated before freeing them. */
1289 static void
1290 gss_do_free_ctx(struct gss_cl_ctx *ctx)
1291 {
1292 gss_delete_sec_context(&ctx->gc_gss_ctx);
1293 kfree(ctx->gc_wire_ctx.data);
1294 kfree(ctx->gc_acceptor.data);
1295 kfree(ctx);
1296 }
1297
1298 static void
1299 gss_free_ctx_callback(struct rcu_head *head)
1300 {
1301 struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
1302 gss_do_free_ctx(ctx);
1303 }
1304
1305 static void
1306 gss_free_ctx(struct gss_cl_ctx *ctx)
1307 {
1308 call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
1309 }
1310
1311 static void
1312 gss_free_cred(struct gss_cred *gss_cred)
1313 {
1314 kfree(gss_cred);
1315 }
1316
1317 static void
1318 gss_free_cred_callback(struct rcu_head *head)
1319 {
1320 struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
1321 gss_free_cred(gss_cred);
1322 }
1323
1324 static void
1325 gss_destroy_nullcred(struct rpc_cred *cred)
1326 {
1327 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1328 struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1329 struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
1330
1331 RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
1332 put_cred(cred->cr_cred);
1333 call_rcu(&cred->cr_rcu, gss_free_cred_callback);
1334 if (ctx)
1335 gss_put_ctx(ctx);
1336 gss_put_auth(gss_auth);
1337 }
1338
1339 static void
1340 gss_destroy_cred(struct rpc_cred *cred)
1341 {
1342 if (test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
1343 gss_send_destroy_context(cred);
1344 gss_destroy_nullcred(cred);
1345 }
1346
1347 static int
1348 gss_hash_cred(struct auth_cred *acred, unsigned int hashbits)
1349 {
1350 return hash_64(from_kuid(&init_user_ns, acred->cred->fsuid), hashbits);
1351 }
1352
1353 /*
1354 * Lookup RPCSEC_GSS cred for the current process
1355 */
1356 static struct rpc_cred *
1357 gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
1358 {
1359 return rpcauth_lookup_credcache(auth, acred, flags, GFP_NOFS);
1360 }
1361
1362 static struct rpc_cred *
1363 gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
1364 {
1365 struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
1366 struct gss_cred *cred = NULL;
1367 int err = -ENOMEM;
1368
1369 if (!(cred = kzalloc(sizeof(*cred), gfp)))
1370 goto out_err;
1371
1372 rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
1373 /*
1374 * Note: in order to force a call to call_refresh(), we deliberately
1375 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
1376 */
1377 cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
1378 cred->gc_service = gss_auth->service;
1379 cred->gc_principal = acred->principal;
1380 kref_get(&gss_auth->kref);
1381 return &cred->gc_base;
1382
1383 out_err:
1384 return ERR_PTR(err);
1385 }
1386
1387 static int
1388 gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
1389 {
1390 struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
1391 struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
1392 int err;
1393
1394 do {
1395 err = gss_create_upcall(gss_auth, gss_cred);
1396 } while (err == -EAGAIN);
1397 return err;
1398 }
1399
1400 static char *
1401 gss_stringify_acceptor(struct rpc_cred *cred)
1402 {
1403 char *string = NULL;
1404 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1405 struct gss_cl_ctx *ctx;
1406 unsigned int len;
1407 struct xdr_netobj *acceptor;
1408
1409 rcu_read_lock();
1410 ctx = rcu_dereference(gss_cred->gc_ctx);
1411 if (!ctx)
1412 goto out;
1413
1414 len = ctx->gc_acceptor.len;
1415 rcu_read_unlock();
1416
1417 /* no point if there's no string */
1418 if (!len)
1419 return NULL;
1420 realloc:
1421 string = kmalloc(len + 1, GFP_KERNEL);
1422 if (!string)
1423 return NULL;
1424
1425 rcu_read_lock();
1426 ctx = rcu_dereference(gss_cred->gc_ctx);
1427
1428 /* did the ctx disappear or was it replaced by one with no acceptor? */
1429 if (!ctx || !ctx->gc_acceptor.len) {
1430 kfree(string);
1431 string = NULL;
1432 goto out;
1433 }
1434
1435 acceptor = &ctx->gc_acceptor;
1436
1437 /*
1438 * Did we find a new acceptor that's longer than the original? Allocate
1439 * a longer buffer and try again.
1440 */
1441 if (len < acceptor->len) {
1442 len = acceptor->len;
1443 rcu_read_unlock();
1444 kfree(string);
1445 goto realloc;
1446 }
1447
1448 memcpy(string, acceptor->data, acceptor->len);
1449 string[acceptor->len] = '\0';
1450 out:
1451 rcu_read_unlock();
1452 return string;
1453 }
1454
1455 /*
1456 * Returns -EACCES if GSS context is NULL or will expire within the
1457 * timeout (in seconds)
1458 */
1459 static int
1460 gss_key_timeout(struct rpc_cred *rc)
1461 {
1462 struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
1463 struct gss_cl_ctx *ctx;
1464 unsigned long timeout = jiffies + (gss_key_expire_timeo * HZ);
1465 int ret = 0;
1466
1467 rcu_read_lock();
1468 ctx = rcu_dereference(gss_cred->gc_ctx);
1469 if (!ctx || time_after(timeout, ctx->gc_expiry))
1470 ret = -EACCES;
1471 rcu_read_unlock();
1472
1473 return ret;
1474 }
1475
1476 static int
1477 gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
1478 {
1479 struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
1480 struct gss_cl_ctx *ctx;
1481 int ret;
1482
1483 if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
1484 goto out;
1485 /* Don't match with creds that have expired. */
1486 rcu_read_lock();
1487 ctx = rcu_dereference(gss_cred->gc_ctx);
1488 if (!ctx || time_after(jiffies, ctx->gc_expiry)) {
1489 rcu_read_unlock();
1490 return 0;
1491 }
1492 rcu_read_unlock();
1493 if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
1494 return 0;
1495 out:
1496 if (acred->principal != NULL) {
1497 if (gss_cred->gc_principal == NULL)
1498 return 0;
1499 ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
1500 } else {
1501 if (gss_cred->gc_principal != NULL)
1502 return 0;
1503 ret = uid_eq(rc->cr_cred->fsuid, acred->cred->fsuid);
1504 }
1505 return ret;
1506 }
1507
1508 /*
1509 * Marshal credentials.
1510 *
1511 * The expensive part is computing the verifier. We can't cache a
1512 * pre-computed version of the verifier because the seqno, which
1513 * is different every time, is included in the MIC.
1514 */
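/* The credential body built below is the rpc_gss_cred_t of RFC 2203,
 * Section 5: { version, gss_proc, seq_num, service, handle<> }. The
 * verifier is a MIC computed over the RPC header up to and including
 * that credential. */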
1515 static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
1516 {
1517 struct rpc_rqst *req = task->tk_rqstp;
1518 struct rpc_cred *cred = req->rq_cred;
1519 struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
1520 gc_base);
1521 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1522 __be32 *p, *cred_len;
1523 u32 maj_stat = 0;
1524 struct xdr_netobj mic;
1525 struct kvec iov;
1526 struct xdr_buf verf_buf;
1527 int status;
1528
1529 /* Credential */
1530
1531 p = xdr_reserve_space(xdr, 7 * sizeof(*p) +
1532 ctx->gc_wire_ctx.len);
1533 if (!p)
1534 goto marshal_failed;
1535 *p++ = rpc_auth_gss;
1536 cred_len = p++;
1537
1538 spin_lock(&ctx->gc_seq_lock);
1539 req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
1540 spin_unlock(&ctx->gc_seq_lock);
1541 if (req->rq_seqno == MAXSEQ)
1542 goto expired;
1543 trace_rpcgss_seqno(task);
1544
1545 *p++ = cpu_to_be32(RPC_GSS_VERSION);
1546 *p++ = cpu_to_be32(ctx->gc_proc);
1547 *p++ = cpu_to_be32(req->rq_seqno);
1548 *p++ = cpu_to_be32(gss_cred->gc_service);
1549 p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
1550 *cred_len = cpu_to_be32((p - (cred_len + 1)) << 2);
1551
1552 /* Verifier */
1553
1554 /* We compute the checksum for the verifier over the xdr-encoded bytes
1555 * starting with the xid and ending at the end of the credential: */
1556 iov.iov_base = req->rq_snd_buf.head[0].iov_base;
1557 iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
1558 xdr_buf_from_iov(&iov, &verf_buf);
1559
1560 p = xdr_reserve_space(xdr, sizeof(*p));
1561 if (!p)
1562 goto marshal_failed;
1563 *p++ = rpc_auth_gss;
1564 mic.data = (u8 *)(p + 1);
1565 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1566 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1567 goto expired;
1568 else if (maj_stat != 0)
1569 goto bad_mic;
1570 if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
1571 goto marshal_failed;
1572 status = 0;
1573 out:
1574 gss_put_ctx(ctx);
1575 return status;
1576 expired:
1577 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1578 status = -EKEYEXPIRED;
1579 goto out;
1580 marshal_failed:
1581 status = -EMSGSIZE;
1582 goto out;
1583 bad_mic:
1584 trace_rpcgss_get_mic(task, maj_stat);
1585 status = -EIO;
1586 goto out;
1587 }
1588
1589 static int gss_renew_cred(struct rpc_task *task)
1590 {
1591 struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
1592 struct gss_cred *gss_cred = container_of(oldcred,
1593 struct gss_cred,
1594 gc_base);
1595 struct rpc_auth *auth = oldcred->cr_auth;
1596 struct auth_cred acred = {
1597 .cred = oldcred->cr_cred,
1598 .principal = gss_cred->gc_principal,
1599 };
1600 struct rpc_cred *new;
1601
1602 new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
1603 if (IS_ERR(new))
1604 return PTR_ERR(new);
1605
1606 task->tk_rqstp->rq_cred = new;
1607 put_rpccred(oldcred);
1608 return 0;
1609 }
1610
1611 static int gss_cred_is_negative_entry(struct rpc_cred *cred)
1612 {
1613 if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
1614 unsigned long now = jiffies;
1615 unsigned long begin, expire;
1616 struct gss_cred *gss_cred;
1617
1618 gss_cred = container_of(cred, struct gss_cred, gc_base);
1619 begin = gss_cred->gc_upcall_timestamp;
1620 expire = begin + gss_expired_cred_retry_delay * HZ;
1621
1622 if (time_in_range_open(now, begin, expire))
1623 return 1;
1624 }
1625 return 0;
1626 }
1627
1628 /*
1629 * Refresh credentials. XXX - finish
1630 */
1631 static int
1632 gss_refresh(struct rpc_task *task)
1633 {
1634 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1635 int ret = 0;
1636
1637 if (gss_cred_is_negative_entry(cred))
1638 return -EKEYEXPIRED;
1639
1640 if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
1641 !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
1642 ret = gss_renew_cred(task);
1643 if (ret < 0)
1644 goto out;
1645 cred = task->tk_rqstp->rq_cred;
1646 }
1647
1648 if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
1649 ret = gss_refresh_upcall(task);
1650 out:
1651 return ret;
1652 }
1653
1654 /* Dummy refresh routine: used only when destroying the context */
1655 static int
1656 gss_refresh_null(struct rpc_task *task)
1657 {
1658 return 0;
1659 }
1660
1661 static int
1662 gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
1663 {
1664 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1665 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1666 __be32 *p, *seq = NULL;
1667 struct kvec iov;
1668 struct xdr_buf verf_buf;
1669 struct xdr_netobj mic;
1670 u32 len, maj_stat;
1671 int status;
1672
1673 p = xdr_inline_decode(xdr, 2 * sizeof(*p));
1674 if (!p)
1675 goto validate_failed;
1676 if (*p++ != rpc_auth_gss)
1677 goto validate_failed;
1678 len = be32_to_cpup(p);
1679 if (len > RPC_MAX_AUTH_SIZE)
1680 goto validate_failed;
1681 p = xdr_inline_decode(xdr, len);
1682 if (!p)
1683 goto validate_failed;
1684
1685 seq = kmalloc(4, GFP_NOFS);
1686 if (!seq)
1687 goto validate_failed;
1688 *seq = cpu_to_be32(task->tk_rqstp->rq_seqno);
1689 iov.iov_base = seq;
1690 iov.iov_len = 4;
1691 xdr_buf_from_iov(&iov, &verf_buf);
1692 mic.data = (u8 *)p;
1693 mic.len = len;
1694 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1695 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1696 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1697 if (maj_stat)
1698 goto bad_mic;
1699
1700 /* We leave it to unwrap to calculate au_rslack. For now we just
1701 * calculate the length of the verifier: */
1702 if (test_bit(RPCAUTH_AUTH_UPDATE_SLACK, &cred->cr_auth->au_flags))
1703 cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
1704 status = 0;
1705 out:
1706 gss_put_ctx(ctx);
1707 kfree(seq);
1708 return status;
1709
1710 validate_failed:
1711 status = -EIO;
1712 goto out;
1713 bad_mic:
1714 trace_rpcgss_verify_mic(task, maj_stat);
1715 status = -EACCES;
1716 goto out;
1717 }
1718
1719 static noinline_for_stack int
1720 gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1721 struct rpc_task *task, struct xdr_stream *xdr)
1722 {
1723 struct rpc_rqst *rqstp = task->tk_rqstp;
1724 struct xdr_buf integ_buf, *snd_buf = &rqstp->rq_snd_buf;
1725 struct xdr_netobj mic;
1726 __be32 *p, *integ_len;
1727 u32 offset, maj_stat;
1728
1729 p = xdr_reserve_space(xdr, 2 * sizeof(*p));
1730 if (!p)
1731 goto wrap_failed;
1732 integ_len = p++;
1733 *p = cpu_to_be32(rqstp->rq_seqno);
1734
1735 if (rpcauth_wrap_req_encode(task, xdr))
1736 goto wrap_failed;
1737
1738 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1739 if (xdr_buf_subsegment(snd_buf, &integ_buf,
1740 offset, snd_buf->len - offset))
1741 goto wrap_failed;
1742 *integ_len = cpu_to_be32(integ_buf.len);
1743
1744 p = xdr_reserve_space(xdr, 0);
1745 if (!p)
1746 goto wrap_failed;
1747 mic.data = (u8 *)(p + 1);
1748 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
1749 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1750 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1751 else if (maj_stat)
1752 goto bad_mic;
1753 /* Check that the trailing MIC fit in the buffer, after the fact */
1754 if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
1755 goto wrap_failed;
1756 return 0;
1757 wrap_failed:
1758 return -EMSGSIZE;
1759 bad_mic:
1760 trace_rpcgss_get_mic(task, maj_stat);
1761 return -EIO;
1762 }
1763
1764 static void
1765 priv_release_snd_buf(struct rpc_rqst *rqstp)
1766 {
1767 int i;
1768
1769 for (i=0; i < rqstp->rq_enc_pages_num; i++)
1770 __free_page(rqstp->rq_enc_pages[i]);
1771 kfree(rqstp->rq_enc_pages);
1772 rqstp->rq_release_snd_buf = NULL;
1773 }
1774
1775 static int
1776 alloc_enc_pages(struct rpc_rqst *rqstp)
1777 {
1778 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
1779 int first, last, i;
1780
1781 if (rqstp->rq_release_snd_buf)
1782 rqstp->rq_release_snd_buf(rqstp);
1783
1784 if (snd_buf->page_len == 0) {
1785 rqstp->rq_enc_pages_num = 0;
1786 return 0;
1787 }
1788
1789 first = snd_buf->page_base >> PAGE_SHIFT;
1790 last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
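/* One extra page is allocated so the tail can be relocated out of the
 * way before gss_wrap() expands the head (see gss_wrap_req_priv). */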
1791 rqstp->rq_enc_pages_num = last - first + 1 + 1;
1792 rqstp->rq_enc_pages
1793 = kmalloc_array(rqstp->rq_enc_pages_num,
1794 sizeof(struct page *),
1795 GFP_NOFS);
1796 if (!rqstp->rq_enc_pages)
1797 goto out;
1798 for (i=0; i < rqstp->rq_enc_pages_num; i++) {
1799 rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
1800 if (rqstp->rq_enc_pages[i] == NULL)
1801 goto out_free;
1802 }
1803 rqstp->rq_release_snd_buf = priv_release_snd_buf;
1804 return 0;
1805 out_free:
1806 rqstp->rq_enc_pages_num = i;
1807 priv_release_snd_buf(rqstp);
1808 out:
1809 return -EAGAIN;
1810 }
1811
1812 static noinline_for_stack int
1813 gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1814 struct rpc_task *task, struct xdr_stream *xdr)
1815 {
1816 struct rpc_rqst *rqstp = task->tk_rqstp;
1817 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
1818 u32 pad, offset, maj_stat;
1819 int status;
1820 __be32 *p, *opaque_len;
1821 struct page **inpages;
1822 int first;
1823 struct kvec *iov;
1824
1825 status = -EIO;
1826 p = xdr_reserve_space(xdr, 2 * sizeof(*p));
1827 if (!p)
1828 goto wrap_failed;
1829 opaque_len = p++;
1830 *p = cpu_to_be32(rqstp->rq_seqno);
1831
1832 if (rpcauth_wrap_req_encode(task, xdr))
1833 goto wrap_failed;
1834
1835 status = alloc_enc_pages(rqstp);
1836 if (unlikely(status))
1837 goto wrap_failed;
1838 first = snd_buf->page_base >> PAGE_SHIFT;
1839 inpages = snd_buf->pages + first;
1840 snd_buf->pages = rqstp->rq_enc_pages;
1841 snd_buf->page_base -= first << PAGE_SHIFT;
1842 /*
1843 * Move the tail into its own page, in case gss_wrap needs
1844 * more space in the head when wrapping.
1845 *
1846 * Still... Why can't gss_wrap just slide the tail down?
1847 */
1848 if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
1849 char *tmp;
1850
1851 tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
1852 memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
1853 snd_buf->tail[0].iov_base = tmp;
1854 }
1855 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1856 maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
1857 /* slack space should prevent this ever happening: */
1858 if (unlikely(snd_buf->len > snd_buf->buflen))
1859 goto wrap_failed;
1860 /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
1861 * done anyway, so it's safe to put the request on the wire: */
1862 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1863 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1864 else if (maj_stat)
1865 goto bad_wrap;
1866
1867 *opaque_len = cpu_to_be32(snd_buf->len - offset);
1868 /* guess whether the pad goes into the head or the tail: */
1869 if (snd_buf->page_len || snd_buf->tail[0].iov_len)
1870 iov = snd_buf->tail;
1871 else
1872 iov = snd_buf->head;
1873 p = iov->iov_base + iov->iov_len;
1874 pad = xdr_pad_size(snd_buf->len - offset);
1875 memset(p, 0, pad);
1876 iov->iov_len += pad;
1877 snd_buf->len += pad;
1878
1879 return 0;
1880 wrap_failed:
1881 return status;
1882 bad_wrap:
1883 trace_rpcgss_wrap(task, maj_stat);
1884 return -EIO;
1885 }
1886
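/*
 * Entry point for wrapping an outgoing request: dispatch on the GSS
 * service negotiated for this credential (none, integrity, or privacy).
 */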
1887 static int gss_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
1888 {
1889 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1890 struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
1891 gc_base);
1892 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1893 int status;
1894
1895 status = -EIO;
1896 if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
1897 /* The spec seems a little ambiguous here, but I think that not
1898 * wrapping context destruction requests makes the most sense.
1899 */
1900 status = rpcauth_wrap_req_encode(task, xdr);
1901 goto out;
1902 }
1903 switch (gss_cred->gc_service) {
1904 case RPC_GSS_SVC_NONE:
1905 status = rpcauth_wrap_req_encode(task, xdr);
1906 break;
1907 case RPC_GSS_SVC_INTEGRITY:
1908 status = gss_wrap_req_integ(cred, ctx, task, xdr);
1909 break;
1910 case RPC_GSS_SVC_PRIVACY:
1911 status = gss_wrap_req_priv(cred, ctx, task, xdr);
1912 break;
1913 default:
1914 status = -EIO;
1915 }
1916 out:
1917 gss_put_ctx(ctx);
1918 return status;
1919 }
1920
1921 /**
1922 * gss_update_rslack - Possibly update RPC receive buffer size estimates
1923 * @task: rpc_task for incoming RPC Reply being unwrapped
1924 * @cred: controlling rpc_cred for @task
1925 * @before: XDR words needed before each RPC Reply message
1926 * @after: XDR words needed following each RPC Reply message
1927 *
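 * When a slack update has been requested (RPCAUTH_AUTH_UPDATE_SLACK is
 * set), record fresh estimates of the XDR words consumed before and
 * after each RPC Reply message for this flavor of GSS protection.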
1928 */
1929 static void gss_update_rslack(struct rpc_task *task, struct rpc_cred *cred,
1930 unsigned int before, unsigned int after)
1931 {
1932 struct rpc_auth *auth = cred->cr_auth;
1933
1934 if (test_and_clear_bit(RPCAUTH_AUTH_UPDATE_SLACK, &auth->au_flags)) {
1935 auth->au_ralign = auth->au_verfsize + before;
1936 auth->au_rslack = auth->au_verfsize + after;
1937 trace_rpcgss_update_slack(task, auth);
1938 }
1939 }
1940
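/*
 * With rpc_gss_svc_none the reply body is not wrapped at all, so no
 * additional XDR words surround the upper layer's reply data.
 */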
1941 static int
1942 gss_unwrap_resp_auth(struct rpc_task *task, struct rpc_cred *cred)
1943 {
1944 gss_update_rslack(task, cred, 0, 0);
1945 return 0;
1946 }
1947
1948 /*
1949 * RFC 2203, Section 5.3.2.2
1950 *
1951 * struct rpc_gss_integ_data {
1952 * opaque databody_integ<>;
1953 * opaque checksum<>;
1954 * };
1955 *
1956 * struct rpc_gss_data_t {
1957 * unsigned int seq_num;
1958 * proc_req_arg_t arg;
1959 * };
1960 */
1961 static noinline_for_stack int
1962 gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
1963 struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
1964 struct xdr_stream *xdr)
1965 {
1966 struct xdr_buf gss_data, *rcv_buf = &rqstp->rq_rcv_buf;
1967 u32 len, offset, seqno, maj_stat;
1968 struct xdr_netobj mic;
1969 int ret;
1970
1971 ret = -EIO;
1972 mic.data = NULL;
1973
1974 /* opaque databody_integ<>; */
1975 if (xdr_stream_decode_u32(xdr, &len))
1976 goto unwrap_failed;
1977 if (len & 3)
1978 goto unwrap_failed;
1979 offset = rcv_buf->len - xdr_stream_remaining(xdr);
1980 if (xdr_stream_decode_u32(xdr, &seqno))
1981 goto unwrap_failed;
1982 if (seqno != rqstp->rq_seqno)
1983 goto bad_seqno;
1984 if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len))
1985 goto unwrap_failed;
1986
1987 /*
1988 * The xdr_stream now points to the beginning of the
1989 * upper layer payload, to be passed below to
1990 * rpcauth_unwrap_resp_decode(). The checksum, which
1991 * follows the upper layer payload in @rcv_buf, is
1992 * located and parsed without updating the xdr_stream.
1993 */
1994
1995 /* opaque checksum<>; */
1996 offset += len;
1997 if (xdr_decode_word(rcv_buf, offset, &len))
1998 goto unwrap_failed;
1999 offset += sizeof(__be32);
2000 if (offset + len > rcv_buf->len)
2001 goto unwrap_failed;
2002 mic.len = len;
2003 mic.data = kmalloc(len, GFP_NOFS);
2004 if (!mic.data)
2005 goto unwrap_failed;
2006 if (read_bytes_from_xdr_buf(rcv_buf, offset, mic.data, mic.len))
2007 goto unwrap_failed;
2008
2009 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &gss_data, &mic);
2010 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
2011 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
2012 if (maj_stat != GSS_S_COMPLETE)
2013 goto bad_mic;
2014
2015 gss_update_rslack(task, cred, 2, 2 + 1 + XDR_QUADLEN(mic.len));
2016 ret = 0;
2017
2018 out:
2019 kfree(mic.data);
2020 return ret;
2021
2022 unwrap_failed:
2023 trace_rpcgss_unwrap_failed(task);
2024 goto out;
2025 bad_seqno:
2026 trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno);
2027 goto out;
2028 bad_mic:
2029 trace_rpcgss_verify_mic(task, maj_stat);
2030 goto out;
2031 }
2032
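/*
 * Unwrap a reply protected with rpc_gss_svc_privacy (rpc_gss_priv_data
 * in RFC 2203): decode the opaque length, let gss_unwrap() decrypt the
 * databody in place, verify the decrypted sequence number, then reset
 * the xdr_stream because the receive buffer has been modified.
 */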
2033 static noinline_for_stack int
2034 gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
2035 struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
2036 struct xdr_stream *xdr)
2037 {
2038 struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
2039 struct kvec *head = rqstp->rq_rcv_buf.head;
2040 u32 offset, opaque_len, maj_stat;
2041 __be32 *p;
2042
2043 p = xdr_inline_decode(xdr, 2 * sizeof(*p));
2044 if (unlikely(!p))
2045 goto unwrap_failed;
2046 opaque_len = be32_to_cpup(p++);
2047 offset = (u8 *)(p) - (u8 *)head->iov_base;
2048 if (offset + opaque_len > rcv_buf->len)
2049 goto unwrap_failed;
2050
2051 maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset,
2052 offset + opaque_len, rcv_buf);
2053 if (maj_stat == GSS_S_CONTEXT_EXPIRED)
2054 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
2055 if (maj_stat != GSS_S_COMPLETE)
2056 goto bad_unwrap;
2057 /* gss_unwrap decrypted the sequence number */
2058 if (be32_to_cpup(p++) != rqstp->rq_seqno)
2059 goto bad_seqno;
2060
2061 /* gss_unwrap redacts the opaque blob from the head iovec.
2062 * rcv_buf has changed, thus the stream needs to be reset.
2063 */
2064 xdr_init_decode(xdr, rcv_buf, p, rqstp);
2065
2066 gss_update_rslack(task, cred, 2 + ctx->gc_gss_ctx->align,
2067 2 + ctx->gc_gss_ctx->slack);
2068
2069 return 0;
2070 unwrap_failed:
2071 trace_rpcgss_unwrap_failed(task);
2072 return -EIO;
2073 bad_seqno:
2074 trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p));
2075 return -EIO;
2076 bad_unwrap:
2077 trace_rpcgss_unwrap(task, maj_stat);
2078 return -EIO;
2079 }
2080
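/*
 * Wrap-around-safe serial number comparison: "new" is considered newer
 * than "old" if it is ahead by less than half the 32-bit sequence space.
 */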
2081 static bool
2082 gss_seq_is_newer(u32 new, u32 old)
2083 {
2084 return (s32)(new - old) > 0;
2085 }
2086
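/*
 * Decide whether a request must be re-encoded before (re)transmission:
 * if its GSS sequence number has fallen outside the server's sequence
 * window relative to the highest sequence number already transmitted,
 * the server would discard it, so the request needs a fresh sequence
 * number and therefore a fresh encode.
 */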
2087 static bool
2088 gss_xmit_need_reencode(struct rpc_task *task)
2089 {
2090 struct rpc_rqst *req = task->tk_rqstp;
2091 struct rpc_cred *cred = req->rq_cred;
2092 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
2093 u32 win, seq_xmit = 0;
2094 bool ret = true;
2095
2096 if (!ctx)
2097 goto out;
2098
2099 if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
2100 goto out_ctx;
2101
2102 seq_xmit = READ_ONCE(ctx->gc_seq_xmit);
2103 while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) {
2104 u32 tmp = seq_xmit;
2105
2106 seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
2107 if (seq_xmit == tmp) {
2108 ret = false;
2109 goto out_ctx;
2110 }
2111 }
2112
2113 win = ctx->gc_win;
2114 if (win > 0)
2115 ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win);
2116
2117 out_ctx:
2118 gss_put_ctx(ctx);
2119 out:
2120 trace_rpcgss_need_reencode(task, seq_xmit, ret);
2121 return ret;
2122 }
2123
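/*
 * Entry point for unwrapping an incoming reply: mirrors gss_wrap_req(),
 * dispatching on the negotiated GSS service before handing the payload
 * to the upper layer's decoder.
 */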
2124 static int
2125 gss_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
2126 {
2127 struct rpc_rqst *rqstp = task->tk_rqstp;
2128 struct rpc_cred *cred = rqstp->rq_cred;
2129 struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
2130 gc_base);
2131 struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
2132 int status = -EIO;
2133
2134 if (ctx->gc_proc != RPC_GSS_PROC_DATA)
2135 goto out_decode;
2136 switch (gss_cred->gc_service) {
2137 case RPC_GSS_SVC_NONE:
2138 status = gss_unwrap_resp_auth(task, cred);
2139 break;
2140 case RPC_GSS_SVC_INTEGRITY:
2141 status = gss_unwrap_resp_integ(task, cred, ctx, rqstp, xdr);
2142 break;
2143 case RPC_GSS_SVC_PRIVACY:
2144 status = gss_unwrap_resp_priv(task, cred, ctx, rqstp, xdr);
2145 break;
2146 }
2147 if (status)
2148 goto out;
2149
2150 out_decode:
2151 status = rpcauth_unwrap_resp_decode(task, xdr);
2152 out:
2153 gss_put_ctx(ctx);
2154 return status;
2155 }
2156
2157 static const struct rpc_authops authgss_ops = {
2158 .owner = THIS_MODULE,
2159 .au_flavor = RPC_AUTH_GSS,
2160 .au_name = "RPCSEC_GSS",
2161 .create = gss_create,
2162 .destroy = gss_destroy,
2163 .hash_cred = gss_hash_cred,
2164 .lookup_cred = gss_lookup_cred,
2165 .crcreate = gss_create_cred,
2166 .info2flavor = gss_mech_info2flavor,
2167 .flavor2info = gss_mech_flavor2info,
2168 };
2169
2170 static const struct rpc_credops gss_credops = {
2171 .cr_name = "AUTH_GSS",
2172 .crdestroy = gss_destroy_cred,
2173 .cr_init = gss_cred_init,
2174 .crmatch = gss_match,
2175 .crmarshal = gss_marshal,
2176 .crrefresh = gss_refresh,
2177 .crvalidate = gss_validate,
2178 .crwrap_req = gss_wrap_req,
2179 .crunwrap_resp = gss_unwrap_resp,
2180 .crkey_timeout = gss_key_timeout,
2181 .crstringify_acceptor = gss_stringify_acceptor,
2182 .crneed_reencode = gss_xmit_need_reencode,
2183 };
2184
2185 static const struct rpc_credops gss_nullops = {
2186 .cr_name = "AUTH_GSS",
2187 .crdestroy = gss_destroy_nullcred,
2188 .crmatch = gss_match,
2189 .crmarshal = gss_marshal,
2190 .crrefresh = gss_refresh_null,
2191 .crvalidate = gss_validate,
2192 .crwrap_req = gss_wrap_req,
2193 .crunwrap_resp = gss_unwrap_resp,
2194 .crstringify_acceptor = gss_stringify_acceptor,
2195 };
2196
2197 static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
2198 .upcall = gss_v0_upcall,
2199 .downcall = gss_pipe_downcall,
2200 .destroy_msg = gss_pipe_destroy_msg,
2201 .open_pipe = gss_pipe_open_v0,
2202 .release_pipe = gss_pipe_release,
2203 };
2204
2205 static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
2206 .upcall = gss_v1_upcall,
2207 .downcall = gss_pipe_downcall,
2208 .destroy_msg = gss_pipe_destroy_msg,
2209 .open_pipe = gss_pipe_open_v1,
2210 .release_pipe = gss_pipe_release,
2211 };
2212
2213 static __net_init int rpcsec_gss_init_net(struct net *net)
2214 {
2215 return gss_svc_init_net(net);
2216 }
2217
2218 static __net_exit void rpcsec_gss_exit_net(struct net *net)
2219 {
2220 gss_svc_shutdown_net(net);
2221 }
2222
2223 static struct pernet_operations rpcsec_gss_net_ops = {
2224 .init = rpcsec_gss_init_net,
2225 .exit = rpcsec_gss_exit_net,
2226 };
2227
2228 /*
2229 * Initialize RPCSEC_GSS module
2230 */
2231 static int __init init_rpcsec_gss(void)
2232 {
2233 int err = 0;
2234
2235 err = rpcauth_register(&authgss_ops);
2236 if (err)
2237 goto out;
2238 err = gss_svc_init();
2239 if (err)
2240 goto out_unregister;
2241 err = register_pernet_subsys(&rpcsec_gss_net_ops);
2242 if (err)
2243 goto out_svc_exit;
2244 rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
2245 return 0;
2246 out_svc_exit:
2247 gss_svc_shutdown();
2248 out_unregister:
2249 rpcauth_unregister(&authgss_ops);
2250 out:
2251 return err;
2252 }
2253
2254 static void __exit exit_rpcsec_gss(void)
2255 {
2256 unregister_pernet_subsys(&rpcsec_gss_net_ops);
2257 gss_svc_shutdown();
2258 rpcauth_unregister(&authgss_ops);
2259 rcu_barrier(); /* Wait for completion of call_rcu()'s */
2260 }
2261
2262 MODULE_ALIAS("rpc-auth-6");
2263 MODULE_LICENSE("GPL");
2264 module_param_named(expired_cred_retry_delay,
2265 gss_expired_cred_retry_delay,
2266 uint, 0644);
2267 MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
2268 "the RPC engine retries an expired credential");
2269
2270 module_param_named(key_expire_timeo,
2271 gss_key_expire_timeo,
2272 uint, 0644);
2273 MODULE_PARM_DESC(key_expire_timeo, "Time (in seconds) at the end of a "
2274 "credential key's lifetime during which the NFS layer cleans up "
2275 "prior to key expiration");
2276
2277 module_init(init_rpcsec_gss)
2278 module_exit(exit_rpcsec_gss)
2279