1 /* Basic authentication token and access key management
2  *
3  * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/poison.h>
15 #include <linux/sched.h>
16 #include <linux/slab.h>
17 #include <linux/security.h>
18 #include <linux/workqueue.h>
19 #include <linux/random.h>
20 #include <linux/err.h>
21 #include "internal.h"
22 
23 static struct kmem_cache	*key_jar;
24 struct rb_root		key_serial_tree; /* tree of keys indexed by serial */
25 DEFINE_SPINLOCK(key_serial_lock);
26 
27 struct rb_root	key_user_tree; /* tree of quota records indexed by UID */
28 DEFINE_SPINLOCK(key_user_lock);
29 
30 unsigned int key_quota_root_maxkeys = 200;	/* root's key count quota */
31 unsigned int key_quota_root_maxbytes = 20000;	/* root's key space quota */
32 unsigned int key_quota_maxkeys = 200;		/* general key count quota */
33 unsigned int key_quota_maxbytes = 20000;	/* general key space quota */
34 
35 static LIST_HEAD(key_types_list);
36 static DECLARE_RWSEM(key_types_sem);
37 
38 static void key_cleanup(struct work_struct *work);
39 static DECLARE_WORK(key_cleanup_task, key_cleanup);
40 
41 /* we serialise key instantiation and link */
42 DEFINE_MUTEX(key_construction_mutex);
43 
44 /* any key whose type gets unregistered will be re-typed to this */
45 static struct key_type key_type_dead = {
46 	.name		= "dead",
47 };
48 
49 #ifdef KEY_DEBUGGING
50 void __key_check(const struct key *key)
51 {
52 	printk("__key_check: key %p {%08x} should be {%08x}\n",
53 	       key, key->magic, KEY_DEBUG_MAGIC);
54 	BUG();
55 }
56 #endif
57 
58 /*****************************************************************************/
59 /*
60  * get the key quota record for a user, allocating a new record if one doesn't
61  * already exist
62  */
63 struct key_user *key_user_lookup(uid_t uid)
64 {
65 	struct key_user *candidate = NULL, *user;
66 	struct rb_node *parent = NULL;
67 	struct rb_node **p;
68 
69  try_again:
70 	p = &key_user_tree.rb_node;
71 	spin_lock(&key_user_lock);
72 
73 	/* search the tree for a user record with a matching UID */
74 	while (*p) {
75 		parent = *p;
76 		user = rb_entry(parent, struct key_user, node);
77 
78 		if (uid < user->uid)
79 			p = &(*p)->rb_left;
80 		else if (uid > user->uid)
81 			p = &(*p)->rb_right;
82 		else
83 			goto found;
84 	}
85 
86 	/* if we get here, we failed to find a match in the tree */
87 	if (!candidate) {
88 		/* allocate a candidate user record if we don't already have
89 		 * one */
90 		spin_unlock(&key_user_lock);
91 
92 		user = NULL;
93 		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
94 		if (unlikely(!candidate))
95 			goto out;
96 
97 		/* the allocation may have scheduled, so we need to repeat the
98 		 * search lest someone else added the record whilst we were
99 		 * asleep */
100 		goto try_again;
101 	}
102 
103 	/* if we get here, then the user record still hadn't appeared on the
104 	 * second pass - so we use the candidate record */
105 	atomic_set(&candidate->usage, 1);
106 	atomic_set(&candidate->nkeys, 0);
107 	atomic_set(&candidate->nikeys, 0);
108 	candidate->uid = uid;
109 	candidate->qnkeys = 0;
110 	candidate->qnbytes = 0;
111 	spin_lock_init(&candidate->lock);
112 	mutex_init(&candidate->cons_lock);
113 
114 	rb_link_node(&candidate->node, parent, p);
115 	rb_insert_color(&candidate->node, &key_user_tree);
116 	spin_unlock(&key_user_lock);
117 	user = candidate;
118 	goto out;
119 
120 	/* okay - we found a user record for this UID */
121  found:
122 	atomic_inc(&user->usage);
123 	spin_unlock(&key_user_lock);
124 	kfree(candidate);
125  out:
126 	return user;
127 
128 } /* end key_user_lookup() */
129 
130 /*****************************************************************************/
131 /*
132  * dispose of a user structure
133  */
134 void key_user_put(struct key_user *user)
135 {
136 	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
137 		rb_erase(&user->node, &key_user_tree);
138 		spin_unlock(&key_user_lock);
139 
140 		kfree(user);
141 	}
142 
143 } /* end key_user_put() */
144 
145 /*****************************************************************************/
146 /*
147  * assign a key the next unique serial number
148  * - these are assigned randomly to avoid security issues through covert
149  *   channel problems
150  */
151 static inline void key_alloc_serial(struct key *key)
152 {
153 	struct rb_node *parent, **p;
154 	struct key *xkey;
155 
156 	/* propose a random serial number and look for a hole for it in the
157 	 * serial number tree */
158 	do {
159 		get_random_bytes(&key->serial, sizeof(key->serial));
160 
161 		key->serial >>= 1; /* negative numbers are not permitted */
162 	} while (key->serial < 3);
163 
164 	spin_lock(&key_serial_lock);
165 
166 attempt_insertion:
167 	parent = NULL;
168 	p = &key_serial_tree.rb_node;
169 
170 	while (*p) {
171 		parent = *p;
172 		xkey = rb_entry(parent, struct key, serial_node);
173 
174 		if (key->serial < xkey->serial)
175 			p = &(*p)->rb_left;
176 		else if (key->serial > xkey->serial)
177 			p = &(*p)->rb_right;
178 		else
179 			goto serial_exists;
180 	}
181 
182 	/* we've found a suitable hole - arrange for this key to occupy it */
183 	rb_link_node(&key->serial_node, parent, p);
184 	rb_insert_color(&key->serial_node, &key_serial_tree);
185 
186 	spin_unlock(&key_serial_lock);
187 	return;
188 
189 	/* we found a key with the proposed serial number - walk the tree from
190 	 * that point looking for the next unused serial number */
191 serial_exists:
192 	for (;;) {
193 		key->serial++;
194 		if (key->serial < 3) {
195 			key->serial = 3;
196 			goto attempt_insertion;
197 		}
198 
199 		parent = rb_next(parent);
200 		if (!parent)
201 			goto attempt_insertion;
202 
203 		xkey = rb_entry(parent, struct key, serial_node);
204 		if (key->serial < xkey->serial)
205 			goto attempt_insertion;
206 	}
207 
208 } /* end key_alloc_serial() */
209 
210 /*****************************************************************************/
211 /*
212  * allocate a key of the specified type
213  * - update the user's quota to reflect the existence of the key
214  * - called from a key-type operation with key_types_sem read-locked by
215  *   key_create_or_update()
216  *   - this prevents unregistration of the key type
217  * - upon return the key is as yet uninstantiated; the caller needs to either
218  *   instantiate the key or discard it before returning
219  */
220 struct key *key_alloc(struct key_type *type, const char *desc,
221 		      uid_t uid, gid_t gid, const struct cred *cred,
222 		      key_perm_t perm, unsigned long flags)
223 {
224 	struct key_user *user = NULL;
225 	struct key *key;
226 	size_t desclen, quotalen;
227 	int ret;
228 
229 	key = ERR_PTR(-EINVAL);
230 	if (!desc || !*desc)
231 		goto error;
232 
233 	desclen = strlen(desc) + 1;
234 	quotalen = desclen + type->def_datalen;
235 
236 	/* get hold of the key tracking for this user */
237 	user = key_user_lookup(uid);
238 	if (!user)
239 		goto no_memory_1;
240 
241 	/* check that the user's quota permits allocation of another key and
242 	 * its description */
243 	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
244 		unsigned maxkeys = (uid == 0) ?
245 			key_quota_root_maxkeys : key_quota_maxkeys;
246 		unsigned maxbytes = (uid == 0) ?
247 			key_quota_root_maxbytes : key_quota_maxbytes;
248 
249 		spin_lock(&user->lock);
250 		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
251 			if (user->qnkeys + 1 >= maxkeys ||
252 			    user->qnbytes + quotalen >= maxbytes ||
253 			    user->qnbytes + quotalen < user->qnbytes)
254 				goto no_quota;
255 		}
256 
257 		user->qnkeys++;
258 		user->qnbytes += quotalen;
259 		spin_unlock(&user->lock);
260 	}
261 
262 	/* allocate and initialise the key and its description */
263 	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
264 	if (!key)
265 		goto no_memory_2;
266 
267 	if (desc) {
268 		key->description = kmemdup(desc, desclen, GFP_KERNEL);
269 		if (!key->description)
270 			goto no_memory_3;
271 	}
272 
273 	atomic_set(&key->usage, 1);
274 	init_rwsem(&key->sem);
275 	key->type = type;
276 	key->user = user;
277 	key->quotalen = quotalen;
278 	key->datalen = type->def_datalen;
279 	key->uid = uid;
280 	key->gid = gid;
281 	key->perm = perm;
282 	key->flags = 0;
283 	key->expiry = 0;
284 	key->payload.data = NULL;
285 	key->security = NULL;
286 
287 	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
288 		key->flags |= 1 << KEY_FLAG_IN_QUOTA;
289 
290 	memset(&key->type_data, 0, sizeof(key->type_data));
291 
292 #ifdef KEY_DEBUGGING
293 	key->magic = KEY_DEBUG_MAGIC;
294 #endif
295 
296 	/* let the security module know about the key */
297 	ret = security_key_alloc(key, cred, flags);
298 	if (ret < 0)
299 		goto security_error;
300 
301 	/* publish the key by giving it a serial number */
302 	atomic_inc(&user->nkeys);
303 	key_alloc_serial(key);
304 
305 error:
306 	return key;
307 
308 security_error:
309 	kfree(key->description);
310 	kmem_cache_free(key_jar, key);
311 	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
312 		spin_lock(&user->lock);
313 		user->qnkeys--;
314 		user->qnbytes -= quotalen;
315 		spin_unlock(&user->lock);
316 	}
317 	key_user_put(user);
318 	key = ERR_PTR(ret);
319 	goto error;
320 
321 no_memory_3:
322 	kmem_cache_free(key_jar, key);
323 no_memory_2:
324 	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
325 		spin_lock(&user->lock);
326 		user->qnkeys--;
327 		user->qnbytes -= quotalen;
328 		spin_unlock(&user->lock);
329 	}
330 	key_user_put(user);
331 no_memory_1:
332 	key = ERR_PTR(-ENOMEM);
333 	goto error;
334 
335 no_quota:
336 	spin_unlock(&user->lock);
337 	key_user_put(user);
338 	key = ERR_PTR(-EDQUOT);
339 	goto error;
340 
341 } /* end key_alloc() */
342 
343 EXPORT_SYMBOL(key_alloc);
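
/*
 * Illustrative sketch (not part of the original file): allocating an
 * as-yet-uninstantiated key from kernel code.  The helper name, description
 * and permission mask are hypothetical.  The caller must go on to either
 * instantiate the key or discard it with key_put().
 */
#if 0
static struct key *example_alloc_key(struct key_type *type,
                                     const struct cred *cred)
{
        struct key *key;

        /* KEY_ALLOC_NOT_IN_QUOTA exempts the key from the owner's quotas;
         * pass KEY_ALLOC_IN_QUOTA to have it counted as normal */
        key = key_alloc(type, "example:desc",
                        cred->fsuid, cred->fsgid, cred,
                        KEY_POS_ALL | KEY_USR_VIEW,
                        KEY_ALLOC_NOT_IN_QUOTA);

        /* on failure this is ERR_PTR(-EINVAL), ERR_PTR(-ENOMEM) or
         * ERR_PTR(-EDQUOT), never NULL */
        return key;
}
#endif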
344 
345 /*****************************************************************************/
346 /*
347  * reserve an amount of quota for the key's payload
348  */
349 int key_payload_reserve(struct key *key, size_t datalen)
350 {
351 	int delta = (int) datalen - key->datalen;
352 	int ret = 0;
353 
354 	key_check(key);
355 
356 	/* contemplate the quota adjustment */
357 	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
358 		unsigned maxbytes = (key->user->uid == 0) ?
359 			key_quota_root_maxbytes : key_quota_maxbytes;
360 
361 		spin_lock(&key->user->lock);
362 
363 		if (delta > 0 &&
364 		    (key->user->qnbytes + delta >= maxbytes ||
365 		     key->user->qnbytes + delta < key->user->qnbytes)) {
366 			ret = -EDQUOT;
367 		}
368 		else {
369 			key->user->qnbytes += delta;
370 			key->quotalen += delta;
371 		}
372 		spin_unlock(&key->user->lock);
373 	}
374 
375 	/* change the recorded data length if that didn't generate an error */
376 	if (ret == 0)
377 		key->datalen = datalen;
378 
379 	return ret;
380 
381 } /* end key_payload_reserve() */
382 
383 EXPORT_SYMBOL(key_payload_reserve);
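
/*
 * Illustrative sketch (not part of the original file): a key type's
 * ->update() operation might reserve quota for the new payload before
 * swapping it in.  The helper name is hypothetical, and a real key type
 * would normally free the old payload via RCU rather than directly.
 */
#if 0
static int example_update(struct key *key, const void *data, size_t datalen)
{
        void *new_payload;
        int ret;

        new_payload = kmemdup(data, datalen, GFP_KERNEL);
        if (!new_payload)
                return -ENOMEM;

        /* adjust the owner's byte quota to the new payload size */
        ret = key_payload_reserve(key, datalen);
        if (ret < 0) {
                kfree(new_payload);
                return ret;     /* -EDQUOT: quota would be exceeded */
        }

        kfree(key->payload.data);
        key->payload.data = new_payload;
        return 0;
}
#endif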
384 
385 /*****************************************************************************/
386 /*
387  * instantiate a key and link it into the target keyring atomically
388  * - called with the target keyring's semaphore writelocked
389  */
390 static int __key_instantiate_and_link(struct key *key,
391 				      const void *data,
392 				      size_t datalen,
393 				      struct key *keyring,
394 				      struct key *authkey)
395 {
396 	int ret, awaken;
397 
398 	key_check(key);
399 	key_check(keyring);
400 
401 	awaken = 0;
402 	ret = -EBUSY;
403 
404 	mutex_lock(&key_construction_mutex);
405 
406 	/* can't instantiate twice */
407 	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
408 		/* instantiate the key */
409 		ret = key->type->instantiate(key, data, datalen);
410 
411 		if (ret == 0) {
412 			/* mark the key as being instantiated */
413 			atomic_inc(&key->user->nikeys);
414 			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
415 
416 			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
417 				awaken = 1;
418 
419 			/* and link it into the destination keyring */
420 			if (keyring)
421 				ret = __key_link(keyring, key);
422 
423 			/* disable the authorisation key */
424 			if (authkey)
425 				key_revoke(authkey);
426 		}
427 	}
428 
429 	mutex_unlock(&key_construction_mutex);
430 
431 	/* wake up anyone waiting for a key to be constructed */
432 	if (awaken)
433 		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
434 
435 	return ret;
436 
437 } /* end __key_instantiate_and_link() */
438 
439 /*****************************************************************************/
440 /*
441  * instantiate a key and link it into the target keyring atomically
442  */
443 int key_instantiate_and_link(struct key *key,
444 			     const void *data,
445 			     size_t datalen,
446 			     struct key *keyring,
447 			     struct key *authkey)
448 {
449 	int ret;
450 
451 	if (keyring)
452 		down_write(&keyring->sem);
453 
454 	ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey);
455 
456 	if (keyring)
457 		up_write(&keyring->sem);
458 
459 	return ret;
460 
461 } /* end key_instantiate_and_link() */
462 
463 EXPORT_SYMBOL(key_instantiate_and_link);
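
/*
 * Illustrative sketch (not part of the original file): publishing a freshly
 * allocated key by instantiating it and linking it into a destination
 * keyring in one step.  The helper name and payload are hypothetical.
 */
#if 0
static int example_publish_key(struct key *key, struct key *dest_keyring,
                               const void *payload, size_t plen)
{
        int ret;

        /* takes dest_keyring->sem for us; no authorisation key to revoke */
        ret = key_instantiate_and_link(key, payload, plen, dest_keyring, NULL);
        if (ret < 0)
                key_put(key);   /* drop our reference; cleanup reaps the key */
        return ret;
}
#endif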
464 
465 /*****************************************************************************/
466 /*
467  * negatively instantiate a key and link it into the target keyring atomically
468  */
469 int key_negate_and_link(struct key *key,
470 			unsigned timeout,
471 			struct key *keyring,
472 			struct key *authkey)
473 {
474 	struct timespec now;
475 	int ret, awaken;
476 
477 	key_check(key);
478 	key_check(keyring);
479 
480 	awaken = 0;
481 	ret = -EBUSY;
482 
483 	if (keyring)
484 		down_write(&keyring->sem);
485 
486 	mutex_lock(&key_construction_mutex);
487 
488 	/* can't instantiate twice */
489 	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
490 		/* mark the key as being negatively instantiated */
491 		atomic_inc(&key->user->nikeys);
492 		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
493 		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
494 		now = current_kernel_time();
495 		key->expiry = now.tv_sec + timeout;
496 
497 		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
498 			awaken = 1;
499 
500 		ret = 0;
501 
502 		/* and link it into the destination keyring */
503 		if (keyring)
504 			ret = __key_link(keyring, key);
505 
506 		/* disable the authorisation key */
507 		if (authkey)
508 			key_revoke(authkey);
509 	}
510 
511 	mutex_unlock(&key_construction_mutex);
512 
513 	if (keyring)
514 		up_write(&keyring->sem);
515 
516 	/* wake up anyone waiting for a key to be constructed */
517 	if (awaken)
518 		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
519 
520 	return ret;
521 
522 } /* end key_negate_and_link() */
523 
524 EXPORT_SYMBOL(key_negate_and_link);
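
/*
 * Illustrative sketch (not part of the original file): if an upcall fails to
 * produce a payload, the failure can be cached by negatively instantiating
 * the key so that repeated searches fail quickly instead of re-triggering
 * construction.  The helper name and 60-second timeout are hypothetical.
 */
#if 0
static void example_cache_failure(struct key *key, struct key *keyring,
                                  struct key *authkey)
{
        /* searchers see -ENOKEY until the timeout expires */
        key_negate_and_link(key, 60, keyring, authkey);
}
#endif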
525 
526 /*****************************************************************************/
527 /*
528  * do cleaning up in process context so that we don't have to disable
529  * interrupts all over the place
530  */
531 static void key_cleanup(struct work_struct *work)
532 {
533 	struct rb_node *_n;
534 	struct key *key;
535 
536  go_again:
537 	/* look for a dead key in the tree */
538 	spin_lock(&key_serial_lock);
539 
540 	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
541 		key = rb_entry(_n, struct key, serial_node);
542 
543 		if (atomic_read(&key->usage) == 0)
544 			goto found_dead_key;
545 	}
546 
547 	spin_unlock(&key_serial_lock);
548 	return;
549 
550  found_dead_key:
551 	/* we found a dead key - once we've removed it from the tree, we can
552 	 * drop the lock */
553 	rb_erase(&key->serial_node, &key_serial_tree);
554 	spin_unlock(&key_serial_lock);
555 
556 	key_check(key);
557 
558 	security_key_free(key);
559 
560 	/* deal with the user's key tracking and quota */
561 	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
562 		spin_lock(&key->user->lock);
563 		key->user->qnkeys--;
564 		key->user->qnbytes -= key->quotalen;
565 		spin_unlock(&key->user->lock);
566 	}
567 
568 	atomic_dec(&key->user->nkeys);
569 	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
570 		atomic_dec(&key->user->nikeys);
571 
572 	key_user_put(key->user);
573 
574 	/* now throw away the key memory */
575 	if (key->type->destroy)
576 		key->type->destroy(key);
577 
578 	kfree(key->description);
579 
580 #ifdef KEY_DEBUGGING
581 	key->magic = KEY_DEBUG_MAGIC_X;
582 #endif
583 	kmem_cache_free(key_jar, key);
584 
585 	/* there may, of course, be more than one key to destroy */
586 	goto go_again;
587 
588 } /* end key_cleanup() */
589 
590 /*****************************************************************************/
591 /*
592  * dispose of a reference to a key
593  * - when all the references are gone, we schedule the cleanup task to come and
594  *   pull it out of the tree in definite process context
595  */
596 void key_put(struct key *key)
597 {
598 	if (key) {
599 		key_check(key);
600 
601 		if (atomic_dec_and_test(&key->usage))
602 			schedule_work(&key_cleanup_task);
603 	}
604 
605 } /* end key_put() */
606 
607 EXPORT_SYMBOL(key_put);
608 
609 /*****************************************************************************/
610 /*
611  * find a key by its serial number
612  */
613 struct key *key_lookup(key_serial_t id)
614 {
615 	struct rb_node *n;
616 	struct key *key;
617 
618 	spin_lock(&key_serial_lock);
619 
620 	/* search the tree for the specified key */
621 	n = key_serial_tree.rb_node;
622 	while (n) {
623 		key = rb_entry(n, struct key, serial_node);
624 
625 		if (id < key->serial)
626 			n = n->rb_left;
627 		else if (id > key->serial)
628 			n = n->rb_right;
629 		else
630 			goto found;
631 	}
632 
633  not_found:
634 	key = ERR_PTR(-ENOKEY);
635 	goto error;
636 
637  found:
638 	/* pretend it doesn't exist if it's dead */
639 	if (atomic_read(&key->usage) == 0 ||
640 	    test_bit(KEY_FLAG_DEAD, &key->flags) ||
641 	    key->type == &key_type_dead)
642 		goto not_found;
643 
644 	/* this races with key_put(), but that doesn't matter since key_put()
645 	 * doesn't actually change the key
646 	 */
647 	atomic_inc(&key->usage);
648 
649  error:
650 	spin_unlock(&key_serial_lock);
651 	return key;
652 
653 } /* end key_lookup() */
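
/*
 * Illustrative sketch (not part of the original file): resolving a serial
 * number to a key, much as the keyctl system calls do through this function.
 * The helper name is hypothetical; the reference taken by key_lookup() must
 * eventually be dropped with key_put().
 */
#if 0
static struct key *example_find_by_serial(key_serial_t serial)
{
        struct key *key = key_lookup(serial);

        if (IS_ERR(key))
                return NULL;    /* -ENOKEY: no such live key */

        return key;             /* caller calls key_put(key) when done */
}
#endif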
654 
655 /*****************************************************************************/
656 /*
657  * find and lock the specified key type against removal
658  * - we return with the sem readlocked
659  */
660 struct key_type *key_type_lookup(const char *type)
661 {
662 	struct key_type *ktype;
663 
664 	down_read(&key_types_sem);
665 
666 	/* look up the key type to see if it's one of the registered kernel
667 	 * types */
668 	list_for_each_entry(ktype, &key_types_list, link) {
669 		if (strcmp(ktype->name, type) == 0)
670 			goto found_kernel_type;
671 	}
672 
673 	up_read(&key_types_sem);
674 	ktype = ERR_PTR(-ENOKEY);
675 
676  found_kernel_type:
677 	return ktype;
678 
679 } /* end key_type_lookup() */
680 
681 /*****************************************************************************/
682 /*
683  * unlock a key type
684  */
685 void key_type_put(struct key_type *ktype)
686 {
687 	up_read(&key_types_sem);
688 
689 } /* end key_type_put() */
690 
691 /*****************************************************************************/
692 /*
693  * attempt to update an existing key
694  * - the key has an incremented refcount
695  * - we need to put the key if we get an error
696  */
697 static inline key_ref_t __key_update(key_ref_t key_ref,
698 				     const void *payload, size_t plen)
699 {
700 	struct key *key = key_ref_to_ptr(key_ref);
701 	int ret;
702 
703 	/* need write permission on the key to update it */
704 	ret = key_permission(key_ref, KEY_WRITE);
705 	if (ret < 0)
706 		goto error;
707 
708 	ret = -EEXIST;
709 	if (!key->type->update)
710 		goto error;
711 
712 	down_write(&key->sem);
713 
714 	ret = key->type->update(key, payload, plen);
715 	if (ret == 0)
716 		/* updating a negative key instantiates it */
717 		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
718 
719 	up_write(&key->sem);
720 
721 	if (ret < 0)
722 		goto error;
723 out:
724 	return key_ref;
725 
726 error:
727 	key_put(key);
728 	key_ref = ERR_PTR(ret);
729 	goto out;
730 
731 } /* end __key_update() */
732 
733 /*****************************************************************************/
734 /*
735  * search the specified keyring for a key of the same description; if one is
736  * found, update it, otherwise add a new one
737  */
738 key_ref_t key_create_or_update(key_ref_t keyring_ref,
739 			       const char *type,
740 			       const char *description,
741 			       const void *payload,
742 			       size_t plen,
743 			       key_perm_t perm,
744 			       unsigned long flags)
745 {
746 	const struct cred *cred = current_cred();
747 	struct key_type *ktype;
748 	struct key *keyring, *key = NULL;
749 	key_ref_t key_ref;
750 	int ret;
751 
752 	/* look up the key type to see if it's one of the registered kernel
753 	 * types */
754 	ktype = key_type_lookup(type);
755 	if (IS_ERR(ktype)) {
756 		key_ref = ERR_PTR(-ENODEV);
757 		goto error;
758 	}
759 
760 	key_ref = ERR_PTR(-EINVAL);
761 	if (!ktype->match || !ktype->instantiate)
762 		goto error_2;
763 
764 	keyring = key_ref_to_ptr(keyring_ref);
765 
766 	key_check(keyring);
767 
768 	key_ref = ERR_PTR(-ENOTDIR);
769 	if (keyring->type != &key_type_keyring)
770 		goto error_2;
771 
772 	down_write(&keyring->sem);
773 
774 	/* if we're going to allocate a new key, we're going to have
775 	 * to modify the keyring */
776 	ret = key_permission(keyring_ref, KEY_WRITE);
777 	if (ret < 0) {
778 		key_ref = ERR_PTR(ret);
779 		goto error_3;
780 	}
781 
782 	/* if it's possible to update this type of key, search for an existing
783 	 * key of the same type and description in the destination keyring and
784 	 * update that instead if possible
785 	 */
786 	if (ktype->update) {
787 		key_ref = __keyring_search_one(keyring_ref, ktype, description,
788 					       0);
789 		if (!IS_ERR(key_ref))
790 			goto found_matching_key;
791 	}
792 
793 	/* if the client doesn't provide, decide on the permissions we want */
794 	if (perm == KEY_PERM_UNDEF) {
795 		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
796 		perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;
797 
798 		if (ktype->read)
799 			perm |= KEY_POS_READ | KEY_USR_READ;
800 
801 		if (ktype == &key_type_keyring || ktype->update)
802 			perm |= KEY_USR_WRITE;
803 	}
804 
805 	/* allocate a new key */
806 	key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
807 			perm, flags);
808 	if (IS_ERR(key)) {
809 		key_ref = ERR_CAST(key);
810 		goto error_3;
811 	}
812 
813 	/* instantiate it and link it into the target keyring */
814 	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
815 	if (ret < 0) {
816 		key_put(key);
817 		key_ref = ERR_PTR(ret);
818 		goto error_3;
819 	}
820 
821 	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
822 
823  error_3:
824 	up_write(&keyring->sem);
825  error_2:
826 	key_type_put(ktype);
827  error:
828 	return key_ref;
829 
830  found_matching_key:
831 	/* we found a matching key, so we're going to try to update it
832 	 * - we can drop the locks first as we have the key pinned
833 	 */
834 	up_write(&keyring->sem);
835 	key_type_put(ktype);
836 
837 	key_ref = __key_update(key_ref, payload, plen);
838 	goto error;
839 
840 } /* end key_create_or_update() */
841 
842 EXPORT_SYMBOL(key_create_or_update);
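
/*
 * Illustrative sketch (not part of the original file): adding or updating a
 * "user"-type key in a keyring, much as the add_key() system call does.  The
 * helper name, keyring, description and payload are hypothetical.
 */
#if 0
static int example_add_user_key(struct key *keyring, const char *desc,
                                const void *blob, size_t len)
{
        key_ref_t kref;

        /* treat the keyring as possessed, let default permissions be chosen
         * and count the key against the owner's quota */
        kref = key_create_or_update(make_key_ref(keyring, 1), "user", desc,
                                    blob, len, KEY_PERM_UNDEF,
                                    KEY_ALLOC_IN_QUOTA);
        if (IS_ERR(kref))
                return PTR_ERR(kref);

        key_ref_put(kref);      /* drop the reference on the new/updated key */
        return 0;
}
#endif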
843 
844 /*****************************************************************************/
845 /*
846  * update a key
847  */
848 int key_update(key_ref_t key_ref, const void *payload, size_t plen)
849 {
850 	struct key *key = key_ref_to_ptr(key_ref);
851 	int ret;
852 
853 	key_check(key);
854 
855 	/* the key must be writable */
856 	ret = key_permission(key_ref, KEY_WRITE);
857 	if (ret < 0)
858 		goto error;
859 
860 	/* attempt to update it if supported */
861 	ret = -EOPNOTSUPP;
862 	if (key->type->update) {
863 		down_write(&key->sem);
864 
865 		ret = key->type->update(key, payload, plen);
866 		if (ret == 0)
867 			/* updating a negative key instantiates it */
868 			clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
869 
870 		up_write(&key->sem);
871 	}
872 
873  error:
874 	return ret;
875 
876 } /* end key_update() */
877 
878 EXPORT_SYMBOL(key_update);
879 
880 /*****************************************************************************/
881 /*
882  * revoke a key
883  */
884 void key_revoke(struct key *key)
885 {
886 	key_check(key);
887 
888 	/* make sure no one's trying to change or use the key when we mark it
889 	 * - we tell lockdep that we might nest because we might be revoking an
890 	 *   authorisation key whilst holding the sem on a key we've just
891 	 *   instantiated
892 	 */
893 	down_write_nested(&key->sem, 1);
894 	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
895 	    key->type->revoke)
896 		key->type->revoke(key);
897 
898 	up_write(&key->sem);
899 
900 } /* end key_revoke() */
901 
902 EXPORT_SYMBOL(key_revoke);
903 
904 /*****************************************************************************/
905 /*
906  * register a type of key
907  */
908 int register_key_type(struct key_type *ktype)
909 {
910 	struct key_type *p;
911 	int ret;
912 
913 	ret = -EEXIST;
914 	down_write(&key_types_sem);
915 
916 	/* disallow key types with the same name */
917 	list_for_each_entry(p, &key_types_list, link) {
918 		if (strcmp(p->name, ktype->name) == 0)
919 			goto out;
920 	}
921 
922 	/* store the type */
923 	list_add(&ktype->link, &key_types_list);
924 	ret = 0;
925 
926  out:
927 	up_write(&key_types_sem);
928 	return ret;
929 
930 } /* end register_key_type() */
931 
932 EXPORT_SYMBOL(register_key_type);
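
/*
 * Illustrative sketch (not part of the original file): a minimal key type
 * and its registration, typically done from a module's init/exit hooks.
 * The "example" type name and its operations are hypothetical.
 */
#if 0
static int example_instantiate(struct key *key, const void *data,
                               size_t datalen)
{
        /* stash a copy of the caller-supplied payload */
        key->payload.data = kmemdup(data, datalen, GFP_KERNEL);
        return key->payload.data ? 0 : -ENOMEM;
}

static int example_match(const struct key *key, const void *description)
{
        /* match on the exact description string */
        return strcmp(key->description, description) == 0;
}

static void example_destroy(struct key *key)
{
        kfree(key->payload.data);
}

static struct key_type key_type_example = {
        .name           = "example",
        .instantiate    = example_instantiate,
        .match          = example_match,
        .destroy        = example_destroy,
};

static int __init example_init(void)
{
        return register_key_type(&key_type_example); /* -EEXIST on name clash */
}

static void __exit example_exit(void)
{
        unregister_key_type(&key_type_example);
}
#endif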
933 
934 /*****************************************************************************/
935 /*
936  * unregister a type of key
937  */
938 void unregister_key_type(struct key_type *ktype)
939 {
940 	struct rb_node *_n;
941 	struct key *key;
942 
943 	down_write(&key_types_sem);
944 
945 	/* withdraw the key type */
946 	list_del_init(&ktype->link);
947 
948 	/* mark all the keys of this type dead */
949 	spin_lock(&key_serial_lock);
950 
951 	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
952 		key = rb_entry(_n, struct key, serial_node);
953 
954 		if (key->type == ktype)
955 			key->type = &key_type_dead;
956 	}
957 
958 	spin_unlock(&key_serial_lock);
959 
960 	/* make sure everyone revalidates their keys */
961 	synchronize_rcu();
962 
963 	/* we should now be able to destroy the payloads of all the keys of
964 	 * this type with impunity */
965 	spin_lock(&key_serial_lock);
966 
967 	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
968 		key = rb_entry(_n, struct key, serial_node);
969 
970 		if (key->type == ktype) {
971 			if (ktype->destroy)
972 				ktype->destroy(key);
973 			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
974 		}
975 	}
976 
977 	spin_unlock(&key_serial_lock);
978 	up_write(&key_types_sem);
979 
980 } /* end unregister_key_type() */
981 
982 EXPORT_SYMBOL(unregister_key_type);
983 
984 /*****************************************************************************/
985 /*
986  * initialise the key management stuff
987  */
988 void __init key_init(void)
989 {
990 	/* allocate a slab in which we can store keys */
991 	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
992 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
993 
994 	/* add the special key types */
995 	list_add_tail(&key_type_keyring.link, &key_types_list);
996 	list_add_tail(&key_type_dead.link, &key_types_list);
997 	list_add_tail(&key_type_user.link, &key_types_list);
998 
999 	/* record the root user tracking */
1000 	rb_link_node(&root_key_user.node,
1001 		     NULL,
1002 		     &key_user_tree.rb_node);
1003 
1004 	rb_insert_color(&root_key_user.node,
1005 			&key_user_tree);
1006 
1007 } /* end key_init() */
1008