• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/obdclass/lustre_handles.c
37  *
38  * Author: Phil Schwan <phil@clusterfs.com>
39  */
40 
41 #define DEBUG_SUBSYSTEM S_CLASS
42 
43 #include "../include/obd_support.h"
44 #include "../include/lustre_handles.h"
45 #include "../include/lustre_lib.h"
46 
/* Counter from which new handle cookies are minted (see class_handle_hash). */
static __u64 handle_base;
/* Stride added to handle_base for each new cookie. */
#define HANDLE_INCR 7
/* Protects updates to handle_base. */
static spinlock_t handle_base_lock;

/*
 * Global (per-node) cookie -> handle hash table: an array of buckets,
 * each an RCU list of portals_handles guarded by a per-bucket spinlock.
 * Allocated in class_handle_init(), freed in class_handle_cleanup().
 */
static struct handle_bucket {
	spinlock_t	lock;
	struct list_head	head;
} *handle_hash;

#define HANDLE_HASH_SIZE (1 << 16)
#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
58 
59 /*
60  * Generate a unique 64bit cookie (hash) for a handle and insert it into
61  * global (per-node) hash-table.
62  */
class_handle_hash(struct portals_handle * h,struct portals_handle_ops * ops)63 void class_handle_hash(struct portals_handle *h,
64 		       struct portals_handle_ops *ops)
65 {
66 	struct handle_bucket *bucket;
67 
68 	LASSERT(h != NULL);
69 	LASSERT(list_empty(&h->h_link));
70 
71 	/*
72 	 * This is fast, but simplistic cookie generation algorithm, it will
73 	 * need a re-do at some point in the future for security.
74 	 */
75 	spin_lock(&handle_base_lock);
76 	handle_base += HANDLE_INCR;
77 
78 	if (unlikely(handle_base == 0)) {
79 		/*
80 		 * Cookie of zero is "dangerous", because in many places it's
81 		 * assumed that 0 means "unassigned" handle, not bound to any
82 		 * object.
83 		 */
84 		CWARN("The universe has been exhausted: cookie wrap-around.\n");
85 		handle_base += HANDLE_INCR;
86 	}
87 	h->h_cookie = handle_base;
88 	spin_unlock(&handle_base_lock);
89 
90 	h->h_ops = ops;
91 	spin_lock_init(&h->h_lock);
92 
93 	bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
94 	spin_lock(&bucket->lock);
95 	list_add_rcu(&h->h_link, &bucket->head);
96 	h->h_in = 1;
97 	spin_unlock(&bucket->lock);
98 
99 	CDEBUG(D_INFO, "added object %p with handle %#llx to hash\n",
100 	       h, h->h_cookie);
101 }
102 EXPORT_SYMBOL(class_handle_hash);
103 
/*
 * Unlink @h from its hash bucket list.  Caller must hold the bucket lock
 * (see class_handle_unhash() and cleanup_all_handles()).  The h_in flag,
 * checked under h_lock, arbitrates against a concurrent unhash.
 */
static void class_handle_unhash_nolock(struct portals_handle *h)
{
	if (list_empty(&h->h_link)) {
		CERROR("removing an already-removed handle (%#llx)\n",
		       h->h_cookie);
		return;
	}

	CDEBUG(D_INFO, "removing object %p with handle %#llx from hash\n",
	       h, h->h_cookie);

	/* Clear h_in first so class_handle2object() stops taking new refs. */
	spin_lock(&h->h_lock);
	if (h->h_in == 0) {
		/* already unhashed by someone else; nothing to do */
		spin_unlock(&h->h_lock);
		return;
	}
	h->h_in = 0;
	spin_unlock(&h->h_lock);
	list_del_rcu(&h->h_link);
}
124 
class_handle_unhash(struct portals_handle * h)125 void class_handle_unhash(struct portals_handle *h)
126 {
127 	struct handle_bucket *bucket;
128 
129 	bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
130 
131 	spin_lock(&bucket->lock);
132 	class_handle_unhash_nolock(h);
133 	spin_unlock(&bucket->lock);
134 }
135 EXPORT_SYMBOL(class_handle_unhash);
136 
/*
 * Look up the object published under @cookie.  On a hit, a reference is
 * taken via the handle's hop_addref() and the handle is returned; NULL is
 * returned when no live handle matches.
 */
void *class_handle2object(__u64 cookie)
{
	struct handle_bucket *hb;
	struct portals_handle *h;
	void *found = NULL;

	LASSERT(handle_hash != NULL);

	/* Be careful when you want to change this code. See the
	 * rcu_read_lock() definition on top this file. - jxiong */
	hb = &handle_hash[cookie & HANDLE_HASH_MASK];

	rcu_read_lock();
	list_for_each_entry_rcu(h, &hb->head, h_link) {
		if (h->h_cookie != cookie)
			continue;

		/* only hand out a reference while the handle is still live */
		spin_lock(&h->h_lock);
		if (likely(h->h_in != 0)) {
			h->h_ops->hop_addref(h);
			found = h;
		}
		spin_unlock(&h->h_lock);
		break;
	}
	rcu_read_unlock();

	return found;
}
EXPORT_SYMBOL(class_handle2object);
167 
class_handle_free_cb(struct rcu_head * rcu)168 void class_handle_free_cb(struct rcu_head *rcu)
169 {
170 	struct portals_handle *h = RCU2HANDLE(rcu);
171 	void *ptr = (void *)(unsigned long)h->h_cookie;
172 
173 	if (h->h_ops->hop_free != NULL)
174 		h->h_ops->hop_free(ptr, h->h_size);
175 	else
176 		kfree(ptr);
177 }
178 EXPORT_SYMBOL(class_handle_free_cb);
179 
class_handle_init(void)180 int class_handle_init(void)
181 {
182 	struct handle_bucket *bucket;
183 	struct timespec64 ts;
184 	int seed[2];
185 
186 	LASSERT(handle_hash == NULL);
187 
188 	handle_hash = libcfs_kvzalloc(sizeof(*bucket) * HANDLE_HASH_SIZE,
189 				      GFP_NOFS);
190 	if (handle_hash == NULL)
191 		return -ENOMEM;
192 
193 	spin_lock_init(&handle_base_lock);
194 	for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
195 	     bucket--) {
196 		INIT_LIST_HEAD(&bucket->head);
197 		spin_lock_init(&bucket->lock);
198 	}
199 
200 	/** bug 21430: add randomness to the initial base */
201 	cfs_get_random_bytes(seed, sizeof(seed));
202 	ktime_get_ts64(&ts);
203 	cfs_srand(ts.tv_sec ^ seed[0], ts.tv_nsec ^ seed[1]);
204 
205 	cfs_get_random_bytes(&handle_base, sizeof(handle_base));
206 	LASSERT(handle_base != 0ULL);
207 
208 	return 0;
209 }
210 
cleanup_all_handles(void)211 static int cleanup_all_handles(void)
212 {
213 	int rc;
214 	int i;
215 
216 	for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
217 		struct portals_handle *h;
218 
219 		spin_lock(&handle_hash[i].lock);
220 		list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
221 			CERROR("force clean handle %#llx addr %p ops %p\n",
222 			       h->h_cookie, h, h->h_ops);
223 
224 			class_handle_unhash_nolock(h);
225 			rc++;
226 		}
227 		spin_unlock(&handle_hash[i].lock);
228 	}
229 
230 	return rc;
231 }
232 
class_handle_cleanup(void)233 void class_handle_cleanup(void)
234 {
235 	int count;
236 
237 	LASSERT(handle_hash != NULL);
238 
239 	count = cleanup_all_handles();
240 
241 	kvfree(handle_hash);
242 	handle_hash = NULL;
243 
244 	if (count != 0)
245 		CERROR("handle_count at cleanup: %d\n", count);
246 }
247