/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000,2003 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.2/sys/kern/subr_kobj.c 343914 2019-02-08 16:38:30Z markj $");

#include <sys/param.h>
#include <sys/kobj.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#ifdef TEST
#include "usertest.h"
#endif

#ifdef KOBJ_STATS

u_int kobj_lookup_hits;
u_int kobj_lookup_misses;
#endif

static struct mtx kobj_mtx;
static int kobj_mutex_inited;
static int kobj_next_id = 1;

#define	KOBJ_LOCK()		mtx_lock(&kobj_mtx)
#define	KOBJ_UNLOCK()		mtx_unlock(&kobj_mtx)
#define	KOBJ_ASSERT(what)	mtx_assert(&kobj_mtx, what);
void
kobj_init_mutex(void *arg)
{
	if (!kobj_mutex_inited) {
		mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
		kobj_mutex_inited = 1;
	}
}

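/*
 * In the stock FreeBSD kernel this initialiser is wired up via
 * SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);
 * the kobj_mutex_inited guard additionally makes an explicit early
 * call (e.g. from a userspace harness built with TEST) harmless to
 * repeat.
 */
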
/*
 * This method structure is used to initialise new caches. Since the
 * desc pointer is NULL, it is guaranteed never to match any real
 * descriptors.
 */
static const struct kobj_method null_method = {
	0, 0,
};

int
kobj_error_method(void)
{

	return ENXIO;
}
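
/*
 * Sketch of how a class typically feeds this machinery (the "foo"
 * names are hypothetical; foo_doit's descriptor and its FOO_DOIT()
 * wrapper would be generated from an interface file by makeobjops(8),
 * and the interface's DEFAULT entry - frequently kobj_error_method
 * above - is what lands in desc->deflt):
 *
 *	static int foo_doit_impl(kobj_t obj, int x);
 *
 *	static kobj_method_t foo_methods[] = {
 *		KOBJMETHOD(foo_doit, foo_doit_impl),
 *		KOBJMETHOD_END
 *	};
 *
 *	DEFINE_CLASS(foo, foo_methods, sizeof(struct foo_softc));
 */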

static void
kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
{
	kobj_method_t *m = NULL;
	int i;

	/*
	 * Don't do anything if we are already compiled.
	 */
	if (cls->ops)
		return;

	/*
	 * First register any methods which need it.
	 */
	for (i = 0, m = cls->methods; m->desc; i++, m++) {
		if (m->desc->id == 0)
			m->desc->id = kobj_next_id++;
	}

	/*
	 * Then initialise the ops table.
	 */
	for (i = 0; i < KOBJ_CACHE_SIZE; i++)
		ops->cache[i] = &null_method;
	ops->cls = cls;
	cls->ops = ops;
}

void
kobj_class_compile(kobj_class_t cls)
{
	kobj_ops_t ops;

	KOBJ_ASSERT(MA_NOTOWNED);

	/*
	 * Allocate space for the compiled ops table.
	 */
	ops = bsd_malloc(sizeof(struct kobj_ops), M_KOBJ, M_NOWAIT);
	if (!ops)
		panic("%s: out of memory", __func__);

	KOBJ_LOCK();

	/*
	 * We may have lost a race for kobj_class_compile here - check
	 * to make sure someone else hasn't already compiled this
	 * class.
	 */
	if (cls->ops) {
		KOBJ_UNLOCK();
		bsd_free(ops, M_KOBJ);
		return;
	}

	kobj_class_compile_common(cls, ops);
	KOBJ_UNLOCK();
}

void
kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
{

	KASSERT(kobj_mutex_inited == 0,
	    ("%s: only supported during early cycles", __func__));

	/*
	 * Increment refs to make sure that the ops table is not freed.
	 */
	cls->refs++;
	kobj_class_compile_common(cls, ops);
}
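
/*
 * Hypothetical early-boot usage sketch: the class's ops table lives
 * in static storage, so nothing needs the allocator before malloc(9)
 * is up ("bar" names are illustrative only):
 *
 *	static struct kobj_ops bar_ops;
 *
 *	kobj_class_compile_static(&bar_class, &bar_ops);
 */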

static kobj_method_t*
kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
{
	kobj_method_t *methods = cls->methods;
	kobj_method_t *ce;

	for (ce = methods; ce && ce->desc; ce++) {
		if (ce->desc == desc) {
			return ce;
		}
	}

	return NULL;
}

static kobj_method_t*
kobj_lookup_method_mi(kobj_class_t cls,
    kobjop_desc_t desc)
{
	kobj_method_t *ce = NULL;
	kobj_class_t *basep = NULL;

	ce = kobj_lookup_method_class(cls, desc);
	if (ce)
		return ce;

	basep = cls->baseclasses;
	if (basep) {
		for (; *basep; basep++) {
			ce = kobj_lookup_method_mi(*basep, desc);
			if (ce)
				return ce;
		}
	}

	return NULL;
}

kobj_method_t*
kobj_lookup_method(kobj_class_t cls,
    kobj_method_t **cep,
    kobjop_desc_t desc)
{
	kobj_method_t *ce;

	ce = kobj_lookup_method_mi(cls, desc);
	if (!ce)
		ce = &desc->deflt;
	if (cep)
		*cep = ce;
	return ce;
}
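
/*
 * Callers normally reach this through the KOBJOPLOOKUP() macro in
 * <sys/kobj.h>, which (paraphrased here, not quoted verbatim) indexes
 * the per-class cache by descriptor id and only falls back to the
 * full lookup above on a miss:
 *
 *	ce = ops->cache[desc->id & (KOBJ_CACHE_SIZE - 1)];
 *	if (ce->desc != desc)
 *		ce = kobj_lookup_method(ops->cls, &cachep, desc);
 *
 * Because every slot starts out pointing at null_method, whose desc
 * is NULL, an unpopulated slot can never produce a false hit.
 */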

void
kobj_class_free(kobj_class_t cls)
{
	void *ops = NULL;

	KOBJ_ASSERT(MA_NOTOWNED);
	KOBJ_LOCK();

	/*
	 * Protect against a race between kobj_create and
	 * kobj_delete.
	 */
	if (cls->refs == 0) {
		/*
		 * For now we don't do anything to unregister any methods
		 * which are no longer used.
		 */

		/*
		 * Free memory and clean up.
		 */
		ops = cls->ops;
		cls->ops = NULL;
	}

	KOBJ_UNLOCK();

	if (ops)
		bsd_free(ops, M_KOBJ);
}

kobj_t
kobj_create(kobj_class_t cls,
    struct malloc_type *mtype,
    int mflags)
{
	kobj_t obj;

	/*
	 * Allocate and initialise the new object.
	 */
	obj = bsd_malloc(cls->size, mtype, mflags | M_ZERO);
	if (!obj)
		return NULL;
	kobj_init(obj, cls);

	return obj;
}
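
/*
 * Typical dynamic lifecycle, following the kobj(9) pattern (the "foo"
 * class, the M_FOO malloc type and the generated FOO_DOIT() wrapper
 * are hypothetical):
 *
 *	kobj_t obj;
 *
 *	obj = kobj_create(&foo_class, M_FOO, M_WAITOK);
 *	if (obj != NULL) {
 *		FOO_DOIT(obj, 42);	(dispatched via the compiled ops)
 *		kobj_delete(obj, M_FOO);
 *	}
 */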

static void
kobj_init_common(kobj_t obj, kobj_class_t cls)
{

	obj->ops = cls->ops;
	cls->refs++;
}

void
kobj_init(kobj_t obj, kobj_class_t cls)
{
	KOBJ_ASSERT(MA_NOTOWNED);
  retry:
	KOBJ_LOCK();

	/*
	 * Consider compiling the class' method table.
	 */
	if (!cls->ops) {
		/*
		 * kobj_class_compile doesn't want the lock held
		 * because of the call to malloc - we drop the lock
		 * and re-try.
		 */
		KOBJ_UNLOCK();
		kobj_class_compile(cls);
		goto retry;
	}

	kobj_init_common(obj, cls);

	KOBJ_UNLOCK();
}

void
kobj_init_static(kobj_t obj, kobj_class_t cls)
{

	KASSERT(kobj_mutex_inited == 0,
	    ("%s: only supported during early cycles", __func__));

	kobj_init_common(obj, cls);
}
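
/*
 * Hypothetical static counterpart to the sketch above: the object
 * itself sits in static storage (its first member must be KOBJ_FIELDS)
 * and its class was compiled with kobj_class_compile_static() before
 * the allocator existed:
 *
 *	static struct bar_softc bar_obj;
 *
 *	kobj_init_static((kobj_t)&bar_obj, &bar_class);
 */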

void
kobj_delete(kobj_t obj, struct malloc_type *mtype)
{
	kobj_class_t cls = obj->ops->cls;
	int refs;

	/*
	 * Consider freeing the compiled method table for the class
	 * after its last instance is deleted. As an optimisation, we
	 * should defer this for a short while to avoid thrashing.
	 */
	KOBJ_ASSERT(MA_NOTOWNED);
	KOBJ_LOCK();
	cls->refs--;
	refs = cls->refs;
	KOBJ_UNLOCK();

	if (!refs)
		kobj_class_free(cls);

	obj->ops = NULL;
	if (mtype)
		bsd_free(obj, mtype);
}