sys/kern/subr_kobj.c, r217326 → r227343 (full file shown; removed lines are marked with '-', added lines with '+')
/*-
 * Copyright (c) 2000,2003 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/kern/subr_kobj.c 217326 2011-01-12 19:54:19Z mdf $");
+__FBSDID("$FreeBSD: head/sys/kern/subr_kobj.c 227343 2011-11-08 15:38:21Z ed $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#ifndef TEST
#include <sys/systm.h>
#endif

#ifdef TEST
#include "usertest.h"
#endif

static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");

#ifdef KOBJ_STATS

u_int kobj_lookup_hits;
u_int kobj_lookup_misses;

SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
    &kobj_lookup_hits, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
    &kobj_lookup_misses, 0, "");

#endif

static struct mtx kobj_mtx;
static int kobj_mutex_inited;
static int kobj_next_id = 1;

/*
 * In the event that kobj_mtx has not been initialized yet,
 * we will ignore it, and run without locks in order to support
 * use of KOBJ before mutexes are available. This early in the boot
 * process, everything is single threaded and so races should not
 * happen. This is used to provide the PMAP layer on PowerPC, as well
 * as board support.
 */

#define KOBJ_LOCK()         if (kobj_mutex_inited) mtx_lock(&kobj_mtx);
#define KOBJ_UNLOCK()       if (kobj_mutex_inited) mtx_unlock(&kobj_mtx);
#define KOBJ_ASSERT(what)   if (kobj_mutex_inited) mtx_assert(&kobj_mtx,what);

SYSCTL_INT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
    &kobj_next_id, 0, "");

static void
kobj_init_mutex(void *arg)
{
        if (!kobj_mutex_inited) {
                mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
                kobj_mutex_inited = 1;
        }
}

SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);

/*
 * This method structure is used to initialise new caches. Since the
 * desc pointer is NULL, it is guaranteed never to match any read
 * descriptors.
 */
-static struct kobj_method null_method = {
+static const struct kobj_method null_method = {
        0, 0,
};

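/*
 * Fallback method implementation: it simply returns ENXIO.  Method
 * descriptors typically point their default entry at this function so
 * that calling an unimplemented method fails gracefully instead of
 * dispatching through a null pointer.
 */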
int
kobj_error_method(void)
{

        return ENXIO;
}

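/*
 * Assign a unique id to a method descriptor the first time a class
 * using it is compiled; ids are handed out from kobj_next_id under
 * kobj_mtx.  kobj_unregister_method is currently a no-op kept for
 * symmetry.
 */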
static void
kobj_register_method(struct kobjop_desc *desc)
{
        KOBJ_ASSERT(MA_OWNED);

        if (desc->id == 0) {
                desc->id = kobj_next_id++;
        }
}

static void
kobj_unregister_method(struct kobjop_desc *desc)
{
}

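/*
 * Common part of class compilation: register every method descriptor
 * listed by the class, point each cache slot at null_method (which can
 * never match a real descriptor), and link the class to its ops table.
 * The caller must hold kobj_mtx, or be running before it exists.
 */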
static void
kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
{
        kobj_method_t *m;
        int i;

        KOBJ_ASSERT(MA_OWNED);

        /*
         * Don't do anything if we are already compiled.
         */
        if (cls->ops)
                return;

        /*
         * First register any methods which need it.
         */
        for (i = 0, m = cls->methods; m->desc; i++, m++)
                kobj_register_method(m->desc);

        /*
         * Then initialise the ops table.
         */
        for (i = 0; i < KOBJ_CACHE_SIZE; i++)
                ops->cache[i] = &null_method;
        ops->cls = cls;
        cls->ops = ops;
}

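/*
 * Compile a class using a dynamically allocated ops table.  The malloc
 * is done before taking kobj_mtx, so cls->ops is re-checked afterwards
 * in case we lost a race with another compiler of the same class.
 * kobj_class_compile_static below is the variant that takes
 * caller-provided storage instead.
 */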
void
kobj_class_compile(kobj_class_t cls)
{
        kobj_ops_t ops;

        KOBJ_ASSERT(MA_NOTOWNED);

        /*
         * Allocate space for the compiled ops table.
         */
        ops = malloc(sizeof(struct kobj_ops), M_KOBJ, M_NOWAIT);
        if (!ops)
                panic("kobj_compile_methods: out of memory");

        KOBJ_LOCK();

        /*
         * We may have lost a race for kobj_class_compile here - check
         * to make sure someone else hasn't already compiled this
         * class.
         */
        if (cls->ops) {
                KOBJ_UNLOCK();
                free(ops, M_KOBJ);
                return;
        }

        kobj_class_compile_common(cls, ops);
        KOBJ_UNLOCK();
}

void
kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
{

        KOBJ_ASSERT(MA_NOTOWNED);

        /*
         * Increment refs to make sure that the ops table is not freed.
         */
        KOBJ_LOCK();

        cls->refs++;
        kobj_class_compile_common(cls, ops);

        KOBJ_UNLOCK();
}

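/*
 * Look up a method in a single class's own method table by scanning
 * for a matching descriptor pointer.
 */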
static kobj_method_t*
kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
{
        kobj_method_t *methods = cls->methods;
        kobj_method_t *ce;

        for (ce = methods; ce && ce->desc; ce++) {
                if (ce->desc == desc) {
                        return ce;
                }
        }

        return NULL;
}

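/*
 * Look up a method in a class and, failing that, recurse depth-first
 * into its base classes (kobj supports a simple form of multiple
 * inheritance via the baseclasses array).
 */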
static kobj_method_t*
kobj_lookup_method_mi(kobj_class_t cls,
    kobjop_desc_t desc)
{
        kobj_method_t *ce;
        kobj_class_t *basep;

        ce = kobj_lookup_method_class(cls, desc);
        if (ce)
                return ce;

        basep = cls->baseclasses;
        if (basep) {
                for (; *basep; basep++) {
                        ce = kobj_lookup_method_mi(*basep, desc);
                        if (ce)
                                return ce;
                }
        }

        return NULL;
}

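/*
 * Slow-path lookup used when the per-class method cache misses (the
 * KOBJ_STATS bookkeeping below corrects KOBJOPLOOKUP's optimistic
 * 'hit' count).  The winning entry is stored through *cep to refill
 * the caller's cache slot; if no class in the hierarchy implements
 * the method we fall back to the descriptor's default.
 */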
kobj_method_t*
kobj_lookup_method(kobj_class_t cls,
    kobj_method_t **cep,
    kobjop_desc_t desc)
{
        kobj_method_t *ce;

#ifdef KOBJ_STATS
        /*
         * Correct for the 'hit' assumption in KOBJOPLOOKUP and record
         * a 'miss'.
         */
        kobj_lookup_hits--;
        kobj_lookup_misses++;
#endif

        ce = kobj_lookup_method_mi(cls, desc);
        if (!ce)
                ce = desc->deflt;
        *cep = ce;
        return ce;
}

void
kobj_class_free(kobj_class_t cls)
{
        int i;
        kobj_method_t *m;
        void* ops = NULL;

        KOBJ_ASSERT(MA_NOTOWNED);
        KOBJ_LOCK();

        /*
         * Protect against a race between kobj_create and
         * kobj_delete.
         */
        if (cls->refs == 0) {
                /*
                 * Unregister any methods which are no longer used.
                 */
                for (i = 0, m = cls->methods; m->desc; i++, m++)
                        kobj_unregister_method(m->desc);

                /*
                 * Free memory and clean up.
                 */
                ops = cls->ops;
                cls->ops = NULL;
        }

        KOBJ_UNLOCK();

        if (ops)
                free(ops, M_KOBJ);
}

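/*
 * Allocate zeroed storage for a new instance of the class and
 * initialise it.  Returns NULL if the allocation fails (e.g. with
 * M_NOWAIT).
 */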
kobj_t
kobj_create(kobj_class_t cls,
    struct malloc_type *mtype,
    int mflags)
{
        kobj_t obj;

        /*
         * Allocate and initialise the new object.
         */
        obj = malloc(cls->size, mtype, mflags | M_ZERO);
        if (!obj)
                return NULL;
        kobj_init(obj, cls);

        return obj;
}

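/*
 * Bind an already-allocated object to its class, compiling the class
 * on first use and taking a reference on it.
 */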
void
kobj_init(kobj_t obj, kobj_class_t cls)
{
        KOBJ_ASSERT(MA_NOTOWNED);
 retry:
        KOBJ_LOCK();

        /*
         * Consider compiling the class' method table.
         */
        if (!cls->ops) {
                /*
                 * kobj_class_compile doesn't want the lock held
                 * because of the call to malloc - we drop the lock
                 * and re-try.
                 */
                KOBJ_UNLOCK();
                kobj_class_compile(cls);
                goto retry;
        }

        obj->ops = cls->ops;
        cls->refs++;

        KOBJ_UNLOCK();
}

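/*
 * Tear down an object: drop the class reference (freeing the compiled
 * ops table when the last instance goes away) and free the object
 * itself if a malloc type was given.
 */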
void
kobj_delete(kobj_t obj, struct malloc_type *mtype)
{
        kobj_class_t cls = obj->ops->cls;
        int refs;

        /*
         * Consider freeing the compiled method table for the class
         * after its last instance is deleted. As an optimisation, we
         * should defer this for a short while to avoid thrashing.
         */
        KOBJ_ASSERT(MA_NOTOWNED);
        KOBJ_LOCK();
        cls->refs--;
        refs = cls->refs;
        KOBJ_UNLOCK();

        if (!refs)
                kobj_class_free(cls);

        obj->ops = NULL;
        if (mtype)
                free(obj, mtype);
}
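
For context, here is a minimal sketch of how a consumer drives the object lifecycle implemented above. It is not part of subr_kobj.c: the class name foo_class, the wrapper naming FOO_*(), and the malloc type M_FOO are hypothetical placeholders, and in real code the class and its method table normally come from a makeobjops-generated interface plus DEFINE_CLASS() in <sys/kobj.h>.

/*
 * Hypothetical usage sketch.  Assumes a class "foo_class" (a struct
 * kobj_class) has been defined elsewhere, e.g. with DEFINE_CLASS(),
 * and that its generated FOO_*() method wrappers exist.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/malloc.h>

static MALLOC_DEFINE(M_FOO, "foo", "example kobj instances");

extern struct kobj_class foo_class;	/* assumed to exist elsewhere */

static void
foo_example(void)
{
        kobj_t obj;

        /*
         * kobj_create() allocates cls->size bytes with M_ZERO and calls
         * kobj_init(), which compiles foo_class on first use and takes
         * a class reference.
         */
        obj = kobj_create(&foo_class, M_FOO, M_WAITOK);

        /* ... invoke methods on obj through the FOO_*() wrappers ... */

        /*
         * kobj_delete() drops the class reference and, because a malloc
         * type is passed, frees the object storage as well.
         */
        kobj_delete(obj, M_FOO);
}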