subr_kobj.c revision 148811
1/*-
2 * Copyright (c) 2000,2003 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/subr_kobj.c 148811 2005-08-07 02:20:35Z grehan $");
29
30#include <sys/param.h>
31#include <sys/kernel.h>
32#include <sys/kobj.h>
33#include <sys/lock.h>
34#include <sys/malloc.h>
35#include <sys/mutex.h>
36#include <sys/sysctl.h>
37#ifndef TEST
38#include <sys/systm.h>
39#endif
40
41#ifdef TEST
42#include "usertest.h"
43#endif
44
/* Malloc tag for kobj allocations (compiled per-class ops tables). */
static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");

#ifdef KOBJ_STATS

/* Method-dispatch cache statistics, exported read-only via sysctl. */
u_int kobj_lookup_hits;
u_int kobj_lookup_misses;

SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
	   &kobj_lookup_hits, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
	   &kobj_lookup_misses, 0, "");

#endif

/* Serialises method-id assignment and class compile/free/refcounting. */
static struct mtx kobj_mtx;
static int kobj_mutex_inited;
/* Next method id to assign; id 0 means "not yet registered". */
static int kobj_next_id = 1;

SYSCTL_UINT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
	   &kobj_next_id, 0, "");
66static void
67kobj_init_mutex(void *arg)
68{
69	if (!kobj_mutex_inited) {
70		mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
71		kobj_mutex_inited = 1;
72	}
73}
74
/* Set up the kobj mutex as soon as the lock subsystem is available. */
SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);
76
/*
 * Hook for machine-dependent early boot code that needs kobj working
 * before SYSINITs run; simply ensures the kobj mutex is initialised.
 */
void
kobj_machdep_init(void)
{
	kobj_init_mutex(NULL);
}
82
/*
 * This method structure is used to initialise new caches. Since the
 * desc pointer is NULL, it is guaranteed never to match any real
 * descriptor, so cache slots pointing at it always register as misses.
 */
static struct kobj_method null_method = {
	0, 0,
};
91
/*
 * Default implementation for unimplemented method slots: always fails
 * with ENXIO ("Device not configured").
 */
int
kobj_error_method(void)
{

	return (ENXIO);
}
98
99static void
100kobj_register_method(struct kobjop_desc *desc)
101{
102
103	mtx_assert(&kobj_mtx, MA_OWNED);
104	if (desc->id == 0) {
105		desc->id = kobj_next_id++;
106	}
107}
108
/*
 * Placeholder for symmetry with kobj_register_method(); method ids are
 * never reclaimed, so there is nothing to undo.
 */
static void
kobj_unregister_method(struct kobjop_desc *desc)
{
}
113
114static void
115kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
116{
117	kobj_method_t *m;
118	int i;
119
120	mtx_assert(&kobj_mtx, MA_OWNED);
121
122	/*
123	 * Don't do anything if we are already compiled.
124	 */
125	if (cls->ops)
126		return;
127
128	/*
129	 * First register any methods which need it.
130	 */
131	for (i = 0, m = cls->methods; m->desc; i++, m++)
132		kobj_register_method(m->desc);
133
134	/*
135	 * Then initialise the ops table.
136	 */
137	for (i = 0; i < KOBJ_CACHE_SIZE; i++)
138		ops->cache[i] = &null_method;
139	ops->cls = cls;
140	cls->ops = ops;
141}
142
143void
144kobj_class_compile(kobj_class_t cls)
145{
146	kobj_ops_t ops;
147
148	mtx_assert(&kobj_mtx, MA_NOTOWNED);
149
150	/*
151	 * Allocate space for the compiled ops table.
152	 */
153	ops = malloc(sizeof(struct kobj_ops), M_KOBJ, M_NOWAIT);
154	if (!ops)
155		panic("kobj_compile_methods: out of memory");
156
157	mtx_lock(&kobj_mtx);
158
159	/*
160	 * We may have lost a race for kobj_class_compile here - check
161	 * to make sure someone else hasn't already compiled this
162	 * class.
163	 */
164	if (cls->ops) {
165		mtx_unlock(&kobj_mtx);
166		free(ops, M_KOBJ);
167		return;
168	}
169
170	kobj_class_compile_common(cls, ops);
171	mtx_unlock(&kobj_mtx);
172}
173
/*
 * Compile a class using a caller-supplied (statically allocated) ops
 * table instead of malloc'ing one — usable before malloc is up.
 */
void
kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
{

	mtx_assert(&kobj_mtx, MA_NOTOWNED);

	/*
	 * Increment refs to make sure that the ops table is not freed.
	 */
	mtx_lock(&kobj_mtx);
	cls->refs++;
	kobj_class_compile_common(cls, ops);
	mtx_unlock(&kobj_mtx);
}
188
189static kobj_method_t*
190kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
191{
192	kobj_method_t *methods = cls->methods;
193	kobj_method_t *ce;
194
195	for (ce = methods; ce && ce->desc; ce++) {
196		if (ce->desc == desc) {
197			return ce;
198		}
199	}
200
201	return 0;
202}
203
204static kobj_method_t*
205kobj_lookup_method_mi(kobj_class_t cls,
206		      kobjop_desc_t desc)
207{
208	kobj_method_t *ce;
209	kobj_class_t *basep;
210
211	ce = kobj_lookup_method_class(cls, desc);
212	if (ce)
213		return ce;
214
215	basep = cls->baseclasses;
216	if (basep) {
217		for (; *basep; basep++) {
218			ce = kobj_lookup_method_mi(*basep, desc);
219			if (ce)
220				return ce;
221		}
222	}
223
224	return 0;
225}
226
227kobj_method_t*
228kobj_lookup_method(kobj_class_t cls,
229		   kobj_method_t **cep,
230		   kobjop_desc_t desc)
231{
232	kobj_method_t *ce;
233
234#ifdef KOBJ_STATS
235	/*
236	 * Correct for the 'hit' assumption in KOBJOPLOOKUP and record
237	 * a 'miss'.
238	 */
239	kobj_lookup_hits--;
240	kobj_lookup_misses--;
241#endif
242
243	ce = kobj_lookup_method_mi(cls, desc);
244	if (!ce)
245		ce = desc->deflt;
246	*cep = ce;
247	return ce;
248}
249
250void
251kobj_class_free(kobj_class_t cls)
252{
253	int i;
254	kobj_method_t *m;
255	void* ops = 0;
256
257	mtx_assert(&kobj_mtx, MA_NOTOWNED);
258	mtx_lock(&kobj_mtx);
259
260	/*
261	 * Protect against a race between kobj_create and
262	 * kobj_delete.
263	 */
264	if (cls->refs == 0) {
265		/*
266		 * Unregister any methods which are no longer used.
267		 */
268		for (i = 0, m = cls->methods; m->desc; i++, m++)
269			kobj_unregister_method(m->desc);
270
271		/*
272		 * Free memory and clean up.
273		 */
274		ops = cls->ops;
275		cls->ops = 0;
276	}
277
278	mtx_unlock(&kobj_mtx);
279
280	if (ops)
281		free(ops, M_KOBJ);
282}
283
284kobj_t
285kobj_create(kobj_class_t cls,
286	    struct malloc_type *mtype,
287	    int mflags)
288{
289	kobj_t obj;
290
291	/*
292	 * Allocate and initialise the new object.
293	 */
294	obj = malloc(cls->size, mtype, mflags | M_ZERO);
295	if (!obj)
296		return 0;
297	kobj_init(obj, cls);
298
299	return obj;
300}
301
302void
303kobj_init(kobj_t obj, kobj_class_t cls)
304{
305	mtx_assert(&kobj_mtx, MA_NOTOWNED);
306  retry:
307	mtx_lock(&kobj_mtx);
308
309	/*
310	 * Consider compiling the class' method table.
311	 */
312	if (!cls->ops) {
313		/*
314		 * kobj_class_compile doesn't want the lock held
315		 * because of the call to malloc - we drop the lock
316		 * and re-try.
317		 */
318		mtx_unlock(&kobj_mtx);
319		kobj_class_compile(cls);
320		goto retry;
321	}
322
323	obj->ops = cls->ops;
324	cls->refs++;
325
326	mtx_unlock(&kobj_mtx);
327}
328
/*
 * Release an object's reference on its class and, if that was the last
 * reference, free the class's compiled ops table.  If mtype is NULL
 * the object storage itself is not freed (for objects that were not
 * allocated via kobj_create()).
 */
void
kobj_delete(kobj_t obj, struct malloc_type *mtype)
{
	kobj_class_t cls = obj->ops->cls;
	int refs;

	/*
	 * Consider freeing the compiled method table for the class
	 * after its last instance is deleted. As an optimisation, we
	 * should defer this for a short while to avoid thrashing.
	 */
	mtx_assert(&kobj_mtx, MA_NOTOWNED);
	mtx_lock(&kobj_mtx);
	cls->refs--;
	refs = cls->refs;
	mtx_unlock(&kobj_mtx);

	/*
	 * NOTE(review): refs is acted on outside the lock, but
	 * kobj_class_free() re-checks cls->refs under kobj_mtx, so a
	 * racing kobj_create()/kobj_init() cannot lose its ops table.
	 */
	if (!refs)
		kobj_class_free(cls);

	obj->ops = 0;
	if (mtype)
		free(obj, mtype);
}
353