subr_kobj.c revision 217326
/*-
 * Copyright (c) 2000,2003 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_kobj.c 217326 2011-01-12 19:54:19Z mdf $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#ifndef TEST
#include <sys/systm.h>
#endif

#ifdef TEST
#include "usertest.h"
#endif

static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");

#ifdef KOBJ_STATS

u_int kobj_lookup_hits;
u_int kobj_lookup_misses;

SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
	   &kobj_lookup_hits, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
	   &kobj_lookup_misses, 0, "");

#endif

static struct mtx kobj_mtx;
static int kobj_mutex_inited;
static int kobj_next_id = 1;

/*
 * In the event that kobj_mtx has not been initialized yet,
 * we will ignore it, and run without locks in order to support
 * use of KOBJ before mutexes are available. This early in the boot
 * process, everything is single threaded and so races should not
 * happen. This is used to provide the PMAP layer on PowerPC, as well
 * as board support.
 */

#define KOBJ_LOCK()	if (kobj_mutex_inited) mtx_lock(&kobj_mtx);
#define KOBJ_UNLOCK()	if (kobj_mutex_inited) mtx_unlock(&kobj_mtx);
#define KOBJ_ASSERT(what) if (kobj_mutex_inited) mtx_assert(&kobj_mtx,what);
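
/*
 * Note that these macros expand to bare if statements and already carry
 * a trailing semicolon, so each must stand alone as a complete statement
 * of its own (as every use in this file does); using one as the body of
 * a caller's if/else would misbind or break the else.
 */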

SYSCTL_INT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
	   &kobj_next_id, 0, "");

static void
kobj_init_mutex(void *arg)
{
	if (!kobj_mutex_inited) {
		mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
		kobj_mutex_inited = 1;
	}
}

SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);

/*
 * This method structure is used to initialise new caches. Since its
 * desc pointer is NULL, it is guaranteed never to match any descriptor
 * passed to a lookup.
 */
static struct kobj_method null_method = {
	0, 0,
};
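
/*
 * A rough sketch of how the method dispatch fast path consults the
 * per-ops cache whose slots kobj_class_compile_common() below points at
 * null_method.  The authoritative macro is KOBJOPLOOKUP in <sys/kobj.h>;
 * this outline is illustrative and may not match its exact text:
 *
 *	ce = ops->cache[desc->id & (KOBJ_CACHE_SIZE - 1)];
 *	if (ce->desc != desc)
 *		ce = kobj_lookup_method(ops->cls, &cachep, desc);
 *	method = ce->func;
 *
 * Since null_method.desc is NULL and every registered descriptor has a
 * non-NULL address, a freshly initialised cache slot can never match and
 * always falls through to the slow path in kobj_lookup_method().
 */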

int
kobj_error_method(void)
{

	return ENXIO;
}

static void
kobj_register_method(struct kobjop_desc *desc)
{
	KOBJ_ASSERT(MA_OWNED);

	if (desc->id == 0) {
		desc->id = kobj_next_id++;
	}
}

static void
kobj_unregister_method(struct kobjop_desc *desc)
{
}

static void
kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
{
	kobj_method_t *m;
	int i;

	KOBJ_ASSERT(MA_OWNED);

	/*
	 * Don't do anything if we are already compiled.
	 */
	if (cls->ops)
		return;

	/*
	 * First register any methods which need it.
	 */
	for (i = 0, m = cls->methods; m->desc; i++, m++)
		kobj_register_method(m->desc);

	/*
	 * Then initialise the ops table.
	 */
	for (i = 0; i < KOBJ_CACHE_SIZE; i++)
		ops->cache[i] = &null_method;
	ops->cls = cls;
	cls->ops = ops;
}

void
kobj_class_compile(kobj_class_t cls)
{
	kobj_ops_t ops;

	KOBJ_ASSERT(MA_NOTOWNED);

	/*
	 * Allocate space for the compiled ops table.
	 */
	ops = malloc(sizeof(struct kobj_ops), M_KOBJ, M_NOWAIT);
	if (!ops)
		panic("kobj_class_compile: out of memory");

	KOBJ_LOCK();

	/*
	 * We may have lost a race for kobj_class_compile here - check
	 * to make sure someone else hasn't already compiled this
	 * class.
	 */
	if (cls->ops) {
		KOBJ_UNLOCK();
		free(ops, M_KOBJ);
		return;
	}

	kobj_class_compile_common(cls, ops);
	KOBJ_UNLOCK();
}

void
kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
{

	KOBJ_ASSERT(MA_NOTOWNED);

	/*
	 * Increment refs to make sure that the ops table is not freed.
	 */
	KOBJ_LOCK();

	cls->refs++;
	kobj_class_compile_common(cls, ops);

	KOBJ_UNLOCK();
}
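
/*
 * kobj_class_compile_static() lets a caller supply its own statically
 * allocated ops table, which matters very early in boot when malloc(9)
 * may not yet be usable (see the early-boot note above the KOBJ_LOCK
 * macros).  A minimal sketch, with illustrative names only:
 *
 *	static struct kobj_ops foo_ops;
 *
 *	kobj_class_compile_static(&foo_class, &foo_ops);
 */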

static kobj_method_t*
kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
{
	kobj_method_t *methods = cls->methods;
	kobj_method_t *ce;

	for (ce = methods; ce && ce->desc; ce++) {
		if (ce->desc == desc) {
			return ce;
		}
	}

	return NULL;
}

static kobj_method_t*
kobj_lookup_method_mi(kobj_class_t cls,
		      kobjop_desc_t desc)
{
	kobj_method_t *ce;
	kobj_class_t *basep;

	ce = kobj_lookup_method_class(cls, desc);
	if (ce)
		return ce;

	basep = cls->baseclasses;
	if (basep) {
		for (; *basep; basep++) {
			ce = kobj_lookup_method_mi(*basep, desc);
			if (ce)
				return ce;
		}
	}

	return NULL;
}

kobj_method_t*
kobj_lookup_method(kobj_class_t cls,
		   kobj_method_t **cep,
		   kobjop_desc_t desc)
{
	kobj_method_t *ce;

#ifdef KOBJ_STATS
	/*
	 * Correct for the 'hit' assumption in KOBJOPLOOKUP and record
	 * a 'miss'.
	 */
	kobj_lookup_hits--;
	kobj_lookup_misses++;
#endif

	ce = kobj_lookup_method_mi(cls, desc);
	if (!ce)
		ce = desc->deflt;
	*cep = ce;
	return ce;
}

void
kobj_class_free(kobj_class_t cls)
{
	int i;
	kobj_method_t *m;
	void* ops = NULL;

	KOBJ_ASSERT(MA_NOTOWNED);
	KOBJ_LOCK();

	/*
	 * Protect against a race between kobj_create and
	 * kobj_delete.
	 */
	if (cls->refs == 0) {
		/*
		 * Unregister any methods which are no longer used.
		 */
		for (i = 0, m = cls->methods; m->desc; i++, m++)
			kobj_unregister_method(m->desc);

		/*
		 * Free memory and clean up.
		 */
		ops = cls->ops;
		cls->ops = NULL;
	}

	KOBJ_UNLOCK();

	if (ops)
		free(ops, M_KOBJ);
}

kobj_t
kobj_create(kobj_class_t cls,
	    struct malloc_type *mtype,
	    int mflags)
{
	kobj_t obj;

	/*
	 * Allocate and initialise the new object.
	 */
	obj = malloc(cls->size, mtype, mflags | M_ZERO);
	if (!obj)
		return NULL;
	kobj_init(obj, cls);

	return obj;
}

void
kobj_init(kobj_t obj, kobj_class_t cls)
{
	KOBJ_ASSERT(MA_NOTOWNED);
  retry:
	KOBJ_LOCK();

	/*
	 * Consider compiling the class' method table.
	 */
	if (!cls->ops) {
		/*
		 * kobj_class_compile doesn't want the lock held
		 * because of the call to malloc - we drop the lock
		 * and re-try.
		 */
		KOBJ_UNLOCK();
		kobj_class_compile(cls);
		goto retry;
	}

	obj->ops = cls->ops;
	cls->refs++;

	KOBJ_UNLOCK();
}

void
kobj_delete(kobj_t obj, struct malloc_type *mtype)
{
	kobj_class_t cls = obj->ops->cls;
	int refs;

	/*
	 * Consider freeing the compiled method table for the class
	 * after its last instance is deleted. As an optimisation, we
	 * should defer this for a short while to avoid thrashing.
	 */
	KOBJ_ASSERT(MA_NOTOWNED);
	KOBJ_LOCK();
	cls->refs--;
	refs = cls->refs;
	KOBJ_UNLOCK();

	if (!refs)
		kobj_class_free(cls);

	obj->ops = NULL;
	if (mtype)
		free(obj, mtype);
}
362