/*-
 * Copyright (c) 2000,2003 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
2559093Sdfr */ 2659093Sdfr 27116182Sobrien#include <sys/cdefs.h> 28116182Sobrien__FBSDID("$FreeBSD$"); 29116182Sobrien 3059093Sdfr#include <sys/param.h> 31121129Sdfr#include <sys/kernel.h> 32121129Sdfr#include <sys/kobj.h> 33121129Sdfr#include <sys/lock.h> 3459093Sdfr#include <sys/malloc.h> 35121129Sdfr#include <sys/mutex.h> 36118921Scg#include <sys/sysctl.h> 3759093Sdfr#ifndef TEST 3859093Sdfr#include <sys/systm.h> 3959093Sdfr#endif 4059093Sdfr 4159093Sdfr#ifdef TEST 4259093Sdfr#include "usertest.h" 4359093Sdfr#endif 4459093Sdfr 4559093Sdfrstatic MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures"); 4659093Sdfr 4759093Sdfr#ifdef KOBJ_STATS 4859093Sdfr 4998105Skbyancu_int kobj_lookup_hits; 5098105Skbyancu_int kobj_lookup_misses; 5159093Sdfr 5298105SkbyancSYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD, 53118921Scg &kobj_lookup_hits, 0, ""); 5498105SkbyancSYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD, 55118921Scg &kobj_lookup_misses, 0, ""); 5659093Sdfr 5759093Sdfr#endif 5859093Sdfr 59121129Sdfrstatic struct mtx kobj_mtx; 60148811Sgrehanstatic int kobj_mutex_inited; 6159093Sdfrstatic int kobj_next_id = 1; 6259093Sdfr 63227537Smarius#define KOBJ_LOCK() mtx_lock(&kobj_mtx) 64227537Smarius#define KOBJ_UNLOCK() mtx_unlock(&kobj_mtx) 65227537Smarius#define KOBJ_ASSERT(what) mtx_assert(&kobj_mtx, what); 66186347Snwhitehorn 67217326SmdfSYSCTL_INT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD, 68118921Scg &kobj_next_id, 0, ""); 69118921Scg 70121129Sdfrstatic void 71121129Sdfrkobj_init_mutex(void *arg) 72121129Sdfr{ 73148811Sgrehan if (!kobj_mutex_inited) { 74148811Sgrehan mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF); 75148811Sgrehan kobj_mutex_inited = 1; 76148811Sgrehan } 77121129Sdfr} 78121129Sdfr 79121129SdfrSYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL); 80121129Sdfr 81121129Sdfr/* 82121129Sdfr * This method structure is used to initialise new caches. 
Since the 83121129Sdfr * desc pointer is NULL, it is guaranteed never to match any read 84121129Sdfr * descriptors. 85121129Sdfr */ 86227343Sedstatic const struct kobj_method null_method = { 87121129Sdfr 0, 0, 88121129Sdfr}; 89121129Sdfr 90121129Sdfrint 9159093Sdfrkobj_error_method(void) 9259093Sdfr{ 93121129Sdfr 9459093Sdfr return ENXIO; 9559093Sdfr} 9659093Sdfr 9759093Sdfrstatic void 9865173Sdfrkobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops) 9959093Sdfr{ 10059093Sdfr kobj_method_t *m; 10159093Sdfr int i; 10259093Sdfr 10359093Sdfr /* 10459093Sdfr * Don't do anything if we are already compiled. 10559093Sdfr */ 10659093Sdfr if (cls->ops) 10759093Sdfr return; 10859093Sdfr 10959093Sdfr /* 11059093Sdfr * First register any methods which need it. 11159093Sdfr */ 112227537Smarius for (i = 0, m = cls->methods; m->desc; i++, m++) { 113227537Smarius if (m->desc->id == 0) 114227537Smarius m->desc->id = kobj_next_id++; 115227537Smarius } 11659093Sdfr 11759093Sdfr /* 11865173Sdfr * Then initialise the ops table. 11959093Sdfr */ 120121129Sdfr for (i = 0; i < KOBJ_CACHE_SIZE; i++) 121121129Sdfr ops->cache[i] = &null_method; 12259093Sdfr ops->cls = cls; 12359093Sdfr cls->ops = ops; 12459093Sdfr} 12559093Sdfr 12659093Sdfrvoid 12765173Sdfrkobj_class_compile(kobj_class_t cls) 12865173Sdfr{ 12965173Sdfr kobj_ops_t ops; 13065173Sdfr 131186347Snwhitehorn KOBJ_ASSERT(MA_NOTOWNED); 132121129Sdfr 13365173Sdfr /* 13465173Sdfr * Allocate space for the compiled ops table. 13565173Sdfr */ 13665173Sdfr ops = malloc(sizeof(struct kobj_ops), M_KOBJ, M_NOWAIT); 13765173Sdfr if (!ops) 138227537Smarius panic("%s: out of memory", __func__); 139121129Sdfr 140186347Snwhitehorn KOBJ_LOCK(); 141121129Sdfr 142121129Sdfr /* 143121129Sdfr * We may have lost a race for kobj_class_compile here - check 144121129Sdfr * to make sure someone else hasn't already compiled this 145121129Sdfr * class. 
146121129Sdfr */ 147121129Sdfr if (cls->ops) { 148186347Snwhitehorn KOBJ_UNLOCK(); 149121129Sdfr free(ops, M_KOBJ); 150121129Sdfr return; 151121129Sdfr } 152121129Sdfr 15365173Sdfr kobj_class_compile_common(cls, ops); 154186347Snwhitehorn KOBJ_UNLOCK(); 15565173Sdfr} 15665173Sdfr 15765173Sdfrvoid 15865173Sdfrkobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops) 15965173Sdfr{ 160121129Sdfr 161227537Smarius KASSERT(kobj_mutex_inited == 0, 162227537Smarius ("%s: only supported during early cycles", __func__)); 163121129Sdfr 16465173Sdfr /* 16565173Sdfr * Increment refs to make sure that the ops table is not freed. 16665173Sdfr */ 16765173Sdfr cls->refs++; 16865173Sdfr kobj_class_compile_common(cls, ops); 16965173Sdfr} 17065173Sdfr 171121129Sdfrstatic kobj_method_t* 172121129Sdfrkobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc) 17359093Sdfr{ 174121129Sdfr kobj_method_t *methods = cls->methods; 175121129Sdfr kobj_method_t *ce; 176121129Sdfr 177121129Sdfr for (ce = methods; ce && ce->desc; ce++) { 178121129Sdfr if (ce->desc == desc) { 179121129Sdfr return ce; 18059093Sdfr } 18159093Sdfr } 182121129Sdfr 183188063Simp return NULL; 18459093Sdfr} 18559093Sdfr 186121129Sdfrstatic kobj_method_t* 187121129Sdfrkobj_lookup_method_mi(kobj_class_t cls, 188121129Sdfr kobjop_desc_t desc) 189121129Sdfr{ 190121129Sdfr kobj_method_t *ce; 191121129Sdfr kobj_class_t *basep; 192121129Sdfr 193121129Sdfr ce = kobj_lookup_method_class(cls, desc); 194121129Sdfr if (ce) 195121129Sdfr return ce; 196121129Sdfr 197121129Sdfr basep = cls->baseclasses; 198121129Sdfr if (basep) { 199121129Sdfr for (; *basep; basep++) { 200121129Sdfr ce = kobj_lookup_method_mi(*basep, desc); 201121129Sdfr if (ce) 202121129Sdfr return ce; 203121129Sdfr } 204121129Sdfr } 205121129Sdfr 206188063Simp return NULL; 207121129Sdfr} 208121129Sdfr 209121129Sdfrkobj_method_t* 210121129Sdfrkobj_lookup_method(kobj_class_t cls, 211121129Sdfr kobj_method_t **cep, 212121129Sdfr kobjop_desc_t desc) 213121129Sdfr{ 
214121129Sdfr kobj_method_t *ce; 215121129Sdfr 216121129Sdfr#ifdef KOBJ_STATS 217121129Sdfr /* 218121129Sdfr * Correct for the 'hit' assumption in KOBJOPLOOKUP and record 219121129Sdfr * a 'miss'. 220121129Sdfr */ 221121129Sdfr kobj_lookup_hits--; 222153844Sjhb kobj_lookup_misses++; 223121129Sdfr#endif 224121129Sdfr 225121129Sdfr ce = kobj_lookup_method_mi(cls, desc); 226121129Sdfr if (!ce) 227227384Sed ce = &desc->deflt; 228121129Sdfr *cep = ce; 229121129Sdfr return ce; 230121129Sdfr} 231121129Sdfr 23259093Sdfrvoid 23359093Sdfrkobj_class_free(kobj_class_t cls) 23459093Sdfr{ 235188063Simp void* ops = NULL; 23659093Sdfr 237186347Snwhitehorn KOBJ_ASSERT(MA_NOTOWNED); 238186347Snwhitehorn KOBJ_LOCK(); 239121129Sdfr 24059093Sdfr /* 241121129Sdfr * Protect against a race between kobj_create and 242121129Sdfr * kobj_delete. 24359093Sdfr */ 244121129Sdfr if (cls->refs == 0) { 245121129Sdfr /* 246227537Smarius * For now we don't do anything to unregister any methods 247227537Smarius * which are no longer used. 248121129Sdfr */ 24959093Sdfr 250121129Sdfr /* 251121129Sdfr * Free memory and clean up. 252121129Sdfr */ 253121129Sdfr ops = cls->ops; 254188063Simp cls->ops = NULL; 255121129Sdfr } 256121129Sdfr 257186347Snwhitehorn KOBJ_UNLOCK(); 258121129Sdfr 259121129Sdfr if (ops) 260121129Sdfr free(ops, M_KOBJ); 26159093Sdfr} 26259093Sdfr 26359093Sdfrkobj_t 26459093Sdfrkobj_create(kobj_class_t cls, 26559093Sdfr struct malloc_type *mtype, 26659093Sdfr int mflags) 26759093Sdfr{ 26859093Sdfr kobj_t obj; 26959093Sdfr 27059093Sdfr /* 27159093Sdfr * Allocate and initialise the new object. 
27259093Sdfr */ 27369781Sdwmalone obj = malloc(cls->size, mtype, mflags | M_ZERO); 27459093Sdfr if (!obj) 275188063Simp return NULL; 27659093Sdfr kobj_init(obj, cls); 27759093Sdfr 27859093Sdfr return obj; 27959093Sdfr} 28059093Sdfr 281227537Smariusstatic void 282227537Smariuskobj_init_common(kobj_t obj, kobj_class_t cls) 283227537Smarius{ 284227537Smarius 285227537Smarius obj->ops = cls->ops; 286227537Smarius cls->refs++; 287227537Smarius} 288227537Smarius 28959093Sdfrvoid 29059093Sdfrkobj_init(kobj_t obj, kobj_class_t cls) 29159093Sdfr{ 292186347Snwhitehorn KOBJ_ASSERT(MA_NOTOWNED); 293121129Sdfr retry: 294186347Snwhitehorn KOBJ_LOCK(); 295121129Sdfr 29659093Sdfr /* 29759093Sdfr * Consider compiling the class' method table. 29859093Sdfr */ 299121129Sdfr if (!cls->ops) { 300121129Sdfr /* 301121129Sdfr * kobj_class_compile doesn't want the lock held 302121129Sdfr * because of the call to malloc - we drop the lock 303121129Sdfr * and re-try. 304121129Sdfr */ 305186347Snwhitehorn KOBJ_UNLOCK(); 30659093Sdfr kobj_class_compile(cls); 307121129Sdfr goto retry; 308121129Sdfr } 30959093Sdfr 310227537Smarius kobj_init_common(obj, cls); 311121129Sdfr 312186347Snwhitehorn KOBJ_UNLOCK(); 31359093Sdfr} 31459093Sdfr 31559093Sdfrvoid 316227537Smariuskobj_init_static(kobj_t obj, kobj_class_t cls) 317227537Smarius{ 318227537Smarius 319227537Smarius KASSERT(kobj_mutex_inited == 0, 320227537Smarius ("%s: only supported during early cycles", __func__)); 321227537Smarius 322227537Smarius kobj_init_common(obj, cls); 323227537Smarius} 324227537Smarius 325227537Smariusvoid 32659093Sdfrkobj_delete(kobj_t obj, struct malloc_type *mtype) 32759093Sdfr{ 32859093Sdfr kobj_class_t cls = obj->ops->cls; 329121129Sdfr int refs; 33059093Sdfr 33159093Sdfr /* 33259093Sdfr * Consider freeing the compiled method table for the class 33359093Sdfr * after its last instance is deleted. As an optimisation, we 33459093Sdfr * should defer this for a short while to avoid thrashing. 
33559093Sdfr */ 336186347Snwhitehorn KOBJ_ASSERT(MA_NOTOWNED); 337186347Snwhitehorn KOBJ_LOCK(); 33859820Sdfr cls->refs--; 339121129Sdfr refs = cls->refs; 340186347Snwhitehorn KOBJ_UNLOCK(); 341121129Sdfr 342121129Sdfr if (!refs) 34359093Sdfr kobj_class_free(cls); 34459093Sdfr 345188063Simp obj->ops = NULL; 34659093Sdfr if (mtype) 34759093Sdfr free(obj, mtype); 34859093Sdfr} 349