/*-
 * Copyright (c) 2001 Matthew Dillon.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Mutex pool routines.  These routines are designed to be used as short
 * term leaf mutexes (e.g. the last mutex you might acquire other than
 * calling msleep()).  They operate using a shared pool.  A mutex is chosen
 * from the pool based on the supplied pointer (which may or may not be
 * valid).
 *
 * Advantages:
 *	- no structural overhead.  Mutexes can be associated with structures
 *	  without adding bloat to the structures.
 *	- mutexes can be obtained for invalid pointers, useful when using
 *	  mutexes to interlock destructor ops.
 *	- no initialization/destructor overhead.
 *	- can be used with msleep.
 *
 * Disadvantages:
 *	- should generally only be used as leaf mutexes.
 *	- pool/pool dependency ordering cannot be depended on.
 *	- possible L1 cache mastership contention between cpus.
 */
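
/*
 * Illustrative usage sketch (not compiled; "struct foo" and the use of the
 * global sleep pool below are hypothetical examples, not part of this file):
 *
 *	struct foo *fp;
 *	struct mtx *mp;
 *
 *	mp = mtx_pool_find(mtxpool_sleep, fp);
 *	mtx_lock(mp);
 *	... short critical section on *fp ...
 *	mtx_unlock(mp);
 *
 * The mutex is selected purely by hashing the pointer value, so struct foo
 * needs no embedded mutex and the same address may be locked even after the
 * object it referenced has been freed.
 */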

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/kern_mtxpool.c 184214 2008-10-23 20:26:15Z des $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>


static MALLOC_DEFINE(M_MTXPOOL, "mtx_pool", "mutex pool");

/* Pool sizes must be a power of two */
#ifndef MTX_POOL_LOCKBUILDER_SIZE
#define MTX_POOL_LOCKBUILDER_SIZE	128
#endif
#ifndef MTX_POOL_SLEEP_SIZE
#define MTX_POOL_SLEEP_SIZE		128
#endif

struct mtxpool_header {
	int		mtxpool_size;
	int		mtxpool_mask;
	int		mtxpool_shift;
	int		mtxpool_next;
};

struct mtx_pool {
	struct mtxpool_header mtx_pool_header;
	struct mtx	mtx_pool_ary[1];
};

static struct mtx_pool_lockbuilder {
	struct mtxpool_header mtx_pool_header;
	struct mtx	mtx_pool_ary[MTX_POOL_LOCKBUILDER_SIZE];
} lockbuilder_pool;

#define mtx_pool_size	mtx_pool_header.mtxpool_size
#define mtx_pool_mask	mtx_pool_header.mtxpool_mask
#define mtx_pool_shift	mtx_pool_header.mtxpool_shift
#define mtx_pool_next	mtx_pool_header.mtxpool_next

struct mtx_pool *mtxpool_sleep;
struct mtx_pool *mtxpool_lockbuilder;

#if UINTPTR_MAX == UINT64_MAX	/* 64 bits */
# define POINTER_BITS		64
# define HASH_MULTIPLIER	11400714819323198485u /* (2^64)*(sqrt(5)-1)/2 */
#else				/* assume 32 bits */
# define POINTER_BITS		32
# define HASH_MULTIPLIER	2654435769u	      /* (2^32)*(sqrt(5)-1)/2 */
#endif

/*
 * Return the (shared) pool mutex associated with the specified address.
 * The returned mutex is a leaf level mutex, meaning that if you obtain it
 * you cannot obtain any other mutexes until you release it.  You can
 * legally msleep() on the mutex.
 */
struct mtx *
mtx_pool_find(struct mtx_pool *pool, void *ptr)
{
	int p;

	KASSERT(pool != NULL, ("mtx_pool_find(): null pool"));
	/*
	 * Fibonacci hash, see Knuth's
	 * _Art of Computer Programming, Volume 3 / Sorting and Searching_
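	 * Multiplying by 2^N / phi spreads the pointer bits; the best-mixed
	 * bits land at the top of the product, so the index is taken from
	 * the high end (via mtx_pool_shift) rather than from the low bits.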
	 */
	p = ((HASH_MULTIPLIER * (uintptr_t)ptr) >> pool->mtx_pool_shift) &
	    pool->mtx_pool_mask;
	return (&pool->mtx_pool_ary[p]);
}
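
/*
 * Sketch of the msleep() pattern mentioned above (illustrative only;
 * "chan" and "condition" are hypothetical, and the waker is assumed to
 * call wakeup(chan) once the condition becomes true):
 *
 *	struct mtx *mp;
 *
 *	mp = mtx_pool_find(mtxpool_sleep, chan);
 *	mtx_lock(mp);
 *	while (!condition)
 *		msleep(chan, mp, PWAIT, "example", 0);
 *	mtx_unlock(mp);
 */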

static void
mtx_pool_initialize(struct mtx_pool *pool, const char *mtx_name, int pool_size,
    int opts)
{
	int i, maskbits;

	pool->mtx_pool_size = pool_size;
	pool->mtx_pool_mask = pool_size - 1;
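	/* Compute maskbits = log2(pool_size); pool sizes are powers of two. */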
	for (i = 1, maskbits = 0; (i & pool_size) == 0; i = i << 1)
		maskbits++;
	pool->mtx_pool_shift = POINTER_BITS - maskbits;
	pool->mtx_pool_next = 0;
	for (i = 0; i < pool_size; ++i)
		mtx_init(&pool->mtx_pool_ary[i], mtx_name, NULL, opts);
}

struct mtx_pool *
mtx_pool_create(const char *mtx_name, int pool_size, int opts)
{
	struct mtx_pool *pool;

	if (pool_size <= 0 || !powerof2(pool_size)) {
		printf("WARNING: %s pool size is not a power of 2.\n",
		    mtx_name);
		pool_size = 128;
	}
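	/*
	 * struct mtx_pool declares mtx_pool_ary[] with a single element;
	 * allocate room for the remaining pool_size - 1 mutexes inline.
	 */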
	pool = malloc(sizeof (struct mtx_pool) +
	    ((pool_size - 1) * sizeof (struct mtx)),
	    M_MTXPOOL, M_WAITOK | M_ZERO);
	mtx_pool_initialize(pool, mtx_name, pool_size, opts);
	return (pool);
}

void
mtx_pool_destroy(struct mtx_pool **poolp)
{
	int i;
	struct mtx_pool *pool = *poolp;

	for (i = pool->mtx_pool_size - 1; i >= 0; --i)
		mtx_destroy(&pool->mtx_pool_ary[i]);
	free(pool, M_MTXPOOL);
	*poolp = NULL;
}

static void
mtx_pool_setup_static(void *dummy __unused)
{
	mtx_pool_initialize((struct mtx_pool *)&lockbuilder_pool,
	    "lockbuilder mtxpool", MTX_POOL_LOCKBUILDER_SIZE,
	    MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
	mtxpool_lockbuilder = (struct mtx_pool *)&lockbuilder_pool;
}

static void
mtx_pool_setup_dynamic(void *dummy __unused)
{
	mtxpool_sleep = mtx_pool_create("sleep mtxpool",
	    MTX_POOL_SLEEP_SIZE, MTX_DEF);
}

/*
 * Obtain a (shared) mutex from the pool.  The returned mutex is a leaf
 * level mutex, meaning that if you obtain it you cannot obtain any other
 * mutexes until you release it.  You can legally msleep() on the mutex.
 */
struct mtx *
mtx_pool_alloc(struct mtx_pool *pool)
{
	int i;

	KASSERT(pool != NULL, ("mtx_pool_alloc(): null pool"));
	/*
	 * mtx_pool_next is unprotected against multiple accesses,
	 * but simultaneous access by two CPUs should not be very
	 * harmful.
	 */
	i = pool->mtx_pool_next;
	pool->mtx_pool_next = (i + 1) & pool->mtx_pool_mask;
	return (&pool->mtx_pool_ary[i]);
}
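
/*
 * Illustrative sketch (hypothetical): mtx_pool_alloc() is convenient when a
 * short-term leaf interlock is needed but there is no natural address to
 * hash on; the caller must remember which mutex it was handed:
 *
 *	struct mtx *mp;
 *
 *	mp = mtx_pool_alloc(mtxpool_sleep);
 *	mtx_lock(mp);
 *	... brief leaf-level critical section ...
 *	mtx_unlock(mp);
 */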

/*
 * The lockbuilder pool must be initialized early because the lockmgr
 * and sx locks depend on it.  The sx locks are used in the kernel
 * memory allocator.  The lockmgr subsystem is initialized by
 * SYSINIT(..., SI_SUB_LOCKMGR, ...).
 *
 * We can't call malloc() to dynamically allocate the sleep pool
 * until after kmeminit() has been called, which is done by
 * SYSINIT(..., SI_SUB_KMEM, ...).
 */
SYSINIT(mtxpooli1, SI_SUB_MTX_POOL_STATIC, SI_ORDER_FIRST,
    mtx_pool_setup_static, NULL);
SYSINIT(mtxpooli2, SI_SUB_MTX_POOL_DYNAMIC, SI_ORDER_FIRST,
    mtx_pool_setup_dynamic, NULL);