/* Diff of sys/sys/sx.h between FreeBSD revisions 73901 (2001-03-06, jhb) and 74912 (2001-03-28, jhb). */
1/* 2 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice(s), this list of conditions and the following disclaimer as --- 10 unchanged lines hidden (view full) --- 19 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 25 * DAMAGE. 26 * | 1/* 2 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice(s), this list of conditions and the following disclaimer as --- 10 unchanged lines hidden (view full) --- 19 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 25 * DAMAGE. 26 * |
27 * $FreeBSD: head/sys/sys/sx.h 73901 2001-03-06 23:13:15Z jhb $ | 27 * $FreeBSD: head/sys/sys/sx.h 74912 2001-03-28 09:03:24Z jhb $ |
28 */ 29 30#ifndef _SYS_SX_H_ 31#define _SYS_SX_H_ 32 33#ifndef LOCORE | 28 */ 29 30#ifndef _SYS_SX_H_ 31#define _SYS_SX_H_ 32 33#ifndef LOCORE |
34#include <sys/mutex.h> 35#include <sys/condvar.h> | 34#include <sys/lock.h> /* XXX */ 35#include <sys/mutex.h> /* XXX */ 36#include <sys/condvar.h> /* XXX */ |
36 | 37 |
38struct lock_object; 39 |
|
37struct sx { | 40struct sx { |
41 struct lock_object sx_object; /* Common lock properties. */ |
|
38 struct mtx sx_lock; /* General protection lock. */ | 42 struct mtx sx_lock; /* General protection lock. */ |
39 const char *sx_descr; /* sx lock description. */ | |
40 int sx_cnt; /* -1: xlock, > 0: slock count. */ 41 struct cv sx_shrd_cv; /* slock waiters. */ 42 int sx_shrd_wcnt; /* Number of slock waiters. */ 43 struct cv sx_excl_cv; /* xlock waiters. */ 44 int sx_excl_wcnt; /* Number of xlock waiters. */ 45 struct proc *sx_xholder; /* Thread presently holding xlock. */ 46}; 47 48#ifdef _KERNEL 49void sx_init(struct sx *sx, const char *description); 50void sx_destroy(struct sx *sx); | 43 int sx_cnt; /* -1: xlock, > 0: slock count. */ 44 struct cv sx_shrd_cv; /* slock waiters. */ 45 int sx_shrd_wcnt; /* Number of slock waiters. */ 46 struct cv sx_excl_cv; /* xlock waiters. */ 47 int sx_excl_wcnt; /* Number of xlock waiters. */ 48 struct proc *sx_xholder; /* Thread presently holding xlock. */ 49}; 50 51#ifdef _KERNEL 52void sx_init(struct sx *sx, const char *description); 53void sx_destroy(struct sx *sx); |
/*
 * Internal lock/unlock entry points.  Rev 74912 renamed the public
 * functions to _sx_* and added the caller's file/line so lock
 * diagnostics can report the acquisition site.
 */
void	_sx_slock(struct sx *sx, const char *file, int line);
void	_sx_xlock(struct sx *sx, const char *file, int line);
void	_sx_sunlock(struct sx *sx, const char *file, int line);
void	_sx_xunlock(struct sx *sx, const char *file, int line);

/*
 * Public interface: wrappers that capture __FILE__/__LINE__ at the
 * call site.
 */
#define	sx_slock(sx)	_sx_slock((sx), __FILE__, __LINE__)
#define	sx_xlock(sx)	_sx_xlock((sx), __FILE__, __LINE__)
#define	sx_sunlock(sx)	_sx_sunlock((sx), __FILE__, __LINE__)
#define	sx_xunlock(sx)	_sx_xunlock((sx), __FILE__, __LINE__)
|
56#ifdef INVARIANTS 57/* 58 * SX_ASSERT_SLOCKED() can only detect that at least *some* thread owns an 59 * slock, but it cannot guarantee that *this* thread owns an slock. 60 */ 61#define SX_ASSERT_SLOCKED(sx) do { \ 62 mtx_lock(&(sx)->sx_lock); \ 63 _SX_ASSERT_SLOCKED((sx)); \ 64 mtx_unlock(&(sx)->sx_lock); \ 65} while (0) 66#define _SX_ASSERT_SLOCKED(sx) do { \ 67 KASSERT(((sx)->sx_cnt > 0), ("%s: lacking slock %s\n", \ | 64#ifdef INVARIANTS 65/* 66 * SX_ASSERT_SLOCKED() can only detect that at least *some* thread owns an 67 * slock, but it cannot guarantee that *this* thread owns an slock. 68 */ 69#define SX_ASSERT_SLOCKED(sx) do { \ 70 mtx_lock(&(sx)->sx_lock); \ 71 _SX_ASSERT_SLOCKED((sx)); \ 72 mtx_unlock(&(sx)->sx_lock); \ 73} while (0) 74#define _SX_ASSERT_SLOCKED(sx) do { \ 75 KASSERT(((sx)->sx_cnt > 0), ("%s: lacking slock %s\n", \ |
68 __FUNCTION__, (sx)->sx_descr)); \ | 76 __FUNCTION__, (sx)->sx_object.lo_name)); \ |
69} while (0) 70 71/* 72 * SX_ASSERT_XLOCKED() detects and guarantees that *we* own the xlock. 73 */ 74#define SX_ASSERT_XLOCKED(sx) do { \ 75 mtx_lock(&(sx)->sx_lock); \ 76 _SX_ASSERT_XLOCKED((sx)); \ 77 mtx_unlock(&(sx)->sx_lock); \ 78} while (0) 79#define _SX_ASSERT_XLOCKED(sx) do { \ 80 KASSERT(((sx)->sx_xholder == curproc), \ 81 ("%s: thread %p lacking xlock %s\n", __FUNCTION__, \ | 77} while (0) 78 79/* 80 * SX_ASSERT_XLOCKED() detects and guarantees that *we* own the xlock. 81 */ 82#define SX_ASSERT_XLOCKED(sx) do { \ 83 mtx_lock(&(sx)->sx_lock); \ 84 _SX_ASSERT_XLOCKED((sx)); \ 85 mtx_unlock(&(sx)->sx_lock); \ 86} while (0) 87#define _SX_ASSERT_XLOCKED(sx) do { \ 88 KASSERT(((sx)->sx_xholder == curproc), \ 89 ("%s: thread %p lacking xlock %s\n", __FUNCTION__, \ |
82 curproc, (sx)->sx_descr)); \ | 90 curproc, (sx)->sx_object.lo_name)); \ |
83} while (0) 84 85#else /* INVARIANTS */ 86#define SX_ASSERT_SLOCKED(sx) 87#define SX_ASSERT_XLOCKED(sx) 88#define _SX_ASSERT_SLOCKED(sx) 89#define _SX_ASSERT_XLOCKED(sx) 90#endif /* INVARIANTS */ 91 92#endif /* _KERNEL */ 93#endif /* !LOCORE */ 94#endif /* _SYS_SX_H_ */ | 91} while (0) 92 93#else /* INVARIANTS */ 94#define SX_ASSERT_SLOCKED(sx) 95#define SX_ASSERT_XLOCKED(sx) 96#define _SX_ASSERT_SLOCKED(sx) 97#define _SX_ASSERT_XLOCKED(sx) 98#endif /* INVARIANTS */ 99 100#endif /* _KERNEL */ 101#endif /* !LOCORE */ 102#endif /* _SYS_SX_H_ */ |