/*-
 * Copyright (c) 2005 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/sys/refcount.h 367457 2020-11-07 18:10:59Z dim $
 */

#ifndef __SYS_REFCOUNT_H__
#define __SYS_REFCOUNT_H__

#include <sys/limits.h>
#include <machine/atomic.h>

#ifdef _KERNEL
#include <sys/systm.h>
#else
#define	KASSERT(exp, msg)	/* */
#endif

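/*
 * Initialize the counter to "value", typically 1 for a newly created
 * object whose creator holds the first reference.  This is a plain
 * store with no memory barrier, so the object must not yet be visible
 * to other threads.
 */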
static __inline void
refcount_init(volatile u_int *count, u_int value)
{

	*count = value;
}

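/*
 * Acquire an additional reference.  The caller must already hold a
 * reference (or otherwise know the count cannot be zero), since this
 * routine never fails; use refcount_acquire_if_not_zero() when the
 * count may concurrently drop to zero.
 */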
static __inline void
refcount_acquire(volatile u_int *count)
{

	KASSERT(*count < UINT_MAX, ("refcount %p overflowed", count));
	atomic_add_int(count, 1);
}

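/*
 * Drop a reference.  Returns non-zero when the last reference was
 * released, in which case the caller is expected to destroy the
 * object; otherwise returns zero.
 */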
static __inline int
refcount_release(volatile u_int *count)
{
	u_int old;

	atomic_thread_fence_rel();
	old = atomic_fetchadd_int(count, -1);
	KASSERT(old > 0, ("refcount %p is zero", count));
	if (old > 1)
		return (0);

	/*
	 * Last reference.  Signal the user to call the destructor.
	 *
	 * Ensure that the destructor sees all updates.  The fence_rel
	 * at the start of the function synchronizes with this fence.
	 */
	atomic_thread_fence_acq();
	return (1);
}

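/*
 * Typical release-side usage, as a sketch only ("struct obj", its
 * "ref" member, and obj_destroy() are hypothetical names, not part of
 * this header):
 *
 *	static void
 *	obj_rele(struct obj *o)
 *	{
 *
 *		if (refcount_release(&o->ref))
 *			obj_destroy(o);
 *	}
 */
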
/*
 * This function returns non-zero if the refcount was
 * incremented.  Otherwise zero is returned.
 *
 * A temporary hack until refcount_* APIs are sorted out.
 */
static __inline __result_use_check int
refcount_acquire_if_not_zero(volatile u_int *count)
{
	u_int old;

	old = *count;
	for (;;) {
		KASSERT(old < UINT_MAX, ("refcount %p overflowed", count));
		if (old == 0)
			return (0);
		if (atomic_fcmpset_int(count, &old, old + 1))
			return (1);
	}
}

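/*
 * One use is in lookup paths where the last reference to a found
 * object may be dropped concurrently; a zero return means the object
 * is being destroyed and must not be used.  A sketch with hypothetical
 * names, assuming the lookup itself keeps the object's memory from
 * being freed while it runs:
 *
 *	obj = table_lookup(key);
 *	if (obj != NULL && !refcount_acquire_if_not_zero(&obj->ref))
 *		obj = NULL;
 */

/*
 * Drop a reference unless it is the last one.  Returns non-zero if the
 * count was decremented, or zero if the caller holds the final
 * reference, which must then be dropped with refcount_release().
 */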
static __inline __result_use_check int
refcount_release_if_not_last(volatile u_int *count)
{
	u_int old;

	old = *count;
	for (;;) {
		KASSERT(old > 0, ("refcount %p is zero", count));
		if (old == 1)
			return (0);
		if (atomic_fcmpset_int(count, &old, old - 1))
			return (1);
	}
}
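
/*
 * One use, sketched with hypothetical names: when destroying the
 * object requires a lock (say, to unlink it from a list), the lock can
 * be avoided on the common path and taken only when the caller may
 * hold the final reference.  Note the re-check under the lock, since
 * another thread may have acquired a reference in the meantime:
 *
 *	if (!refcount_release_if_not_last(&o->ref)) {
 *		mtx_lock(&list_lock);
 *		if (refcount_release(&o->ref)) {
 *			LIST_REMOVE(o, link);
 *			obj_destroy(o);
 *		}
 *		mtx_unlock(&list_lock);
 *	}
 */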

#endif	/* ! __SYS_REFCOUNT_H__ */