/*	$NetBSD: kref.h,v 1.13 2022/04/09 23:43:39 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_KREF_H_
#define _LINUX_KREF_H_

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/systm.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

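/*
 * Reference count embedded in a dynamically allocated object,
 * mirroring the Linux struct kref API.  kr_count is manipulated with
 * the NetBSD atomic_* operations: kref_init sets it to 1, kref_get
 * takes additional references, and the kref_put family drops them,
 * calling the caller's release routine when the count reaches zero.
 *
 * A minimal usage sketch follows; struct foo, foo_release, and
 * foo_destroy are hypothetical names, not part of this header, and
 * container_of is not defined here either:
 *
 *	struct foo {
 *		struct kref	f_kref;
 *		...
 *	};
 *
 *	static void
 *	foo_release(struct kref *kref)
 *	{
 *		struct foo *const foo = container_of(kref, struct foo,
 *		    f_kref);
 *
 *		foo_destroy(foo);
 *	}
 *
 * A new object starts with kref_init(&foo->f_kref); additional users
 * call kref_get(&foo->f_kref); each user eventually calls
 * kref_put(&foo->f_kref, &foo_release), and the last put triggers
 * foo_release.
 */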
struct kref {
	unsigned int kr_count;
};

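/*
 * kref_init: Initialize the kref with a reference count of one, owned
 * by the caller.  The store is relaxed; safely publishing the object
 * to other threads is the caller's responsibility.
 */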
static inline void
kref_init(struct kref *kref)
{
	atomic_store_relaxed(&kref->kr_count, 1);
}

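/*
 * kref_get: Acquire an additional reference.  The caller must already
 * hold a reference; incrementing a count that had already reached zero
 * trips the assertion.
 */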
static inline void
kref_get(struct kref *kref)
{
	const unsigned int count __unused =
	    atomic_inc_uint_nv(&kref->kr_count);

	KASSERTMSG((count > 1), "getting released kref");
}

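/*
 * kref_get_unless_zero: Try to acquire a reference.  Returns true on
 * success, false if the count is zero (the object is already on its
 * way to being released) or would overflow at UINT_MAX.  Done with a
 * compare-and-swap loop so an object being released cannot be
 * resurrected.
 */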
static inline bool
kref_get_unless_zero(struct kref *kref)
{
	unsigned count;

	do {
		count = atomic_load_relaxed(&kref->kr_count);
		if ((count == 0) || (count == UINT_MAX))
			return false;
	} while (atomic_cas_uint(&kref->kr_count, count, (count + 1)) !=
	    count);

	return true;
}

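/*
 * kref_sub: Drop count references.  If that brings the reference count
 * to zero, call (*release)() and return 1; otherwise return 0.  The
 * membar_release/membar_acquire pair orders all prior use of the
 * object before its destruction on platforms where the atomic
 * operations do not already imply the needed barriers.
 */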
static inline int
kref_sub(struct kref *kref, unsigned int count, void (*release)(struct kref *))
{
	unsigned int old, new;

#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_release();
#endif

	do {
		old = atomic_load_relaxed(&kref->kr_count);
		KASSERTMSG((count <= old), "overreleasing kref: %u - %u",
		    old, count);
		new = (old - count);
	} while (atomic_cas_uint(&kref->kr_count, old, new) != old);

	if (new == 0) {
#ifndef __HAVE_ATOMIC_AS_MEMBAR
		membar_acquire();
#endif
		(*release)(kref);
		return 1;
	}

	return 0;
}

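/*
 * kref_put_lock: Drop one reference, taking the spin interlock if this
 * looks like the last one.  If the count does reach zero, (*release)()
 * is called with the interlock still held and 1 is returned; releasing
 * the interlock is then up to the release routine or its caller.
 * Otherwise 0 is returned and the interlock is not held on return.
 */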
static inline int
kref_put_lock(struct kref *kref, void (*release)(struct kref *),
    spinlock_t *interlock)
{
	unsigned int old, new;

#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_release();
#endif

	do {
		old = atomic_load_relaxed(&kref->kr_count);
		KASSERT(old > 0);
		if (old == 1) {
			spin_lock(interlock);
			if (atomic_add_int_nv(&kref->kr_count, -1) == 0) {
#ifndef __HAVE_ATOMIC_AS_MEMBAR
				membar_acquire();
#endif
				(*release)(kref);
				return 1;
			}
			spin_unlock(interlock);
			return 0;
		}
		new = (old - 1);
	} while (atomic_cas_uint(&kref->kr_count, old, new) != old);

	return 0;
}

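/*
 * kref_put: Drop one reference; if it was the last one, call
 * (*release)() and return 1, otherwise return 0.  Equivalent to
 * kref_sub(kref, 1, release).
 */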
static inline int
kref_put(struct kref *kref, void (*release)(struct kref *))
{

	return kref_sub(kref, 1, release);
}

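/*
 * kref_put_mutex: Like kref_put_lock, but interlocked with a sleepable
 * struct mutex rather than a spin lock; the mutex is likewise left
 * held across the (*release)() call when the count reaches zero.
 */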
static inline int
kref_put_mutex(struct kref *kref, void (*release)(struct kref *),
    struct mutex *interlock)
{
	unsigned int old, new;

#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_release();
#endif

	do {
		old = atomic_load_relaxed(&kref->kr_count);
		KASSERT(old > 0);
		if (old == 1) {
			mutex_lock(interlock);
			if (atomic_add_int_nv(&kref->kr_count, -1) == 0) {
#ifndef __HAVE_ATOMIC_AS_MEMBAR
				membar_acquire();
#endif
				(*release)(kref);
				return 1;
			}
			mutex_unlock(interlock);
			return 0;
		}
		new = (old - 1);
	} while (atomic_cas_uint(&kref->kr_count, old, new) != old);

	return 0;
}

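/*
 * kref_read: Return a relaxed snapshot of the current reference count,
 * with no ordering guarantees; useful mainly for diagnostics.
 */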
static inline unsigned
kref_read(const struct kref *kref)
{

	return atomic_load_relaxed(&kref->kr_count);
}

/*
 * Not native to Linux.  Mostly used for assertions...
 */

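/*
 * kref_referenced_p: True if the kref currently holds at least one
 * reference.  Unlocked read, so callers must otherwise exclude
 * concurrent updates for the answer to be stable.
 */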
static inline bool
kref_referenced_p(struct kref *kref)
{

	return (0 < kref->kr_count);
}

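/*
 * kref_exclusive_p: True if the caller holds the sole reference;
 * asserts that there is at least one.  Subject to the same caveat
 * about concurrent updates as kref_referenced_p.
 */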
static inline bool
kref_exclusive_p(struct kref *kref)
{

	KASSERT(0 < kref->kr_count);
	return (kref->kr_count == 1);
}

#endif  /* _LINUX_KREF_H_ */