/*	$OpenBSD: kern_srp.c,v 1.7 2015/11/23 10:56:19 mpi Exp $ */

/*
 * Copyright (c) 2014 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/srp.h>
#include <sys/atomic.h>

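/*
 * Shared reference pointers (srp) let readers dereference a pointer
 * without taking a lock.  Each CPU publishes the values it is using
 * in a small per-CPU array of hazard records (ci_srp_hazards); writers
 * swap the pointer in place and defer the destructor for the old value
 * until no CPU holds a hazard reference to it anymore.
 */
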
void	srp_v_gc_start(struct srp_gc *, struct srp *, void *);

void
srpl_rc_init(struct srpl_rc *rc, void (*ref)(void *, void *),
    void (*unref)(void *, void *), void *cookie)
{
	rc->srpl_ref = ref;
	srp_gc_init(&rc->srpl_gc, unref, cookie);
}

void
srp_gc_init(struct srp_gc *srp_gc, void (*dtor)(void *, void *), void *cookie)
{
	srp_gc->srp_gc_dtor = dtor;
	srp_gc->srp_gc_cookie = cookie;
	refcnt_init(&srp_gc->srp_gc_refcnt);
}

void
srp_init(struct srp *srp)
{
	srp->ref = NULL;
}

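/*
 * Replace the value in an srp when the caller serialises updates
 * itself, e.g. by holding the kernel lock.  The previous value, if
 * any, is handed to the gc, which runs the dtor once no CPU holds a
 * hazard reference to it.  Illustrative use (names are examples only):
 *
 *	KERNEL_LOCK();
 *	srp_update_locked(&frobs_gc, &frobs, newfrob);
 *	KERNEL_UNLOCK();
 */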
void
srp_update_locked(struct srp_gc *srp_gc, struct srp *srp, void *nv)
{
	void *ov;

	if (nv != NULL)
		refcnt_take(&srp_gc->srp_gc_refcnt);

	/*
	 * this doesn't have to be as careful as srp_update() because the
	 * caller has already prevented concurrent updates, e.g. by holding
	 * the kernel lock.  locked and non-locked updates can't be mixed on
	 * the same srp though.
	 */

	ov = srp->ref;
	srp->ref = nv;
	if (ov != NULL)
		srp_v_gc_start(srp_gc, srp, ov);
}

void *
srp_get_locked(struct srp *srp)
{
	return (srp->ref);
}

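/*
 * Block until every value previously installed through this gc has
 * been destroyed, i.e. until the gc refcount has drained to zero.
 */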
void
srp_finalize(struct srp_gc *srp_gc)
{
	refcnt_finalize(&srp_gc->srp_gc_refcnt, "srpfini");
}

#ifdef MULTIPROCESSOR
#include <machine/cpu.h>
#include <sys/pool.h>

struct srp_gc_ctx {
	struct srp_gc		*srp_gc;
	struct timeout		tick;
	struct srp_hazard	hzrd;
};

int	srp_v_referenced(struct srp *, void *);
void	srp_v_gc(void *);

struct pool srp_gc_ctx_pool;

void
srp_startup(void)
{
	pool_init(&srp_gc_ctx_pool, sizeof(struct srp_gc_ctx), 0, 0,
	    PR_WAITOK, "srpgc", NULL);

	/* items are allocated in process context, but freed from a timeout */
	pool_setipl(&srp_gc_ctx_pool, IPL_SOFTCLOCK);
}

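/*
 * Scan every CPU's hazard records and report whether any of them
 * still refers to value v through srp.
 */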
int
srp_v_referenced(struct srp *srp, void *v)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	u_int i;
	struct srp_hazard *hzrd;

	CPU_INFO_FOREACH(cii, ci) {
		for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
			hzrd = &ci->ci_srp_hazards[i];

			if (hzrd->sh_p != srp)
				continue;
			membar_consumer();
			if (hzrd->sh_v != v)
				continue;

			return (1);
		}
	}

	return (0);
}

void
srp_v_dtor(struct srp_gc *srp_gc, void *v)
{
	(*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v);

	refcnt_rele_wake(&srp_gc->srp_gc_refcnt);
}

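/*
 * Retire an old value.  If no CPU holds a hazard reference to it the
 * dtor runs immediately; otherwise the (srp, v) pair is parked in a
 * gc context and rechecked from a timeout until it is unreferenced.
 */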
void
srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	struct srp_gc_ctx *ctx;

	if (!srp_v_referenced(srp, v)) {
		/* we win */
		srp_v_dtor(srp_gc, v);
		return;
	}

	/* in use, try later */

	ctx = pool_get(&srp_gc_ctx_pool, PR_WAITOK);
	ctx->srp_gc = srp_gc;
	ctx->hzrd.sh_p = srp;
	ctx->hzrd.sh_v = v;

	timeout_set(&ctx->tick, srp_v_gc, ctx);
	timeout_add(&ctx->tick, 1);
}

void
srp_v_gc(void *x)
{
	struct srp_gc_ctx *ctx = x;

	if (srp_v_referenced(ctx->hzrd.sh_p, ctx->hzrd.sh_v)) {
		/* oh well, try again later */
		timeout_add(&ctx->tick, 1);
		return;
	}

	srp_v_dtor(ctx->srp_gc, ctx->hzrd.sh_v);
	pool_put(&srp_gc_ctx_pool, ctx);
}

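/*
 * Lockless update: atomically swap the new value into the srp and
 * retire whatever was there before through the gc.  Unlike
 * srp_update_locked(), concurrent callers may race here; the atomic
 * swap ensures each old value is handed to the gc exactly once.
 */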
void
srp_update(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	if (v != NULL)
		refcnt_take(&srp_gc->srp_gc_refcnt);

	v = atomic_swap_ptr(&srp->ref, v);
	if (v != NULL)
		srp_v_gc_start(srp_gc, srp, v);
}

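/*
 * Publish srp's current value in the given hazard record and return
 * it, retrying until the published value is known to still be current.
 */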
static inline void *
srp_v(struct srp_hazard *hzrd, struct srp *srp)
{
	void *v;

	hzrd->sh_p = srp;

	/*
	 * ensure we update this cpu's hazard pointer to a value that's still
	 * current after the store finishes, otherwise the gc task may already
	 * be destroying it
	 */
	do {
		v = srp->ref;
		hzrd->sh_v = v;
		membar_consumer();
	} while (__predict_false(v != srp->ref));

	return (v);
}

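/*
 * Take a hazard reference to the current value of an srp.  A typical
 * reader looks like this (names are illustrative):
 *
 *	struct frob *f;
 *
 *	f = srp_enter(&frobs);
 *	if (f != NULL)
 *		frob_use(f);
 *	srp_leave(&frobs, f);
 *
 * srp_leave() must be given the same srp and value so the hazard
 * record can be found and released again.
 */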
void *
srp_enter(struct srp *srp)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;
	u_int i;

	for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
		hzrd = &ci->ci_srp_hazards[i];
		if (hzrd->sh_p == NULL)
			return (srp_v(hzrd, srp));
	}

	panic("%s: not enough srp hazard records", __func__);

	/* NOTREACHED */
	return (NULL);
}

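/*
 * Hand an existing hazard reference over from (srp, v) to the srp
 * given by next and return the value read from next.  This allows
 * hand-over-hand traversal of srp-linked structures with a single
 * hazard record.  Illustrative walk (types and fields are examples
 * only):
 *
 *	struct srp *s = &head;
 *	struct elm *e = srp_enter(s);
 *
 *	while (e != NULL && !match(e)) {
 *		struct srp *n = &e->e_next;
 *		e = srp_follow(s, e, n);
 *		s = n;
 *	}
 *	srp_leave(s, e);
 */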
void *
srp_follow(struct srp *srp, void *v, struct srp *next)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;

	hzrd = ci->ci_srp_hazards + nitems(ci->ci_srp_hazards);
	while (hzrd-- != ci->ci_srp_hazards) {
		if (hzrd->sh_p == srp && hzrd->sh_v == v)
			return (srp_v(hzrd, next));
	}

	panic("%s: unexpected ref %p via %p", __func__, v, srp);

	/* NOTREACHED */
	return (NULL);
}

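/*
 * Drop the hazard reference that srp_enter() or srp_follow() took on
 * (srp, v), freeing the hazard record for reuse.
 */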
void
srp_leave(struct srp *srp, void *v)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;

	hzrd = ci->ci_srp_hazards + nitems(ci->ci_srp_hazards);
	while (hzrd-- != ci->ci_srp_hazards) {
		if (hzrd->sh_p == srp && hzrd->sh_v == v) {
			hzrd->sh_p = NULL;
			return;
		}
	}

	panic("%s: unexpected ref %p via %p", __func__, v, srp);
}

#else /* MULTIPROCESSOR */

void
srp_startup(void)
{
}

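/*
 * Without MULTIPROCESSOR there is only one CPU, so no remote hazard
 * references need to be checked and old values can be handed to the
 * dtor right away.
 */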
void
srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	(*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v);
	refcnt_rele_wake(&srp_gc->srp_gc_refcnt);
}

#endif /* MULTIPROCESSOR */