/*	$OpenBSD: kern_srp.c,v 1.2 2015/09/01 03:47:58 dlg Exp $ */

/*
 * Copyright (c) 2014 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <sys/srp.h>

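/*
 * srp (shared reference pointers) lets readers dereference a pointer
 * without taking a lock, while writers replace the value and defer
 * destruction of the old one until no cpu still holds a reference.
 *
 * An illustrative sketch of a reader (foo_srp and use() are made up
 * for the example, not part of this file):
 *
 *	struct foo *f;
 *
 *	f = srp_enter(&foo_srp);
 *	if (f != NULL)
 *		use(f);
 *	srp_leave(&foo_srp, f);
 *
 * A writer publishes a new value with srp_update() (or
 * srp_update_locked() if its updates are already serialised); the
 * previous value is handed to the garbage collector below.
 */
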
void	srp_v_gc_start(struct srp_gc *, struct srp *, void *);

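/*
 * Set up a garbage collector context. The refcount starts at 1; that
 * initial reference belongs to the gc itself and is only released by
 * srp_finalize().
 */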
void
srp_gc_init(struct srp_gc *srp_gc, void (*dtor)(void *, void *), void *cookie)
{
	srp_gc->srp_gc_dtor = dtor;
	srp_gc->srp_gc_cookie = cookie;
	srp_gc->srp_gc_refcount = 1;
}

void
srp_init(struct srp *srp)
{
	srp->ref = NULL;
}

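/*
 * Replace the value in an srp when the caller serialises updates
 * itself. The old value, if any, is handed to the gc for deferred
 * destruction.
 */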
void
srp_update_locked(struct srp_gc *srp_gc, struct srp *srp, void *nv)
{
	void *ov;

	if (nv != NULL)
		atomic_inc_int(&srp_gc->srp_gc_refcount);

	/*
	 * this doesn't have to be as careful as srp_update() since the
	 * caller has already prevented concurrent updates, e.g. by
	 * holding the kernel lock. locked and non-locked updates can't
	 * be mixed on the same srp though.
	 */

	ov = srp->ref;
	srp->ref = nv;
	if (ov != NULL)
		srp_v_gc_start(srp_gc, srp, ov);
}

void *
srp_get_locked(struct srp *srp)
{
	return (srp->ref);
}

#ifdef MULTIPROCESSOR
#include <machine/cpu.h>
#include <sys/pool.h>

struct srp_gc_ctx {
	struct srp_gc		*srp_gc;
	struct timeout		tick;
	struct srp_hazard	hzrd;
};

int	srp_v_referenced(struct srp *, void *);
void	srp_v_gc(void *);

struct pool srp_gc_ctx_pool;

void
srp_startup(void)
{
	pool_init(&srp_gc_ctx_pool, sizeof(struct srp_gc_ctx), 0, 0,
	    PR_WAITOK, "srpgc", NULL);

	/* items are allocated in process context, but freed from a timeout */
	pool_setipl(&srp_gc_ctx_pool, IPL_SOFTCLOCK);
}

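/*
 * Scan every cpu's hazard slots and return 1 if some cpu still holds
 * a reference to v through srp. The membar_consumer() keeps the read
 * of sh_v from being reordered before the read of sh_p.
 */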
int
srp_v_referenced(struct srp *srp, void *v)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	u_int i;
	struct srp_hazard *hzrd;

	CPU_INFO_FOREACH(cii, ci) {
		for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
			hzrd = &ci->ci_srp_hazards[i];

			if (hzrd->sh_p != srp)
				continue;
			membar_consumer();
			if (hzrd->sh_v != v)
				continue;

			return (1);
		}
	}

	return (0);
}

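/*
 * Destroy a value that is no longer referenced and drop the gc
 * reference that went with it, waking srp_finalize() when the last
 * reference goes away.
 */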
void
srp_v_dtor(struct srp_gc *srp_gc, void *v)
{
	(*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v);

	if (atomic_dec_int_nv(&srp_gc->srp_gc_refcount) == 0)
		wakeup_one(&srp_gc->srp_gc_refcount);
}

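/*
 * Called with a value that has just been replaced. If no cpu holds a
 * reference it can be destroyed right away; otherwise a timeout is
 * scheduled to retry from srp_v_gc().
 */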
void
srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	struct srp_gc_ctx *ctx;

	if (!srp_v_referenced(srp, v)) {
		/* we win */
		srp_v_dtor(srp_gc, v);
		return;
	}

	/* in use, try later */

	ctx = pool_get(&srp_gc_ctx_pool, PR_WAITOK);
	ctx->srp_gc = srp_gc;
	ctx->hzrd.sh_p = srp;
	ctx->hzrd.sh_v = v;

	timeout_set(&ctx->tick, srp_v_gc, ctx);
	timeout_add(&ctx->tick, 1);
}

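/*
 * Timeout handler: keep rescheduling until the value is no longer
 * referenced by any cpu, then destroy it and free the context.
 */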
void
srp_v_gc(void *x)
{
	struct srp_gc_ctx *ctx = x;

	if (srp_v_referenced(ctx->hzrd.sh_p, ctx->hzrd.sh_v)) {
		/* oh well, try again later */
		timeout_add(&ctx->tick, 1);
		return;
	}

	srp_v_dtor(ctx->srp_gc, ctx->hzrd.sh_v);
	pool_put(&srp_gc_ctx_pool, ctx);
}

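/*
 * Lock-free update: atomically swap the new value in, then hand the
 * old one (if any) to the gc. The gc reference for the new value is
 * taken before it becomes visible, so the refcount always covers
 * every live value.
 */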
void
srp_update(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	if (v != NULL)
		atomic_inc_int(&srp_gc->srp_gc_refcount);

	v = atomic_swap_ptr(&srp->ref, v);
	if (v != NULL)
		srp_v_gc_start(srp_gc, srp, v);
}

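/*
 * Drop the gc's initial reference and sleep until every deferred
 * destructor has run, i.e. until the refcount reaches zero.
 */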
void
srp_finalize(struct srp_gc *srp_gc)
{
	struct sleep_state sls;
	u_int r;

	r = atomic_dec_int_nv(&srp_gc->srp_gc_refcount);
	while (r > 0) {
		sleep_setup(&sls, &srp_gc->srp_gc_refcount, PWAIT, "srpfini");
		r = srp_gc->srp_gc_refcount;
		sleep_finish(&sls, r);
	}
}

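/*
 * Publish a hazard reference: store the value in the slot, then
 * re-read srp->ref to make sure the published value is still current,
 * so the gc can't already be destroying it.
 */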
static inline void *
srp_v(struct srp_hazard *hzrd, struct srp *srp)
{
	void *v;

	hzrd->sh_p = srp;

	/*
	 * ensure we update this cpu's hazard pointer to a value that's still
	 * current after the store finishes, otherwise the gc task may already
	 * be destroying it
	 */
	do {
		v = srp->ref;
		hzrd->sh_v = v;
		membar_consumer();
	} while (__predict_false(v != srp->ref));

	return (v);
}

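/*
 * Take a reference to the current value of srp using a free hazard
 * slot on this cpu. Every srp_enter() must be matched by srp_leave()
 * (or srp_follow()) with the value that was returned.
 */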
void *
srp_enter(struct srp *srp)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;
	u_int i;

	for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
		hzrd = &ci->ci_srp_hazards[i];
		if (hzrd->sh_p == NULL)
			return (srp_v(hzrd, srp));
	}

	panic("%s: not enough srp hazard records", __func__);

	/* NOTREACHED */
	return (NULL);
}

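/*
 * Move a hazard reference from one srp to the next, e.g. while
 * walking a chain of srp-linked structures, reusing the slot that
 * holds the current reference.
 */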
void *
srp_follow(struct srp *srp, void *v, struct srp *next)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;

	hzrd = ci->ci_srp_hazards + nitems(ci->ci_srp_hazards);
	while (hzrd-- != ci->ci_srp_hazards) {
		if (hzrd->sh_p == srp && hzrd->sh_v == v)
			return (srp_v(hzrd, next));
	}

	panic("%s: unexpected ref %p via %p", __func__, v, srp);

	/* NOTREACHED */
	return (NULL);
}

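/*
 * Release a hazard reference by clearing the slot that holds it.
 */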
void
srp_leave(struct srp *srp, void *v)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;

	hzrd = ci->ci_srp_hazards + nitems(ci->ci_srp_hazards);
	while (hzrd-- != ci->ci_srp_hazards) {
		if (hzrd->sh_p == srp && hzrd->sh_v == v) {
			hzrd->sh_p = NULL;
			return;
		}
	}

	panic("%s: unexpected ref %p via %p", __func__, v, srp);
}

#else /* MULTIPROCESSOR */

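/*
 * On uniprocessor kernels there are no other cpus that could still
 * hold a reference, so replaced values are destroyed immediately and
 * none of the deferred gc machinery is needed.
 */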
void
srp_startup(void)
{

}

void
srp_finalize(struct srp_gc *srp_gc)
{
	KASSERT(srp_gc->srp_gc_refcount == 1);

	srp_gc->srp_gc_refcount--;
}

void
srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	(*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v);
	srp_gc->srp_gc_refcount--;
}

#endif /* MULTIPROCESSOR */