kern_srp.c revision 1.5
/*	$OpenBSD: kern_srp.c,v 1.5 2015/09/11 19:22:37 dlg Exp $ */

/*
 * Copyright (c) 2014 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Shared reference pointers (srp): readers access a pointer without
 * taking a lock by publishing a per-CPU hazard record (srp_enter/
 * srp_follow/srp_leave).  Updaters swap the pointer in and hand the old
 * value to a garbage-collection pass that waits (via a retrying timeout)
 * until no CPU's hazard record still references it before calling the
 * destructor registered in the srp_gc.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/srp.h>

void srp_v_gc_start(struct srp_gc *, struct srp *, void *);

/*
 * Initialise the ref-counting ops used by srp lists: a per-element
 * reference hook plus an embedded srp_gc whose dtor acts as the unref.
 */
void
srpl_rc_init(struct srpl_rc *rc,  void (*ref)(void *, void *),
    void (*unref)(void *, void *), void *cookie)
{
	rc->srpl_ref = ref;
	srp_gc_init(&rc->srpl_gc, unref, cookie);
}

/*
 * Initialise a garbage collector: dtor(cookie, v) is called once an old
 * value is no longer referenced by any CPU.  The refcnt counts values
 * still owned by the gc; srp_finalize() waits for it to drain.
 */
void
srp_gc_init(struct srp_gc *srp_gc, void (*dtor)(void *, void *), void *cookie)
{
	srp_gc->srp_gc_dtor = dtor;
	srp_gc->srp_gc_cookie = cookie;
	refcnt_init(&srp_gc->srp_gc_refcnt);
}

void
srp_init(struct srp *srp)
{
	srp->ref = NULL;
}

/*
 * Replace the pointer with nv; the caller must serialise updates itself
 * (eg. by holding the kernel lock).  A gc reference is taken for the new
 * value and the old value, if any, is handed to the gc.
 */
void
srp_update_locked(struct srp_gc *srp_gc, struct srp *srp, void *nv)
{
	void *ov;

	if (nv != NULL)
		refcnt_take(&srp_gc->srp_gc_refcnt);

	/*
	 * this doesn't have to be as careful as the caller has already
	 * prevented concurrent updates, eg. by holding the kernel lock.
	 * can't be mixed with non-locked updates though.
	 */

	ov = srp->ref;
	srp->ref = nv;
	if (ov != NULL)
		srp_v_gc_start(srp_gc, srp, ov);
}

/*
 * Read the pointer; only valid while the caller holds the same lock that
 * serialises srp_update_locked() calls.
 */
void *
srp_get_locked(struct srp *srp)
{
	return (srp->ref);
}

#ifdef MULTIPROCESSOR
#include <machine/cpu.h>
#include <sys/pool.h>

/* state for one deferred destruction, carried across timeout retries */
struct srp_gc_ctx {
	struct srp_gc		*srp_gc;
	struct timeout		tick;
	struct srp_hazard	hzrd;	/* the (srp, v) pair being retired */
};

int	srp_v_referenced(struct srp *, void *);
void	srp_v_gc(void *);

struct pool srp_gc_ctx_pool;

void
srp_startup(void)
{
	pool_init(&srp_gc_ctx_pool, sizeof(struct srp_gc_ctx), 0, 0,
	    PR_WAITOK, "srpgc", NULL);

	/* items are allocated in a process, but freed from a timeout */
	pool_setipl(&srp_gc_ctx_pool, IPL_SOFTCLOCK);
}

/*
 * Return 1 if any CPU's hazard records still reference value v via srp.
 * sh_p is checked before sh_v with a read barrier in between, mirroring
 * the order srp_v() publishes them in.
 */
int
srp_v_referenced(struct srp *srp, void *v)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	u_int i;
	struct srp_hazard *hzrd;

	CPU_INFO_FOREACH(cii, ci) {
		for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
			hzrd = &ci->ci_srp_hazards[i];

			if (hzrd->sh_p != srp)
				continue;
			membar_consumer();
			if (hzrd->sh_v != v)
				continue;

			return (1);
		}
	}

	return (0);
}

/*
 * Actually destroy a retired value and drop the gc reference taken for
 * it at update time, waking anybody in srp_finalize().
 */
void
srp_v_dtor(struct srp_gc *srp_gc, void *v)
{
	(*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v);

	refcnt_rele_wake(&srp_gc->srp_gc_refcnt);
}

/*
 * Retire value v that was just unhooked from srp: destroy it now if no
 * CPU references it, otherwise schedule srp_v_gc() to keep retrying from
 * a timeout.
 */
void
srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	struct srp_gc_ctx *ctx;

	if (!srp_v_referenced(srp, v)) {
		/* we win */
		srp_v_dtor(srp_gc, v);
		return;
	}

	/* in use, try later */

	ctx = pool_get(&srp_gc_ctx_pool, PR_WAITOK);
	ctx->srp_gc = srp_gc;
	ctx->hzrd.sh_p = srp;
	ctx->hzrd.sh_v = v;

	timeout_set(&ctx->tick, srp_v_gc, ctx);
	timeout_add(&ctx->tick, 1);
}

/*
 * Timeout handler: retry the hazard scan until the value is finally
 * unreferenced, then destroy it and free the retry context.
 */
void
srp_v_gc(void *x)
{
	struct srp_gc_ctx *ctx = x;

	if (srp_v_referenced(ctx->hzrd.sh_p, ctx->hzrd.sh_v)) {
		/* oh well, try again later */
		timeout_add(&ctx->tick, 1);
		return;
	}

	srp_v_dtor(ctx->srp_gc, ctx->hzrd.sh_v);
	pool_put(&srp_gc_ctx_pool, ctx);
}

/*
 * Lock-free update: atomically swap the new value in and retire the old
 * one.  Safe against concurrent srp_update() callers, unlike
 * srp_update_locked().
 */
void
srp_update(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	if (v != NULL)
		refcnt_take(&srp_gc->srp_gc_refcnt);

	v = atomic_swap_ptr(&srp->ref, v);
	if (v != NULL)
		srp_v_gc_start(srp_gc, srp, v);
}

/*
 * Sleep until every value handed to this gc has been destroyed, ie. all
 * outstanding updates have been collected.
 */
void
srp_finalize(struct srp_gc *srp_gc)
{
	refcnt_finalize(&srp_gc->srp_gc_refcnt, "srpfini");
}

/*
 * Publish srp's current value into the given hazard record and return
 * it.  The store of sh_v must be visible before we re-read srp->ref;
 * retry if the value changed underneath us, since the gc may already be
 * destroying the one we latched.
 */
static inline void *
srp_v(struct srp_hazard *hzrd, struct srp *srp)
{
	void *v;

	hzrd->sh_p = srp;

	/*
	 * ensure we update this cpu's hazard pointer to a value that's still
	 * current after the store finishes, otherwise the gc task may already
	 * be destroying it
	 */
	do {
		v = srp->ref;
		hzrd->sh_v = v;
		membar_consumer();
	} while (__predict_false(v != srp->ref));

	return (v);
}

/*
 * Begin a read-side reference: claim a free hazard record on this CPU
 * and latch srp's value into it.  Panics if the per-CPU record table is
 * exhausted (too many nested srp_enter()s without srp_leave()).
 */
void *
srp_enter(struct srp *srp)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;
	u_int i;

	for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
		hzrd = &ci->ci_srp_hazards[i];
		if (hzrd->sh_p == NULL)
			return (srp_v(hzrd, srp));
	}

	panic("%s: not enough srp hazard records", __func__);

	/* NOTREACHED */
	return (NULL);
}

/*
 * Move an existing reference (srp, v) along to next, reusing the same
 * hazard record — used to walk srp-linked lists without consuming a
 * record per hop.  Panics if (srp, v) is not currently held.
 */
void *
srp_follow(struct srp *srp, void *v, struct srp *next)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;

	hzrd = ci->ci_srp_hazards + nitems(ci->ci_srp_hazards);
	while (hzrd-- != ci->ci_srp_hazards) {
		if (hzrd->sh_p == srp && hzrd->sh_v == v)
			return (srp_v(hzrd, next));
	}

	panic("%s: unexpected ref %p via %p", __func__, v, srp);

	/* NOTREACHED */
	return (NULL);
}

/*
 * Release the reference taken by srp_enter()/srp_follow(): clear sh_p so
 * the record is free for reuse and invisible to srp_v_referenced().
 * Panics if (srp, v) is not currently held.
 */
void
srp_leave(struct srp *srp, void *v)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;

	hzrd = ci->ci_srp_hazards + nitems(ci->ci_srp_hazards);
	while (hzrd-- != ci->ci_srp_hazards) {
		if (hzrd->sh_p == srp && hzrd->sh_v == v) {
			hzrd->sh_p = NULL;
			return;
		}
	}

	panic("%s: unexpected ref %p via %p", __func__, v, srp);
}

#else /* MULTIPROCESSOR */

void
srp_startup(void)
{

}

/*
 * Uniprocessor variant: no hazard scanning needed, just check that the
 * plain reference count has drained to its initial value.
 */
void
srp_finalize(struct srp_gc *srp_gc)
{
	KASSERT(srp_gc->srp_gc_refcount == 1);

	srp_gc->srp_gc_refcount--;
}

/*
 * Uniprocessor variant: nothing can still reference the old value, so
 * destroy it immediately and drop its count.
 */
void
srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	(*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v);
	srp_gc->srp_gc_refcount--;
}

#endif /* MULTIPROCESSOR */