intr.c revision 1.32
/*	$NetBSD: intr.c,v 1.32 2010/08/15 21:28:33 pooka Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.32 2010/08/15 21:28:33 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/intr.h>
#include <sys/timetc.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Interrupt simulator.  It executes hardclock() and softintrs.
 */

#define SI_MPSAFE 0x01
#define SI_KILLME 0x02

struct softint_percpu;
struct softint {
	void (*si_func)(void *);
	void *si_arg;
	int si_flags;
	int si_level;

	struct softint_percpu *si_entry; /* [0,ncpu-1] */
};

struct softint_percpu {
	struct softint *sip_parent;
	bool sip_onlist;

	LIST_ENTRY(softint_percpu) sip_entries;
};

struct softint_lev {
	struct rumpuser_cv *si_cv;
	LIST_HEAD(, softint_percpu) si_pending;
};

kcondvar_t lbolt; /* Oh Kath Ra */

static u_int ticks;

static u_int
rumptc_get(struct timecounter *tc)
{

	KASSERT(rump_threads);
	return ticks;
}

static struct timecounter rumptc = {
	.tc_get_timecount = rumptc_get,
	.tc_poll_pps = NULL,
	.tc_counter_mask = ~0,
	.tc_frequency = 0,
	.tc_name = "rumpclk",
	.tc_quality = 0,
};

/*
 * clock "interrupt"
 */
static void
doclock(void *noarg)
{
	struct timespec clockbase, clockup;
	struct timespec thetick, curtime;
	struct rumpuser_cv *clockcv;
	struct rumpuser_mtx *clockmtx;
	uint64_t sec, nsec;
	int error;
	extern int hz;

	memset(&clockup, 0, sizeof(clockup));
	rumpuser_gettime(&sec, &nsec, &error);
	clockbase.tv_sec = sec;
	clockbase.tv_nsec = nsec;
	curtime = clockbase;
	thetick.tv_sec = 0;
	thetick.tv_nsec = 1000000000/hz;

	/* XXX: dummies */
	rumpuser_cv_init(&clockcv);
	rumpuser_mutex_init(&clockmtx);

	rumpuser_mutex_enter(clockmtx);
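	/*
	 * Main tick loop: run the callout wheel, then sleep until the
	 * absolute time of the next tick.  The wait is retried for as
	 * long as rumpuser_cv_timedwait() returns 0, i.e. until the
	 * timeout for the next tick fires, so early wakeups do not
	 * shorten the tick.  Only the thread bound to cpu0 advances
	 * the global tick count and the timecounter; the other per-CPU
	 * clock threads just drive their own callout wheels.
	 */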
	for (;;) {
		callout_hardclock();

		/* wait until the next tick.  XXX: what if the clock changes? */
		while (rumpuser_cv_timedwait(clockcv, clockmtx,
		    curtime.tv_sec, curtime.tv_nsec) == 0)
			continue;

		/* XXX: sync with a) virtual clock b) host clock */
		timespecadd(&clockup, &clockbase, &curtime);
		timespecadd(&clockup, &thetick, &clockup);

#if 0
		/* CPU_IS_PRIMARY is MD and hence not reliably correct here */
		if (!CPU_IS_PRIMARY(curcpu()))
			continue;
#else
		if (curcpu()->ci_index != 0)
			continue;
#endif

		if ((++ticks % hz) == 0) {
			cv_broadcast(&lbolt);
		}
		tc_ticktock();
	}
}

/*
 * Soft interrupt execution thread.  This thread is pinned to the
 * same CPU that scheduled the interrupt, so we don't need to
 * lock against si_lvl.
 */
static void
sithread(void *arg)
{
	struct softint_percpu *sip;
	struct softint *si;
	void (*func)(void *) = NULL;
	void *funarg;
	bool mpsafe;
	int mylevel = (uintptr_t)arg;
	struct softint_lev *si_lvlp, *si_lvl;
	struct cpu_data *cd = &curcpu()->ci_data;

	si_lvlp = cd->cpu_softcpu;
	si_lvl = &si_lvlp[mylevel];

	for (;;) {
		if (!LIST_EMPTY(&si_lvl->si_pending)) {
			sip = LIST_FIRST(&si_lvl->si_pending);
			si = sip->sip_parent;

			func = si->si_func;
			funarg = si->si_arg;
			mpsafe = si->si_flags & SI_MPSAFE;

			sip->sip_onlist = false;
			LIST_REMOVE(sip, sip_entries);
			if (si->si_flags & SI_KILLME) {
				softint_disestablish(si);
				continue;
			}
		} else {
			rump_schedlock_cv_wait(si_lvl->si_cv);
			continue;
		}

		if (!mpsafe)
			KERNEL_LOCK(1, curlwp);
		func(funarg);
		if (!mpsafe)
			KERNEL_UNLOCK_ONE(curlwp);
	}

	panic("sithread unreachable");
}

void
rump_intr_init()
{

	cv_init(&lbolt, "oh kath ra");
}

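/*
 * Per-CPU softint setup: allocate one softint_lev (pending list plus
 * wakeup cv) per softint level, hang the array off the CPU's cpu_data,
 * and start one handler thread per level plus the per-CPU clock thread.
 * On the first CPU this also registers the rump timecounter.  Does
 * nothing unless rump_threads is enabled.
 */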
void
softint_init(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *slev;
	int rv, i;

	if (!rump_threads)
		return;

	/* XXX */
	if (ci->ci_index == 0) {
		rumptc.tc_frequency = hz;
		tc_init(&rumptc);
	}

	slev = kmem_alloc(sizeof(struct softint_lev) * SOFTINT_COUNT, KM_SLEEP);
	for (i = 0; i < SOFTINT_COUNT; i++) {
		rumpuser_cv_init(&slev[i].si_cv);
		LIST_INIT(&slev[i].si_pending);
	}
	cd->cpu_softcpu = slev;

	/* softint might run on a different physical CPU */
	membar_sync();

	for (i = 0; i < SOFTINT_COUNT; i++) {
		rv = kthread_create(PRI_NONE,
		    KTHREAD_MPSAFE | KTHREAD_INTR, ci,
		    sithread, (void *)(uintptr_t)i,
		    NULL, "rsi%d/%d", ci->ci_index, i);
	}

	rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE,
	    ci, doclock, NULL, NULL, "rumpclk%d", ci->ci_index);
	if (rv)
		panic("clock thread creation failed: %d", rv);
}

/*
 * Soft interrupts bring two choices.  If we are running with thread
 * support enabled, defer execution; otherwise execute in place.
 * See softint_schedule().
 *
 * As there is currently no clear concept of when a thread finishes
 * work (although rump_clear_curlwp() is close), simply execute all
 * softints in the timer thread.  This is probably not the most
 * efficient method, but good enough for now.
 */
void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	struct softint *si;
	struct softint_percpu *sip;
	int i;

	si = malloc(sizeof(*si), M_TEMP, M_WAITOK);
	si->si_func = func;
	si->si_arg = arg;
	si->si_flags = flags & SOFTINT_MPSAFE ? SI_MPSAFE : 0;
	si->si_level = flags & SOFTINT_LVLMASK;
	KASSERT(si->si_level < SOFTINT_COUNT);
	si->si_entry = malloc(sizeof(*si->si_entry) * ncpu,
	    M_TEMP, M_WAITOK | M_ZERO);
	for (i = 0; i < ncpu; i++) {
		sip = &si->si_entry[i];
		sip->sip_parent = si;
	}

	return si;
}

void
softint_schedule(void *arg)
{
	struct softint *si = arg;
	struct softint_percpu *sip = &si->si_entry[curcpu()->ci_index];
	struct cpu_data *cd = &curcpu()->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;

	if (!rump_threads) {
		si->si_func(si->si_arg);
	} else {
		if (!sip->sip_onlist) {
			LIST_INSERT_HEAD(&si_lvl[si->si_level].si_pending,
			    sip, sip_entries);
			sip->sip_onlist = true;
		}
	}
}

/*
 * Flimsy disestablish: should wait for softints to finish.
 */
void
softint_disestablish(void *cook)
{
	struct softint *si = cook;
	int i;

	for (i = 0; i < ncpu; i++) {
		struct softint_percpu *sip;

		sip = &si->si_entry[i];
		if (sip->sip_onlist) {
			si->si_flags |= SI_KILLME;
			return;
		}
	}
	free(si->si_entry, M_TEMP);
	free(si, M_TEMP);
}

void
rump_softint_run(struct cpu_info *ci)
{
	struct cpu_data *cd = &ci->ci_data;
	struct softint_lev *si_lvl = cd->cpu_softcpu;
	int i;

	if (!rump_threads)
		return;

	for (i = 0; i < SOFTINT_COUNT; i++) {
		if (!LIST_EMPTY(&si_lvl[i].si_pending))
			rumpuser_cv_signal(si_lvl[i].si_cv);
	}
}

bool
cpu_intr_p(void)
{

	return false;
}

bool
cpu_softintr_p(void)
{

	return curlwp->l_pflag & LP_INTR;
}
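/*
 * For reference, a typical consumer of the interface implemented above
 * follows the standard softint(9) pattern.  The names used below
 * (example_sih, example_intr, example_attach, example_hardintr) are
 * purely illustrative and not part of this file:
 *
 *	static void *example_sih;
 *
 *	static void
 *	example_intr(void *arg)
 *	{
 *		... deferred ("bottom half") work, runs in softint context ...
 *	}
 *
 *	static void
 *	example_attach(void)
 *	{
 *		example_sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
 *		    example_intr, NULL);
 *	}
 *
 *	static void
 *	example_hardintr(void)
 *	{
 *		softint_schedule(example_sih);
 *	}
 */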