intr.c revision 1.19
1/*	$NetBSD: intr.c,v 1.19 2009/11/06 15:22:16 pooka Exp $	*/
2
3/*
4 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.19 2009/11/06 15:22:16 pooka Exp $");
30
31#include <sys/param.h>
32#include <sys/cpu.h>
33#include <sys/kmem.h>
34#include <sys/kthread.h>
35#include <sys/intr.h>
36
37#include <rump/rumpuser.h>
38
39#include "rump_private.h"
40
41/*
42 * Interrupt simulator.  It executes hardclock() and softintrs.
43 */
44
/* seconds of uptime; bumped once per hz ticks by doclock() */
time_t time_uptime = 0;

#define SI_MPSAFE 0x01	/* handler may run without the big kernel lock */
#define SI_ONLIST 0x02	/* currently queued on si_pending */
#define SI_KILLME 0x04	/* free this softint once it is dequeued */
/* a schedulable soft interrupt: handler, its argument, and queue linkage */
struct softint {
	void (*si_func)(void *);	/* handler to run */
	void *si_arg;			/* opaque argument passed to si_func */
	int si_flags;			/* SI_* flags above */

	LIST_ENTRY(softint) si_entries;	/* linkage on si_pending */
};
/* pending softints; si_mtx/si_cv protect the list and worker counters */
static LIST_HEAD(, softint) si_pending = LIST_HEAD_INITIALIZER(si_pending);
static kmutex_t si_mtx;
static kcondvar_t si_cv;

#define INTRTHREAD_DEFAULT	2	/* workers created at init */
#define INTRTHREAD_MAX		20	/* cap on dynamically created workers */
static int wrkidle, wrktotal;		/* idle/total workers, under si_mtx */

static void sithread(void *);
66
67static void
68makeworker(bool bootstrap)
69{
70	int rv;
71
72	if (wrktotal > INTRTHREAD_MAX) {
73		/* XXX: ratecheck */
74		printf("maximum interrupt threads (%d) reached\n",
75		    INTRTHREAD_MAX);
76		return;
77	}
78	rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_INTR, NULL,
79	    sithread, NULL, NULL, "rumpsi");
80	if (rv) {
81		if (bootstrap)
82			panic("intr thread creation failed %d", rv);
83		else
84			printf("intr thread creation failed %d\n", rv);
85	} else {
86		wrkidle++;
87		wrktotal++;
88	}
89}
90
/* rumpuser structures since we call rumpuser interfaces directly */
static struct rumpuser_cv *clockcv;
static struct rumpuser_mtx *clockmtx;
static struct timespec clockbase, clockup;	/* boot wallclock base / uptime */
static unsigned clkgen;	/* seqlock generation for clockup; odd = update in progress */
96
97void
98rump_getuptime(struct timespec *ts)
99{
100	int startgen, i = 0;
101
102	do {
103		startgen = clkgen;
104		if (__predict_false(i++ > 10)) {
105			yield();
106			i = 0;
107		}
108		*ts = clockup;
109	} while (startgen != clkgen || clkgen % 2 != 0);
110}
111
112void
113rump_gettime(struct timespec *ts)
114{
115	struct timespec ts_up;
116
117	rump_getuptime(&ts_up);
118	timespecadd(&clockbase, &ts_up, ts);
119}
120
/*
 * clock "interrupt": runs callout_hardclock() once per tick and
 * maintains clockup (uptime) and time_uptime.  Started from
 * softint_init() when rump_threads is enabled.
 */
static void
doclock(void *noarg)
{
	struct timespec tick, curtime;
	uint64_t sec, nsec;
	int ticks = 0, error;
	extern int hz;

	/* record the host wallclock at startup as the boot time base */
	rumpuser_gettime(&sec, &nsec, &error);
	clockbase.tv_sec = sec;
	clockbase.tv_nsec = nsec;
	curtime = clockbase;
	tick.tv_sec = 0;
	tick.tv_nsec = 1000000000/hz;

	/* tell softint_init() that clockbase is now valid */
	rumpuser_mutex_enter(clockmtx);
	rumpuser_cv_signal(clockcv);

	for (;;) {
		callout_hardclock();

		/* bump the seconds counter once every hz ticks */
		if (++ticks == hz) {
			time_uptime++;
			ticks = 0;
		}

		/* wait until the next tick. XXX: what if the clock changes? */
		while (rumpuser_cv_timedwait(clockcv, clockmtx,
		    &curtime) != EWOULDBLOCK)
			continue;

		/*
		 * Advance uptime under the clkgen seqlock: the generation
		 * is odd while clockup is inconsistent, so readers in
		 * rump_getuptime() retry until it is even again.
		 */
		clkgen++;
		timespecadd(&clockup, &tick, &clockup);
		clkgen++;
		timespecadd(&clockup, &clockbase, &curtime);
	}
}
161
/*
 * Softint worker thread: dequeue scheduled soft interrupts from
 * si_pending and run their handlers.  Several instances may run
 * concurrently; si_mtx protects the pending list and the
 * wrkidle/wrktotal counters.
 */
static void
sithread(void *arg)
{
	struct softint *si;
	void (*func)(void *) = NULL;
	void *funarg;
	bool mpsafe;

	mutex_enter(&si_mtx);
	for (;;) {
		if (!LIST_EMPTY(&si_pending)) {
			si = LIST_FIRST(&si_pending);
			/* copy handler info out: si may be freed below */
			func = si->si_func;
			funarg = si->si_arg;
			mpsafe = si->si_flags & SI_MPSAFE;

			si->si_flags &= ~SI_ONLIST;
			LIST_REMOVE(si, si_entries);
			/* deferred disestablish: free now that it is off-list */
			if (si->si_flags & SI_KILLME)
				softint_disestablish(si);
		} else {
			cv_wait(&si_cv, &si_mtx);
			continue;
		}
		/* keep at least one idle worker while we run the handler */
		wrkidle--;
		if (__predict_false(wrkidle == 0))
			makeworker(false);
		mutex_exit(&si_mtx);

		/* non-MPSAFE handlers run under the big kernel lock */
		if (!mpsafe)
			KERNEL_LOCK(1, curlwp);
		func(funarg);
		if (!mpsafe)
			KERNEL_UNLOCK_ONE(curlwp);

		mutex_enter(&si_mtx);
		wrkidle++;
	}
}
204
/*
 * Initialize the interrupt simulator: set up the locks and, when
 * threads are enabled, start the clock thread plus the initial batch
 * of softint workers.  Does not return until the clock thread has
 * recorded clockbase, so rump_gettime() is usable immediately after.
 */
void
softint_init(struct cpu_info *ci)
{
	int rv;

	mutex_init(&si_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&si_cv, "intrw8"); /* cv of temporary w8ness */

	rumpuser_cv_init(&clockcv);
	rumpuser_mutex_init(&clockmtx);

	/* XXX: should have separate "wanttimer" control */
	if (rump_threads) {
		/* take clockmtx before starting doclock() so we cannot
		 * miss its startup signal */
		rumpuser_mutex_enter(clockmtx);
		rv = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, doclock,
		    NULL, NULL, "rumpclk");
		if (rv)
			panic("clock thread creation failed: %d", rv);
		mutex_enter(&si_mtx);
		while (wrktotal < INTRTHREAD_DEFAULT) {
			/* bootstrap == true: failure here is fatal */
			makeworker(true);
		}
		mutex_exit(&si_mtx);

		/* make sure we have a clocktime before returning */
		rumpuser_cv_wait(clockcv, clockmtx);
		rumpuser_mutex_exit(clockmtx);
	}
}
234
235/*
236 * Soft interrupts bring two choices.  If we are running with thread
237 * support enabled, defer execution, otherwise execute in place.
238 * See softint_schedule().
239 *
240 * As there is currently no clear concept of when a thread finishes
241 * work (although rump_clear_curlwp() is close), simply execute all
242 * softints in the timer thread.  This is probably not the most
243 * efficient method, but good enough for now.
244 */
245void *
246softint_establish(u_int flags, void (*func)(void *), void *arg)
247{
248	struct softint *si;
249
250	si = kmem_alloc(sizeof(*si), KM_SLEEP);
251	si->si_func = func;
252	si->si_arg = arg;
253	si->si_flags = flags & SOFTINT_MPSAFE ? SI_MPSAFE : 0;
254
255	return si;
256}
257
258void
259softint_schedule(void *arg)
260{
261	struct softint *si = arg;
262
263	if (!rump_threads) {
264		si->si_func(si->si_arg);
265	} else {
266		mutex_enter(&si_mtx);
267		if (!(si->si_flags & SI_ONLIST)) {
268			LIST_INSERT_HEAD(&si_pending, si, si_entries);
269			si->si_flags |= SI_ONLIST;
270		}
271		cv_signal(&si_cv);
272		mutex_exit(&si_mtx);
273	}
274}
275
276/* flimsy disestablish: should wait for softints to finish */
277void
278softint_disestablish(void *cook)
279{
280	struct softint *si = cook;
281
282	if (si->si_flags & SI_ONLIST) {
283		si->si_flags |= SI_KILLME;
284		return;
285	}
286	kmem_free(si, sizeof(*si));
287}
288
/*
 * The rump interrupt simulator runs everything from thread context,
 * so we are never in hard interrupt context.
 */
bool
cpu_intr_p(void)
{

	return false;
}
295
/*
 * Softints actually execute from worker threads here, so claim we
 * are never in softint context.  Yea, we lie a bit for now.
 */
bool
cpu_softintr_p(void)
{

	return false;
}
303