/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_poll.c 121292 2003-10-20 21:14:24Z sam $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/socket.h>			/* needed by net/if.h		*/
#include <sys/sysctl.h>

#include <net/if.h>			/* for IFF_* flags		*/
#include <net/netisr.h>			/* for NETISR_POLL		*/

#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/kthread.h>

#ifdef SMP
#ifndef COMPILING_LINT
#error DEVICE_POLLING is not compatible with SMP
#endif
#endif

static void netisr_poll(void);		/* the two netisr handlers      */
static void netisr_pollmore(void);

void hardclock_device_poll(void);	/* hook from hardclock		*/
void ether_poll(int);			/* polling while in trap	*/

/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature try to register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *  POLL_DEREGISTER: deregister and return to interrupt mode.
 *
 * The first two commands are only issued if the interface is marked as
 * 'IFF_UP and IFF_RUNNING', the last one only if IFF_RUNNING is set.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Deregistration can be requested by the driver itself (typically in the
 * *_stop() routine), or by the polling code, by invoking the handler.
 *
 * Polling can be globally enabled or disabled with the sysctl variable
 * kern.polling.enable (default is 0, disabled).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks. CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	0 <= poll_in_trap <= poll_each_burst
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
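
/*
 * As an illustration of the interface described above, a driver's poll
 * handler typically looks like the hypothetical sketch below; the xx_*
 * names are placeholders, not part of this file or of any real driver.
 */
#if 0	/* example only, never compiled */
static void
xx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct xx_softc *sc = ifp->if_softc;

	if (cmd == POLL_DEREGISTER) {	/* final call, back to interrupts */
		xx_enable_intr(sc);
		return;
	}
	xx_rxeof(sc, count);		/* receive at most "count" packets */
	xx_txeof(sc);			/* reclaim completed transmissions */
	if (cmd == POLL_AND_CHECK_STATUS)
		xx_check_status(sc);	/* the more expensive checks */
}
#endif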

#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	1000

SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
	"Device polling parameters");

static u_int32_t poll_burst = 5;
SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RW,
	&poll_burst, 0, "Current polling burst size");

static u_int32_t poll_each_burst = 5;
SYSCTL_UINT(_kern_polling, OID_AUTO, each_burst, CTLFLAG_RW,
	&poll_each_burst, 0, "Max size of each burst");

static u_int32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
SYSCTL_UINT(_kern_polling, OID_AUTO, burst_max, CTLFLAG_RW,
	&poll_burst_max, 0, "Max polling burst size");

static u_int32_t poll_in_idle_loop = 0;	/* do we poll in idle loop? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
	&poll_in_idle_loop, 0, "Enable device polling in idle loop");

u_int32_t poll_in_trap;			/* used in trap.c */
SYSCTL_UINT(_kern_polling, OID_AUTO, poll_in_trap, CTLFLAG_RW,
	&poll_in_trap, 0, "Poll burst size during a trap");

static u_int32_t user_frac = 50;
SYSCTL_UINT(_kern_polling, OID_AUTO, user_frac, CTLFLAG_RW,
	&user_frac, 0, "Desired user fraction of cpu time");

static u_int32_t reg_frac = 20;
SYSCTL_UINT(_kern_polling, OID_AUTO, reg_frac, CTLFLAG_RW,
	&reg_frac, 0, "Check status registers every this many polling cycles");

static u_int32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RW,
	&short_ticks, 0, "Hardclock ticks shorter than they should be");

static u_int32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RW,
	&lost_polls, 0, "How many times we would have lost a poll tick");

static u_int32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RW,
	&pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RW,
	&residual_burst, 0, "# of residual cycles in burst");

static u_int32_t poll_handlers; /* next free entry in pr[]. */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
	&poll_handlers, 0, "Number of registered poll handlers");

static int polling = 0;		/* global polling enable */
SYSCTL_INT(_kern_polling, OID_AUTO, enable, CTLFLAG_RW,
	&polling, 0, "Polling enabled");

static u_int32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RW,
	&phase, 0, "Polling phase");
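/*
 * Values taken by "phase" above, as assigned in the code below:
 * 0 idle, 1-2 being scheduled from hardclock, 3-4 inside netisr_poll,
 * 5-6 inside netisr_pollmore (6 means another cycle was rescheduled).
 */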

static u_int32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RW,
	&suspect, 0, "suspect event");

static u_int32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RW,
	&stalled, 0, "potential stalls");

static u_int32_t idlepoll_sleeping; /* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
	&idlepoll_sleeping, 0, "idlepoll is sleeping");
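
/*
 * For example, to turn polling on and give 50% of the CPU to userland,
 * one would use the standard sysctl(8) utility:
 *
 *	sysctl kern.polling.enable=1
 *	sysctl kern.polling.user_frac=50
 */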

#define POLL_LIST_LEN  128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
};

static struct pollrec pr[POLL_LIST_LEN];

static void
init_device_poll(void)
{

	netisr_register(NETISR_POLL, (netisr_t *)netisr_poll, NULL);
	netisr_register(NETISR_POLLMORE, (netisr_t *)netisr_pollmore, NULL);
}
SYSINIT(device_poll, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, init_device_poll, NULL)

/*
 * Hook from hardclock. Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because the polling handler should
 * run for a short time. However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even on the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * indicating either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
		(t.tv_sec - prev_t.tv_sec)*1000000;
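	/*
	 * A tick is considered short if less than half of the nominal
	 * tick period (1000000 / hz us) has elapsed, i.e. if
	 * delta < 500000 / hz.
	 */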
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/*
		 * Too many, assume the handler has stalled (not always
		 * true; see the comment above).
		 */
		stalled++;
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}

/*
 * ether_poll is called from the idle loop or from the trap handler.
 */
void
ether_poll(int count)
{
	int i;

	mtx_lock(&Giant);

	if (count > poll_each_burst)
		count = poll_each_burst;
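	/* Only poll interfaces that are both up and running. */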
	for (i = 0 ; i < poll_handlers ; i++)
		if (pr[i].handler && (IFF_UP|IFF_RUNNING) ==
		    (pr[i].ifp->if_flags & (IFF_UP|IFF_RUNNING)) )
			pr[i].handler(pr[i].ifp, 0, count); /* quick check */
	mtx_unlock(&Giant);
}

/*
 * netisr_pollmore is called after other netisr's, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness. To reduce the problem, and also to account better for
 * time spent in network-related processing, we split the burst into smaller
 * chunks of fixed size, giving control to the other netisr's between chunks.
 * This helps improve fairness, reduce livelock (because we emulate more
 * closely the "process to completion" that we have with fastforwarding) and
 * account for the work performed in low-level handling and forwarding.
 */
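/*
 * For example, with the defaults poll_burst_max = 150 and
 * poll_each_burst = 5, a full burst is consumed in chunks of at most
 * 5 packets per interface, letting the other netisr's run between
 * chunks, instead of in a single pass of up to 150.
 */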

static struct timeval poll_start_t;

static void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;
	/* XXX run at splhigh() or equivalent */

	phase = 5;
	if (residual_burst > 0) {
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		/* will run immediately on return, followed by netisrs */
		return;
	}
	/* here we can account for the time spent in netisr's in this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
		(t.tv_sec - poll_start_t.tv_sec)*1000000;	/* us */
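	/*
	 * Convert microseconds to a percentage of the tick: one tick
	 * lasts 1000000 / hz us, so the percentage is
	 * kern_load * 100 / (1000000 / hz) = kern_load * hz / 10000.
	 */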
	kern_load = (kern_load * hz) / 10000;		/* 0..100 */
	if (kern_load > (100 - user_frac)) {	/* over budget, reduce burst */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0) /* we are done */
		phase = 0;
	else {
		/*
		 * The last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing again, but slightly
		 * reduce the burst size to make this less likely to happen
		 * again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		phase = 6;
	}
}

/*
 * netisr_poll is scheduled by schednetisr when appropriate, typically once
 * per tick. It is called at splnet() so the first thing to do is to upgrade
 * to splimp(), and then call all the registered handlers.
 */
static void
netisr_poll(void)
{
	static int reg_frac_count;
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;

	mtx_lock(&Giant);

	phase = 3;
	if (residual_burst == 0) { /* first call in this tick */
		microuptime(&poll_start_t);
		/*
		 * Check that parameters are consistent with runtime
		 * variables. Some of these tests could be done at sysctl
		 * time, but the savings would be very limited because we
		 * still have to check against reg_frac_count and
		 * poll_each_burst. So, instead of writing separate sysctl
		 * handlers, we do it all here.
		 */

		if (reg_frac > hz)
			reg_frac = hz;
		else if (reg_frac < 1)
			reg_frac = 1;
		if (reg_frac_count > reg_frac)
			reg_frac_count = reg_frac - 1;
		if (reg_frac_count-- == 0) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = reg_frac - 1;
		}
		if (poll_burst_max < MIN_POLL_BURST_MAX)
			poll_burst_max = MIN_POLL_BURST_MAX;
		else if (poll_burst_max > MAX_POLL_BURST_MAX)
			poll_burst_max = MAX_POLL_BURST_MAX;

		if (poll_each_burst < 1)
			poll_each_burst = 1;
		else if (poll_each_burst > poll_burst_max)
			poll_each_burst = poll_burst_max;

		residual_burst = poll_burst;
	}
	cycles = (residual_burst < poll_each_burst) ?
		residual_burst : poll_each_burst;
	residual_burst -= cycles;

	if (polling) {
		for (i = 0 ; i < poll_handlers ; i++)
			if (pr[i].handler && (IFF_UP|IFF_RUNNING) ==
			    (pr[i].ifp->if_flags & (IFF_UP|IFF_RUNNING)) )
				pr[i].handler(pr[i].ifp, arg, cycles);
	} else {	/* unregister */
		for (i = 0 ; i < poll_handlers ; i++) {
			if (pr[i].handler &&
			    pr[i].ifp->if_flags & IFF_RUNNING) {
				pr[i].ifp->if_flags &= ~IFF_POLLING;
				pr[i].handler(pr[i].ifp, POLL_DEREGISTER, 1);
			}
			pr[i].handler = NULL;
		}
		residual_burst = 0;
		poll_handlers = 0;
	}
	/* on -stable, schednetisr(NETISR_POLLMORE); */
	phase = 4;
	mtx_unlock(&Giant);
}

/*
 * Try to register a routine for polling. Returns 1 if successful
 * (and polling should be enabled), 0 otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_intr() functions, so we do not need
 * further locking.
 */
int
ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
{
	int s;

	if (polling == 0) /* polling disabled, cannot register */
		return 0;
	if (h == NULL || ifp == NULL)		/* bad arguments	*/
		return 0;
	if ( !(ifp->if_flags & IFF_UP) )	/* must be up		*/
		return 0;
	if (ifp->if_flags & IFF_POLLING)	/* already polling	*/
		return 0;

	s = splhigh();
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyway, so just report a few times and then give up.
		 */
		static int verbose = 10;
		splx(s);
		if (verbose > 0) {
			printf("poll handlers list full, "
				"maybe a broken driver?\n");
			verbose--;
		}
		return 0; /* no polling for you */
	}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	ifp->if_flags |= IFF_POLLING;
	splx(s);
	if (idlepoll_sleeping)
		wakeup(&idlepoll_sleeping);
	return 1; /* polling enabled in next call */
}
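
/*
 * As a sketch of how a driver might use the registration interface
 * above (again, xx_* names are placeholders, not a real driver), its
 * interrupt handler would try to switch to polling mode, e.g.:
 */
#if 0	/* example only, never compiled */
static void
xx_intr(void *arg)
{
	struct xx_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if ((ifp->if_flags & IFF_POLLING) == 0 &&
	    ether_poll_register(xx_poll, ifp)) {
		xx_disable_intr(sc);		/* mask device interrupts */
		xx_poll(ifp, POLL_ONLY, 1);	/* pick up pending events */
		return;
	}
	/* ... normal interrupt processing follows; xx_stop() would
	 * call ether_poll_deregister(ifp). */
}
#endif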

/*
 * Remove interface from the polling list. Normally called by *_stop().
 * It is not an error to call it with IFF_POLLING clear; the call is
 * sufficiently rare that paying for one extra function call is
 * preferable to adding the test to each driver.
 */
int
ether_poll_deregister(struct ifnet *ifp)
{
	int i;

	mtx_lock(&Giant);
	if ( !ifp || !(ifp->if_flags & IFF_POLLING) ) {
		mtx_unlock(&Giant);
		return 0;
	}
	for (i = 0 ; i < poll_handlers ; i++)
		if (pr[i].ifp == ifp) /* found it */
			break;
	ifp->if_flags &= ~IFF_POLLING; /* found or not... */
	if (i == poll_handlers) {
		mtx_unlock(&Giant);
		printf("ether_poll_deregister: ifp not found!!!\n");
		return 0;
	}
	poll_handlers--;
	if (i < poll_handlers) { /* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	mtx_unlock(&Giant);
	return 1;
}

static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;
	int pri;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	mtx_lock_spin(&sched_lock);
	rtp_to_pri(&rtp, td->td_ksegrp);
	pri = td->td_priority;
	mtx_unlock_spin(&sched_lock);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			mtx_lock(&Giant);
			ether_poll(poll_each_burst);
			mtx_unlock(&Giant);
			mtx_assert(&Giant, MA_NOTOWNED);
			mtx_lock_spin(&sched_lock);
			td->td_proc->p_stats->p_ru.ru_nvcsw++;
			mi_switch();
			mtx_unlock_spin(&sched_lock);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, pri, "pollid", hz * 3);
		}
	}
}

static struct proc *idlepoll;
static struct kproc_desc idlepoll_kp = {
	 "idlepoll",
	 poll_idle,
	 &idlepoll
};
SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start, &idlepoll_kp)