/*-
 * Copyright (c) 2007-2009 Robert N. M. Watson
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract
 * to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/net/netisr.c 222531 2011-05-31 15:11:43Z nwhitehorn $");

/*
 * netisr is a packet dispatch service, allowing synchronous (directly
 * dispatched) and asynchronous (deferred dispatch) processing of packets by
 * registered protocol handlers.  Callers pass a protocol identifier and
 * packet to netisr, along with a direct dispatch hint, and work will either
 * be immediately processed by the registered handler, or passed to a
 * software interrupt (SWI) thread for deferred dispatch.  Callers will
 * generally select one or the other based on:
 *
 * - Whether directly dispatching a netisr handler leads to code reentrance
 *   or lock recursion, such as entering the socket code from the socket
 *   code.
 * - Whether directly dispatching a netisr handler leads to recursive
 *   processing, such as when decapsulating several wrapped layers of tunnel
 *   information (IPSEC within IPSEC within ...).
 *
 * Maintaining ordering for protocol streams is a critical design concern.
 * Enforcing ordering limits the opportunity for concurrency, but maintains
 * the strong ordering requirements found in some protocols, such as TCP.  Of
 * related concern is CPU affinity--it is desirable to process all data
 * associated with a particular stream on the same CPU over time in order to
 * avoid acquiring locks associated with the connection on different CPUs,
 * keep connection data in one cache, and to generally encourage associated
 * user threads to live on the same CPU as the stream.  It's also desirable
 * to avoid lock migration and contention where locks are associated with
 * more than one flow.
 *
 * netisr supports several policy variations, represented by the
 * NETISR_POLICY_* constants, allowing protocols to play various roles in
 * identifying flows, assigning work to CPUs, etc.  These are described in
 * netisr.h.
 */
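
/*
 * As an illustrative, hedged sketch of the caller-side interface (NETISR_FOO
 * and the mbuf m are hypothetical stand-ins, not definitions from this
 * file): a protocol that has classified a received packet hands it to netisr
 * with either
 *
 *	error = netisr_dispatch(NETISR_FOO, m);
 *
 * which may run the registered handler immediately in the calling context,
 * or
 *
 *	error = netisr_queue(NETISR_FOO, m);
 *
 * which always defers the packet to a SWI thread.  In both cases netisr
 * consumes the mbuf; on ENOBUFS it has already been freed.
 */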

#include "opt_ddb.h"
#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#define	_WANT_NETISR_INTERNAL	/* Enable definitions from netisr_internal.h */
#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/netisr_internal.h>
#include <net/vnet.h>

/*-
 * Synchronize use and modification of the registered netisr data structures;
 * a write lock is acquired while modifying the set of registered protocols
 * to prevent partially registered or unregistered protocols from being run,
 * while read locks cover use of that state in the packet paths.
 *
 * The following data structures and fields are protected by this lock:
 *
 * - The netisr_proto array, including all fields of struct netisr_proto.
 * - The nws array, including all fields of struct netisr_workstream.
 * - The nws_array array.
 *
 * Note: the NETISR_LOCKING define controls whether read locks are acquired
 * in packet processing paths requiring netisr registration stability.  This
 * is disabled by default as it can lead to measurable performance
 * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
 * because netisr registration and unregistration are extremely rare at
 * runtime.  If they become more common, this decision should be revisited.
 *
 * XXXRW: rmlocks don't support assertions.
 */
static struct rmlock	netisr_rmlock;
#define	NETISR_LOCK_INIT()	rm_init_flags(&netisr_rmlock, "netisr", \
				    RM_NOWITNESS)
#define	NETISR_LOCK_ASSERT()
#define	NETISR_RLOCK(tracker)	rm_rlock(&netisr_rmlock, (tracker))
#define	NETISR_RUNLOCK(tracker)	rm_runlock(&netisr_rmlock, (tracker))
#define	NETISR_WLOCK()		rm_wlock(&netisr_rmlock)
#define	NETISR_WUNLOCK()	rm_wunlock(&netisr_rmlock)
/* #define	NETISR_LOCKING */
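
/*
 * When NETISR_LOCKING is enabled, packet-path code brackets its use of the
 * registration state with a read lock, as the dispatch and queue routines
 * below do:
 *
 *	struct rm_priotracker tracker;
 *
 *	NETISR_RLOCK(&tracker);
 *	... use netisr_proto[], nws, and nws_array ...
 *	NETISR_RUNLOCK(&tracker);
 */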

SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr");

/*-
 * Three global direct dispatch policies are supported:
 *
 * NETISR_DISPATCH_DEFERRED: All work is deferred for a netisr, regardless of
 * context (may be overridden by protocols).
 *
 * NETISR_DISPATCH_HYBRID: If the executing context allows direct dispatch,
 * and we're running on the CPU the work would be performed on, then direct
 * dispatch it if it wouldn't violate ordering constraints on the workstream.
 *
 * NETISR_DISPATCH_DIRECT: If the executing context allows direct dispatch,
 * always direct dispatch.  (The default.)
 *
 * Notice that changing the global policy could lead to short periods of
 * misordered processing, but this is considered acceptable as compared to
 * the complexity of enforcing ordering during policy changes.  Protocols can
 * override the global policy (when they're not doing that, they select
 * NETISR_DISPATCH_DEFAULT).
 */
#define	NETISR_DISPATCH_POLICY_DEFAULT	NETISR_DISPATCH_DIRECT
#define	NETISR_DISPATCH_POLICY_MAXSTR	20 /* Used for temporary buffers. */
static u_int	netisr_dispatch_policy = NETISR_DISPATCH_POLICY_DEFAULT;
static int	sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_net_isr, OID_AUTO, dispatch, CTLTYPE_STRING | CTLFLAG_RW |
    CTLFLAG_TUN, 0, 0, sysctl_netisr_dispatch_policy, "A",
    "netisr dispatch policy");

/*
 * These sysctls were used in previous versions to control and export
 * dispatch policy state.  Now, we provide read-only export via them so that
 * older netstat binaries work.  At some point they can be garbage collected.
 */
static int	netisr_direct_force;
SYSCTL_INT(_net_isr, OID_AUTO, direct_force, CTLFLAG_RD,
    &netisr_direct_force, 0, "compat: force direct dispatch");

static int	netisr_direct;
SYSCTL_INT(_net_isr, OID_AUTO, direct, CTLFLAG_RD, &netisr_direct, 0,
    "compat: enable direct dispatch");

/*
 * Allow the administrator to limit the number of threads (CPUs) to use for
 * netisr.  We don't check netisr_maxthreads before creating the thread for
 * CPU 0, so in practice we ignore values <= 1.  This must be set at boot.
 * We will create at most one thread per CPU.
 */
static int	netisr_maxthreads = -1;		/* Max number of threads. */
TUNABLE_INT("net.isr.maxthreads", &netisr_maxthreads);
SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN,
    &netisr_maxthreads, 0,
    "Use at most this many CPUs for netisr processing");

static int	netisr_bindthreads = 0;		/* Bind threads to CPUs. */
TUNABLE_INT("net.isr.bindthreads", &netisr_bindthreads);
SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
    &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");

/*
 * Limit per-workstream mbuf queue limits to at most net.isr.maxqlimit, both
 * for initial configuration and later modification using
 * netisr_setqlimit().
 */
#define	NETISR_DEFAULT_MAXQLIMIT	10240
static u_int	netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
TUNABLE_INT("net.isr.maxqlimit", &netisr_maxqlimit);
SYSCTL_UINT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
    &netisr_maxqlimit, 0,
    "Maximum netisr per-protocol, per-CPU queue depth.");

/*
 * The default per-workstream mbuf queue limit for protocols that don't
 * initialize the nh_qlimit field of their struct netisr_handler.  If this is
 * set above netisr_maxqlimit, we truncate it to the maximum during boot.
 */
#define	NETISR_DEFAULT_DEFAULTQLIMIT	256
static u_int	netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
TUNABLE_INT("net.isr.defaultqlimit", &netisr_defaultqlimit);
SYSCTL_UINT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN,
    &netisr_defaultqlimit, 0,
    "Default netisr per-protocol, per-CPU queue limit if not set by protocol");

/*
 * Store and export the compile-time constant NETISR_MAXPROT limit on the
 * number of protocols that can register with netisr at a time.  This is
 * required for crashdump analysis, as it sizes netisr_proto[].
 */
static u_int	netisr_maxprot = NETISR_MAXPROT;
SYSCTL_UINT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD,
    &netisr_maxprot, 0,
    "Compile-time limit on the number of protocols supported by netisr.");

/*
 * The netisr_proto array describes all registered protocols, indexed by
 * protocol number.  See netisr_internal.h for more details.
 */
static struct netisr_proto	netisr_proto[NETISR_MAXPROT];

/*
 * Per-CPU workstream data.  See netisr_internal.h for more details.
 */
DPCPU_DEFINE(struct netisr_workstream, nws);

/*
 * Map contiguous values between 0 and nws_count into CPU IDs appropriate for
 * accessing workstreams.  This allows constructions of the form
 * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws).
 */
static u_int				 nws_array[MAXCPU];

/*
 * Number of registered workstreams.  Will be at most the number of running
 * CPUs once fully started.
 */
static u_int				 nws_count;
SYSCTL_UINT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
    &nws_count, 0, "Number of extant netisr threads.");

/*
 * Synchronization for each workstream: a mutex protects all mutable fields
 * in each stream, including per-protocol state (mbuf queues).  The SWI is
 * woken up if asynchronous dispatch is required.
 */
#define	NWS_LOCK(s)		mtx_lock(&(s)->nws_mtx)
#define	NWS_LOCK_ASSERT(s)	mtx_assert(&(s)->nws_mtx, MA_OWNED)
#define	NWS_UNLOCK(s)		mtx_unlock(&(s)->nws_mtx)
#define	NWS_SIGNAL(s)		swi_sched((s)->nws_swi_cookie, 0)

/*
 * Utility routines for protocols that implement their own mapping of flows
 * to CPUs.
 */
u_int
netisr_get_cpucount(void)
{

	return (nws_count);
}

u_int
netisr_get_cpuid(u_int cpunumber)
{

	KASSERT(cpunumber < nws_count, ("%s: %u >= %u", __func__, cpunumber,
	    nws_count));

	return (nws_array[cpunumber]);
}

/*
 * The default implementation of flow -> CPU ID mapping.
 *
 * Non-static so that protocols can use it to map their own work to specific
 * CPUs in a manner consistent with netisr for affinity purposes.
 */
u_int
netisr_default_flow2cpu(u_int flowid)
{

	return (nws_array[flowid % nws_count]);
}
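
/*
 * A NETISR_POLICY_CPU protocol might build its nh_m2cpuid callback on these
 * helpers.  This sketch (foo_m2cpuid is hypothetical) simply reproduces the
 * default flow hashing and otherwise defers the decision back to netisr:
 *
 *	static struct mbuf *
 *	foo_m2cpuid(struct mbuf *m, uintptr_t source, u_int *cpuidp)
 *	{
 *
 *		if (m->m_flags & M_FLOWID)
 *			*cpuidp = netisr_default_flow2cpu(m->m_pkthdr.flowid);
 *		else
 *			*cpuidp = NETISR_CPUID_NONE;
 *		return (m);
 *	}
 */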

/*
 * Dispatch tunable and sysctl configuration.
 */
struct netisr_dispatch_table_entry {
	u_int		 ndte_policy;
	const char	*ndte_policy_str;
};
static const struct netisr_dispatch_table_entry netisr_dispatch_table[] = {
	{ NETISR_DISPATCH_DEFAULT, "default" },
	{ NETISR_DISPATCH_DEFERRED, "deferred" },
	{ NETISR_DISPATCH_HYBRID, "hybrid" },
	{ NETISR_DISPATCH_DIRECT, "direct" },
};
static const u_int netisr_dispatch_table_len =
    (sizeof(netisr_dispatch_table) / sizeof(netisr_dispatch_table[0]));

static void
netisr_dispatch_policy_to_str(u_int dispatch_policy, char *buffer,
    u_int buflen)
{
	const struct netisr_dispatch_table_entry *ndtep;
	const char *str;
	u_int i;

	str = "unknown";
	for (i = 0; i < netisr_dispatch_table_len; i++) {
		ndtep = &netisr_dispatch_table[i];
		if (ndtep->ndte_policy == dispatch_policy) {
			str = ndtep->ndte_policy_str;
			break;
		}
	}
	snprintf(buffer, buflen, "%s", str);
}

static int
netisr_dispatch_policy_from_str(const char *str, u_int *dispatch_policyp)
{
	const struct netisr_dispatch_table_entry *ndtep;
	u_int i;

	for (i = 0; i < netisr_dispatch_table_len; i++) {
		ndtep = &netisr_dispatch_table[i];
		if (strcmp(ndtep->ndte_policy_str, str) == 0) {
			*dispatch_policyp = ndtep->ndte_policy;
			return (0);
		}
	}
	return (EINVAL);
}

static void
netisr_dispatch_policy_compat(void)
{

	switch (netisr_dispatch_policy) {
	case NETISR_DISPATCH_DEFERRED:
		netisr_direct_force = 0;
		netisr_direct = 0;
		break;

	case NETISR_DISPATCH_HYBRID:
		netisr_direct_force = 0;
		netisr_direct = 1;
		break;

	case NETISR_DISPATCH_DIRECT:
		netisr_direct_force = 1;
		netisr_direct = 1;
		break;

	default:
		panic("%s: unknown policy %u", __func__,
		    netisr_dispatch_policy);
	}
}

static int
sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS)
{
	char tmp[NETISR_DISPATCH_POLICY_MAXSTR];
	u_int dispatch_policy;
	int error;

	netisr_dispatch_policy_to_str(netisr_dispatch_policy, tmp,
	    sizeof(tmp));
	error = sysctl_handle_string(oidp, tmp, sizeof(tmp), req);
	if (error == 0 && req->newptr != NULL) {
		error = netisr_dispatch_policy_from_str(tmp,
		    &dispatch_policy);
		if (error == 0 && dispatch_policy == NETISR_DISPATCH_DEFAULT)
			error = EINVAL;
		if (error == 0) {
			netisr_dispatch_policy = dispatch_policy;
			netisr_dispatch_policy_compat();
		}
	}
	return (error);
}

/*
 * Register a new netisr handler, which requires initializing per-protocol
 * fields for each workstream.  All netisr work is briefly suspended while
 * the protocol is installed.
 */
void
netisr_register(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
	const char *name;
	u_int i, proto;

	proto = nhp->nh_proto;
	name = nhp->nh_name;

	/*
	 * Test that the requested registration is valid.
	 */
	KASSERT(nhp->nh_name != NULL,
	    ("%s: nh_name NULL for %u", __func__, proto));
	KASSERT(nhp->nh_handler != NULL,
	    ("%s: nh_handler NULL for %s", __func__, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE ||
	    nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_policy == NETISR_POLICY_CPU,
	    ("%s: unsupported nh_policy %u for %s", __func__,
	    nhp->nh_policy, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_m2flow == NULL,
	    ("%s: nh_policy != FLOW but m2flow defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL,
	    ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL,
	    ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_dispatch == NETISR_DISPATCH_DEFAULT ||
	    nhp->nh_dispatch == NETISR_DISPATCH_DEFERRED ||
	    nhp->nh_dispatch == NETISR_DISPATCH_HYBRID ||
	    nhp->nh_dispatch == NETISR_DISPATCH_DIRECT,
	    ("%s: invalid nh_dispatch (%u)", __func__, nhp->nh_dispatch));

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u, %s): protocol too big", __func__, proto, name));

	/*
	 * Test that no existing registration exists for this protocol.
	 */
	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_name == NULL,
	    ("%s(%u, %s): name present", __func__, proto, name));
	KASSERT(netisr_proto[proto].np_handler == NULL,
	    ("%s(%u, %s): handler present", __func__, proto, name));

	netisr_proto[proto].np_name = name;
	netisr_proto[proto].np_handler = nhp->nh_handler;
	netisr_proto[proto].np_m2flow = nhp->nh_m2flow;
	netisr_proto[proto].np_m2cpuid = nhp->nh_m2cpuid;
	netisr_proto[proto].np_drainedcpu = nhp->nh_drainedcpu;
	if (nhp->nh_qlimit == 0)
		netisr_proto[proto].np_qlimit = netisr_defaultqlimit;
	else if (nhp->nh_qlimit > netisr_maxqlimit) {
		printf("%s: %s requested queue limit %u capped to "
		    "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit,
		    netisr_maxqlimit);
		netisr_proto[proto].np_qlimit = netisr_maxqlimit;
	} else
		netisr_proto[proto].np_qlimit = nhp->nh_qlimit;
	netisr_proto[proto].np_policy = nhp->nh_policy;
	netisr_proto[proto].np_dispatch = nhp->nh_dispatch;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		bzero(npwp, sizeof(*npwp));
		npwp->nw_qlimit = netisr_proto[proto].np_qlimit;
	}
	NETISR_WUNLOCK();
}
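
/*
 * A minimal registration sketch (the "foo" protocol, NETISR_FOO, and
 * foo_input() are hypothetical):
 *
 *	static struct netisr_handler foo_nh = {
 *		.nh_name = "foo",
 *		.nh_handler = foo_input,
 *		.nh_proto = NETISR_FOO,
 *		.nh_policy = NETISR_POLICY_SOURCE,
 *		.nh_dispatch = NETISR_DISPATCH_DEFAULT,
 *	};
 *
 *	netisr_register(&foo_nh);
 *
 * Leaving nh_qlimit at zero selects net.isr.defaultqlimit, and
 * NETISR_DISPATCH_DEFAULT defers to the global dispatch policy.
 */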

/*
 * Clear drop counters across all workstreams for a protocol.
 */
void
netisr_clearqdrops(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qdrops = 0;
	}
	NETISR_WUNLOCK();
}

/*
 * Query current drop counters across all workstreams for a protocol.
 */
void
netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
{
	struct netisr_work *npwp;
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	*qdropp = 0;
	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		*qdropp += npwp->nw_qdrops;
	}
	NETISR_RUNLOCK(&tracker);
}

/*
 * Query current per-workstream queue limit for a protocol.
 */
void
netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp)
{
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));
	*qlimitp = netisr_proto[proto].np_qlimit;
	NETISR_RUNLOCK(&tracker);
}

/*
 * Update the queue limit across per-workstream queues for a protocol.  We
 * simply change the limits, and don't drain overflowed packets as they will
 * (hopefully) take care of themselves shortly.
 */
int
netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	if (qlimit > netisr_maxqlimit)
		return (EINVAL);

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	netisr_proto[proto].np_qlimit = qlimit;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qlimit = qlimit;
	}
	NETISR_WUNLOCK();
	return (0);
}

/*
 * Drain all packets currently held in a particular protocol work queue.
 */
static void
netisr_drain_proto(struct netisr_work *npwp)
{
	struct mbuf *m;

	/*
	 * We would assert the lock on the workstream but it's not passed in.
	 */
	while ((m = npwp->nw_head) != NULL) {
		npwp->nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL)
			npwp->nw_tail = NULL;
		npwp->nw_len--;
		m_freem(m);
	}
	KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__));
	KASSERT(npwp->nw_len == 0, ("%s: len", __func__));
}

/*
 * Remove the registration of a network protocol, which requires clearing
 * per-protocol fields across all workstreams, including freeing all mbufs in
 * the queues at time of unregister.  All work in netisr is briefly suspended
 * while this takes place.
 */
void
netisr_unregister(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	netisr_proto[proto].np_name = NULL;
	netisr_proto[proto].np_handler = NULL;
	netisr_proto[proto].np_m2flow = NULL;
	netisr_proto[proto].np_m2cpuid = NULL;
	netisr_proto[proto].np_qlimit = 0;
	netisr_proto[proto].np_policy = 0;
	CPU_FOREACH(i) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		netisr_drain_proto(npwp);
		bzero(npwp, sizeof(*npwp));
	}
	NETISR_WUNLOCK();
}

/*
 * Compose the global and per-protocol policies on dispatch, and return the
 * dispatch policy to use.
 */
static u_int
netisr_get_dispatch(struct netisr_proto *npp)
{

	/*
	 * Protocol-specific configuration overrides the global default.
	 */
	if (npp->np_dispatch != NETISR_DISPATCH_DEFAULT)
		return (npp->np_dispatch);
	return (netisr_dispatch_policy);
}

/*
 * Look up the workstream given a packet and source identifier.  Do this by
 * checking the protocol's policy, and optionally call out to the protocol
 * for assistance if required.
 */
static struct mbuf *
netisr_select_cpuid(struct netisr_proto *npp, u_int dispatch_policy,
    uintptr_t source, struct mbuf *m, u_int *cpuidp)
{
	struct ifnet *ifp;
	u_int policy;

	NETISR_LOCK_ASSERT();

	/*
	 * In the event we have only one worker, shortcut and deliver to it
	 * without further ado.
	 */
	if (nws_count == 1) {
		*cpuidp = nws_array[0];
		return (m);
	}

	/*
	 * What happens next depends on the policy selected by the protocol.
	 * If we want to support per-interface policies, we should do that
	 * here first.
	 */
	policy = npp->np_policy;
	if (policy == NETISR_POLICY_CPU) {
		m = npp->np_m2cpuid(m, source, cpuidp);
		if (m == NULL)
			return (NULL);

		/*
		 * It's possible for a protocol not to have a good idea about
		 * where to process a packet, in which case we fall back on
		 * the netisr code to decide.  In the hybrid case, return the
		 * current CPU ID, which will force an immediate direct
		 * dispatch.  In the queued case, fall back on the SOURCE
		 * policy.
		 */
		if (*cpuidp != NETISR_CPUID_NONE)
			return (m);
		if (dispatch_policy == NETISR_DISPATCH_HYBRID) {
			*cpuidp = curcpu;
			return (m);
		}
		policy = NETISR_POLICY_SOURCE;
	}

	if (policy == NETISR_POLICY_FLOW) {
		if (!(m->m_flags & M_FLOWID) && npp->np_m2flow != NULL) {
			m = npp->np_m2flow(m, source);
			if (m == NULL)
				return (NULL);
		}
		if (m->m_flags & M_FLOWID) {
			*cpuidp =
			    netisr_default_flow2cpu(m->m_pkthdr.flowid);
			return (m);
		}
		policy = NETISR_POLICY_SOURCE;
	}

	KASSERT(policy == NETISR_POLICY_SOURCE,
	    ("%s: invalid policy %u for %s", __func__, npp->np_policy,
	    npp->np_name));

	ifp = m->m_pkthdr.rcvif;
	if (ifp != NULL)
		*cpuidp = nws_array[(ifp->if_index + source) % nws_count];
	else
		*cpuidp = nws_array[source % nws_count];
	return (m);
}

/*
 * Process packets associated with a workstream and protocol.  For reasons of
 * fairness, we process up to one complete netisr queue at a time, moving the
 * queue to a stack-local queue for processing, but do not loop refreshing
 * from the global queue.  The caller is responsible for deciding whether to
 * loop, and for setting the NWS_RUNNING flag.  The passed workstream will be
 * locked on entry and relocked before return, but will be released while
 * processing.  The number of packets processed is returned.
 */
static u_int
netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto)
{
	struct netisr_work local_npw, *npwp;
	u_int handled;
	struct mbuf *m;

	NETISR_LOCK_ASSERT();
	NWS_LOCK_ASSERT(nwsp);

	KASSERT(nwsp->nws_flags & NWS_RUNNING,
	    ("%s(%u): not running", __func__, proto));
	KASSERT(proto >= 0 && proto < NETISR_MAXPROT,
	    ("%s(%u): invalid proto\n", __func__, proto));

	npwp = &nwsp->nws_work[proto];
	if (npwp->nw_len == 0)
		return (0);

	/*
	 * Move the global work queue to a thread-local work queue.
	 *
	 * Notice that this means the effective maximum length of the queue
	 * is actually twice that of the maximum queue length specified in
	 * the protocol registration call.
	 */
	handled = npwp->nw_len;
	local_npw = *npwp;
	npwp->nw_head = NULL;
	npwp->nw_tail = NULL;
	npwp->nw_len = 0;
	nwsp->nws_pendingbits &= ~(1 << proto);
	NWS_UNLOCK(nwsp);
	while ((m = local_npw.nw_head) != NULL) {
		local_npw.nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (local_npw.nw_head == NULL)
			local_npw.nw_tail = NULL;
		local_npw.nw_len--;
		VNET_ASSERT(m->m_pkthdr.rcvif != NULL,
		    ("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m));
		CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
		netisr_proto[proto].np_handler(m);
		CURVNET_RESTORE();
	}
	KASSERT(local_npw.nw_len == 0,
	    ("%s(%u): len %u", __func__, proto, local_npw.nw_len));
	if (netisr_proto[proto].np_drainedcpu)
		netisr_proto[proto].np_drainedcpu(nwsp->nws_cpu);
	NWS_LOCK(nwsp);
	npwp->nw_handled += handled;
	return (handled);
}

/*
 * SWI handler for netisr -- processes packets in a set of workstreams that
 * it owns, woken up by calls to NWS_SIGNAL().  If this workstream is already
 * being direct dispatched, go back to sleep and wait for the dispatching
 * thread to wake us up again.
 */
static void
swi_net(void *arg)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	u_int bits, prot;

	nwsp = arg;

#ifdef DEVICE_POLLING
	KASSERT(nws_count == 1,
	    ("%s: device_polling but nws_count != 1", __func__));
	netisr_poll();
#endif
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	NWS_LOCK(nwsp);
	KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
	if (nwsp->nws_flags & NWS_DISPATCHING)
		goto out;
	nwsp->nws_flags |= NWS_RUNNING;
	nwsp->nws_flags &= ~NWS_SCHEDULED;
	while ((bits = nwsp->nws_pendingbits) != 0) {
		while ((prot = ffs(bits)) != 0) {
			prot--;
			bits &= ~(1 << prot);
			(void)netisr_process_workstream_proto(nwsp, prot);
		}
	}
	nwsp->nws_flags &= ~NWS_RUNNING;
out:
	NWS_UNLOCK(nwsp);
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
#ifdef DEVICE_POLLING
	netisr_pollmore();
#endif
}

static int
netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto,
    struct netisr_work *npwp, struct mbuf *m, int *dosignalp)
{

	NWS_LOCK_ASSERT(nwsp);

	*dosignalp = 0;
	if (npwp->nw_len < npwp->nw_qlimit) {
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL) {
			npwp->nw_head = m;
			npwp->nw_tail = m;
		} else {
			npwp->nw_tail->m_nextpkt = m;
			npwp->nw_tail = m;
		}
		npwp->nw_len++;
		if (npwp->nw_len > npwp->nw_watermark)
			npwp->nw_watermark = npwp->nw_len;

		/*
		 * We must set the bit regardless of NWS_RUNNING, so that
		 * swi_net() keeps calling netisr_process_workstream_proto().
		 */
		nwsp->nws_pendingbits |= (1 << proto);
		if (!(nwsp->nws_flags &
		    (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) {
			nwsp->nws_flags |= NWS_SCHEDULED;
			*dosignalp = 1;	/* Defer until unlocked. */
		}
		npwp->nw_queued++;
		return (0);
	} else {
		m_freem(m);
		npwp->nw_qdrops++;
		return (ENOBUFS);
	}
}

static int
netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;

#ifdef NETISR_LOCKING
	NETISR_LOCK_ASSERT();
#endif
	KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__,
	    cpuid, mp_maxid));
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	dosignal = 0;
	error = 0;
	nwsp = DPCPU_ID_PTR(cpuid, nws);
	npwp = &nwsp->nws_work[proto];
	NWS_LOCK(nwsp);
	error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	return (error);
}

int
netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	u_int cpuid;
	int error;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));

#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

	m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_DEFERRED,
	    source, m, &cpuid);
	if (m != NULL) {
		KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
		    cpuid));
		error = netisr_queue_internal(proto, m, cpuid);
	} else
		error = ENOBUFS;
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_queue(u_int proto, struct mbuf *m)
{

	return (netisr_queue_src(proto, 0, m));
}
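
/*
 * A hedged usage sketch (NETISR_FOO is a hypothetical protocol number):
 * callers need not free the mbuf on failure, as netisr has already done so.
 *
 *	error = netisr_queue(NETISR_FOO, m);
 *	if (error != 0) {
 *		... ENOBUFS: the queue was full and the mbuf was freed ...
 *	}
 */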

/*
 * Dispatch a packet for netisr processing; direct dispatch is permitted by
 * calling context.
 */
int
netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *npwp;
	int dosignal, error;
	u_int cpuid, dispatch_policy;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	npp = &netisr_proto[proto];
	KASSERT(npp->np_handler != NULL, ("%s: invalid proto %u", __func__,
	    proto));

	dispatch_policy = netisr_get_dispatch(npp);
	if (dispatch_policy == NETISR_DISPATCH_DEFERRED)
		return (netisr_queue_src(proto, source, m));

	/*
	 * If direct dispatch is forced, then unconditionally dispatch
	 * without a formal CPU selection.  Borrow the current CPU's stats,
	 * even if there's no worker on it.  In this case we don't update
	 * nws_flags because all netisr processing will be source ordered due
	 * to always being forced to directly dispatch.
	 */
	if (dispatch_policy == NETISR_DISPATCH_DIRECT) {
		nwsp = DPCPU_PTR(nws);
		npwp = &nwsp->nws_work[proto];
		npwp->nw_dispatched++;
		npwp->nw_handled++;
		netisr_proto[proto].np_handler(m);
		error = 0;
		goto out_unlock;
	}

	KASSERT(dispatch_policy == NETISR_DISPATCH_HYBRID,
	    ("%s: unknown dispatch policy (%u)", __func__, dispatch_policy));

	/*
	 * Otherwise, we execute in a hybrid mode where we will try to direct
	 * dispatch if we're on the right CPU and the netisr worker isn't
	 * already running.
	 */
	sched_pin();
	m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_HYBRID,
	    source, m, &cpuid);
	if (m == NULL) {
		error = ENOBUFS;
		goto out_unpin;
	}
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
	if (cpuid != curcpu)
		goto queue_fallback;
	nwsp = DPCPU_PTR(nws);
	npwp = &nwsp->nws_work[proto];

	/*-
	 * We are willing to direct dispatch only if three conditions hold:
	 *
	 * (1) The netisr worker isn't already running,
	 * (2) Another thread isn't already directly dispatching, and
	 * (3) The netisr hasn't already been woken up.
	 */
	NWS_LOCK(nwsp);
	if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
		error = netisr_queue_workstream(nwsp, proto, npwp, m,
		    &dosignal);
		NWS_UNLOCK(nwsp);
		if (dosignal)
			NWS_SIGNAL(nwsp);
		goto out_unpin;
	}

	/*
	 * The current thread is now effectively the netisr worker, so set
	 * the dispatching flag to prevent concurrent processing of the
	 * stream from another thread (even the netisr worker), which could
	 * otherwise lead to effective misordering of the stream.
	 */
	nwsp->nws_flags |= NWS_DISPATCHING;
	NWS_UNLOCK(nwsp);
	netisr_proto[proto].np_handler(m);
	NWS_LOCK(nwsp);
	nwsp->nws_flags &= ~NWS_DISPATCHING;
	npwp->nw_handled++;
	npwp->nw_hybrid_dispatched++;

	/*
	 * If other work was enqueued by another thread while we were direct
	 * dispatching, we need to signal the netisr worker to do that work.
	 * In the future, we might want to do some of that work in the
	 * current thread, rather than trigger further context switches.  If
	 * so, we'll want to establish a reasonable bound on the work done in
	 * the "borrowed" context.
	 */
	if (nwsp->nws_pendingbits != 0) {
		nwsp->nws_flags |= NWS_SCHEDULED;
		dosignal = 1;
	} else
		dosignal = 0;
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	error = 0;
	goto out_unpin;

queue_fallback:
	error = netisr_queue_internal(proto, m, cpuid);
out_unpin:
	sched_unpin();
out_unlock:
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_dispatch(u_int proto, struct mbuf *m)
{

	return (netisr_dispatch_src(proto, 0, m));
}
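
/*
 * For example (hypothetically, from a protocol input path for NETISR_FOO),
 * a single call adapts to the configured policy:
 *
 *	(void)netisr_dispatch(NETISR_FOO, m);
 *
 * Under "direct" this runs the handler in the calling context; under
 * "hybrid" it does so only when this is the packet's target CPU and the
 * worker is idle; under "deferred" it is equivalent to netisr_queue().
 */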

#ifdef DEVICE_POLLING
/*
 * Kernel polling borrows a netisr thread to run interface polling in; this
 * function allows kernel polling to request that the netisr thread be
 * scheduled even if no packets are pending for protocols.
 */
void
netisr_sched_poll(void)
{
	struct netisr_workstream *nwsp;

	nwsp = DPCPU_ID_PTR(nws_array[0], nws);
	NWS_SIGNAL(nwsp);
}
#endif

static void
netisr_start_swi(u_int cpuid, struct pcpu *pc)
{
	char swiname[12];
	struct netisr_workstream *nwsp;
	int error;

	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	nwsp = DPCPU_ID_PTR(cpuid, nws);
	mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
	nwsp->nws_cpu = cpuid;
	snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
	error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
	    SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie);
	if (error)
		panic("%s: swi_add %d", __func__, error);
	pc->pc_netisr = nwsp->nws_intr_event;
	if (netisr_bindthreads) {
		error = intr_event_bind(nwsp->nws_intr_event, cpuid);
		if (error != 0)
			printf("%s: cpu %u: intr_event_bind: %d\n", __func__,
			    cpuid, error);
	}
	NETISR_WLOCK();
	nws_array[nws_count] = nwsp->nws_cpu;
	nws_count++;
	NETISR_WUNLOCK();
}

/*
 * Initialize the netisr subsystem.  We rely on BSS and static initialization
 * of most fields in global data structures.
 *
 * Start a worker thread for the boot CPU so that we can support network
 * traffic immediately in case the network stack is used before additional
 * CPUs are started (for example, diskless boot).
 */
static void
netisr_init(void *arg)
{
	char tmp[NETISR_DISPATCH_POLICY_MAXSTR];
	u_int dispatch_policy;
	int error;

	KASSERT(curcpu == 0, ("%s: not on CPU 0", __func__));

	NETISR_LOCK_INIT();
	if (netisr_maxthreads < 1)
		netisr_maxthreads = 1;
	if (netisr_maxthreads > mp_ncpus) {
		printf("netisr_init: forcing maxthreads from %d to %d\n",
		    netisr_maxthreads, mp_ncpus);
		netisr_maxthreads = mp_ncpus;
	}
	if (netisr_defaultqlimit > netisr_maxqlimit) {
		printf("netisr_init: forcing defaultqlimit from %d to %d\n",
		    netisr_defaultqlimit, netisr_maxqlimit);
		netisr_defaultqlimit = netisr_maxqlimit;
	}
#ifdef DEVICE_POLLING
	/*
	 * The device polling code is not yet aware of how to deal with
	 * multiple netisr threads, so for the time being compiling in device
	 * polling disables parallel netisr workers.
	 */
	if (netisr_maxthreads != 1 || netisr_bindthreads != 0) {
		printf("netisr_init: forcing maxthreads to 1 and "
		    "bindthreads to 0 for device polling\n");
		netisr_maxthreads = 1;
		netisr_bindthreads = 0;
	}
#endif

	if (TUNABLE_STR_FETCH("net.isr.dispatch", tmp, sizeof(tmp))) {
		error = netisr_dispatch_policy_from_str(tmp,
		    &dispatch_policy);
		if (error == 0 && dispatch_policy == NETISR_DISPATCH_DEFAULT)
			error = EINVAL;
		if (error == 0) {
			netisr_dispatch_policy = dispatch_policy;
			netisr_dispatch_policy_compat();
		} else
			printf(
			    "%s: invalid dispatch policy %s, using default\n",
			    __func__, tmp);
	}

	netisr_start_swi(curcpu, pcpu_find(curcpu));
}
SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);

/*
 * Start worker threads for additional CPUs.  No attempt is made to
 * gracefully handle work reassignment, as we don't yet support dynamic
 * reconfiguration.
 */
static void
netisr_start(void *arg)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (nws_count >= netisr_maxthreads)
			break;
		/* XXXRW: Is skipping absent CPUs still required here? */
		if (CPU_ABSENT(pc->pc_cpuid))
			continue;
		/* Worker will already be present for boot CPU. */
		if (pc->pc_netisr != NULL)
			continue;
		netisr_start_swi(pc->pc_cpuid, pc);
	}
}
SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);

/*
 * Sysctl monitoring for netisr: query a list of registered protocols.
 */
static int
sysctl_netisr_proto(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_proto *snpp, *snp_array;
	struct netisr_proto *npp;
	u_int counter, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	for (proto = 0; proto < NETISR_MAXPROT; proto++) {
		npp = &netisr_proto[proto];
		if (npp->np_name == NULL)
			continue;
		snpp = &snp_array[counter];
		snpp->snp_version = sizeof(*snpp);
		strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN);
		snpp->snp_proto = proto;
		snpp->snp_qlimit = npp->np_qlimit;
		snpp->snp_policy = npp->np_policy;
		snpp->snp_dispatch = npp->np_dispatch;
		if (npp->np_m2flow != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW;
		if (npp->np_m2cpuid != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID;
		if (npp->np_drainedcpu != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU;
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= NETISR_MAXPROT,
	    ("sysctl_netisr_proto: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter);
	free(snp_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, proto,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto,
    "S,sysctl_netisr_proto",
    "Return list of protocols registered with netisr");

/*
 * Sysctl monitoring for netisr: query a list of workstreams.
 */
static int
sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_workstream *snwsp, *snws_array;
	struct netisr_workstream *nwsp;
	u_int counter, cpuid;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		snwsp = &snws_array[counter];
		snwsp->snws_version = sizeof(*snwsp);

		/*
		 * For now, we equate workstream IDs and CPU IDs in the
		 * kernel, but expose them independently to userspace in case
		 * that assumption changes in the future.
		 */
		snwsp->snws_wsid = cpuid;
		snwsp->snws_cpu = cpuid;
		if (nwsp->nws_intr_event != NULL)
			snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR;
		NWS_UNLOCK(nwsp);
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= MAXCPU,
	    ("sysctl_netisr_workstream: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter);
	free(snws_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, workstream,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream,
    "S,sysctl_netisr_workstream",
    "Return list of workstreams implemented by netisr");

/*
 * Sysctl monitoring for netisr: query per-protocol data across all
 * workstreams.
 */
static int
sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_work *snwp, *snw_array;
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *nwp;
	u_int counter, cpuid, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT,
	    M_TEMP, M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			npp = &netisr_proto[proto];
			if (npp->np_name == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			snwp = &snw_array[counter];
			snwp->snw_version = sizeof(*snwp);
			snwp->snw_wsid = cpuid;		/* See comment above. */
			snwp->snw_proto = proto;
			snwp->snw_len = nwp->nw_len;
			snwp->snw_watermark = nwp->nw_watermark;
			snwp->snw_dispatched = nwp->nw_dispatched;
			snwp->snw_hybrid_dispatched =
			    nwp->nw_hybrid_dispatched;
			snwp->snw_qdrops = nwp->nw_qdrops;
			snwp->snw_queued = nwp->nw_queued;
			snwp->snw_handled = nwp->nw_handled;
			counter++;
		}
		NWS_UNLOCK(nwsp);
	}
	KASSERT(counter <= MAXCPU * NETISR_MAXPROT,
	    ("sysctl_netisr_work: counter too big (%d)", counter));
	NETISR_RUNLOCK(&tracker);
	error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter);
	free(snw_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, work,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work,
    "S,sysctl_netisr_work",
    "Return list of per-workstream, per-protocol work in netisr");

#ifdef DDB
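/*
 * Dump per-CPU netisr state from the kernel debugger with "show netisr";
 * one row is printed per registered protocol on each workstream CPU.
 */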
DB_SHOW_COMMAND(netisr, db_show_netisr)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *nwp;
	int first, proto;
	u_int cpuid;

	db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
	    "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		first = 1;
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			if (netisr_proto[proto].np_handler == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			if (first) {
				db_printf("%3d ", cpuid);
				first = 0;
			} else
				db_printf("%3s ", "");
			db_printf(
			    "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
			    netisr_proto[proto].np_name, nwp->nw_len,
			    nwp->nw_watermark, nwp->nw_qlimit,
			    nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
			    nwp->nw_qdrops, nwp->nw_queued);
		}
	}
}
#endif