/*	$OpenBSD: ifq.c,v 1.40 2020/06/17 06:45:22 dlg Exp $ */

/*
 * Copyright (c) 2015 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

/*
 * priq glue
 */
unsigned int	 priq_idx(unsigned int, const struct mbuf *);
struct mbuf	*priq_enq(struct ifqueue *, struct mbuf *);
struct mbuf	*priq_deq_begin(struct ifqueue *, void **);
void		 priq_deq_commit(struct ifqueue *, struct mbuf *, void *);
void		 priq_purge(struct ifqueue *, struct mbuf_list *);

void		*priq_alloc(unsigned int, void *);
void		 priq_free(unsigned int, void *);

const struct ifq_ops priq_ops = {
	priq_idx,
	priq_enq,
	priq_deq_begin,
	priq_deq_commit,
	priq_purge,
	priq_alloc,
	priq_free,
};

const struct ifq_ops * const ifq_priq_ops = &priq_ops;

/*
 * priq internal structures
 */

struct priq {
	struct mbuf_list	 pq_lists[IFQ_NQUEUES];
};

/*
 * ifqueue serialiser
 */

void	ifq_start_task(void *);
void	ifq_restart_task(void *);
void	ifq_barrier_task(void *);
void	ifq_bundle_task(void *);

static inline void
ifq_run_start(struct ifqueue *ifq)
{
	ifq_serialize(ifq, &ifq->ifq_start);
}

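/*
 * ifq_serialize() runs queued tasks on behalf of an ifqueue without
 * sleeping. A task is put on ifq_task_list under ifq_task_mtx; the
 * first context to find ifq_serializer unclaimed takes it and runs
 * tasks (including any queued by other CPUs in the meantime) until
 * the list drains. Other contexts just queue their task and return,
 * so work for the same ifq is never run concurrently.
 */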
void
ifq_serialize(struct ifqueue *ifq, struct task *t)
{
	struct task work;

	if (ISSET(t->t_flags, TASK_ONQUEUE))
		return;

	mtx_enter(&ifq->ifq_task_mtx);
	if (!ISSET(t->t_flags, TASK_ONQUEUE)) {
		SET(t->t_flags, TASK_ONQUEUE);
		TAILQ_INSERT_TAIL(&ifq->ifq_task_list, t, t_entry);
	}

	if (ifq->ifq_serializer == NULL) {
		ifq->ifq_serializer = curcpu();

		while ((t = TAILQ_FIRST(&ifq->ifq_task_list)) != NULL) {
			TAILQ_REMOVE(&ifq->ifq_task_list, t, t_entry);
			CLR(t->t_flags, TASK_ONQUEUE);
			work = *t; /* copy to caller to avoid races */

			mtx_leave(&ifq->ifq_task_mtx);

			(*work.t_func)(work.t_arg);

			mtx_enter(&ifq->ifq_task_mtx);
		}

		ifq->ifq_serializer = NULL;
	}
	mtx_leave(&ifq->ifq_task_mtx);
}

int
ifq_is_serialized(struct ifqueue *ifq)
{
	return (ifq->ifq_serializer == curcpu());
}

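/*
 * ifq_start() either runs the driver start routine now (via the
 * serializer) once enough packets have built up, or defers it to the
 * ifq_bundle task on the softnet taskq so small bursts can be
 * coalesced into fewer calls into the driver.
 */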
void
ifq_start(struct ifqueue *ifq)
{
	if (ifq_len(ifq) >= min(ifq->ifq_if->if_txmit, ifq->ifq_maxlen)) {
		task_del(ifq->ifq_softnet, &ifq->ifq_bundle);
		ifq_run_start(ifq);
	} else
		task_add(ifq->ifq_softnet, &ifq->ifq_bundle);
}

void
ifq_start_task(void *p)
{
	struct ifqueue *ifq = p;
	struct ifnet *ifp = ifq->ifq_if;

	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
	    ifq_empty(ifq) || ifq_is_oactive(ifq))
		return;

	ifp->if_qstart(ifq);
}

void
ifq_restart_task(void *p)
{
	struct ifqueue *ifq = p;
	struct ifnet *ifp = ifq->ifq_if;

	ifq_clr_oactive(ifq);
	ifp->if_qstart(ifq);
}

void
ifq_bundle_task(void *p)
{
	struct ifqueue *ifq = p;

	ifq_run_start(ifq);
}

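/*
 * ifq_barrier() waits until work queued on the serializer ahead of it
 * has finished. If nothing is being serialized it returns right away;
 * otherwise it queues a task that signals a condition and sleeps
 * until that task has run.
 */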
void
ifq_barrier(struct ifqueue *ifq)
{
	struct cond c = COND_INITIALIZER();
	struct task t = TASK_INITIALIZER(ifq_barrier_task, &c);

	task_del(ifq->ifq_softnet, &ifq->ifq_bundle);

	if (ifq->ifq_serializer == NULL)
		return;

	ifq_serialize(ifq, &t);

	cond_wait(&c, "ifqbar");
}

void
ifq_barrier_task(void *p)
{
	struct cond *c = p;

	cond_signal(c);
}

/*
 * ifqueue mbuf queue API
 */

void
ifq_init(struct ifqueue *ifq, struct ifnet *ifp, unsigned int idx)
{
	ifq->ifq_if = ifp;
	ifq->ifq_softnet = net_tq(ifp->if_index); /* + idx */
	ifq->ifq_softc = NULL;

	mtx_init(&ifq->ifq_mtx, IPL_NET);

	/* default to priq */
	ifq->ifq_ops = &priq_ops;
	ifq->ifq_q = priq_ops.ifqop_alloc(idx, NULL);

	ml_init(&ifq->ifq_free);
	ifq->ifq_len = 0;

	ifq->ifq_packets = 0;
	ifq->ifq_bytes = 0;
	ifq->ifq_qdrops = 0;
	ifq->ifq_errors = 0;
	ifq->ifq_mcasts = 0;

	mtx_init(&ifq->ifq_task_mtx, IPL_NET);
	TAILQ_INIT(&ifq->ifq_task_list);
	ifq->ifq_serializer = NULL;
	task_set(&ifq->ifq_bundle, ifq_bundle_task, ifq);

	task_set(&ifq->ifq_start, ifq_start_task, ifq);
	task_set(&ifq->ifq_restart, ifq_restart_task, ifq);

	if (ifq->ifq_maxlen == 0)
		ifq_set_maxlen(ifq, IFQ_MAXLEN);

	ifq->ifq_idx = idx;
}

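/*
 * ifq_attach() swaps the queueing discipline on a live ifqueue. The
 * packets held by the old discipline are purged into a local list and
 * offered to the new one; anything the new discipline refuses is
 * counted as a drop and freed after the ifq mutex is released.
 */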
void
ifq_attach(struct ifqueue *ifq, const struct ifq_ops *newops, void *opsarg)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf_list free_ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	const struct ifq_ops *oldops;
	void *newq, *oldq;

	newq = newops->ifqop_alloc(ifq->ifq_idx, opsarg);

	mtx_enter(&ifq->ifq_mtx);
	ifq->ifq_ops->ifqop_purge(ifq, &ml);
	ifq->ifq_len = 0;

	oldops = ifq->ifq_ops;
	oldq = ifq->ifq_q;

	ifq->ifq_ops = newops;
	ifq->ifq_q = newq;

	while ((m = ml_dequeue(&ml)) != NULL) {
		m = ifq->ifq_ops->ifqop_enq(ifq, m);
		if (m != NULL) {
			ifq->ifq_qdrops++;
			ml_enqueue(&free_ml, m);
		} else
			ifq->ifq_len++;
	}
	mtx_leave(&ifq->ifq_mtx);

	oldops->ifqop_free(ifq->ifq_idx, oldq);

	ml_purge(&free_ml);
}

void
ifq_destroy(struct ifqueue *ifq)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();

	NET_ASSERT_UNLOCKED();
	if (!task_del(ifq->ifq_softnet, &ifq->ifq_bundle))
		taskq_barrier(ifq->ifq_softnet);

	/* don't need to lock because this is the last use of the ifq */

	ifq->ifq_ops->ifqop_purge(ifq, &ml);
	ifq->ifq_ops->ifqop_free(ifq->ifq_idx, ifq->ifq_q);

	ml_purge(&ml);
}

void
ifq_add_data(struct ifqueue *ifq, struct if_data *data)
{
	mtx_enter(&ifq->ifq_mtx);
	data->ifi_opackets += ifq->ifq_packets;
	data->ifi_obytes += ifq->ifq_bytes;
	data->ifi_oqdrops += ifq->ifq_qdrops;
	data->ifi_omcasts += ifq->ifq_mcasts;
	/* ifp->if_data.ifi_oerrors */
	mtx_leave(&ifq->ifq_mtx);
}

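/*
 * ifq_enqueue() hands a packet to the current discipline. The enqueue
 * op may accept it (returns NULL), accept it but drop another packet
 * to make room (returns the victim), or reject it outright (returns
 * the packet itself), in which case ENOBUFS is reported to the caller.
 */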
int
ifq_enqueue(struct ifqueue *ifq, struct mbuf *m)
{
	struct mbuf *dm;

	mtx_enter(&ifq->ifq_mtx);
	dm = ifq->ifq_ops->ifqop_enq(ifq, m);
	if (dm != m) {
		ifq->ifq_packets++;
		ifq->ifq_bytes += m->m_pkthdr.len;
		if (ISSET(m->m_flags, M_MCAST))
			ifq->ifq_mcasts++;
	}

	if (dm == NULL)
		ifq->ifq_len++;
	else
		ifq->ifq_qdrops++;
	mtx_leave(&ifq->ifq_mtx);

	if (dm != NULL)
		m_freem(dm);

	return (dm == m ? ENOBUFS : 0);
}

static inline void
ifq_deq_enter(struct ifqueue *ifq)
{
	mtx_enter(&ifq->ifq_mtx);
}

static inline void
ifq_deq_leave(struct ifqueue *ifq)
{
	struct mbuf_list ml;

	ml = ifq->ifq_free;
	ml_init(&ifq->ifq_free);

	mtx_leave(&ifq->ifq_mtx);

	if (!ml_empty(&ml))
		ml_purge(&ml);
}

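/*
 * Dequeue follows a begin/commit/rollback pattern: ifq_deq_begin()
 * peeks at the next packet and returns with the ifq mutex held,
 * stashing the discipline's cookie in ph_cookie; ifq_deq_commit()
 * removes the packet and releases the mutex; ifq_deq_rollback()
 * releases the mutex and leaves the packet on the queue.
 */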
struct mbuf *
ifq_deq_begin(struct ifqueue *ifq)
{
	struct mbuf *m = NULL;
	void *cookie;

	ifq_deq_enter(ifq);
	if (ifq->ifq_len == 0 ||
	    (m = ifq->ifq_ops->ifqop_deq_begin(ifq, &cookie)) == NULL) {
		ifq_deq_leave(ifq);
		return (NULL);
	}

	m->m_pkthdr.ph_cookie = cookie;

	return (m);
}

void
ifq_deq_commit(struct ifqueue *ifq, struct mbuf *m)
{
	void *cookie;

	KASSERT(m != NULL);
	cookie = m->m_pkthdr.ph_cookie;

	ifq->ifq_ops->ifqop_deq_commit(ifq, m, cookie);
	ifq->ifq_len--;
	ifq_deq_leave(ifq);
}

void
ifq_deq_rollback(struct ifqueue *ifq, struct mbuf *m)
{
	KASSERT(m != NULL);

	ifq_deq_leave(ifq);
}

struct mbuf *
ifq_dequeue(struct ifqueue *ifq)
{
	struct mbuf *m;

	m = ifq_deq_begin(ifq);
	if (m == NULL)
		return (NULL);

	ifq_deq_commit(ifq, m);

	return (m);
}

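/*
 * ifq_deq_sleep() is a blocking dequeue: it sleeps on the ifq until a
 * packet arrives, the caller's device goes away (*alive becomes zero,
 * giving ENXIO), or the sleep is interrupted. With nbio set it returns
 * EWOULDBLOCK instead of sleeping on an empty queue.
 */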
int
ifq_deq_sleep(struct ifqueue *ifq, struct mbuf **mp, int nbio, int priority,
    const char *wmesg, volatile unsigned int *sleeping,
    volatile unsigned int *alive)
{
	struct mbuf *m;
	void *cookie;
	int error = 0;

	ifq_deq_enter(ifq);
	if (ifq->ifq_len == 0 && nbio)
		error = EWOULDBLOCK;
	else {
		for (;;) {
			m = ifq->ifq_ops->ifqop_deq_begin(ifq, &cookie);
			if (m != NULL) {
				ifq->ifq_ops->ifqop_deq_commit(ifq, m, cookie);
				ifq->ifq_len--;
				*mp = m;
				break;
			}

			(*sleeping)++;
			error = msleep_nsec(ifq, &ifq->ifq_mtx,
			    priority, wmesg, INFSLP);
			(*sleeping)--;
			if (error != 0)
				break;
			if (!(*alive)) {
				error = ENXIO;
				break;
			}
		}
	}
	ifq_deq_leave(ifq);

	return (error);
}

int
ifq_hdatalen(struct ifqueue *ifq)
{
	struct mbuf *m;
	int len = 0;

	m = ifq_deq_begin(ifq);
	if (m != NULL) {
		len = m->m_pkthdr.len;
		ifq_deq_rollback(ifq, m);
	}

	return (len);
}

unsigned int
ifq_purge(struct ifqueue *ifq)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	unsigned int rv;

	mtx_enter(&ifq->ifq_mtx);
	ifq->ifq_ops->ifqop_purge(ifq, &ml);
	rv = ifq->ifq_len;
	ifq->ifq_len = 0;
	ifq->ifq_qdrops += rv;
	mtx_leave(&ifq->ifq_mtx);

	KASSERT(rv == ml_len(&ml));

	ml_purge(&ml);

	return (rv);
}

void *
ifq_q_enter(struct ifqueue *ifq, const struct ifq_ops *ops)
{
	mtx_enter(&ifq->ifq_mtx);
	if (ifq->ifq_ops == ops)
		return (ifq->ifq_q);

	mtx_leave(&ifq->ifq_mtx);

	return (NULL);
}

void
ifq_q_leave(struct ifqueue *ifq, void *q)
{
	KASSERT(q == ifq->ifq_q);
	mtx_leave(&ifq->ifq_mtx);
}

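/*
 * ifq_mfreem() and ifq_mfreeml() let a discipline drop packets while
 * the ifq mutex is held: the mbufs are moved onto ifq_free and are
 * actually freed by ifq_deq_leave() once the mutex has been released.
 */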
void
ifq_mfreem(struct ifqueue *ifq, struct mbuf *m)
{
	MUTEX_ASSERT_LOCKED(&ifq->ifq_mtx);

	ifq->ifq_len--;
	ifq->ifq_qdrops++;
	ml_enqueue(&ifq->ifq_free, m);
}

void
ifq_mfreeml(struct ifqueue *ifq, struct mbuf_list *ml)
{
	MUTEX_ASSERT_LOCKED(&ifq->ifq_mtx);

	ifq->ifq_len -= ml_len(ml);
	ifq->ifq_qdrops += ml_len(ml);
	ml_enlist(&ifq->ifq_free, ml);
}

/*
 * ifiq
 */

static void	ifiq_process(void *);

void
ifiq_init(struct ifiqueue *ifiq, struct ifnet *ifp, unsigned int idx)
{
	ifiq->ifiq_if = ifp;
	ifiq->ifiq_softnet = net_tq(ifp->if_index); /* + idx */
	ifiq->ifiq_softc = NULL;

	mtx_init(&ifiq->ifiq_mtx, IPL_NET);
	ml_init(&ifiq->ifiq_ml);
	task_set(&ifiq->ifiq_task, ifiq_process, ifiq);
	ifiq->ifiq_pressure = 0;

	ifiq->ifiq_packets = 0;
	ifiq->ifiq_bytes = 0;
	ifiq->ifiq_qdrops = 0;
	ifiq->ifiq_errors = 0;

	ifiq->ifiq_idx = idx;
}

void
ifiq_destroy(struct ifiqueue *ifiq)
{
	NET_ASSERT_UNLOCKED();
	if (!task_del(ifiq->ifiq_softnet, &ifiq->ifiq_task))
		taskq_barrier(ifiq->ifiq_softnet);

	/* don't need to lock because this is the last use of the ifiq */
	ml_purge(&ifiq->ifiq_ml);
}

unsigned int ifiq_maxlen_drop = 2048 * 5;
unsigned int ifiq_maxlen_return = 2048 * 3;

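/*
 * ifiq_input() takes a list of packets from a driver's rx path, tags
 * and taps them to bpf, and appends them to the ifiq for the softnet
 * ifiq_process task. If the backlog already exceeds ifiq_maxlen_drop
 * the whole list is dropped instead. The return value is nonzero once
 * the backlog passes ifiq_maxlen_return, which callers can use as a
 * backpressure signal.
 */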
int
ifiq_input(struct ifiqueue *ifiq, struct mbuf_list *ml)
{
	struct ifnet *ifp = ifiq->ifiq_if;
	struct mbuf *m;
	uint64_t packets;
	uint64_t bytes = 0;
	unsigned int len;
#if NBPFILTER > 0
	caddr_t if_bpf;
#endif

	if (ml_empty(ml))
		return (0);

	MBUF_LIST_FOREACH(ml, m) {
		m->m_pkthdr.ph_ifidx = ifp->if_index;
		m->m_pkthdr.ph_rtableid = ifp->if_rdomain;
		bytes += m->m_pkthdr.len;
	}
	packets = ml_len(ml);

#if NBPFILTER > 0
	if_bpf = ifp->if_bpf;
	if (if_bpf) {
		struct mbuf_list ml0 = *ml;

		ml_init(ml);

		while ((m = ml_dequeue(&ml0)) != NULL) {
			if (bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_IN))
				m_freem(m);
			else
				ml_enqueue(ml, m);
		}

		if (ml_empty(ml)) {
			mtx_enter(&ifiq->ifiq_mtx);
			ifiq->ifiq_packets += packets;
			ifiq->ifiq_bytes += bytes;
			mtx_leave(&ifiq->ifiq_mtx);

			return (0);
		}
	}
#endif

	mtx_enter(&ifiq->ifiq_mtx);
	ifiq->ifiq_packets += packets;
	ifiq->ifiq_bytes += bytes;

	len = ml_len(&ifiq->ifiq_ml);
	if (len > ifiq_maxlen_drop)
		ifiq->ifiq_qdrops += ml_len(ml);
	else
		ml_enlist(&ifiq->ifiq_ml, ml);
	mtx_leave(&ifiq->ifiq_mtx);

	if (ml_empty(ml))
		task_add(ifiq->ifiq_softnet, &ifiq->ifiq_task);
	else
		ml_purge(ml);

	return (len > ifiq_maxlen_return);
}

void
ifiq_add_data(struct ifiqueue *ifiq, struct if_data *data)
{
	mtx_enter(&ifiq->ifiq_mtx);
	data->ifi_ipackets += ifiq->ifiq_packets;
	data->ifi_ibytes += ifiq->ifiq_bytes;
	data->ifi_iqdrops += ifiq->ifiq_qdrops;
	mtx_leave(&ifiq->ifiq_mtx);
}

int
ifiq_enqueue(struct ifiqueue *ifiq, struct mbuf *m)
{
	mtx_enter(&ifiq->ifiq_mtx);
	ml_enqueue(&ifiq->ifiq_ml, m);
	mtx_leave(&ifiq->ifiq_mtx);

	task_add(ifiq->ifiq_softnet, &ifiq->ifiq_task);

	return (0);
}

static void
ifiq_process(void *arg)
{
	struct ifiqueue *ifiq = arg;
	struct mbuf_list ml;

	if (ifiq_empty(ifiq))
		return;

	mtx_enter(&ifiq->ifiq_mtx);
	ml = ifiq->ifiq_ml;
	ml_init(&ifiq->ifiq_ml);
	mtx_leave(&ifiq->ifiq_mtx);

	if_input_process(ifiq->ifiq_if, &ml);
}

int
net_ifiq_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int error = EOPNOTSUPP;
/* pressure is disabled for 6.6-release */
#if 0
	int val;

	if (namelen != 1)
		return (EISDIR);

	switch (name[0]) {
	case NET_LINK_IFRXQ_PRESSURE_RETURN:
		val = ifiq_pressure_return;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &val);
		if (error != 0)
			return (error);
		if (val < 1 || val > ifiq_pressure_drop)
			return (EINVAL);
		ifiq_pressure_return = val;
		break;
	case NET_LINK_IFRXQ_PRESSURE_DROP:
		val = ifiq_pressure_drop;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &val);
		if (error != 0)
			return (error);
		if (ifiq_pressure_return > val)
			return (EINVAL);
		ifiq_pressure_drop = val;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
#endif

	return (error);
}

/*
 * priq implementation
 */

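/*
 * priq_idx() picks the transmit queue for a packet, using the flow id
 * set by the stack when one is present.
 */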
unsigned int
priq_idx(unsigned int nqueues, const struct mbuf *m)
{
	unsigned int flow = 0;

	if (ISSET(m->m_pkthdr.csum_flags, M_FLOWID))
		flow = m->m_pkthdr.ph_flowid;

	return (flow % nqueues);
}

void *
priq_alloc(unsigned int idx, void *null)
{
	struct priq *pq;
	int i;

	pq = malloc(sizeof(struct priq), M_DEVBUF, M_WAITOK);
	for (i = 0; i < IFQ_NQUEUES; i++)
		ml_init(&pq->pq_lists[i]);
	return (pq);
}

void
priq_free(unsigned int idx, void *pq)
{
	free(pq, M_DEVBUF, sizeof(struct priq));
}

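/*
 * priq_enq() sorts packets into per-priority lists. When the ifq is
 * full it tries to make room by dropping a packet from a strictly
 * lower priority list; if there is none, the new packet itself is
 * handed back to the caller as the drop.
 */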
struct mbuf *
priq_enq(struct ifqueue *ifq, struct mbuf *m)
{
	struct priq *pq;
	struct mbuf_list *pl;
	struct mbuf *n = NULL;
	unsigned int prio;

	pq = ifq->ifq_q;
	KASSERT(m->m_pkthdr.pf.prio <= IFQ_MAXPRIO);

	/* Find a lower priority queue to drop from */
	if (ifq_len(ifq) >= ifq->ifq_maxlen) {
		for (prio = 0; prio < m->m_pkthdr.pf.prio; prio++) {
			pl = &pq->pq_lists[prio];
			if (ml_len(pl) > 0) {
				n = ml_dequeue(pl);
				goto enqueue;
			}
		}
		/*
		 * There's no lower priority queue that we can
		 * drop from so don't enqueue this one.
		 */
		return (m);
	}

 enqueue:
	pl = &pq->pq_lists[m->m_pkthdr.pf.prio];
	ml_enqueue(pl, m);

	return (n);
}

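/*
 * priq_deq_begin() scans the priority lists from highest to lowest and
 * returns the first packet it finds, leaving it on its list and
 * passing that list back as the commit cookie.
 */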
struct mbuf *
priq_deq_begin(struct ifqueue *ifq, void **cookiep)
{
	struct priq *pq = ifq->ifq_q;
	struct mbuf_list *pl;
	unsigned int prio = nitems(pq->pq_lists);
	struct mbuf *m;

	do {
		pl = &pq->pq_lists[--prio];
		m = MBUF_LIST_FIRST(pl);
		if (m != NULL) {
			*cookiep = pl;
			return (m);
		}
	} while (prio > 0);

	return (NULL);
}

void
priq_deq_commit(struct ifqueue *ifq, struct mbuf *m, void *cookie)
{
	struct mbuf_list *pl = cookie;

	KASSERT(MBUF_LIST_FIRST(pl) == m);

	ml_dequeue(pl);
}

void
priq_purge(struct ifqueue *ifq, struct mbuf_list *ml)
{
	struct priq *pq = ifq->ifq_q;
	struct mbuf_list *pl;
	unsigned int prio = nitems(pq->pq_lists);

	do {
		pl = &pq->pq_lists[--prio];
		ml_enlist(ml, pl);
	} while (prio > 0);
}