/*	$OpenBSD: ifq.c,v 1.38 2020/05/20 01:28:59 dlg Exp $ */

/*
 * Copyright (c) 2015 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

/*
 * priq glue
 */
unsigned int	 priq_idx(unsigned int, const struct mbuf *);
struct mbuf	*priq_enq(struct ifqueue *, struct mbuf *);
struct mbuf	*priq_deq_begin(struct ifqueue *, void **);
void		 priq_deq_commit(struct ifqueue *, struct mbuf *, void *);
void		 priq_purge(struct ifqueue *, struct mbuf_list *);

void		*priq_alloc(unsigned int, void *);
void		 priq_free(unsigned int, void *);

const struct ifq_ops priq_ops = {
	priq_idx,
	priq_enq,
	priq_deq_begin,
	priq_deq_commit,
	priq_purge,
	priq_alloc,
	priq_free,
};

const struct ifq_ops * const ifq_priq_ops = &priq_ops;

/*
 * priq internal structures
 */

struct priq {
	struct mbuf_list	 pq_lists[IFQ_NQUEUES];
};

/*
 * ifqueue serialiser
 */

void	ifq_start_task(void *);
void	ifq_restart_task(void *);
void	ifq_barrier_task(void *);
void	ifq_bundle_task(void *);

static inline void
ifq_run_start(struct ifqueue *ifq)
{
	ifq_serialize(ifq, &ifq->ifq_start);
}

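/*
 * ifq_serialize() guarantees that tasks submitted against an ifqueue
 * run one at a time without needing a dedicated context.  Tasks are
 * put on ifq_task_list under ifq_task_mtx; the first CPU to find
 * ifq_serializer clear takes ownership and keeps running tasks until
 * the list is empty, so other CPUs only pay for queueing their task.
 */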
void
ifq_serialize(struct ifqueue *ifq, struct task *t)
{
	struct task work;

	if (ISSET(t->t_flags, TASK_ONQUEUE))
		return;

	mtx_enter(&ifq->ifq_task_mtx);
	if (!ISSET(t->t_flags, TASK_ONQUEUE)) {
		SET(t->t_flags, TASK_ONQUEUE);
		TAILQ_INSERT_TAIL(&ifq->ifq_task_list, t, t_entry);
	}

	if (ifq->ifq_serializer == NULL) {
		ifq->ifq_serializer = curcpu();

		while ((t = TAILQ_FIRST(&ifq->ifq_task_list)) != NULL) {
			TAILQ_REMOVE(&ifq->ifq_task_list, t, t_entry);
			CLR(t->t_flags, TASK_ONQUEUE);
			work = *t; /* copy to caller to avoid races */

			mtx_leave(&ifq->ifq_task_mtx);

			(*work.t_func)(work.t_arg);

			mtx_enter(&ifq->ifq_task_mtx);
		}

		ifq->ifq_serializer = NULL;
	}
	mtx_leave(&ifq->ifq_task_mtx);
}

int
ifq_is_serialized(struct ifqueue *ifq)
{
	return (ifq->ifq_serializer == curcpu());
}

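/*
 * ifq_start() either runs the start routine right away via the
 * serialiser, or defers it to the softnet taskq.  MPSAFE interfaces
 * only start immediately once enough packets have built up (if_txmit,
 * capped at ifq_maxlen); otherwise the bundle task lets several
 * enqueues share a single call into the driver.
 */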
void
ifq_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;

	if (ISSET(ifp->if_xflags, IFXF_MPSAFE) &&
	    ifq_len(ifq) >= min(ifp->if_txmit, ifq->ifq_maxlen)) {
		task_del(ifq->ifq_softnet, &ifq->ifq_bundle);
		ifq_run_start(ifq);
	} else
		task_add(ifq->ifq_softnet, &ifq->ifq_bundle);
}

void
ifq_start_task(void *p)
{
	struct ifqueue *ifq = p;
	struct ifnet *ifp = ifq->ifq_if;

	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
	    ifq_empty(ifq) || ifq_is_oactive(ifq))
		return;

	ifp->if_qstart(ifq);
}

void
ifq_restart_task(void *p)
{
	struct ifqueue *ifq = p;
	struct ifnet *ifp = ifq->ifq_if;

	ifq_clr_oactive(ifq);
	ifp->if_qstart(ifq);
}

void
ifq_bundle_task(void *p)
{
	struct ifqueue *ifq = p;

	ifq_run_start(ifq);
}

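/*
 * ifq_barrier() waits until any work queued ahead of it in the
 * serialiser has finished.  It cancels a pending bundle task first,
 * then queues a task that signals a condition the caller sleeps on.
 */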
void
ifq_barrier(struct ifqueue *ifq)
{
	struct cond c = COND_INITIALIZER();
	struct task t = TASK_INITIALIZER(ifq_barrier_task, &c);

	task_del(ifq->ifq_softnet, &ifq->ifq_bundle);

	if (ifq->ifq_serializer == NULL)
		return;

	ifq_serialize(ifq, &t);

	cond_wait(&c, "ifqbar");
}

void
ifq_barrier_task(void *p)
{
	struct cond *c = p;

	cond_signal(c);
}

/*
 * ifqueue mbuf queue API
 */

void
ifq_init(struct ifqueue *ifq, struct ifnet *ifp, unsigned int idx)
{
	ifq->ifq_if = ifp;
	ifq->ifq_softnet = ISSET(ifp->if_xflags, IFXF_MPSAFE) ?
	    net_tq(ifp->if_index /* + idx */) : systq;
	ifq->ifq_softc = NULL;

	mtx_init(&ifq->ifq_mtx, IPL_NET);

	/* default to priq */
	ifq->ifq_ops = &priq_ops;
	ifq->ifq_q = priq_ops.ifqop_alloc(idx, NULL);

	ml_init(&ifq->ifq_free);
	ifq->ifq_len = 0;

	ifq->ifq_packets = 0;
	ifq->ifq_bytes = 0;
	ifq->ifq_qdrops = 0;
	ifq->ifq_errors = 0;
	ifq->ifq_mcasts = 0;

	mtx_init(&ifq->ifq_task_mtx, IPL_NET);
	TAILQ_INIT(&ifq->ifq_task_list);
	ifq->ifq_serializer = NULL;
	task_set(&ifq->ifq_bundle, ifq_bundle_task, ifq);

	task_set(&ifq->ifq_start, ifq_start_task, ifq);
	task_set(&ifq->ifq_restart, ifq_restart_task, ifq);

	if (ifq->ifq_maxlen == 0)
		ifq_set_maxlen(ifq, IFQ_MAXLEN);

	ifq->ifq_idx = idx;
}

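/*
 * ifq_attach() swaps the queueing discipline on a live ifqueue.  The
 * old discipline is purged and the packets it held are fed to the new
 * one; anything the new discipline rejects is counted as a drop and
 * freed once the mutex has been released.
 */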
void
ifq_attach(struct ifqueue *ifq, const struct ifq_ops *newops, void *opsarg)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf_list free_ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	const struct ifq_ops *oldops;
	void *newq, *oldq;

	newq = newops->ifqop_alloc(ifq->ifq_idx, opsarg);

	mtx_enter(&ifq->ifq_mtx);
	ifq->ifq_ops->ifqop_purge(ifq, &ml);
	ifq->ifq_len = 0;

	oldops = ifq->ifq_ops;
	oldq = ifq->ifq_q;

	ifq->ifq_ops = newops;
	ifq->ifq_q = newq;

	while ((m = ml_dequeue(&ml)) != NULL) {
		m = ifq->ifq_ops->ifqop_enq(ifq, m);
		if (m != NULL) {
			ifq->ifq_qdrops++;
			ml_enqueue(&free_ml, m);
		} else
			ifq->ifq_len++;
	}
	mtx_leave(&ifq->ifq_mtx);

	oldops->ifqop_free(ifq->ifq_idx, oldq);

	ml_purge(&free_ml);
}

void
ifq_destroy(struct ifqueue *ifq)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();

	NET_ASSERT_UNLOCKED();
	if (!task_del(ifq->ifq_softnet, &ifq->ifq_bundle))
		taskq_barrier(ifq->ifq_softnet);

	/* don't need to lock because this is the last use of the ifq */

	ifq->ifq_ops->ifqop_purge(ifq, &ml);
	ifq->ifq_ops->ifqop_free(ifq->ifq_idx, ifq->ifq_q);

	ml_purge(&ml);
}

void
ifq_add_data(struct ifqueue *ifq, struct if_data *data)
{
	mtx_enter(&ifq->ifq_mtx);
	data->ifi_opackets += ifq->ifq_packets;
	data->ifi_obytes += ifq->ifq_bytes;
	data->ifi_oqdrops += ifq->ifq_qdrops;
	data->ifi_omcasts += ifq->ifq_mcasts;
	/* ifp->if_data.ifi_oerrors */
	mtx_leave(&ifq->ifq_mtx);
}

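/*
 * ifq_enqueue() hands an mbuf to the current discipline.  The
 * discipline may accept it (dm == NULL), accept it but drop another
 * packet to make room (dm != NULL && dm != m), or reject it outright
 * (dm == m).  Only the last case is reported to the caller as ENOBUFS.
 */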
int
ifq_enqueue(struct ifqueue *ifq, struct mbuf *m)
{
	struct mbuf *dm;

	mtx_enter(&ifq->ifq_mtx);
	dm = ifq->ifq_ops->ifqop_enq(ifq, m);
	if (dm != m) {
		ifq->ifq_packets++;
		ifq->ifq_bytes += m->m_pkthdr.len;
		if (ISSET(m->m_flags, M_MCAST))
			ifq->ifq_mcasts++;
	}

	if (dm == NULL)
		ifq->ifq_len++;
	else
		ifq->ifq_qdrops++;
	mtx_leave(&ifq->ifq_mtx);

	if (dm != NULL)
		m_freem(dm);

	return (dm == m ? ENOBUFS : 0);
}

static inline void
ifq_deq_enter(struct ifqueue *ifq)
{
	mtx_enter(&ifq->ifq_mtx);
}

static inline void
ifq_deq_leave(struct ifqueue *ifq)
{
	struct mbuf_list ml;

	ml = ifq->ifq_free;
	ml_init(&ifq->ifq_free);

	mtx_leave(&ifq->ifq_mtx);

	if (!ml_empty(&ml))
		ml_purge(&ml);
}

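/*
 * ifq_deq_begin/ifq_deq_commit/ifq_deq_rollback let a driver look at
 * the next packet while ifq_mtx is held, and only remove it once it
 * knows it can be transmitted.  The discipline's cookie rides in
 * m_pkthdr.ph_cookie between begin and commit.
 *
 * A start routine might use it along these lines (sketch only, the
 * exdrv_* driver names are made up):
 *
 *	for (;;) {
 *		m = ifq_deq_begin(ifq);
 *		if (m == NULL)
 *			break;
 *
 *		if (exdrv_tx_ring_full(sc)) {
 *			ifq_deq_rollback(ifq, m);
 *			ifq_set_oactive(ifq);
 *			break;
 *		}
 *
 *		ifq_deq_commit(ifq, m);
 *		exdrv_encap(sc, m);
 *	}
 */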
struct mbuf *
ifq_deq_begin(struct ifqueue *ifq)
{
	struct mbuf *m = NULL;
	void *cookie;

	ifq_deq_enter(ifq);
	if (ifq->ifq_len == 0 ||
	    (m = ifq->ifq_ops->ifqop_deq_begin(ifq, &cookie)) == NULL) {
		ifq_deq_leave(ifq);
		return (NULL);
	}

	m->m_pkthdr.ph_cookie = cookie;

	return (m);
}

void
ifq_deq_commit(struct ifqueue *ifq, struct mbuf *m)
{
	void *cookie;

	KASSERT(m != NULL);
	cookie = m->m_pkthdr.ph_cookie;

	ifq->ifq_ops->ifqop_deq_commit(ifq, m, cookie);
	ifq->ifq_len--;
	ifq_deq_leave(ifq);
}

void
ifq_deq_rollback(struct ifqueue *ifq, struct mbuf *m)
{
	KASSERT(m != NULL);

	ifq_deq_leave(ifq);
}

struct mbuf *
ifq_dequeue(struct ifqueue *ifq)
{
	struct mbuf *m;

	m = ifq_deq_begin(ifq);
	if (m == NULL)
		return (NULL);

	ifq_deq_commit(ifq, m);

	return (m);
}

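/*
 * ifq_deq_sleep() dequeues a packet, sleeping on the queue until one
 * arrives unless nbio is set (in which case an empty queue returns
 * EWOULDBLOCK).  The caller supplies the sleeping and alive counters:
 * sleeping lets it see whether anyone is waiting, and clearing alive
 * makes woken sleepers return ENXIO.
 */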
int
ifq_deq_sleep(struct ifqueue *ifq, struct mbuf **mp, int nbio, int priority,
    const char *wmesg, volatile unsigned int *sleeping,
    volatile unsigned int *alive)
{
	struct mbuf *m;
	void *cookie;
	int error = 0;

	ifq_deq_enter(ifq);
	if (ifq->ifq_len == 0 && nbio)
		error = EWOULDBLOCK;
	else {
		for (;;) {
			m = ifq->ifq_ops->ifqop_deq_begin(ifq, &cookie);
			if (m != NULL) {
				ifq->ifq_ops->ifqop_deq_commit(ifq, m, cookie);
				ifq->ifq_len--;
				*mp = m;
				break;
			}

			(*sleeping)++;
			error = msleep_nsec(ifq, &ifq->ifq_mtx,
			    priority, wmesg, INFSLP);
			(*sleeping)--;
			if (error != 0)
				break;
			if (!(*alive)) {
				error = ENXIO;
				break;
			}
		}
	}
	ifq_deq_leave(ifq);

	return (error);
}

int
ifq_hdatalen(struct ifqueue *ifq)
{
	struct mbuf *m;
	int len = 0;

	m = ifq_deq_begin(ifq);
	if (m != NULL) {
		len = m->m_pkthdr.len;
		ifq_deq_rollback(ifq, m);
	}

	return (len);
}

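/*
 * ifq_purge() empties the queue, counting everything it held as a
 * drop, and returns the number of packets that were freed.
 */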
unsigned int
ifq_purge(struct ifqueue *ifq)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	unsigned int rv;

	mtx_enter(&ifq->ifq_mtx);
	ifq->ifq_ops->ifqop_purge(ifq, &ml);
	rv = ifq->ifq_len;
	ifq->ifq_len = 0;
	ifq->ifq_qdrops += rv;
	mtx_leave(&ifq->ifq_mtx);

	KASSERT(rv == ml_len(&ml));

	ml_purge(&ml);

	return (rv);
}

void *
ifq_q_enter(struct ifqueue *ifq, const struct ifq_ops *ops)
{
	mtx_enter(&ifq->ifq_mtx);
	if (ifq->ifq_ops == ops)
		return (ifq->ifq_q);

	mtx_leave(&ifq->ifq_mtx);

	return (NULL);
}

void
ifq_q_leave(struct ifqueue *ifq, void *q)
{
	KASSERT(q == ifq->ifq_q);
	mtx_leave(&ifq->ifq_mtx);
}

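/*
 * ifq_mfreem() and ifq_mfreeml() let a discipline drop packets from
 * inside its op handlers while ifq_mtx is held.  The mbufs are stashed
 * on ifq_free and only actually freed by ifq_deq_leave(), after the
 * mutex has been dropped.
 */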
void
ifq_mfreem(struct ifqueue *ifq, struct mbuf *m)
{
	MUTEX_ASSERT_LOCKED(&ifq->ifq_mtx);

	ifq->ifq_len--;
	ifq->ifq_qdrops++;
	ml_enqueue(&ifq->ifq_free, m);
}

void
ifq_mfreeml(struct ifqueue *ifq, struct mbuf_list *ml)
{
	MUTEX_ASSERT_LOCKED(&ifq->ifq_mtx);

	ifq->ifq_len -= ml_len(ml);
	ifq->ifq_qdrops += ml_len(ml);
	ml_enlist(&ifq->ifq_free, ml);
}

/*
 * ifiq
 */

static void	ifiq_process(void *);

void
ifiq_init(struct ifiqueue *ifiq, struct ifnet *ifp, unsigned int idx)
{
	ifiq->ifiq_if = ifp;
	ifiq->ifiq_softnet = net_tq(ifp->if_index); /* + idx */
	ifiq->ifiq_softc = NULL;

	mtx_init(&ifiq->ifiq_mtx, IPL_NET);
	ml_init(&ifiq->ifiq_ml);
	task_set(&ifiq->ifiq_task, ifiq_process, ifiq);
	ifiq->ifiq_pressure = 0;

	ifiq->ifiq_packets = 0;
	ifiq->ifiq_bytes = 0;
	ifiq->ifiq_qdrops = 0;
	ifiq->ifiq_errors = 0;

	ifiq->ifiq_idx = idx;
}

void
ifiq_destroy(struct ifiqueue *ifiq)
{
	NET_ASSERT_UNLOCKED();
	if (!task_del(ifiq->ifiq_softnet, &ifiq->ifiq_task))
		taskq_barrier(ifiq->ifiq_softnet);

	/* don't need to lock because this is the last use of the ifiq */
	ml_purge(&ifiq->ifiq_ml);
}

unsigned int ifiq_maxlen_drop = 2048 * 5;
unsigned int ifiq_maxlen_return = 2048 * 3;

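/*
 * ifiq_input() takes a list of packets from a network driver, accounts
 * for them, runs them past bpf, and appends them to the receive queue
 * for the softnet task to process.  If the backlog has grown past
 * ifiq_maxlen_drop the whole list is dropped instead, and the return
 * value tells the driver to apply backpressure once the backlog passes
 * ifiq_maxlen_return.
 */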
int
ifiq_input(struct ifiqueue *ifiq, struct mbuf_list *ml)
{
	struct ifnet *ifp = ifiq->ifiq_if;
	struct mbuf *m;
	uint64_t packets;
	uint64_t bytes = 0;
	unsigned int len;
#if NBPFILTER > 0
	caddr_t if_bpf;
#endif

	if (ml_empty(ml))
		return (0);

	MBUF_LIST_FOREACH(ml, m) {
		m->m_pkthdr.ph_ifidx = ifp->if_index;
		m->m_pkthdr.ph_rtableid = ifp->if_rdomain;
		bytes += m->m_pkthdr.len;
	}
	packets = ml_len(ml);

#if NBPFILTER > 0
	if_bpf = ifp->if_bpf;
	if (if_bpf) {
		struct mbuf_list ml0 = *ml;

		ml_init(ml);

		while ((m = ml_dequeue(&ml0)) != NULL) {
			if (bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_IN))
				m_freem(m);
			else
				ml_enqueue(ml, m);
		}

		if (ml_empty(ml)) {
			mtx_enter(&ifiq->ifiq_mtx);
			ifiq->ifiq_packets += packets;
			ifiq->ifiq_bytes += bytes;
			mtx_leave(&ifiq->ifiq_mtx);

			return (0);
		}
	}
#endif

	mtx_enter(&ifiq->ifiq_mtx);
	ifiq->ifiq_packets += packets;
	ifiq->ifiq_bytes += bytes;

	len = ml_len(&ifiq->ifiq_ml);
	if (len > ifiq_maxlen_drop)
		ifiq->ifiq_qdrops += ml_len(ml);
	else
		ml_enlist(&ifiq->ifiq_ml, ml);
	mtx_leave(&ifiq->ifiq_mtx);

	if (ml_empty(ml))
		task_add(ifiq->ifiq_softnet, &ifiq->ifiq_task);
	else
		ml_purge(ml);

	return (len > ifiq_maxlen_return);
}

void
ifiq_add_data(struct ifiqueue *ifiq, struct if_data *data)
{
	mtx_enter(&ifiq->ifiq_mtx);
	data->ifi_ipackets += ifiq->ifiq_packets;
	data->ifi_ibytes += ifiq->ifiq_bytes;
	data->ifi_iqdrops += ifiq->ifiq_qdrops;
	mtx_leave(&ifiq->ifiq_mtx);
}

int
ifiq_enqueue(struct ifiqueue *ifiq, struct mbuf *m)
{
	mtx_enter(&ifiq->ifiq_mtx);
	ml_enqueue(&ifiq->ifiq_ml, m);
	mtx_leave(&ifiq->ifiq_mtx);

	task_add(ifiq->ifiq_softnet, &ifiq->ifiq_task);

	return (0);
}

static void
ifiq_process(void *arg)
{
	struct ifiqueue *ifiq = arg;
	struct mbuf_list ml;

	if (ifiq_empty(ifiq))
		return;

	mtx_enter(&ifiq->ifiq_mtx);
	ml = ifiq->ifiq_ml;
	ml_init(&ifiq->ifiq_ml);
	mtx_leave(&ifiq->ifiq_mtx);

	if_input_process(ifiq->ifiq_if, &ml);
}

int
net_ifiq_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int error = EOPNOTSUPP;
/* pressure is disabled for 6.6-release */
#if 0
	int val;

	if (namelen != 1)
		return (EISDIR);

	switch (name[0]) {
	case NET_LINK_IFRXQ_PRESSURE_RETURN:
		val = ifiq_pressure_return;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &val);
		if (error != 0)
			return (error);
		if (val < 1 || val > ifiq_pressure_drop)
			return (EINVAL);
		ifiq_pressure_return = val;
		break;
	case NET_LINK_IFRXQ_PRESSURE_DROP:
		val = ifiq_pressure_drop;
		error = sysctl_int(oldp, oldlenp, newp, newlen, &val);
		if (error != 0)
			return (error);
		if (ifiq_pressure_return > val)
			return (EINVAL);
		ifiq_pressure_drop = val;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
#endif

	return (error);
}

/*
 * priq implementation
 */

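/*
 * priq_idx() maps an mbuf's flow id onto one of the interface's
 * transmit queues so packets of the same flow stay on the same queue.
 */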
unsigned int
priq_idx(unsigned int nqueues, const struct mbuf *m)
{
	unsigned int flow = 0;

	if (ISSET(m->m_pkthdr.ph_flowid, M_FLOWID_VALID))
		flow = m->m_pkthdr.ph_flowid & M_FLOWID_MASK;

	return (flow % nqueues);
}

void *
priq_alloc(unsigned int idx, void *null)
{
	struct priq *pq;
	int i;

	pq = malloc(sizeof(struct priq), M_DEVBUF, M_WAITOK);
	for (i = 0; i < IFQ_NQUEUES; i++)
		ml_init(&pq->pq_lists[i]);
	return (pq);
}

void
priq_free(unsigned int idx, void *pq)
{
	free(pq, M_DEVBUF, sizeof(struct priq));
}

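/*
 * priq_enq() appends the mbuf to the list for its pf priority.  When
 * the ifqueue is full it tries to make room by dropping a packet from
 * a strictly lower priority list; if there is none, the new packet
 * itself is returned to the caller as the drop.
 */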
struct mbuf *
priq_enq(struct ifqueue *ifq, struct mbuf *m)
{
	struct priq *pq;
	struct mbuf_list *pl;
	struct mbuf *n = NULL;
	unsigned int prio;

	pq = ifq->ifq_q;
	KASSERT(m->m_pkthdr.pf.prio <= IFQ_MAXPRIO);

	/* Find a lower priority queue to drop from */
	if (ifq_len(ifq) >= ifq->ifq_maxlen) {
		for (prio = 0; prio < m->m_pkthdr.pf.prio; prio++) {
			pl = &pq->pq_lists[prio];
			if (ml_len(pl) > 0) {
				n = ml_dequeue(pl);
				goto enqueue;
			}
		}
		/*
		 * There's no lower priority queue that we can
		 * drop from so don't enqueue this one.
		 */
		return (m);
	}

 enqueue:
	pl = &pq->pq_lists[m->m_pkthdr.pf.prio];
	ml_enqueue(pl, m);

	return (n);
}

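/*
 * priq_deq_begin() scans from the highest priority list down and
 * returns the first packet it finds, leaving it on the list and
 * handing the list back as the cookie for priq_deq_commit().
 */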
struct mbuf *
priq_deq_begin(struct ifqueue *ifq, void **cookiep)
{
	struct priq *pq = ifq->ifq_q;
	struct mbuf_list *pl;
	unsigned int prio = nitems(pq->pq_lists);
	struct mbuf *m;

	do {
		pl = &pq->pq_lists[--prio];
		m = MBUF_LIST_FIRST(pl);
		if (m != NULL) {
			*cookiep = pl;
			return (m);
		}
	} while (prio > 0);

	return (NULL);
}

void
priq_deq_commit(struct ifqueue *ifq, struct mbuf *m, void *cookie)
{
	struct mbuf_list *pl = cookie;

	KASSERT(MBUF_LIST_FIRST(pl) == m);

	ml_dequeue(pl);
}

void
priq_purge(struct ifqueue *ifq, struct mbuf_list *ml)
{
	struct priq *pq = ifq->ifq_q;
	struct mbuf_list *pl;
	unsigned int prio = nitems(pq->pq_lists);

	do {
		pl = &pq->pq_lists[--prio];
		ml_enlist(ml, pl);
	} while (prio > 0);
}