Lines Matching defs:ifq

91 struct ifclassq *ifq = &ifp->if_snd;
94 IFCQ_LOCK(ifq);
95 VERIFY(IFCQ_IS_EMPTY(ifq));
96 ifq->ifcq_ifp = ifp;
97 IFCQ_LEN(ifq) = 0;
98 bzero(&ifq->ifcq_xmitcnt, sizeof (ifq->ifcq_xmitcnt));
99 bzero(&ifq->ifcq_dropcnt, sizeof (ifq->ifcq_dropcnt));
101 VERIFY(!IFCQ_TBR_IS_ENABLED(ifq));
102 VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
103 VERIFY(ifq->ifcq_flags == 0);
104 VERIFY(ifq->ifcq_sflags == 0);
105 VERIFY(ifq->ifcq_disc == NULL);
106 VERIFY(ifq->ifcq_enqueue == NULL);
107 VERIFY(ifq->ifcq_dequeue == NULL);
108 VERIFY(ifq->ifcq_dequeue_sc == NULL);
109 VERIFY(ifq->ifcq_request == NULL);
114 if ((maxlen = IFCQ_MAXLEN(ifq)) == 0)
116 IFCQ_SET_MAXLEN(ifq, maxlen);
118 ifq->ifcq_sflags = sflags;
119 err = ifclassq_pktsched_setup(ifq);
121 ifq->ifcq_flags = (IFCQF_READY | IFCQF_ENABLED);
125 ifq->ifcq_drain = 0;
126 IFCQ_ALTQ(ifq)->altq_ifcq = ifq;
127 VERIFY(IFCQ_ALTQ(ifq)->altq_type == ALTQT_NONE);
128 VERIFY(IFCQ_ALTQ(ifq)->altq_flags == 0);
129 VERIFY(IFCQ_ALTQ(ifq)->altq_disc == NULL);
130 VERIFY(IFCQ_ALTQ(ifq)->altq_enqueue == NULL);
131 VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue == NULL);
132 VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue_sc == NULL);
133 VERIFY(IFCQ_ALTQ(ifq)->altq_request == NULL);
137 ALTQ_SET_READY(IFCQ_ALTQ(ifq));
139 ALTQ_CLEAR_READY(IFCQ_ALTQ(ifq));
141 IFCQ_UNLOCK(ifq);
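
The matches above (source lines 91-141), apparently from XNU's classq code given the ifclassq and IFCQ_* names, look like the send-queue setup path: take the queue lock, verify the queue is empty and still unconfigured, zero the transmit/drop counters, fall back to a default maximum length when none is set, install the packet scheduler, and only then mark the queue ready and enabled. A minimal userspace sketch of that init-under-lock pattern, with hypothetical names (struct sendq, SQ_READY, SQ_DEFAULT_MAXLEN) standing in for the real ifclassq fields:

#include <assert.h>
#include <pthread.h>
#include <stdint.h>

/* Hypothetical stand-in for struct ifclassq: a lock, state flags, and counters. */
struct sendq {
	pthread_mutex_t	sq_lock;
	uint32_t	sq_flags;	/* SQ_READY | SQ_ENABLED */
	uint32_t	sq_len;		/* packets currently queued */
	uint32_t	sq_maxlen;	/* 0 means "use the default" */
	uint64_t	sq_xmitcnt;
	uint64_t	sq_dropcnt;
};

#define	SQ_READY		0x1
#define	SQ_ENABLED		0x2
#define	SQ_DEFAULT_MAXLEN	128

/* Same shape as the setup path: lock, verify a pristine empty queue, zero the
 * counters, apply a default maxlen, then flag the queue ready and enabled. */
static void
sendq_init(struct sendq *sq)
{
	pthread_mutex_lock(&sq->sq_lock);
	assert(sq->sq_len == 0);	/* queue must start empty */
	assert(sq->sq_flags == 0);	/* and unconfigured */
	sq->sq_xmitcnt = 0;
	sq->sq_dropcnt = 0;
	if (sq->sq_maxlen == 0)
		sq->sq_maxlen = SQ_DEFAULT_MAXLEN;
	sq->sq_flags = (SQ_READY | SQ_ENABLED);
	pthread_mutex_unlock(&sq->sq_lock);
}
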
149 struct ifclassq *ifq = &ifp->if_snd;
151 IFCQ_LOCK(ifq);
153 if (ALTQ_IS_READY(IFCQ_ALTQ(ifq))) {
154 if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq)))
155 altq_disable(IFCQ_ALTQ(ifq));
156 if (ALTQ_IS_ATTACHED(IFCQ_ALTQ(ifq)))
157 altq_detach(IFCQ_ALTQ(ifq));
158 IFCQ_ALTQ(ifq)->altq_flags = 0;
160 ifq->ifcq_drain = 0;
161 IFCQ_ALTQ(ifq)->altq_ifcq = NULL;
162 VERIFY(IFCQ_ALTQ(ifq)->altq_type == ALTQT_NONE);
163 VERIFY(IFCQ_ALTQ(ifq)->altq_flags == 0);
164 VERIFY(IFCQ_ALTQ(ifq)->altq_disc == NULL);
165 VERIFY(IFCQ_ALTQ(ifq)->altq_enqueue == NULL);
166 VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue == NULL);
167 VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue_sc == NULL);
168 VERIFY(IFCQ_ALTQ(ifq)->altq_request == NULL);
171 if (IFCQ_IS_READY(ifq)) {
172 if (IFCQ_TBR_IS_ENABLED(ifq)) {
174 (void) ifclassq_tbr_set(ifq, &tb, FALSE);
176 (void) pktsched_teardown(ifq);
177 ifq->ifcq_flags = 0;
179 ifq->ifcq_sflags = 0;
181 VERIFY(IFCQ_IS_EMPTY(ifq));
182 VERIFY(!IFCQ_TBR_IS_ENABLED(ifq));
183 VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
184 VERIFY(ifq->ifcq_flags == 0);
185 VERIFY(ifq->ifcq_sflags == 0);
186 VERIFY(ifq->ifcq_disc == NULL);
187 VERIFY(ifq->ifcq_enqueue == NULL);
188 VERIFY(ifq->ifcq_dequeue == NULL);
189 VERIFY(ifq->ifcq_dequeue_sc == NULL);
190 VERIFY(ifq->ifcq_request == NULL);
191 IFCQ_LEN(ifq) = 0;
192 IFCQ_MAXLEN(ifq) = 0;
193 bzero(&ifq->ifcq_xmitcnt, sizeof (ifq->ifcq_xmitcnt));
194 bzero(&ifq->ifcq_dropcnt, sizeof (ifq->ifcq_dropcnt));
196 IFCQ_UNLOCK(ifq);
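
The next group (source lines 149-196) is the matching teardown path: under the same lock it disables and detaches whatever ALTQ/TBR state was enabled, tears down the scheduler, clears the flags, and then verifies the queue is back to an empty, unconfigured state before zeroing the length and counters. Continuing the hypothetical sendq sketch above:

/* Reverse of sendq_init(): clear the flags, then assert the queue has been
 * drained and left unconfigured before resetting the limits and counters. */
static void
sendq_fini(struct sendq *sq)
{
	pthread_mutex_lock(&sq->sq_lock);
	sq->sq_flags = 0;		/* drop SQ_READY | SQ_ENABLED */
	assert(sq->sq_len == 0);	/* all packets must be gone */
	assert(sq->sq_flags == 0);
	sq->sq_maxlen = 0;
	sq->sq_xmitcnt = 0;
	sq->sq_dropcnt = 0;
	pthread_mutex_unlock(&sq->sq_lock);
}
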
200 ifclassq_pktsched_setup(struct ifclassq *ifq)
202 struct ifnet *ifp = ifq->ifcq_ifp;
205 IFCQ_LOCK_ASSERT_HELD(ifq);
210 err = pktsched_setup(ifq, PKTSCHEDT_TCQ, ifq->ifcq_sflags);
214 err = pktsched_setup(ifq, PKTSCHEDT_QFQ, ifq->ifcq_sflags);
226 ifclassq_set_maxlen(struct ifclassq *ifq, u_int32_t maxqlen)
228 IFCQ_LOCK(ifq);
231 IFCQ_SET_MAXLEN(ifq, maxqlen);
232 IFCQ_UNLOCK(ifq);
236 ifclassq_get_maxlen(struct ifclassq *ifq)
238 return (IFCQ_MAXLEN(ifq));
242 ifclassq_get_len(struct ifclassq *ifq, mbuf_svc_class_t sc, u_int32_t *packets,
247 IFCQ_LOCK(ifq);
250 *packets = IFCQ_LEN(ifq);
254 IFCQ_LEN_SC(ifq, sc, packets, bytes, err);
256 IFCQ_UNLOCK(ifq);
262 ifclassq_enqueue(struct ifclassq *ifq, struct mbuf *m)
266 IFCQ_LOCK_SPIN(ifq);
269 if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq))) {
270 ALTQ_ENQUEUE(IFCQ_ALTQ(ifq), m, err);
272 u_int32_t qlen = IFCQ_LEN(ifq);
273 IFCQ_ENQUEUE(ifq, m, err);
274 if (IFCQ_LEN(ifq) > qlen)
275 ifq->ifcq_drain += (IFCQ_LEN(ifq) - qlen);
278 IFCQ_ENQUEUE(ifq, m, err);
281 IFCQ_UNLOCK(ifq);
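
The enqueue matches (source lines 262-281) show two paths: when ALTQ is enabled the packet goes through ALTQ_ENQUEUE, otherwise it goes into the classq, and in one branch a separate ifcq_drain counter is bumped by however much the queue length actually grew (lines 272-275); the dequeue path settles that counter later (lines 354-355). A small standalone sketch of the length-delta accounting, with a hypothetical bounded FIFO in place of the real queue:

#include <errno.h>
#include <stdint.h>

#define	QMAX	4

struct pkt { int id; };

/* Hypothetical bounded FIFO standing in for the classq, plus the "drain"
 * count of packets that were queued outside the normal scheduler path. */
struct fifo {
	struct pkt	*slot[QMAX];
	uint32_t	len;
	uint32_t	drain;
};

static int
fifo_enqueue(struct fifo *f, struct pkt *p)
{
	if (f->len >= QMAX)
		return (ENOBUFS);	/* full: caller drops the packet */
	f->slot[f->len++] = p;
	return (0);
}

/* Same idea as source lines 272-275: compare the queue length before and
 * after the enqueue, and only credit the drain count for packets that were
 * actually accepted (the queue may drop when it is full). */
static int
fifo_enqueue_counted(struct fifo *f, struct pkt *p)
{
	uint32_t qlen = f->len;
	int err = fifo_enqueue(f, p);

	if (f->len > qlen)
		f->drain += (f->len - qlen);
	return (err);
}
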
287 ifclassq_dequeue(struct ifclassq *ifq, u_int32_t limit, struct mbuf **head,
290 return (ifclassq_dequeue_common(ifq, MBUF_SC_UNSPEC, limit, head, tail,
295 ifclassq_dequeue_sc(struct ifclassq *ifq, mbuf_svc_class_t sc,
299 return (ifclassq_dequeue_common(ifq, sc, limit, head, tail,
304 ifclassq_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
308 struct ifnet *ifp = ifq->ifcq_ifp;
312 struct ifaltq *altq = IFCQ_ALTQ(ifq);
322 ifq = &ifp->if_snd;
323 IFCQ_LOCK_SPIN(ifq);
330 qlen = IFCQ_LEN(ifq);
331 draining = IFCQ_IS_DRAINING(ifq);
334 if (IFCQ_TBR_IS_ENABLED(ifq))
335 IFCQ_TBR_DEQUEUE_SC(ifq, sc, *head);
337 IFCQ_DEQUEUE_SC(ifq, sc, *head);
343 if (IFCQ_TBR_IS_ENABLED(ifq))
344 IFCQ_TBR_DEQUEUE(ifq, *head);
346 IFCQ_DEQUEUE(ifq, *head);
354 VERIFY(ifq->ifcq_drain >= (qlen - IFCQ_LEN(ifq)));
355 ifq->ifcq_drain -= (qlen - IFCQ_LEN(ifq));
359 if (IFCQ_TBR_IS_ENABLED(ifq))
360 IFCQ_TBR_DEQUEUE_SC(ifq, sc, *head);
362 IFCQ_DEQUEUE_SC(ifq, sc, *head);
364 if (IFCQ_TBR_IS_ENABLED(ifq))
365 IFCQ_TBR_DEQUEUE(ifq, *head);
367 IFCQ_DEQUEUE(ifq, *head);
389 IFCQ_UNLOCK(ifq);
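
The dequeue matches (source lines 287-389) cover the plain and per-service-class entry points plus the common worker, which picks between the token-bucket wrappers (IFCQ_TBR_DEQUEUE*) and the direct scheduler dequeue, and on the draining path subtracts the packets it removed from ifcq_drain (lines 354-355). Continuing the fifo sketch:

#include <assert.h>

/* Pop one packet from the head, then settle the drain count by however many
 * packets this call actually removed, mirroring the VERIFY()/subtract pair
 * at source lines 354-355. */
static struct pkt *
fifo_dequeue_counted(struct fifo *f)
{
	uint32_t qlen = f->len;
	struct pkt *p = NULL;
	uint32_t i;

	if (f->len > 0) {
		p = f->slot[0];
		for (i = 1; i < f->len; i++)
			f->slot[i - 1] = f->slot[i];
		f->len--;
	}
	assert(f->drain >= (qlen - f->len));
	f->drain -= (qlen - f->len);
	return (p);
}
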
402 ifclassq_poll(struct ifclassq *ifq)
404 return (ifclassq_poll_common(ifq, MBUF_SC_UNSPEC, FALSE));
408 ifclassq_poll_sc(struct ifclassq *ifq, mbuf_svc_class_t sc)
410 return (ifclassq_poll_common(ifq, sc, TRUE));
414 ifclassq_poll_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
418 struct ifaltq *altq = IFCQ_ALTQ(ifq);
426 if (IFCQ_TBR_IS_ENABLED(ifq))
427 IFCQ_TBR_POLL_SC(ifq, sc, m);
428 else if (IFCQ_IS_DRAINING(ifq))
429 IFCQ_POLL_SC(ifq, sc, m);
435 if (IFCQ_TBR_IS_ENABLED(ifq))
436 IFCQ_TBR_POLL(ifq, m);
437 else if (IFCQ_IS_DRAINING(ifq))
438 IFCQ_POLL(ifq, m);
446 if (IFCQ_TBR_IS_ENABLED(ifq))
447 IFCQ_TBR_POLL_SC(ifq, sc, m);
449 IFCQ_POLL_SC(ifq, sc, m);
451 if (IFCQ_TBR_IS_ENABLED(ifq))
452 IFCQ_TBR_POLL(ifq, m);
454 IFCQ_POLL(ifq, m);
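
The poll matches (source lines 402-454) mirror the same TBR / draining / service-class branching, but only peek at the head packet instead of removing it. In the fifo sketch that is just:

/* Non-destructive counterpart of dequeue: look at the head packet, leave it queued. */
static struct pkt *
fifo_poll(const struct fifo *f)
{
	return ((f->len > 0) ? f->slot[0] : NULL);
}
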
462 ifclassq_update(struct ifclassq *ifq, cqev_t ev)
464 IFCQ_LOCK_ASSERT_HELD(ifq);
465 VERIFY(IFCQ_IS_READY(ifq));
468 if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq)))
469 ALTQ_UPDATE(IFCQ_ALTQ(ifq), ev);
471 IFCQ_UPDATE(ifq, ev);
475 ifclassq_attach(struct ifclassq *ifq, u_int32_t type, void *discipline,
479 IFCQ_LOCK_ASSERT_HELD(ifq);
481 VERIFY(ifq->ifcq_disc == NULL);
486 ifq->ifcq_type = type;
487 ifq->ifcq_disc = discipline;
488 ifq->ifcq_enqueue = enqueue;
489 ifq->ifcq_dequeue = dequeue;
490 ifq->ifcq_dequeue_sc = dequeue_sc;
491 ifq->ifcq_request = request;
497 ifclassq_detach(struct ifclassq *ifq)
499 IFCQ_LOCK_ASSERT_HELD(ifq);
501 VERIFY(ifq->ifcq_disc == NULL);
503 ifq->ifcq_type = PKTSCHEDT_NONE;
504 ifq->ifcq_disc = NULL;
505 ifq->ifcq_enqueue = NULL;
506 ifq->ifcq_dequeue = NULL;
507 ifq->ifcq_dequeue_sc = NULL;
508 ifq->ifcq_request = NULL;
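
ifclassq_attach() and ifclassq_detach() (source lines 475-508) install and clear the scheduler's callback pointers, with VERIFY()s insisting the discipline slot is clean on both sides. A hypothetical vtable sketch of the same pattern, with made-up names:

#include <assert.h>
#include <stddef.h>

struct pkt;

/* Hypothetical discipline vtable, standing in for the ifcq_disc, ifcq_enqueue,
 * ifcq_dequeue and ifcq_request fields that attach fills in and detach clears. */
struct sched_ops {
	void		*disc;				/* scheduler private state */
	int		(*enqueue)(void *disc, struct pkt *p);
	struct pkt	*(*dequeue)(void *disc);
	int		(*request)(void *disc, int req, void *arg);
};

/* Attach insists the slot is empty (a leftover scheduler would leak);
 * detach nulls every pointer so the next attach can make the same check. */
static void
sched_attach(struct sched_ops *slot, const struct sched_ops *ops)
{
	assert(slot->disc == NULL);
	*slot = *ops;
}

static void
sched_detach(struct sched_ops *slot)
{
	slot->disc = NULL;
	slot->enqueue = NULL;
	slot->dequeue = NULL;
	slot->request = NULL;
}
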
514 ifclassq_getqstats(struct ifclassq *ifq, u_int32_t qid, void *ubuf,
527 IFCQ_LOCK(ifq);
528 if (!IFCQ_IS_READY(ifq)) {
529 IFCQ_UNLOCK(ifq);
534 ifqs->ifqs_len = IFCQ_LEN(ifq);
535 ifqs->ifqs_maxlen = IFCQ_MAXLEN(ifq);
536 *(&ifqs->ifqs_xmitcnt) = *(&ifq->ifcq_xmitcnt);
537 *(&ifqs->ifqs_dropcnt) = *(&ifq->ifcq_dropcnt);
538 ifqs->ifqs_scheduler = ifq->ifcq_type;
540 err = pktsched_getqstats(ifq, qid, ifqs);
541 IFCQ_UNLOCK(ifq);
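
ifclassq_getqstats() (source lines 514-541) bails out if the queue is not ready, then copies the length, limit, counters and scheduler type out under the queue lock so the caller sees one consistent snapshot. A minimal sketch of that snapshot-under-lock idea, again with made-up names:

#include <errno.h>
#include <pthread.h>
#include <stdint.h>

struct q_stats {
	uint32_t	len;
	uint32_t	maxlen;
	uint64_t	xmitcnt;
	uint64_t	dropcnt;
};

struct statq {
	pthread_mutex_t	lock;
	int		ready;
	struct q_stats	stats;
};

/* Take the lock, refuse a queue that was never set up, and copy the whole
 * counter block in one go (same idea as the struct copies in the listing). */
static int
statq_snapshot(struct statq *q, struct q_stats *out)
{
	int err = 0;

	pthread_mutex_lock(&q->lock);
	if (!q->ready)
		err = ENXIO;
	else
		*out = q->stats;
	pthread_mutex_unlock(&q->lock);
	return (err);
}
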
598 ifclassq_tbr_dequeue(struct ifclassq *ifq, int op)
600 return (ifclassq_tbr_dequeue_common(ifq, op, MBUF_SC_UNSPEC, FALSE));
604 ifclassq_tbr_dequeue_sc(struct ifclassq *ifq, int op, mbuf_svc_class_t sc)
606 return (ifclassq_tbr_dequeue_common(ifq, op, sc, TRUE));
610 ifclassq_tbr_dequeue_common(struct ifclassq *ifq, int op,
618 IFCQ_LOCK_ASSERT_HELD(ifq);
621 VERIFY(IFCQ_TBR_IS_ENABLED(ifq));
623 tbr = &ifq->ifcq_tbr;
650 if (IFCQ_IS_DRAINING(ifq)) {
654 IFCQ_POLL_SC(ifq, sc, m);
656 IFCQ_POLL(ifq, m);
659 IFCQ_DEQUEUE_SC(ifq, sc, m);
661 IFCQ_DEQUEUE(ifq, m);
665 struct ifaltq *altq = IFCQ_ALTQ(ifq);
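
The token-bucket dequeue wrappers (source lines 598-665) sit between the caller and the scheduler's poll/dequeue and only release a packet when the bucket currently holds enough tokens for it. A self-contained sketch of that gating arithmetic, with made-up field names and units (bits, nanoseconds):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical token bucket: tokens and depth are in bits, rate is bits per
 * second, and the caller supplies the current time in nanoseconds. */
struct tbr {
	uint64_t	rate;		/* refill rate, bits/sec */
	int64_t		depth;		/* bucket size, bits */
	int64_t		tokens;		/* current fill, bits */
	uint64_t	last_ns;	/* last refill timestamp */
};

/* Refill for the elapsed time, cap at depth, and release a packet of pktbits
 * only if the bucket holds enough tokens to cover it. */
static int
tbr_allow(struct tbr *tb, uint64_t now_ns, int64_t pktbits)
{
	uint64_t elapsed = now_ns - tb->last_ns;

	tb->tokens += (int64_t)(tb->rate * elapsed / 1000000000ULL);
	if (tb->tokens > tb->depth)
		tb->tokens = tb->depth;
	tb->last_ns = now_ns;

	if (tb->tokens < pktbits)
		return (0);	/* not yet: caller leaves the packet queued */
	tb->tokens -= pktbits;
	return (1);
}

int
main(void)
{
	struct tbr tb = { .rate = 1000000, .depth = 12000, .tokens = 12000 };

	/* A full bucket covers one 1500-byte (12000-bit) packet at once; at
	 * 1 Mbit/s the next one is still blocked 1 ms later, since refilling
	 * 12000 bits takes about 12 ms. Prints "1 0". */
	int first = tbr_allow(&tb, 0, 12000);
	int second = tbr_allow(&tb, 1000000, 12000);
	printf("%d %d\n", first, second);
	return (0);
}
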
689 ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile,
693 struct ifnet *ifp = ifq->ifcq_ifp;
696 IFCQ_LOCK_ASSERT_HELD(ifq);
697 VERIFY(IFCQ_IS_READY(ifq));
701 tbr = &ifq->ifcq_tbr;
716 if (!IFCQ_TBR_IS_ENABLED(ifq))
723 ifq->ifcq_flags &= ~IFCQF_TBR;
727 ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);
733 (ifq->ifcq_flags & IFCQF_TBR) ? "reconfigured" :
741 ifq->ifcq_flags |= IFCQF_TBR;
811 ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);
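
Finally, ifclassq_tbr_set() (source lines 689-811) installs or clears the token-bucket profile: one branch clears IFCQF_TBR (line 723), another sets it (line 741), and both end by telling the scheduler the effective link bandwidth changed (CLASSQ_EV_LINK_BANDWIDTH). A practical constraint worth keeping in mind when picking a profile: if dequeues are driven by a periodic timer, the bucket depth has to cover at least one timer period's worth of tokens, or the configured rate can never be sustained. A back-of-the-envelope check with made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical sizing check for a token-bucket profile: the depth must cover
 * the tokens that accumulate in one dequeue-timer period, otherwise the link
 * idles between ticks and the configured rate is unreachable. */
int
main(void)
{
	uint64_t rate_bps = 1000000000ULL;	/* 1 Gbit/s */
	uint64_t tick_us = 10000;		/* 10 ms dequeue timer */
	uint64_t min_depth_bits = rate_bps * tick_us / 1000000ULL;

	/* 10^9 bits/s * 0.01 s = 10^7 bits, i.e. 1.25 MB of bucket depth. */
	printf("minimum depth: %llu bits (%llu bytes)\n",
	    (unsigned long long)min_depth_bits,
	    (unsigned long long)(min_depth_bits / 8));
	return (0);
}
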