Lines matching defs: cl (the struct fairq_class pointer used by the FAIRQ packet scheduler) in /macosx-10.10/xnu-2782.1.97/bsd/net/pktsched/

262 struct fairq_class *cl;
269 if ((cl = fif->fif_classes[pri]) != NULL)
270 fairq_class_destroy(fif, cl);
279 struct fairq_class *cl;
285 if ((cl = fif->fif_classes[pri]) != NULL && cl->cl_head)
286 fairq_purgeq(fif, cl, 0, NULL, NULL);
302 struct fairq_class *cl;
308 if ((cl = fif->fif_classes[pri]) != NULL)
309 fairq_updateq(fif, cl, ev);
318 struct fairq_class *cl;
332 cl = fairq_class_create(fif, priority, qlimit, bandwidth,
334 if (cl == NULL)
338 *clp = cl;
351 struct fairq_class *cl;
412 if ((cl = fif->fif_classes[pri]) != NULL) {
414 if (cl->cl_head)
415 fairq_purgeq(fif, cl, 0, NULL, NULL);
417 if (cl->cl_qtype == Q_RIO)
418 rio_destroy(cl->cl_rio);
421 if (cl->cl_qtype == Q_RED)
422 red_destroy(cl->cl_red);
425 if (cl->cl_qtype == Q_BLUE)
426 blue_destroy(cl->cl_blue);
428 if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
429 sfb_destroy(cl->cl_sfb);
430 cl->cl_qalg.ptr = NULL;
431 cl->cl_qtype = Q_DROPTAIL;
432 cl->cl_qstate = QS_RUNNING;
434 cl = zalloc(fairq_cl_zone);
435 if (cl == NULL)
437 bzero(cl, fairq_cl_size);
438 cl->cl_nbuckets = nbuckets;
439 cl->cl_nbucket_mask = nbuckets - 1;
441 cl->cl_buckets = _MALLOC(sizeof (struct fairq_bucket) *
442 cl->cl_nbuckets, M_DEVBUF, M_WAITOK|M_ZERO);
443 if (cl->cl_buckets == NULL)
445 cl->cl_head = NULL;
448 fif->fif_classes[pri] = cl;
450 fif->fif_default = cl;
456 cl->cl_qlimit = qlimit;
457 for (i = 0; i < cl->cl_nbuckets; ++i) {
458 _qinit(&cl->cl_buckets[i].queue, Q_DROPTAIL, qlimit);
460 cl->cl_bandwidth = bandwidth / 8; /* cvt to bytes per second */
461 cl->cl_qtype = Q_DROPTAIL;
462 cl->cl_qstate = QS_RUNNING;
463 cl->cl_flags = flags;
464 cl->cl_pri = pri;
467 cl->cl_fif = fif;
468 cl->cl_handle = qid;
469 cl->cl_hogs_m1 = hogs_m1 / 8;
470 cl->cl_lssc_m1 = lssc_m1 / 8; /* NOT YET USED */
471 cl->cl_bw_current = 0;
479 cl->cl_qflags = 0;
482 cl->cl_qflags |= BLUEF_ECN;
484 cl->cl_qflags |= SFBF_ECN;
486 cl->cl_qflags |= REDF_ECN;
488 cl->cl_qflags |= RIOF_ECN;
492 cl->cl_qflags |= SFBF_FLOWCTL;
496 cl->cl_qflags |= RIOF_CLEARDSCP;
512 cl->cl_rio =
513 rio_alloc(ifp, 0, NULL, cl->cl_qflags, pkttime);
514 if (cl->cl_rio != NULL)
515 cl->cl_qtype = Q_RIO;
520 cl->cl_red = red_alloc(ifp, 0, 0,
521 cl->cl_qlimit * 10/100,
522 cl->cl_qlimit * 30/100,
523 cl->cl_qflags, pkttime);
524 if (cl->cl_red != NULL)
525 cl->cl_qtype = Q_RED;
531 cl->cl_blue = blue_alloc(ifp, 0, 0, cl->cl_qflags);
532 if (cl->cl_blue != NULL)
533 cl->cl_qtype = Q_BLUE;
537 if (!(cl->cl_flags & FARF_LAZY))
538 cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
539 cl->cl_qlimit, cl->cl_qflags);
540 if (cl->cl_sfb != NULL || (cl->cl_flags & FARF_LAZY))
541 cl->cl_qtype = Q_SFB;
548 cl->cl_handle, cl->cl_pri, cl->cl_qlimit, flags, FARF_BITS);
551 return (cl);
554 if (cl->cl_buckets != NULL)
555 _FREE(cl->cl_buckets, M_DEVBUF);
557 if (cl != NULL) {
558 if (cl->cl_qalg.ptr != NULL) {
560 if (cl->cl_qtype == Q_RIO)
561 rio_destroy(cl->cl_rio);
564 if (cl->cl_qtype == Q_RED)
565 red_destroy(cl->cl_red);
568 if (cl->cl_qtype == Q_BLUE)
569 blue_destroy(cl->cl_blue);
571 if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
572 sfb_destroy(cl->cl_sfb);
573 cl->cl_qalg.ptr = NULL;
574 cl->cl_qtype = Q_DROPTAIL;
575 cl->cl_qstate = QS_RUNNING;
577 zfree(fairq_cl_zone, cl);
585 struct fairq_class *cl;
589 if ((cl = fairq_clh_to_clp(fif, qid)) == NULL)
592 return (fairq_class_destroy(fif, cl));
596 fairq_class_destroy(struct fairq_if *fif, struct fairq_class *cl)
603 if (cl->cl_head)
604 fairq_purgeq(fif, cl, 0, NULL, NULL);
606 fif->fif_classes[cl->cl_pri] = NULL;
607 if (fif->fif_poll_cache == cl)
609 if (fif->fif_maxpri == cl->cl_pri) {
610 for (pri = cl->cl_pri; pri >= 0; pri--)
619 if (cl->cl_qalg.ptr != NULL) {
621 if (cl->cl_qtype == Q_RIO)
622 rio_destroy(cl->cl_rio);
625 if (cl->cl_qtype == Q_RED)
626 red_destroy(cl->cl_red);
629 if (cl->cl_qtype == Q_BLUE)
630 blue_destroy(cl->cl_blue);
632 if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
633 sfb_destroy(cl->cl_sfb);
634 cl->cl_qalg.ptr = NULL;
635 cl->cl_qtype = Q_DROPTAIL;
636 cl->cl_qstate = QS_RUNNING;
639 if (fif->fif_default == cl)
645 cl->cl_handle, cl->cl_pri);
648 _FREE(cl->cl_buckets, M_DEVBUF);
649 cl->cl_head = NULL; /* sanity */
650 cl->cl_polled = NULL; /* sanity */
651 cl->cl_buckets = NULL; /* sanity */
653 zfree(fairq_cl_zone, cl);
659 fairq_enqueue(struct fairq_if *fif, struct fairq_class *cl, struct mbuf *m,
666 VERIFY(cl == NULL || cl->cl_fif == fif);
668 if (cl == NULL) {
670 cl = fairq_clh_to_clp(fif, t->pftag_qid);
672 cl = fairq_clh_to_clp(fif, 0);
674 if (cl == NULL) {
675 cl = fif->fif_default;
676 if (cl == NULL) {
684 cl->cl_flags |= FARF_HAS_PACKETS;
687 ret = fairq_addq(cl, m, t);
698 PKTCNTR_ADD(&cl->cl_dropcnt, 1, len);
727 struct fairq_class *cl;
759 if ((cl = fif->fif_classes[pri]) == NULL)
761 if ((cl->cl_flags & FARF_HAS_PACKETS) == 0)
763 m = fairq_pollq(cl, cur_time, &hit_limit);
765 cl->cl_flags &= ~FARF_HAS_PACKETS;
774 best_cl = cl;
785 scale = cl->cl_bw_current * 100 / cl->cl_bandwidth;
787 best_cl = cl;
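
The fairq_dequeue lines above (759-787) show the scheduler scanning classes by priority, skipping classes with no packets and, judging from lines 785-787, preferring among rate-limited classes the one whose measured rate is the smallest percentage of its configured rate (scale = cl->cl_bw_current * 100 / cl->cl_bandwidth). A minimal sketch of that ratio-based selection rule; the struct, function, and variable names below are illustrative, not the kernel's:

/* Hedged sketch of "pick the backlogged class using the smallest share of
 * its allocation", per the scale computation in the lines above.
 * All names here are illustrative. */
#include <stdint.h>
#include <stdio.h>

struct toy_class {
	uint64_t bw_current;   /* measured bytes/sec */
	uint64_t bandwidth;    /* configured bytes/sec */
	int      has_packets;
};

static int
pick_class(const struct toy_class *cls, int ncls)
{
	int best = -1;
	uint64_t best_scale = UINT64_MAX;

	for (int i = 0; i < ncls; i++) {
		if (!cls[i].has_packets || cls[i].bandwidth == 0)
			continue;
		/* percentage of the configured rate currently in use */
		uint64_t scale = cls[i].bw_current * 100 / cls[i].bandwidth;
		if (scale < best_scale) {
			best_scale = scale;
			best = i;
		}
	}
	return (best);
}

int
main(void)
{
	struct toy_class cls[2] = {
		{ .bw_current = 900, .bandwidth = 1000, .has_packets = 1 }, /* 90% */
		{ .bw_current = 300, .bandwidth = 1000, .has_packets = 1 }, /* 30% */
	};
	printf("dequeue from class %d\n", pick_class(cls, 2)); /* prints 1 */
	return (0);
}
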
812 fairq_addq(struct fairq_class *cl, struct mbuf *m, struct pf_mtag *t)
814 struct ifclassq *ifq = cl->cl_fif->fif_ifq;
827 if (cl->cl_head)
828 b = cl->cl_head->prev;
830 b = &cl->cl_buckets[0];
832 hindex = (hash & cl->cl_nbucket_mask);
833 b = &cl->cl_buckets[hindex];
845 if (cl->cl_head == NULL) {
846 cl->cl_head = b;
850 b->next = cl->cl_head;
851 b->prev = cl->cl_head->prev;
855 if (b->bw_delta && cl->cl_hogs_m1) {
857 if (bw < cl->cl_hogs_m1)
858 cl->cl_head = b;
864 if (cl->cl_qtype == Q_RIO)
865 return (rio_addq(cl->cl_rio, &b->queue, m, t));
869 if (cl->cl_qtype == Q_RED)
870 return (red_addq(cl->cl_red, &b->queue, m, t));
874 if (cl->cl_qtype == Q_BLUE)
875 return (blue_addq(cl->cl_blue, &b->queue, m, t));
878 if (cl->cl_qtype == Q_SFB) {
879 if (cl->cl_sfb == NULL) {
880 struct ifnet *ifp = FAIRQIF_IFP(cl->cl_fif);
882 VERIFY(cl->cl_flags & FARF_LAZY);
885 cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
886 cl->cl_qlimit, cl->cl_qflags);
887 if (cl->cl_sfb == NULL) {
889 cl->cl_qtype = Q_DROPTAIL;
890 cl->cl_flags &= ~FARF_SFB;
891 cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL);
896 fairq_style(cl->cl_fif), cl->cl_handle,
897 cl->cl_pri);
900 if (cl->cl_sfb != NULL)
901 return (sfb_addq(cl->cl_sfb, &b->queue, m, t));
909 if (cl->cl_flags & FARF_CLEARDSCP)
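
In fairq_addq (lines 827-833 above), a packet that carries a flow hash is steered to a per-flow bucket by masking the hash with cl->cl_nbucket_mask, which line 439 sets to nbuckets - 1; the mask works because the bucket count is a power of two. A minimal sketch of that indexing, with illustrative names:

/* Sketch of bucket selection by masking a flow hash, as in
 * hindex = (hash & cl->cl_nbucket_mask); names are illustrative. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t
bucket_index(uint32_t flow_hash, uint32_t nbuckets)
{
	/* valid only when nbuckets is a power of two */
	assert(nbuckets != 0 && (nbuckets & (nbuckets - 1)) == 0);
	return (flow_hash & (nbuckets - 1));
}

int
main(void)
{
	/* with 8 buckets, the mask keeps the low 3 bits of the hash */
	printf("%u\n", (unsigned)bucket_index(0x12345678, 8));   /* 0 */
	printf("%u\n", (unsigned)bucket_index(0xdeadbeef, 8));   /* 7 */
	return (0);
}
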
919 fairq_getq(struct fairq_class *cl, u_int64_t cur_time)
924 IFCQ_LOCK_ASSERT_HELD(cl->cl_fif->fif_ifq);
926 b = fairq_selectq(cl, 0);
930 else if (cl->cl_qtype == Q_RIO)
931 m = rio_getq(cl->cl_rio, &b->queue);
934 else if (cl->cl_qtype == Q_RED)
935 m = red_getq(cl->cl_red, &b->queue);
938 else if (cl->cl_qtype == Q_BLUE)
939 m = blue_getq(cl->cl_blue, &b->queue);
941 else if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
942 m = sfb_getq(cl->cl_sfb, &b->queue);
955 delta = (cur_time - cl->cl_last_time);
958 cl->cl_bw_delta += delta;
959 cl->cl_bw_bytes += m->m_pkthdr.len;
960 cl->cl_last_time = cur_time;
961 if (cl->cl_bw_delta > machclk_freq) {
962 cl->cl_bw_delta -= cl->cl_bw_delta >> 2;
963 cl->cl_bw_bytes -= cl->cl_bw_bytes >> 2;
989 fairq_pollq(struct fairq_class *cl, u_int64_t cur_time, int *hit_limit)
996 IFCQ_LOCK_ASSERT_HELD(cl->cl_fif->fif_ifq);
999 b = fairq_selectq(cl, 1);
1010 delta = cur_time - cl->cl_last_time;
1013 cl->cl_bw_delta += delta;
1014 cl->cl_last_time = cur_time;
1015 if (cl->cl_bw_delta) {
1016 bw = cl->cl_bw_bytes * machclk_freq / cl->cl_bw_delta;
1018 if (bw > cl->cl_bandwidth)
1020 cl->cl_bw_current = bw;
1023 bw, cl->cl_bandwidth, *hit_limit,
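
Lines 955-963 (fairq_getq) and 1010-1020 (fairq_pollq) above track each class's current rate: dequeues accumulate bytes and machclk ticks, both counters are decayed by a quarter (x -= x >> 2) once more than one second of ticks has built up, and the poll path reports bw = cl_bw_bytes * machclk_freq / cl_bw_delta, flagging hit_limit when that exceeds cl_bandwidth. A small worked sketch of that arithmetic; the 1 GHz tick rate and all names are assumptions for illustration only:

/* Hedged sketch of the decaying rate estimate implied by the lines above:
 * accumulate bytes and ticks, decay both by 1/4 past one second of ticks,
 * and report bytes * machclk_freq / ticks as the current rate. */
#include <stdint.h>
#include <stdio.h>

#define MACHCLK_FREQ 1000000000ULL   /* assumed 1 GHz tick rate for the example */

struct rate_est {
	uint64_t bw_bytes;   /* bytes seen in the window */
	uint64_t bw_delta;   /* ticks covered by the window */
};

static void
rate_update(struct rate_est *r, uint64_t ticks, uint64_t len)
{
	r->bw_delta += ticks;
	r->bw_bytes += len;
	if (r->bw_delta > MACHCLK_FREQ) {
		/* shrink both counters by 25% so old traffic ages out */
		r->bw_delta -= r->bw_delta >> 2;
		r->bw_bytes -= r->bw_bytes >> 2;
	}
}

static uint64_t
rate_bytes_per_sec(const struct rate_est *r)
{
	return (r->bw_delta ? r->bw_bytes * MACHCLK_FREQ / r->bw_delta : 0);
}

int
main(void)
{
	struct rate_est r = { 0, 0 };

	/* one 1500-byte packet every millisecond for two seconds */
	for (int i = 0; i < 2000; i++)
		rate_update(&r, MACHCLK_FREQ / 1000, 1500);
	printf("~%llu bytes/sec\n",
	    (unsigned long long)rate_bytes_per_sec(&r)); /* about 1.5 MB/s */
	return (0);
}
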
1035 fairq_selectq(struct fairq_class *cl, int ispoll)
1040 IFCQ_LOCK_ASSERT_HELD(cl->cl_fif->fif_ifq);
1042 if (ispoll == 0 && cl->cl_polled) {
1043 b = cl->cl_polled;
1044 cl->cl_polled = NULL;
1048 while ((b = cl->cl_head) != NULL) {
1054 cl->cl_head = b->next;
1055 if (cl->cl_head == b) {
1056 cl->cl_head = NULL;
1068 if (cl->cl_hogs_m1 == 0) {
1069 cl->cl_head = b->next;
1072 if (bw >= cl->cl_hogs_m1) {
1073 cl->cl_head = b->next;
1086 cl->cl_polled = b;
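
The fairq_selectq fragments above (1042-1086) rotate cl_head around the class's circular bucket list, dropping buckets that have drained and rotating past buckets whose measured rate is at or above cl_hogs_m1, so that 'hog' flows are served after the well-behaved ones. A rough sketch of that idea using a plain array ring instead of the kernel's doubly linked bucket list; everything here is illustrative:

/* Hedged sketch of round-robin bucket selection with hog demotion.
 * The kernel walks a circular doubly linked list; this uses an array ring. */
#include <stdint.h>
#include <stdio.h>

struct toy_bucket {
	uint64_t bw;        /* measured bytes/sec for this flow bucket */
	int      backlog;   /* packets queued */
};

/* return the index of the next bucket to serve, advancing *head */
static int
select_bucket(const struct toy_bucket *b, int n, int *head, uint64_t hogs_m1)
{
	for (int scanned = 0; scanned < n; scanned++) {
		int i = *head;
		*head = (*head + 1) % n;          /* rotate for round-robin */
		if (!b[i].backlog)
			continue;
		if (hogs_m1 == 0 || b[i].bw < hogs_m1)
			return (i);               /* backlogged non-hog: serve it */
	}
	/* only hogs (or nothing) are backlogged: fall back to any backlog */
	for (int i = 0; i < n; i++)
		if (b[i].backlog)
			return (i);
	return (-1);
}

int
main(void)
{
	struct toy_bucket b[3] = {
		{ .bw = 5000, .backlog = 1 },   /* hog */
		{ .bw =  100, .backlog = 1 },
		{ .bw =  200, .backlog = 0 },
	};
	int head = 0;

	printf("serve bucket %d\n", select_bucket(b, 3, &head, 1000)); /* 1 */
	return (0);
}
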
1091 fairq_purgeq(struct fairq_if *fif, struct fairq_class *cl, u_int32_t flow,
1103 while ((b = fairq_selectq(cl, 0)) != NULL) {
1110 if (cl->cl_qtype == Q_RIO)
1111 rio_purgeq(cl->cl_rio, &b->queue, flow, &cnt, &len);
1115 if (cl->cl_qtype == Q_RED)
1116 red_purgeq(cl->cl_red, &b->queue, flow, &cnt, &len);
1120 if (cl->cl_qtype == Q_BLUE)
1121 blue_purgeq(cl->cl_blue, &b->queue, flow, &cnt, &len);
1124 if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
1125 sfb_purgeq(cl->cl_sfb, &b->queue, flow, &cnt, &len);
1134 PKTCNTR_ADD(&cl->cl_dropcnt, cnt, len);
1147 cl->cl_handle, cl->cl_pri, qlen, qlen(&b->queue),
1159 fairq_updateq(struct fairq_if *fif, struct fairq_class *cl, cqev_t ev)
1166 cl->cl_handle, cl->cl_pri, ifclassq_ev2str(ev));
1170 if (cl->cl_qtype == Q_RIO)
1171 return (rio_updateq(cl->cl_rio, ev));
1174 if (cl->cl_qtype == Q_RED)
1175 return (red_updateq(cl->cl_red, ev));
1178 if (cl->cl_qtype == Q_BLUE)
1179 return (blue_updateq(cl->cl_blue, ev));
1181 if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
1182 return (sfb_updateq(cl->cl_sfb, ev));
1189 struct fairq_class *cl;
1194 if ((cl = fairq_clh_to_clp(fif, qid)) == NULL)
1197 sp->class_handle = cl->cl_handle;
1198 sp->priority = cl->cl_pri;
1199 sp->qlimit = cl->cl_qlimit;
1200 sp->xmit_cnt = cl->cl_xmitcnt;
1201 sp->drop_cnt = cl->cl_dropcnt;
1202 sp->qtype = cl->cl_qtype;
1203 sp->qstate = cl->cl_qstate;
1206 if (cl->cl_head) {
1207 b = cl->cl_head;
1211 } while (b != cl->cl_head);
1215 if (cl->cl_qtype == Q_RED)
1216 red_getstats(cl->cl_red, &sp->red[0]);
1219 if (cl->cl_qtype == Q_RIO)
1220 rio_getstats(cl->cl_rio, &sp->red[0]);
1223 if (cl->cl_qtype == Q_BLUE)
1224 blue_getstats(cl->cl_blue, &sp->blue);
1226 if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
1227 sfb_getstats(cl->cl_sfb, &sp->sfb);
1236 struct fairq_class *cl;
1242 if ((cl = fif->fif_classes[idx]) != NULL &&
1243 cl->cl_handle == chandle)
1244 return (cl);
1276 ifq->ifcq_disc_slots[i].cl = NULL;