Lines Matching refs:cl — cross-reference hits for the identifier 'cl' (a struct rm_class pointer) in ALTQ's CBQ resource-management code. Each hit is prefixed with its source line number; gaps in the numbering are source lines that contain no 'cl' and therefore did not match.

195 	struct rm_class	*cl;
226 cl = malloc(sizeof(struct rm_class), M_DEVBUF, M_NOWAIT | M_ZERO);
227 if (cl == NULL)
229 CALLOUT_INIT(&cl->callout_);
230 cl->q_ = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
231 if (cl->q_ == NULL) {
232 free(cl, M_DEVBUF);
239 cl->children_ = NULL;
240 cl->parent_ = parent;
241 cl->borrow_ = borrow;
242 cl->leaf_ = 1;
243 cl->ifdat_ = ifd;
244 cl->pri_ = pri;
245 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
246 cl->depth_ = 0;
247 cl->qthresh_ = 0;
248 cl->ns_per_byte_ = nsecPerByte;
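As a quick worked check of the conversion at line 245: RM_NS_PER_SEC is one second expressed in nanoseconds (10^9 in the ALTQ headers), so dividing it by the per-byte transmission time gives the class's share in bytes per second. A standalone illustration; the 10 Mbps link speed is only an example value:

    #include <stdio.h>

    #define RM_NS_PER_SEC   1000000000      /* one second, in nanoseconds */

    int
    main(void)
    {
        /* On a 10 Mbps link a byte takes 8 bits / 10^7 bps = 800 ns. */
        unsigned int nsecPerByte = 800;
        /* Mirrors line 245: allotment_ is the share in bytes/sec. */
        unsigned int allotment = RM_NS_PER_SEC / nsecPerByte;

        printf("allotment = %u bytes/sec\n", allotment);    /* 1250000 */
        return (0);
    }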
250 qlimit(cl->q_) = maxq;
251 qtype(cl->q_) = Q_DROPHEAD;
252 qlen(cl->q_) = 0;
253 cl->flags_ = flags;
256 cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
257 if (cl->minidle_ > 0)
258 cl->minidle_ = 0;
260 cl->minidle_ = minidle;
262 cl->maxidle_ = (maxidle * nsecPerByte) / 8;
263 if (cl->maxidle_ == 0)
264 cl->maxidle_ = 1;
266 cl->avgidle_ = cl->maxidle_;
267 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
268 if (cl->offtime_ == 0)
269 cl->offtime_ = 1;
271 cl->avgidle_ = 0;
272 cl->offtime_ = (offtime * nsecPerByte) / 8;
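The apparent double assignments above (line 256 vs. 260, and lines 266-269 vs. 271-272) are an artifact of this filtered view: in the full file they sit in opposite branches of preprocessor conditionals, and the #if/#else/#endif lines (255, 259, 261, 265, 270, 273, by the numbering gaps) contain no 'cl', so they do not match. A plausible reconstruction of the minidle block, inferred from those gaps rather than quoted verbatim:

    #if 1   /* assumed: minidle is kept in the same scaled units */
            cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
            if (cl->minidle_ > 0)
                    cl->minidle_ = 0;       /* minidle must not be positive */
    #else
            cl->minidle_ = minidle;
    #endif

The maxidle/offtime pair below it follows the same pattern (the scaled branch pre-charges avgidle_ to maxidle_ and right-shifts offtime_ by RM_FILTER_GAIN), and the pattern repeats in rmc_modclass() at lines 378-394.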
274 cl->overlimit = action;
292 cl->red_ = red_alloc(0, 0,
293 qlimit(cl->q_) * 10/100,
294 qlimit(cl->q_) * 30/100,
296 if (cl->red_ != NULL)
297 qtype(cl->q_) = Q_RED;
301 cl->red_ = (red_t *)rio_alloc(0, NULL,
303 if (cl->red_ != NULL)
304 qtype(cl->q_) = Q_RIO;
311 cl->codel_ = codel_alloc(5, 100, 0);
312 if (cl->codel_ != NULL)
313 qtype(cl->q_) = Q_CODEL;
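Lines 292-294 size the RED thresholds directly from the queue limit: the minimum threshold at 10% of qlimit and the maximum at 30%. A standalone check of that arithmetic; the queue limit of 60 packets is only an example value:

    #include <stdio.h>

    int
    main(void)
    {
        int qlim = 60;                  /* example queue limit, packets */
        int th_min = qlim * 10 / 100;   /* RED minimum threshold: 6 */
        int th_max = qlim * 30 / 100;   /* RED maximum threshold: 18 */

        printf("th_min=%d th_max=%d\n", th_min, th_max);
        return (0);
    }

On success each branch retags qtype(cl->q_) (Q_RED, Q_RIO, Q_CODEL) so the enqueue/dequeue helpers at lines 1605-1652 can dispatch on queue type; if the allocator returns NULL the class simply keeps the plain drop-head queue set at line 251.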
324 cl->peer_ = peer;
327 peer->peer_ = cl;
329 ifd->active_[pri] = cl;
330 cl->peer_ = cl;
333 if (cl->parent_) {
334 cl->next_ = parent->children_;
335 parent->children_ = cl;
343 rmc_depth_compute(cl);
350 ifd->alloc_[pri] += cl->allotment_;
355 return (cl);
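Lines 324-335 splice the new class into two structures: a circular peer_ ring linking all classes at the same priority (a lone class points at itself, line 330) and the parent's singly-linked children_ list. A minimal standalone model of those invariants; the node type and link_class() helper are hypothetical, not ALTQ code:

    #include <assert.h>
    #include <stddef.h>

    struct node {
        struct node *peer;      /* circular ring per priority */
        struct node *next;      /* sibling link in parent's list */
        struct node *children;  /* head of child list */
    };

    /* Splice n into the ring next to ring_head (self-link if alone) and
     * push it onto parent's children list, as lines 324-335 do. */
    static void
    link_class(struct node *n, struct node *ring_head, struct node *parent)
    {
        if (ring_head != NULL) {
            n->peer = ring_head->peer;
            ring_head->peer = n;
        } else
            n->peer = n;
        if (parent != NULL) {
            n->next = parent->children;
            parent->children = n;
        }
    }

    int
    main(void)
    {
        struct node root = { NULL, NULL, NULL };
        struct node a = { NULL, NULL, NULL };
        struct node b = { NULL, NULL, NULL };

        link_class(&a, NULL, &root);    /* first class: self-ring */
        assert(a.peer == &a);
        link_class(&b, &a, &root);      /* second class joins the ring */
        assert(a.peer == &b && b.peer == &a);
        assert(root.children == &b && b.next == &a);
        return (0);
    }

(The gap at lines 325-326 suggests the real code walks to a particular point in the ring before splicing; this sketch only preserves the ring invariant itself.)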
359 rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
366 ifd = cl->ifdat_;
367 old_allotment = cl->allotment_;
371 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
372 cl->qthresh_ = 0;
373 cl->ns_per_byte_ = nsecPerByte;
375 qlimit(cl->q_) = maxq;
378 cl->minidle_ = (minidle * nsecPerByte) / 8;
379 if (cl->minidle_ > 0)
380 cl->minidle_ = 0;
382 cl->minidle_ = minidle;
384 cl->maxidle_ = (maxidle * nsecPerByte) / 8;
385 if (cl->maxidle_ == 0)
386 cl->maxidle_ = 1;
388 cl->avgidle_ = cl->maxidle_;
389 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
390 if (cl->offtime_ == 0)
391 cl->offtime_ = 1;
393 cl->avgidle_ = 0;
394 cl->offtime_ = (offtime * nsecPerByte) / 8;
401 ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
422 struct rm_class *cl, *clh;
442 clh = cl = ifd->active_[i];
446 cl->w_allotment_ = 0;
448 cl->w_allotment_ = cl->allotment_ /
450 cl = cl->peer_;
451 } while ((cl != NULL) && (cl != clh));
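The divisor for line 448 sits on a continuation line that contains no 'cl' and is filtered out (line 449); in the ALTQ source it is the per-priority scale factor ifd->M_[i], stated here as an assumption since the line itself is not shown. The effect is that each class's weighted-round-robin quantum is proportional to its allotment. A worked illustration with example numbers:

    #include <stdio.h>

    int
    main(void)
    {
        /* Two classes at one priority with a 2:1 allotment split; M is
         * an example per-priority scale factor shared by both. */
        unsigned int alloc_a = 1000000, alloc_b = 500000;
        unsigned int M = 1000;

        printf("w_a=%u w_b=%u\n", alloc_a / M, alloc_b / M);
        /* prints w_a=1000 w_b=500: class A gets twice the quantum */
        return (0);
    }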
467 * rmc_depth_compute(struct rm_class *cl) - This function computes the
468 * appropriate depth of class 'cl' and its ancestors.
474 rmc_depth_compute(struct rm_class *cl)
476 rm_class_t *t = cl, *p;
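Most of the body of rmc_depth_compute() is filtered out above. Per the comment at lines 467-468 it propagates depth upward: starting at 'cl', each parent is bumped to child depth + 1 whenever the child's depth has caught up with it, and the walk stops as soon as an ancestor is already deep enough. A self-contained sketch of that behavior; the node type is hypothetical and this is a reconstruction of the described logic, not verbatim source:

    struct node {
        struct node *parent;
        int depth;              /* leaves are 0 */
    };

    static void
    depth_compute(struct node *cl)
    {
        struct node *t = cl, *p;

        while (t != NULL) {
            p = t->parent;
            if (p != NULL && t->depth >= p->depth) {
                p->depth = t->depth + 1;
                t = p;          /* keep climbing */
            } else
                t = NULL;       /* ancestors already consistent */
        }
    }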
493 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
500 rmc_depth_recompute(rm_class_t *cl)
505 p = cl;
530 if (cl->depth_ >= 1) {
531 if (cl->children_ == NULL) {
532 cl->depth_ = 0;
533 } else if ((t = cl->children_) != NULL) {
540 rmc_depth_compute(cl);
547 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
555 rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
560 ASSERT(cl->children_ == NULL);
562 if (cl->sleeping_)
563 CALLOUT_STOP(&cl->callout_);
572 rmc_dropall(cl);
578 if (cl->parent_ != NULL) {
579 head = cl->parent_->children_;
582 ASSERT(head == cl);
583 cl->parent_->children_ = NULL;
584 cl->parent_->leaf_ = 1;
586 if (p == cl) {
587 if (cl == head)
588 cl->parent_->children_ = cl->next_;
590 previous->next_ = cl->next_;
591 cl->next_ = NULL;
603 if ((p = ifd->active_[cl->pri_]) != NULL) {
606 * level, then look for class 'cl' in the priority level.
609 while (p->peer_ != cl)
611 p->peer_ = cl->peer_;
613 if (ifd->active_[cl->pri_] == cl)
614 ifd->active_[cl->pri_] = cl->peer_;
616 ASSERT(p == cl);
617 ifd->active_[cl->pri_] = NULL;
625 ifd->alloc_[cl->pri_] -= cl->allotment_;
626 ifd->num_[cl->pri_]--;
634 rmc_depth_recompute(cl->parent_);
645 if (cl->red_ != NULL) {
647 if (q_is_rio(cl->q_))
648 rio_destroy((rio_t *)cl->red_);
651 if (q_is_red(cl->q_))
652 red_destroy(cl->red_);
655 if (q_is_codel(cl->q_))
656 codel_destroy(cl->codel_);
659 free(cl->q_, M_DEVBUF);
660 free(cl, M_DEVBUF);
748 * rmc_queue_packet(struct rm_class *cl, mbuf_t *m) - Add packet given by
749 * mbuf 'm' to queue for resource class 'cl'. This routine is called
758 rmc_queue_packet(struct rm_class *cl, mbuf_t *m)
761 struct rm_ifdat *ifd = cl->ifdat_;
762 int cpri = cl->pri_;
763 int is_empty = qempty(cl->q_);
767 if (TV_LT(&cl->undertime_, &now)) {
768 if (ifd->cutoff_ > cl->depth_)
769 ifd->cutoff_ = cl->depth_;
770 CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
779 struct rm_class *borrow = cl->borrow_;
792 else if ((ifd->cutoff_ > 1) && cl->borrow_) {
793 if (TV_LT(&cl->borrow_->undertime_, &now)) {
794 ifd->cutoff_ = cl->borrow_->depth_;
796 cl->borrow_->depth_);
802 if (_rmc_addq(cl, m) < 0)
807 CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
811 if (qlen(cl->q_) > qlimit(cl->q_)) {
813 rmc_drop_action(cl);
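Pulling the hits from lines 758-813 together: the enqueue path adjusts the interface-wide cutoff_ depth (tightening it when the class itself is under limit, lines 767-769, or opening it to the lender's depth when only the borrow parent is under limit, lines 792-794), hands the packet to the queue-type-specific _rmc_addq(), and finally applies the tail drop policy. A condensed control-flow sketch; under_limit_now() is hypothetical shorthand for the TV_LT tests against undertime_:

    /* sketch only -- not verbatim rmc_queue_packet() */
    if (under_limit_now(cl)) {
            if (ifd->cutoff_ > cl->depth_)
                    ifd->cutoff_ = cl->depth_;      /* lines 767-769 */
    } else if (ifd->cutoff_ > 1 && cl->borrow_ != NULL &&
        under_limit_now(cl->borrow_)) {
            ifd->cutoff_ = cl->borrow_->depth_;     /* lines 792-794 */
    }
    if (_rmc_addq(cl, m) < 0)
            return (-1);            /* RED/RIO/CoDel refused the packet */
    if (qlen(cl->q_) > qlimit(cl->q_))
            rmc_drop_action(cl);    /* over the hard limit: lines 811-813 */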
852 rmc_satisfied(struct rm_class *cl, struct timeval *now)
856 if (cl == NULL)
858 if (TV_LT(now, &cl->undertime_))
860 if (cl->depth_ == 0) {
861 if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
866 if (cl->children_ != NULL) {
867 p = cl->children_;
879 * Return 1 if class 'cl' is under limit or can borrow from a parent,
885 rmc_under_limit(struct rm_class *cl, struct timeval *now)
887 rm_class_t *p = cl;
889 struct rm_ifdat *ifd = cl->ifdat_;
893 * If cl is the root class, then always return that it is
896 if (cl->parent_ == NULL)
899 if (cl->sleeping_) {
900 if (TV_LT(now, &cl->undertime_))
903 CALLOUT_STOP(&cl->callout_);
904 cl->sleeping_ = 0;
905 cl->undertime_.tv_sec = 0;
910 while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
911 if (((cl = cl->borrow_) == NULL) ||
912 (cl->depth_ > ifd->cutoff_)) {
914 if (cl != NULL)
926 if (cl != NULL) {
928 top = cl;
941 top = cl;
944 if (cl != p)
945 ifd->borrowed_[ifd->qi_] = cl;
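The loop at lines 910-914 is the core of link-sharing: an over-limit class may still send if some ancestor on its borrow_ chain is under limit and no deeper than the interface cutoff_. A standalone sketch of that walk; the node type and field names are hypothetical stand-ins for the rm_class fields visible above:

    struct node {
        struct node *borrow;    /* lending ancestor, NULL at the root */
        int depth;
        int over_limit;         /* stands in for the undertime_ test */
    };

    /* Return the class whose limit the packet is charged against,
     * or NULL if neither cl nor any usable ancestor may send. */
    static struct node *
    find_lender(struct node *cl, int cutoff)
    {
        while (cl != NULL && cl->over_limit) {
            cl = cl->borrow;
            if (cl != NULL && cl->depth > cutoff)
                return (NULL);  /* lender lies past the cutoff */
        }
        return (cl);
    }

When the lender differs from the original class, lines 944-945 record it in ifd->borrowed_[] so rmc_update_class_util() can later credit the borrow statistics (lines 1375-1376).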
969 struct rm_class *cl = NULL, *first = NULL;
982 cl = ifd->pollcache_;
983 cpri = cl->pri_;
986 if (cl->undertime_.tv_sec != 0 &&
987 rmc_under_limit(cl, &now) == 0)
988 first = cl;
1011 * "M[cl->pri_])" times "cl->allotment" is greater than
1015 cl = ifd->active_[cpri];
1016 ASSERT(cl != NULL);
1018 if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
1019 cl->bytes_alloc_ += cl->w_allotment_;
1020 if (!qempty(cl->q_)) {
1021 if ((cl->undertime_.tv_sec == 0) ||
1022 rmc_under_limit(cl, &now)) {
1023 if (cl->bytes_alloc_ > 0 || deficit > 1)
1032 else if (first == NULL && cl->borrow_ != NULL)
1033 first = cl; /* borrowing candidate */
1036 cl->bytes_alloc_ = 0;
1037 cl = cl->peer_;
1038 } while (cl != ifd->active_[cpri]);
1070 cl = first;
1071 cpri = cl->pri_;
1073 if (cl->sleeping_)
1074 CALLOUT_STOP(&cl->callout_);
1075 cl->sleeping_ = 0;
1076 cl->undertime_.tv_sec = 0;
1078 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1079 ifd->cutoff_ = cl->borrow_->depth_;
1086 m = _rmc_getq(cl);
1089 if (qempty(cl->q_))
1095 if (cl->bytes_alloc_ > 0)
1096 cl->bytes_alloc_ -= m_pktlen(m);
1098 if ((cl->bytes_alloc_ <= 0) || first == cl)
1099 ifd->active_[cl->pri_] = cl->peer_;
1101 ifd->active_[cl->pri_] = cl;
1103 ifd->class_[ifd->qi_] = cl;
1110 m = _rmc_pollq(cl);
1111 ifd->pollcache_ = cl;
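Lines 1018-1019 and 1095-1099 implement the weighted-round-robin deficit: a backlogged class whose balance is exhausted is topped up by w_allotment_ at the start of a round, every transmitted packet is charged against bytes_alloc_, and a class whose balance goes non-positive yields the slot to its peer_. A worked example with a 1500-byte quantum (example values only):

    #include <stdio.h>

    int
    main(void)
    {
        int bytes_alloc = 0;
        const int w_allotment = 1500;   /* example quantum */
        const int pkt[] = { 1000, 1000 };

        bytes_alloc += w_allotment;     /* replenish, as at line 1019 */
        for (int i = 0; i < 2; i++) {
            bytes_alloc -= pkt[i];      /* charge, as at lines 1095-1096 */
            printf("after %d-byte pkt: %d\n", pkt[i], bytes_alloc);
        }
        /* balance is now -500: the class yields to peer_ and carries
         * the deficit into the next round (lines 1098-1099) */
        return (0);
    }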
1127 struct rm_class *cl, *first = NULL;
1137 cl = ifd->pollcache_;
1138 cpri = cl->pri_;
1152 cl = ifd->active_[cpri];
1153 ASSERT(cl != NULL);
1155 if (!qempty(cl->q_)) {
1156 if ((cl->undertime_.tv_sec == 0) ||
1157 rmc_under_limit(cl, &now))
1159 if (first == NULL && cl->borrow_ != NULL)
1160 first = cl;
1162 cl = cl->peer_;
1163 } while (cl != ifd->active_[cpri]);
1185 cl = first;
1186 cpri = cl->pri_;
1188 if (cl->sleeping_)
1189 CALLOUT_STOP(&cl->callout_);
1190 cl->sleeping_ = 0;
1191 cl->undertime_.tv_sec = 0;
1193 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1194 ifd->cutoff_ = cl->borrow_->depth_;
1201 m = _rmc_getq(cl);
1204 if (qempty(cl->q_))
1207 ifd->active_[cpri] = cl->peer_;
1209 ifd->class_[ifd->qi_] = cl;
1216 m = _rmc_pollq(cl);
1217 ifd->pollcache_ = cl;
1265 rm_class_t *cl, *borrowed;
1272 if ((cl = ifd->class_[ifd->qo_]) == NULL)
1279 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1326 while (cl != NULL) {
1327 TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
1333 cl->avgidle_ = cl->maxidle_;
1337 pkt_time = pktlen * cl->ns_per_byte_;
1340 pkt_time = pktlen * cl->ns_per_byte_ / 1000;
1344 avgidle = cl->avgidle_;
1346 cl->avgidle_ = avgidle;
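The line between hits 1344 and 1346 (source line 1345) contains no 'cl' and so is invisible here; it is the exponentially weighted moving-average update itself. In the published CBQ kernel code it has the form below, with avgidle kept pre-scaled by 2^RM_FILTER_GAIN; this is stated as an assumption, since the line is not shown:

    /* idle already has the packet's own transmission time subtracted;
     * the shift implements the 1/2^RM_FILTER_GAIN smoothing weight */
    avgidle += idle - (avgidle >> RM_FILTER_GAIN);

A positive result means the class has been idler than its target and stays under limit (lines 1365-1367 cap it at maxidle_ and clear undertime_); a negative result means it has been sending too fast, and lines 1356-1363 clamp it at minidle_ and project the undertime_ at which it may next send.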
1350 CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
1356 if (avgidle < cl->minidle_)
1357 avgidle = cl->avgidle_ = cl->minidle_;
1362 TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
1363 ++cl->stats_.over;
1365 cl->avgidle_ =
1366 (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
1367 cl->undertime_.tv_sec = 0;
1368 if (cl->sleeping_) {
1369 CALLOUT_STOP(&cl->callout_);
1370 cl->sleeping_ = 0;
1375 if (borrows != cl)
1376 ++cl->stats_.borrows;
1380 cl->last_ = ifd->ifnow_;
1381 cl->last_pkttime_ = pkt_time;
1384 if (cl->parent_ == NULL) {
1386 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1390 cl = cl->parent_;
1396 cl = ifd->class_[ifd->qo_];
1399 if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
1407 if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
1431 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
1440 rmc_drop_action(struct rm_class *cl)
1442 struct rm_ifdat *ifd = cl->ifdat_;
1444 ASSERT(qlen(cl->q_) > 0);
1445 _rmc_dropq(cl);
1446 if (qempty(cl->q_))
1447 ifd->na_[cl->pri_]--;
1450 void rmc_dropall(struct rm_class *cl)
1452 struct rm_ifdat *ifd = cl->ifdat_;
1454 if (!qempty(cl->q_)) {
1455 _flushq(cl->q_);
1457 ifd->na_[cl->pri_]--;
1480 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
1491 rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
1495 cl->stats_.overactions++;
1496 TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
1498 delay += cl->offtime_;
1501 if (!cl->sleeping_) {
1502 CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
1508 extradelay = cl->offtime_;
1519 extradelay -= cl->last_pkttime_;
1522 TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
1526 cl->sleeping_ = 1;
1527 cl->stats_.delays++;
1538 t = hzto(&cl->undertime_);
1541 CALLOUT_RESET(&cl->callout_, t,
1542 (timeout_t *)rmc_restart, (caddr_t)cl);
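Lines 1491-1542 implement the overlimit suspend: the gap between overtime_ and undertime_, padded by offtime_ (less the last packet's transmission time when the class is already being paced, lines 1508-1519), determines how long the class sleeps; hzto() converts that absolute wakeup time into a tick count and the callout re-arms rmc_restart(). A worked tick conversion; the HZ of 1000 is only an example, and the rounding mimics hzto()'s round-up-to-whole-ticks behavior rather than quoting it:

    #include <stdio.h>

    int
    main(void)
    {
        const long hz = 1000;       /* example kernel tick rate */
        long delay_us = 2500;       /* computed sleep, microseconds */
        /* round up so the class never wakes before undertime_ */
        long ticks = (delay_us * hz + 999999) / 1000000;

        printf("sleep %ld ticks\n", ticks);     /* prints 3 */
        return (0);
    }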
1564 rmc_restart(struct rm_class *cl)
1566 struct rm_ifdat *ifd = cl->ifdat_;
1571 if (cl->sleeping_) {
1572 cl->sleeping_ = 0;
1573 cl->undertime_.tv_sec = 0;
1576 CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
1586 * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
1593 rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
1605 _rmc_addq(rm_class_t *cl, mbuf_t *m)
1608 if (q_is_rio(cl->q_))
1609 return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
1612 if (q_is_red(cl->q_))
1613 return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
1616 if (q_is_codel(cl->q_))
1617 return codel_addq(cl->codel_, cl->q_, m);
1620 if (cl->flags_ & RMCF_CLEARDSCP)
1621 write_dsfield(m, cl->pktattr_, 0);
1623 _addq(cl->q_, m);
1629 _rmc_dropq(rm_class_t *cl)
1633 if ((m = _getq(cl->q_)) != NULL)
1638 _rmc_getq(rm_class_t *cl)
1641 if (q_is_rio(cl->q_))
1642 return rio_getq((rio_t *)cl->red_, cl->q_);
1645 if (q_is_red(cl->q_))
1646 return red_getq(cl->red_, cl->q_);
1649 if (q_is_codel(cl->q_))
1650 return codel_getq(cl->codel_, cl->q_);
1652 return _getq(cl->q_);
1656 _rmc_pollq(rm_class_t *cl)
1658 return qhead(cl->q_);
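Finally, the pollcache_ hits (lines 982-988, 1110-1111, 1137-1138, 1216-1217) form a peek-then-dequeue protocol: a poll chooses a class, peeks at its head packet with the qhead() accessor above, and caches the class so the dequeue that follows can commit to that choice instead of rescanning the priority rings. A minimal model of the pattern; all names here are hypothetical:

    struct pkt;                     /* opaque packet */
    struct class { struct pkt *head; };

    static struct class *pollcache;

    /* peek: remember the chosen class, leave the packet queued */
    static struct pkt *
    poll_next(struct class *chosen)
    {
        pollcache = chosen;
        return (chosen->head);
    }

    /* dequeue: if a poll just ran, commit to its cached choice */
    static struct class *
    dequeue_next(void)
    {
        struct class *cl = pollcache;

        pollcache = NULL;           /* the cache is single-shot */
        return (cl);                /* NULL means do the full scan */
    }

Note that even on the cached path, lines 986-988 re-run rmc_under_limit() on the chosen class, since its limit status may have changed between the poll and the dequeue.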