Lines matching refs: cl (the struct htb_class pointer used throughout net/sched/sch_htb.c)

223 	struct htb_class *cl;
234 cl = htb_find(skb->priority, sch);
235 if (cl) {
236 if (cl->level == 0)
237 return cl;
239 tcf = rcu_dereference_bh(cl->filter_list);
257 cl = (void *)res.class;
258 if (!cl) {
261 cl = htb_find(res.classid, sch);
262 if (!cl)
265 if (!cl->level)
266 return cl; /* we hit leaf; return it */
269 tcf = rcu_dereference_bh(cl->filter_list);
272 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
273 if (!cl || cl->level)
275 return cl;
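
The block above (source lines 223-275) is the classification path, htb_classify(): skb->priority is tried as a classid first, then the attached tc filters are walked (possibly hopping through inner classes and re-running on their filter lists), and finally the default class q->defcls is used. Only a leaf (level 0) is ever returned; anything else means the packet goes to the direct queue. Below is a minimal userspace sketch of that fallback order; find_class(), classify() and the little class table are hypothetical stand-ins for htb_find() and the class hash, and the filter chain is omitted.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Minimal stand-in for struct htb_class: just a classid and a level. */
    struct cls { uint32_t classid; int level; };

    /* Tiny hypothetical class table in place of htb_find()'s hash lookup. */
    static struct cls table[] = {
        { 0x10001, 1 },  /* inner class 1:1 */
        { 0x10010, 0 },  /* leaf 1:10 */
        { 0x10020, 0 },  /* leaf 1:20, used as the default class below */
    };

    static struct cls *find_class(uint32_t id)
    {
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (table[i].classid == id)
                return &table[i];
        return NULL;
    }

    /* Sketch of htb_classify()'s fallback order: skb->priority as a classid,
     * then the tc filter chain (omitted here), then the default class.
     * Only a leaf (level == 0) is usable; NULL means "direct queue". */
    static struct cls *classify(uint32_t skb_priority, uint32_t defcls)
    {
        struct cls *cl = find_class(skb_priority);

        if (cl && cl->level == 0)
            return cl;
        /* ... the tcf_classify() loop would run here ... */
        cl = find_class(defcls);
        return (cl && cl->level == 0) ? cl : NULL;
    }

    int main(void)
    {
        struct cls *cl = classify(0x10001 /* names an inner class */, 0x10020);

        printf("classified to %x\n", cl ? (unsigned)cl->classid : 0u);
        return 0;
    }
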
281 * @cl: the class to add
288 struct htb_class *cl, int prio)
297 if (cl->common.classid > c->common.classid)
302 rb_link_node(&cl->node[prio], parent, p);
303 rb_insert_color(&cl->node[prio], root);
309 * @cl: the class to add
313 * change its mode in cl->pq_key microseconds. Make sure that class is not
317 struct htb_class *cl, s64 delay)
319 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
321 cl->pq_key = q->now + delay;
322 if (cl->pq_key == q->now)
323 cl->pq_key++;
326 if (q->near_ev_cache[cl->level] > cl->pq_key)
327 q->near_ev_cache[cl->level] = cl->pq_key;
333 if (cl->pq_key >= c->pq_key)
338 rb_link_node(&cl->pq_node, parent, p);
339 rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
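
Source lines 309-339 are htb_add_to_wait_tree(), which queues a throttled class on a per-level rbtree keyed by cl->pq_key. A small sketch of just the key computation follows; wait_key() is a hypothetical name, and it assumes the clock units of q->now (nanoseconds in current kernels) plus a caller-supplied per-level nearest-event cache in place of q->near_ev_cache[].

    #include <stdint.h>

    /* Sketch of the key computation in htb_add_to_wait_tree(): a class that
     * ran out of tokens is queued to change mode at now + delay.  A zero
     * delay is nudged forward so the event is strictly in the future, and
     * the cache of the nearest pending event on this level is updated. */
    static int64_t wait_key(int64_t now, int64_t delay, int64_t *near_ev_cache)
    {
        int64_t key = now + delay;

        if (key == now)
            key++;                   /* never schedule "right now" */
        if (*near_ev_cache > key)
            *near_ev_cache = key;    /* earliest event on this level */
        return key;
    }
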
357 * @cl: the class to add
364 struct htb_class *cl, int mask)
366 q->row_mask[cl->level] |= mask;
370 htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
389 * @cl: the class to remove
396 struct htb_class *cl, int mask)
399 struct htb_level *hlevel = &q->hlevel[cl->level];
406 if (hprio->ptr == cl->node + prio)
409 htb_safe_rb_erase(cl->node + prio, &hprio->row);
413 q->row_mask[cl->level] &= ~m;
419 * @cl: the class to activate
422 * for priorities it is participating on. cl->cmode must be new
423 * (activated) mode. It does nothing if cl->prio_activity == 0.
425 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
427 struct htb_class *p = cl->parent;
428 long m, mask = cl->prio_activity;
430 while (cl->cmode == HTB_MAY_BORROW && p && mask) {
445 htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
448 cl = p;
449 p = cl->parent;
452 if (cl->cmode == HTB_CAN_SEND && mask)
453 htb_add_class_to_row(q, cl, mask);
459 * @cl: the class to deactivate
461 * cl->cmode must represent old mode (before deactivation). It does
462 * nothing if cl->prio_activity == 0. Class is removed from all feed
465 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
467 struct htb_class *p = cl->parent;
468 long m, mask = cl->prio_activity;
470 while (cl->cmode == HTB_MAY_BORROW && p && mask) {
477 if (p->inner.clprio[prio].ptr == cl->node + prio) {
482 p->inner.clprio[prio].last_ptr_id = cl->common.classid;
486 htb_safe_rb_erase(cl->node + prio,
494 cl = p;
495 p = cl->parent;
498 if (cl->cmode == HTB_CAN_SEND && mask)
499 htb_remove_class_from_row(q, cl, mask);
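
Source lines 419-499 are htb_activate_prios() and htb_deactivate_prios(), which propagate a class's active-priority bitmap up through its ancestors' feed trees. Below is a compile-only sketch of the activation walk under stated assumptions: struct node and activate_prios() are pared-down stand-ins, __builtin_ctzl() replaces ffz(~m), a plain flag array stands in for the inner.clprio[prio].feed rbtrees, and the array size 8 assumes TC_HTB_NUMPRIO.

    #include <stdint.h>

    enum cmode { CAN_SEND, MAY_BORROW, CANT_SEND };

    struct node {
        struct node *parent;
        enum cmode cmode;
        unsigned long prio_activity;  /* one bit per active priority */
        int feed_nonempty[8];         /* stand-in for inner.clprio[prio].feed */
    };

    /* Sketch of the upward walk in htb_activate_prios(): while the class can
     * only borrow, its newly-active priority bits are pushed into each
     * ancestor's feed; a bit stops propagating as soon as it hits a feed
     * that was already non-empty.  Whatever survives up to a CAN_SEND
     * ancestor is OR-ed into that level's row mask (htb_add_class_to_row()
     * in the real code). */
    static void activate_prios(struct node *cl, unsigned long *row_mask)
    {
        unsigned long mask = cl->prio_activity;
        struct node *p = cl->parent;

        while (cl->cmode == MAY_BORROW && p && mask) {
            unsigned long m = mask;

            mask = 0;
            while (m) {
                int prio = __builtin_ctzl(m);   /* replaces ffz(~m) */

                m &= ~(1UL << prio);
                if (!p->feed_nonempty[prio])
                    mask |= 1UL << prio;        /* keep propagating this bit */
                p->feed_nonempty[prio] = 1;     /* cl joins this feed */
            }
            p->prio_activity |= mask;
            cl = p;
            p = cl->parent;
        }
        if (cl->cmode == CAN_SEND && mask)
            *row_mask |= mask;                  /* self-feeding class: mark its row */
    }
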
502 static inline s64 htb_lowater(const struct htb_class *cl)
505 return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
509 static inline s64 htb_hiwater(const struct htb_class *cl)
512 return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
520 * @cl: the target class
523 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
524 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
525 * from now to time when cl will change its state.
527 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
528 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
532 htb_class_mode(struct htb_class *cl, s64 *diff)
536 if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
541 if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
551 * @cl: the target class
557 * be different from old one and cl->pq_key has to be valid if changing
561 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
563 enum htb_cmode new_mode = htb_class_mode(cl, diff);
565 if (new_mode == cl->cmode)
569 cl->overlimits++;
573 if (cl->prio_activity) { /* not necessary: speed optimization */
574 if (cl->cmode != HTB_CANT_SEND)
575 htb_deactivate_prios(q, cl);
576 cl->cmode = new_mode;
578 htb_activate_prios(q, cl);
580 cl->cmode = new_mode;
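
Source lines 502-580 compute and apply the class mode. The sketch below restates htb_lowater(), htb_hiwater() and htb_class_mode() as a self-contained userspace program (struct tclass and the function names are stand-ins, and only the hysteresis branch shown in the listing is modelled). The main() call demonstrates a class whose rate bucket is past its burst while its ceil bucket is still positive, which yields the MAY_BORROW mode.

    #include <stdint.h>
    #include <stdio.h>

    enum cmode { CAN_SEND, MAY_BORROW, CANT_SEND };

    struct tclass {
        int64_t tokens, ctokens;   /* rate / ceil credit, in ns */
        int64_t buffer, cbuffer;   /* the corresponding burst sizes */
        enum cmode cmode;
    };

    /* Hysteresis thresholds as in htb_lowater()/htb_hiwater(): the threshold
     * a class must cross depends on the mode it is already in, so the mode
     * does not flip back and forth on every packet. */
    static int64_t lowater(const struct tclass *c)
    {
        return c->cmode != CANT_SEND ? -c->cbuffer : 0;
    }

    static int64_t hiwater(const struct tclass *c)
    {
        return c->cmode == CAN_SEND ? -c->buffer : 0;
    }

    /* Sketch of htb_class_mode(): ceil tokens decide whether the class may
     * send at all; rate tokens decide whether it sends on its own or must
     * borrow.  *diff comes in as the elapsed-time credit and comes back as
     * the shortfall, which the caller turns into cl->pq_key. */
    static enum cmode class_mode(struct tclass *c, int64_t *diff)
    {
        int64_t toks;

        if ((toks = c->ctokens + *diff) < lowater(c)) {
            *diff = -toks;
            return CANT_SEND;
        }
        if ((toks = c->tokens + *diff) >= hiwater(c))
            return CAN_SEND;
        *diff = -toks;
        return MAY_BORROW;
    }

    int main(void)
    {
        struct tclass c = { .tokens = -1500, .ctokens = 2000,
                            .buffer = 1000, .cbuffer = 1000,
                            .cmode = CAN_SEND };
        int64_t diff = 100;

        /* rate bucket past its burst, ceil bucket fine: MAY_BORROW */
        printf("mode = %d, shortfall = %lld\n",
               class_mode(&c, &diff), (long long)diff);
        return 0;
    }
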
584 * htb_activate - inserts leaf cl into appropriate active feeds
586 * @cl: the target class
592 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
594 WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
596 if (!cl->prio_activity) {
597 cl->prio_activity = 1 << cl->prio;
598 htb_activate_prios(q, cl);
603 * htb_deactivate - remove leaf cl from active feeds
605 * @cl: the target class
610 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
612 WARN_ON(!cl->prio_activity);
614 htb_deactivate_prios(q, cl);
615 cl->prio_activity = 0;
624 struct htb_class *cl = htb_classify(skb, sch, &ret);
626 if (cl == HTB_DIRECT) {
635 } else if (!cl) {
641 } else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
645 cl->drops++;
649 htb_activate(q, cl);
657 static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
659 s64 toks = diff + cl->tokens;
661 if (toks > cl->buffer)
662 toks = cl->buffer;
663 toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
664 if (toks <= -cl->mbuffer)
665 toks = 1 - cl->mbuffer;
667 cl->tokens = toks;
670 static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
672 s64 toks = diff + cl->ctokens;
674 if (toks > cl->cbuffer)
675 toks = cl->cbuffer;
676 toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
677 if (toks <= -cl->mbuffer)
678 toks = 1 - cl->mbuffer;
680 cl->ctokens = toks;
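
Source lines 657-680 are the per-bucket accounting helpers htb_accnt_tokens() and htb_accnt_ctokens(). Both follow the same arithmetic, sketched below with hypothetical names; len_to_ns() is a naive stand-in for psched_l2t_ns(), which in the kernel uses a precomputed mult/shift.

    #include <stdint.h>

    /* Hypothetical stand-in for psched_l2t_ns(): the time, in ns, needed to
     * send `bytes` at `rate_bytes_ps` bytes per second. */
    static int64_t len_to_ns(uint64_t rate_bytes_ps, int bytes)
    {
        return (int64_t)((uint64_t)bytes * 1000000000ULL / rate_bytes_ps);
    }

    /* Sketch of htb_accnt_tokens() / htb_accnt_ctokens(): credit the elapsed
     * time, clamp to the burst buffer, charge the packet just sent, and
     * floor the debt at just above -mbuffer so it can never run away. */
    static int64_t account(int64_t tokens, int64_t diff_ns, int bytes,
                           uint64_t rate_bytes_ps, int64_t buffer_ns,
                           int64_t mbuffer_ns)
    {
        int64_t toks = diff_ns + tokens;

        if (toks > buffer_ns)
            toks = buffer_ns;
        toks -= len_to_ns(rate_bytes_ps, bytes);
        if (toks <= -mbuffer_ns)
            toks = 1 - mbuffer_ns;
        return toks;
    }
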
686 * @cl: the class to start iterating from
690 * Routine assumes that packet "bytes" long was dequeued from leaf cl
698 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
705 while (cl) {
706 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
707 if (cl->level >= level) {
708 if (cl->level == level)
709 cl->xstats.lends++;
710 htb_accnt_tokens(cl, bytes, diff);
712 cl->xstats.borrows++;
713 cl->tokens += diff; /* we moved t_c; update tokens */
715 htb_accnt_ctokens(cl, bytes, diff);
716 cl->t_c = q->now;
718 old_mode = cl->cmode;
720 htb_change_class_mode(q, cl, &diff);
721 if (old_mode != cl->cmode) {
723 htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
724 if (cl->cmode != HTB_CAN_SEND)
725 htb_add_to_wait_tree(q, cl, diff);
729 if (cl->level)
730 bstats_update(&cl->bstats, skb);
732 cl = cl->parent;
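
Source lines 698-732 are htb_charge_class(), which charges a dequeued packet to the leaf and every ancestor. The very reduced sketch below (struct hclass, charge() and cost_ns() are hypothetical stand-ins) keeps only the lend-versus-borrow distinction and drops the buffer clamping done by the helpers above as well as the mode change and wait-tree handling of the real function.

    #include <stdint.h>

    struct hclass {
        struct hclass *parent;
        int level;
        int64_t tokens, ctokens;   /* rate / ceil credit, in ns */
        int64_t t_c;               /* timestamp of last update */
        int64_t mbuffer;           /* cap on credited idle time */
    };

    /* Hypothetical per-packet cost, standing in for psched_l2t_ns(). */
    static int64_t cost_ns(int bytes)
    {
        return (int64_t)bytes * 8;
    }

    /* Sketch of htb_charge_class(): walk from the leaf up to the root.
     * Classes at or above the level the packet was actually sent from pay
     * with their own rate tokens (the one exactly at that level counts
     * xstats.lends); classes below it borrowed (xstats.borrows), so only
     * their clock advances.  Ceil tokens are always charged. */
    static void charge(struct hclass *cl, int level, int bytes, int64_t now)
    {
        while (cl) {
            int64_t diff = now - cl->t_c;

            if (diff > cl->mbuffer)
                diff = cl->mbuffer;
            if (cl->level >= level)
                cl->tokens += diff - cost_ns(bytes);
            else
                cl->tokens += diff;             /* borrowed: clock moves only */
            cl->ctokens += diff - cost_ns(bytes);
            cl->t_c = now;
            cl = cl->parent;
        }
    }
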
744 * Note: Applied are events that have cl->pq_key <= q->now.
757 struct htb_class *cl;
764 cl = rb_entry(p, struct htb_class, pq_node);
765 if (cl->pq_key > q->now)
766 return cl->pq_key;
769 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
770 htb_change_class_mode(q, cl, &diff);
771 if (cl->cmode != HTB_CAN_SEND)
772 htb_add_to_wait_tree(q, cl, diff);
792 struct htb_class *cl =
795 if (id > cl->common.classid) {
797 } else if (id < cl->common.classid) {
852 struct htb_class *cl;
855 cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
856 if (!cl->level)
857 return cl;
858 clp = &cl->inner.clprio[prio];
875 struct htb_class *cl, *start;
880 start = cl = htb_lookup_leaf(hprio, prio);
884 if (unlikely(!cl))
892 if (unlikely(cl->leaf.q->q.qlen == 0)) {
894 htb_deactivate(q, cl);
902 if (cl == start) /* fix start if we just deleted it */
904 cl = next;
908 skb = cl->leaf.q->dequeue(cl->leaf.q);
912 qdisc_warn_nonwc("htb", cl->leaf.q);
913 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
915 cl = htb_lookup_leaf(hprio, prio);
917 } while (cl != start);
920 bstats_update(&cl->bstats, skb);
921 cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
922 if (cl->leaf.deficit[level] < 0) {
923 cl->leaf.deficit[level] += cl->quantum;
924 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
930 if (!cl->leaf.q->q.qlen)
931 htb_deactivate(q, cl);
932 htb_charge_class(q, cl, level, skb);
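
Source lines 852-932 are the leaf lookup and dequeue loop; lines 921-924 implement the deficit round robin between classes of equal priority. The quantum/deficit step in isolation looks like the small sketch below (drr_step() is a hypothetical name).

    #include <stdbool.h>

    /* Sketch of the DRR step at the end of the dequeue loop: every dequeued
     * packet is charged to the class's per-level deficit; once the deficit
     * goes negative the class receives another quantum and loses the turn,
     * i.e. the caller advances the round-robin pointer for that priority
     * (htb_next_rb_node() in the real code). */
    static bool drr_step(int *deficit, int quantum, int pkt_len)
    {
        *deficit -= pkt_len;
        if (*deficit < 0) {
            *deficit += quantum;
            return true;    /* move on to the next class */
        }
        return false;       /* the same class keeps the turn */
    }
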
1000 struct htb_class *cl;
1004 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1005 if (cl->level)
1006 memset(&cl->inner, 0, sizeof(cl->inner));
1008 if (cl->leaf.q && !q->offload)
1009 qdisc_reset(cl->leaf.q);
1011 cl->prio_activity = 0;
1012 cl->cmode = HTB_CAN_SEND;
1251 struct htb_class *cl = (struct htb_class *)arg;
1259 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1260 tcm->tcm_handle = cl->common.classid;
1261 if (!cl->level && cl->leaf.q)
1262 tcm->tcm_info = cl->leaf.q->handle;
1270 psched_ratecfg_getrate(&opt.rate, &cl->rate);
1271 opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1272 psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
1273 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1274 opt.quantum = cl->quantum;
1275 opt.prio = cl->prio;
1276 opt.level = cl->level;
1281 if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
1282 nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1285 if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
1286 nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1298 struct htb_class *cl)
1304 gnet_stats_basic_sync_init(&cl->bstats);
1310 while (p && p->level < cl->level)
1313 if (p != cl)
1324 _bstats_update(&cl->bstats, bytes, packets);
1330 struct htb_class *cl = (struct htb_class *)arg;
1333 .drops = cl->drops,
1334 .overlimits = cl->overlimits,
1338 if (!cl->level && cl->leaf.q)
1339 qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
1341 cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1343 cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1347 if (!cl->level) {
1348 if (cl->leaf.q)
1349 cl->bstats = cl->leaf.q->bstats;
1351 gnet_stats_basic_sync_init(&cl->bstats);
1352 _bstats_update(&cl->bstats,
1353 u64_stats_read(&cl->bstats_bias.bytes),
1354 u64_stats_read(&cl->bstats_bias.packets));
1356 htb_offload_aggregate_stats(q, cl);
1360 if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
1361 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1365 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1406 static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl)
1410 queue = cl->leaf.offload_queue;
1411 if (!(cl->leaf.q->flags & TCQ_F_BUILTIN))
1412 WARN_ON(cl->leaf.q->dev_queue != queue);
1453 struct htb_class *cl = (struct htb_class *)arg;
1457 if (cl->level)
1461 dev_queue = htb_offload_get_queue(cl);
1465 cl->common.classid, extack);
1472 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1477 *old = qdisc_replace(sch, new, &cl->leaf.q);
1489 struct htb_class *cl = (struct htb_class *)arg;
1490 return !cl->level ? cl->leaf.q : NULL;
1495 struct htb_class *cl = (struct htb_class *)arg;
1497 htb_deactivate(qdisc_priv(sch), cl);
1500 static inline int htb_parent_last_child(struct htb_class *cl)
1502 if (!cl->parent)
1505 if (cl->parent->children > 1)
1511 static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
1515 struct htb_class *parent = cl->parent;
1517 WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
1531 parent->leaf.offload_queue = cl->leaf.offload_queue;
1540 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1547 static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
1553 struct Qdisc *q = cl->leaf.q;
1557 if (cl->level)
1561 dev_queue = htb_offload_get_queue(cl);
1568 /* Last qdisc grafted should be the same as cl->leaf.q when
1574 if (cl->parent) {
1575 _bstats_update(&cl->parent->bstats_bias,
1584 .classid = cl->common.classid,
1599 if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) {
1604 htb_offload_move_qdisc(sch, moved_cl, cl, destroying);
1610 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1612 if (!cl->level) {
1613 WARN_ON(!cl->leaf.q);
1614 qdisc_put(cl->leaf.q);
1616 gen_kill_estimator(&cl->rate_est);
1617 tcf_block_put(cl->block);
1618 kfree(cl);
1628 struct htb_class *cl;
1641 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1642 tcf_block_put(cl->block);
1643 cl->block = NULL;
1651 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1656 htb_destroy_class(sch, cl);
1662 if (cl->level)
1667 last_child = htb_parent_last_child(cl);
1668 htb_destroy_class_offload(sch, cl, last_child,
1671 &cl->common);
1672 if (cl->parent)
1673 cl->parent->children--;
1675 htb_parent_to_leaf(sch, cl, NULL);
1676 htb_destroy_class(sch, cl);
1703 struct htb_class *cl = (struct htb_class *)arg;
1712 if (cl->children || qdisc_class_in_use(&cl->common)) {
1717 if (!cl->level && htb_parent_last_child(cl))
1721 err = htb_destroy_class_offload(sch, cl, last_child, false,
1731 dev_queue = htb_offload_get_queue(cl);
1734 cl->parent->common.classid,
1745 if (!cl->level)
1746 qdisc_purge_queue(cl->leaf.q);
1749 qdisc_class_hash_remove(&q->clhash, &cl->common);
1750 if (cl->parent)
1751 cl->parent->children--;
1753 if (cl->prio_activity)
1754 htb_deactivate(q, cl);
1756 if (cl->cmode != HTB_CAN_SEND)
1757 htb_safe_rb_erase(&cl->pq_node,
1758 &q->hlevel[cl->level].wait_pq);
1761 htb_parent_to_leaf(sch, cl, new_q);
1765 htb_destroy_class(sch, cl);
1775 struct htb_class *cl = (struct htb_class *)*arg, *parent;
1828 if (!cl) { /* new class */
1858 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1859 if (!cl)
1862 gnet_stats_basic_sync_init(&cl->bstats);
1863 gnet_stats_basic_sync_init(&cl->bstats_bias);
1865 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
1867 kfree(cl);
1871 err = gen_new_estimator(&cl->bstats, NULL,
1872 &cl->rate_est,
1880 cl->children = 0;
1881 RB_CLEAR_NODE(&cl->pq_node);
1884 RB_CLEAR_NODE(&cl->node[prio]);
1886 cl->common.classid = classid;
1903 .classid = cl->common.classid,
1926 .classid = cl->common.classid,
1952 /* One ref for cl->leaf.q, the other for
1980 cl->leaf.q = new_q ? new_q : &noop_qdisc;
1982 cl->leaf.offload_queue = dev_queue;
1984 cl->parent = parent;
1987 cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1988 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1989 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
1990 cl->t_c = ktime_get_ns();
1991 cl->cmode = HTB_CAN_SEND;
1994 qdisc_class_hash_insert(&q->clhash, &cl->common);
1997 if (cl->leaf.q != &noop_qdisc)
1998 qdisc_hash_add(cl->leaf.q, true);
2001 err = gen_replace_estimator(&cl->bstats, NULL,
2002 &cl->rate_est,
2015 .classid = cl->common.classid,
2036 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
2037 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
2040 * is really leaf before changing cl->leaf !
2042 if (!cl->level) {
2043 u64 quantum = cl->rate.rate_bytes_ps;
2046 cl->quantum = min_t(u64, quantum, INT_MAX);
2048 if (!hopt->quantum && cl->quantum < 1000) {
2050 cl->quantum = 1000;
2052 if (!hopt->quantum && cl->quantum > 200000) {
2054 cl->quantum = 200000;
2057 cl->quantum = hopt->quantum;
2058 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
2059 cl->prio = TC_HTB_NUMPRIO - 1;
2062 cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
2063 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
2071 cl->common.classid, (warn == -1 ? "small" : "big"));
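
Source lines 2036-2071 configure a leaf class's rate, ceil and quantum. The division of the rate by the qdisc's r2q (rate2quantum) parameter sits on a line that does not mention cl, so it is not in the listing, but the surrounding clamps are. Under that assumption, the quantum selection boils down to the sketch below (pick_quantum() is a hypothetical name).

    #include <stdint.h>
    #include <limits.h>

    /* Sketch of the leaf quantum selection: an explicit quantum from
     * userspace wins; otherwise it is derived from the class rate via r2q
     * and clamped into [1000, 200000] bytes (the real code logs the
     * "quantum of class ... is small/big" warning when clamping). */
    static int pick_quantum(uint64_t rate_bytes_ps, unsigned int r2q,
                            int user_quantum)
    {
        uint64_t q;

        if (user_quantum)
            return user_quantum;
        q = rate_bytes_ps / r2q;
        if (q > INT_MAX)
            q = INT_MAX;
        if (q < 1000)
            q = 1000;
        if (q > 200000)
            q = 200000;
        return (int)q;
    }
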
2075 *arg = (unsigned long)cl;
2079 gen_kill_estimator(&cl->rate_est);
2081 tcf_block_put(cl->block);
2082 kfree(cl);
2091 struct htb_class *cl = (struct htb_class *)arg;
2093 return cl ? cl->block : q->block;
2099 struct htb_class *cl = htb_find(classid, sch);
2101 /*if (cl && !cl->level) return 0;
2110 if (cl)
2111 qdisc_class_get(&cl->common);
2112 return (unsigned long)cl;
2117 struct htb_class *cl = (struct htb_class *)arg;
2119 qdisc_class_put(&cl->common);
2125 struct htb_class *cl;
2132 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
2133 if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))