// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *
 *  Implemented on Linux by:
 *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */
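
/*
 * Example user-space setup (illustrative only; assumes the iproute2
 * "tc" utility and an interface named eth0):
 *
 *	tc qdisc add dev eth0 root codel limit 1000 target 5ms \
 *		interval 100ms ecn
 */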

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

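/* Default queue limit, in packets, used when no limit is specified. */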
#define DEFAULT_CODEL_LIMIT 1000

struct codel_sched_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
	u32			drop_overlimit;
};

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (skb) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		prefetch(&skb->end); /* we'll need skb_shinfo() */
	}
	return skb;
}

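/* Called by codel_dequeue() when a packet must be dropped: free the
 * skb and account the drop in the qdisc stats.
 */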
static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->stats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
					  q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}

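/* Stamp each packet with its enqueue time so codel can later measure
 * the sojourn time at dequeue; packets beyond sch->limit are dropped.
 */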
static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct codel_sched_data *q;

	if (likely(qdisc_qlen(sch) < sch->limit)) {
		codel_set_enqueue_time(skb);
		return qdisc_enqueue_tail(skb, sch);
	}
	q = qdisc_priv(sch);
	q->drop_overlimit++;
	return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
};

static int codel_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
					  codel_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

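	/* Netlink attributes carry times in microseconds; codel_time_t
	 * counts nanoseconds shifted right by CODEL_SHIFT, hence the
	 * conversions below.
	 */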
	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		q->params.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);

	if (tb[TCA_CODEL_ECN])
		q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);

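	/* The limit may have been lowered: drop packets from the head of
	 * the queue until we comply, then report the removed packets and
	 * bytes to our ancestors.
	 */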
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

static int codel_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;

	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	q->params.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = codel_change(sch, opt, extack);

		if (err)
			return err;
	}

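	/* An empty qdisc may only be bypassed if at least one packet can
	 * actually be queued (a zero limit drops every packet on enqueue).
	 */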
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}

static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CODEL_TARGET,
			codel_time_to_us(q->params.target)) ||
	    nla_put_u32(skb, TCA_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
			codel_time_to_us(q->params.interval)) ||
	    nla_put_u32(skb, TCA_CODEL_ECN,
			q->params.ecn))
		goto nla_put_failure;
	if (q->params.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->params.ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	const struct codel_sched_data *q = qdisc_priv(sch);
	struct tc_codel_xstats st = {
		.maxpacket	= q->stats.maxpacket,
		.count		= q->vars.count,
		.lastcount	= q->vars.lastcount,
		.drop_overlimit = q->drop_overlimit,
		.ldelay		= codel_time_to_us(q->vars.ldelay),
		.dropping	= q->vars.dropping,
		.ecn_mark	= q->stats.ecn_mark,
		.ce_mark	= q->stats.ce_mark,
	};

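	/* drop_next is an absolute timestamp; report it relative to now,
	 * negative if the next scheduled drop is already overdue.
	 */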
	if (q->vars.dropping) {
		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();

		if (delta >= 0)
			st.drop_next = codel_time_to_us(delta);
		else
			st.drop_next = -codel_time_to_us(-delta);
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

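/* Purge all queued packets and reset codel's control-law state;
 * configured parameters are kept.
 */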
static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}

static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		=	"codel",
	.priv_size	=	sizeof(struct codel_sched_data),

	.enqueue	=	codel_qdisc_enqueue,
	.dequeue	=	codel_qdisc_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	codel_init,
	.reset		=	codel_reset,
	.change		=	codel_change,
	.dump		=	codel_dump,
	.dump_stats	=	codel_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("codel");

static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}

static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)

MODULE_DESCRIPTION("Controlled Delay queue discipline");
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");