/*
 * Copyright (c) 2010 Fabio Checconi, Luigi Rizzo, Paolo Valente
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD: head/sys/netinet/ipfw/dn_sched_qfq.c 230614 2012-01-27 13:26:25Z luigi $
 */

#ifdef _KERNEL
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <net/if.h>	/* IFNAMSIZ */
#include <netinet/in.h>
#include <netinet/ip_var.h>		/* ipfw_rule_ref */
#include <netinet/ip_fw.h>	/* flow_id */
#include <netinet/ip_dummynet.h>
#include <netinet/ipfw/dn_heap.h>
#include <netinet/ipfw/ip_dn_private.h>
#include <netinet/ipfw/dn_sched.h>
#else
#include <dn_test.h>
#endif

#ifdef QFQ_DEBUG
struct qfq_sched;
static void dump_sched(struct qfq_sched *q, const char *msg);
#define	NO(x)	x
#else
#define NO(x)
#endif
#define DN_SCHED_QFQ	4 // XXX Where?
typedef	unsigned long	bitmap;

/*
 * Bitmap ops are critical. Some Linux versions have __fls
 * and the bitmap ops; some platforms only provide ffs(), so
 * fallbacks are defined below.
 */
#if defined(_WIN32) || (defined(__MIPSEL__) && defined(LINUX_24))
int fls(unsigned int n)
{
	int i = 0;
	for (i = 0; n > 0; n >>= 1, i++)
		;
	return i;
}
#endif

#if !defined(_KERNEL) || defined( __FreeBSD__ ) || defined(_WIN32) || (defined(__MIPSEL__) && defined(LINUX_24))
static inline unsigned long __fls(unsigned long word)
{
	return fls(word) - 1;
}
#endif
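/*
 * Note (illustrative, assuming the usual fls() semantics): fls() returns
 * the 1-based position of the most significant set bit, with fls(0) == 0,
 * so __fls() above is the 0-based index of the MSB, e.g. __fls(0x10) == 4.
 * Callers in this file never pass 0 to __fls().
 */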

#if !defined(_KERNEL) || !defined(__linux__)
#ifdef QFQ_DEBUG
int test_bit(int ix, bitmap *p)
{
	if (ix < 0 || ix > 31)
		D("bad index %d", ix);
	return *p & (1<<ix);
}
void __set_bit(int ix, bitmap *p)
{
	if (ix < 0 || ix > 31)
		D("bad index %d", ix);
	*p |= (1<<ix);
}
void __clear_bit(int ix, bitmap *p)
{
	if (ix < 0 || ix > 31)
		D("bad index %d", ix);
	*p &= ~(1<<ix);
}
#else /* !QFQ_DEBUG */
/* XXX do we have fast version, or leave it to the compiler ? */
#define test_bit(ix, pData)	((*pData) & (1<<(ix)))
#define __set_bit(ix, pData)	(*pData) |= (1<<(ix))
#define __clear_bit(ix, pData)	(*pData) &= ~(1<<(ix))
#endif /* !QFQ_DEBUG */
#endif /* !__linux__ */

#ifdef __MIPSEL__
#define __clear_bit(ix, pData)	(*pData) &= ~(1<<(ix))
#endif

/*-------------------------------------------*/
/*

Virtual time computations.

S, F and V are all computed in fixed point arithmetic with
FRAC_BITS decimal bits.

   QFQ_MAX_INDEX is the maximum index allowed for a group. We need
  	one bit per index.
   QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.
   The layout of the bits is as below:

                   [ MTU_SHIFT ][      FRAC_BITS    ]
                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]
  				 ^.__grp->index = 0
  				 *.__grp->slot_shift

   where MIN_SLOT_SHIFT is derived by difference from the others.

The max group index corresponds to Lmax/w_min, where
Lmax=1<<MTU_SHIFT, w_min = 1 .
From this, and knowing how many groups (MAX_INDEX) we want,
we can derive the shift corresponding to each group.

Because we often need to compute
	F = S + len/w_i  and V = V + len/wsum
instead of storing w_i we store the value
	inv_w = (1<<FRAC_BITS)/w_i
so we can do F = S + len * inv_w, turning the division into a
multiplication (and similarly V = V + len * IWSUM, where
IWSUM = ONE_FP/wsum).
We use IWSUM in the formulas so we can easily move between a
static and an adaptive weight sum.

The per-scheduler-instance data contain all the data structures
for the scheduler: bitmaps and bucket lists.

 */
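/*
 * Worked example (illustrative, not part of the original text): with
 * FRAC_BITS = 30, a class with weight w = 1 has inv_w = ONE_FP/w = 2^30,
 * so a 1500-byte packet advances its finish time F by 1500 * 2^30, while
 * V advances by 1500 * IWSUM = 1500 * (2^30 / QFQ_MAX_WSUM) = 1500 * 8192
 * for the same packet.
 */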
/*
 * Maximum number of consecutive slots occupied by backlogged classes
 * inside a group. This is approx lmax/lmin + 5.
 * XXX check because it poses constraints on MAX_INDEX
 */
#define QFQ_MAX_SLOTS	32
/*
 * Shifts used for class<->group mapping. Class weights are
 * in the range [1, QFQ_MAX_WEIGHT]; we need to map each class i to the
 * group with the smallest index that can support the L_i / r_i
 * configured for the class.
 *
 * grp->index is the index of the group, and grp->slot_shift
 * is the shift for the corresponding (scaled) sigma_i.
 *
 * When computing the group index, we do (len<<FRAC_BITS)/weight,
 * then compute an FLS (which is like a log2()), and if the result
 * is below the MAX_INDEX region we use 0 (which is the same as
 * using a larger len).
 */
#define QFQ_MAX_INDEX		19
#define QFQ_MAX_WSHIFT		16	/* log2(max_weight) */

#define	QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT)
#define QFQ_MAX_WSUM		(2*QFQ_MAX_WEIGHT)
//#define IWSUM	(q->i_wsum)
#define IWSUM	((1<<FRAC_BITS)/QFQ_MAX_WSUM)
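/*
 * Note: with the static definition above, V advances as if the weight sum
 * were always QFQ_MAX_WSUM; the commented-out variant would instead use
 * the current weight sum tracked in q->i_wsum.
 */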

#define FRAC_BITS		30	/* fixed point arithmetic */
#define ONE_FP			(1UL << FRAC_BITS)

#define QFQ_MTU_SHIFT		11	/* log2(max_len) */
#define QFQ_MIN_SLOT_SHIFT	(FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)

/*
 * Possible group states, also indexes for the bitmaps array in
 * struct qfq_sched. We rely on ER, IR, EB, IB being numbered 0..3
 */
enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };

struct qfq_group;
/*
 * Additional queue info. Some of this info should come from
 * the flowset; we copy it here for faster processing.
 * This is an overlay of struct dn_queue.
 */
struct qfq_class {
	struct dn_queue _q;
	uint64_t S, F;		/* flow timestamps (exact) */
	struct qfq_class *next; /* Link for the slot list. */

	/* group we belong to. In principle we would need the index,
	 * which is log_2(lmax/weight), but we never reference it
	 * directly, only the group.
	 */
	struct qfq_group *grp;

	/* these are copied from the flowset. */
	uint32_t	inv_w;	/* ONE_FP/weight */
	uint32_t	lmax;	/* Max packet size for this flow. */
};

/* Group descriptor, see the paper for details.
 * Basically this contains the bucket lists
 */
struct qfq_group {
	uint64_t S, F;			/* group timestamps (approx). */
	unsigned int slot_shift;	/* Slot shift. */
	unsigned int index;		/* Group index. */
	unsigned int front;		/* Index of the front slot. */
	bitmap full_slots;		/* non-empty slots */

	/* Array of lists of active classes. */
	struct qfq_class *slots[QFQ_MAX_SLOTS];
};

/* scheduler instance descriptor. */
struct qfq_sched {
	uint64_t	V;		/* Precise virtual time. */
	uint32_t	wsum;		/* weight sum */
	NO(uint32_t	i_wsum;		/* ONE_FP/w_sum */
	uint32_t	queued;		/* debugging */
	uint32_t	loops;	/* debugging */)
	bitmap bitmaps[QFQ_MAX_STATE];	/* Group bitmaps. */
	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
};

/*---- support functions ----------------------------*/

/* Generic comparison function, handling wraparound. */
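/* Illustrative: with a = 3 just after V wrapped and b = 0xfffffffffffffffeULL,
 * a - b == 5, which is positive as int64_t, so qfq_gt(a, b) correctly treats
 * a as the later timestamp even though a < b as plain unsigned values.
 */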
static inline int qfq_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

/* Round a precise timestamp to its slotted value. */
static inline uint64_t qfq_round_down(uint64_t ts, unsigned int shift)
{
	return ts & ~((1ULL << shift) - 1);
}

/* return the pointer to the group with lowest index in the bitmap */
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
					unsigned long bitmap)
{
	int index = ffs(bitmap) - 1; // zero-based
	return &q->groups[index];
}

/*
 * Calculate a flow index, given its weight and maximum packet length.
 * index = log_2(maxlen/weight) but we need to apply the scaling.
 * This is used only once at flow creation.
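 *
 * Illustrative example (not part of the original comment): with
 * FRAC_BITS = 30 and QFQ_MIN_SLOT_SHIFT = 30 + 11 - 19 = 22, a class
 * with weight 1 (inv_w = 2^30) and maxlen = 1500 has
 * slot_size = 1500 * 2^30, size_map = slot_size >> 22 = 384000,
 * __fls(384000) = 18, hence index = 19, i.e. the group with the
 * largest slots.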
 */
static int qfq_calc_index(uint32_t inv_w, unsigned int maxlen)
{
	uint64_t slot_size = (uint64_t)maxlen * inv_w;
	unsigned long size_map;
	int index = 0;

	size_map = (unsigned long)(slot_size >> QFQ_MIN_SLOT_SHIFT);
	if (!size_map)
		goto out;

	index = __fls(size_map) + 1;	// basically a log_2()
	index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1)));

	if (index < 0)
		index = 0;

out:
	ND("W = %d, L = %d, I = %d\n", ONE_FP/inv_w, maxlen, index);
	return index;
}
/*---- end support functions ----*/

/*-------- API calls --------------------------------*/
/*
 * Validate and copy parameters from flowset.
 */
static int
qfq_new_queue(struct dn_queue *_q)
{
	struct qfq_sched *q = (struct qfq_sched *)(_q->_si + 1);
	struct qfq_class *cl = (struct qfq_class *)_q;
	int i;
	uint32_t w;	/* approximated weight */

	/* import parameters from the flowset. They should be correct
	 * already.
	 */
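	/* par[0] is the weight and par[1] the maximum packet length,
	 * both already bounded to sane ranges by qfq_new_fsk() below.
	 */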
	w = _q->fs->fs.par[0];
	cl->lmax = _q->fs->fs.par[1];
	if (!w || w > QFQ_MAX_WEIGHT) {
		w = 1;
		D("rounding weight to 1");
	}
	cl->inv_w = ONE_FP/w;
	w = ONE_FP/cl->inv_w;
	if (q->wsum + w > QFQ_MAX_WSUM)
		return EINVAL;

	i = qfq_calc_index(cl->inv_w, cl->lmax);
	cl->grp = &q->groups[i];
	q->wsum += w;
	// XXX cl->S = q->V; ?
	// XXX compute q->i_wsum
	return 0;
}

/* remove an empty queue */
static int
qfq_free_queue(struct dn_queue *_q)
{
	struct qfq_sched *q = (struct qfq_sched *)(_q->_si + 1);
	struct qfq_class *cl = (struct qfq_class *)_q;
	if (cl->inv_w) {
		q->wsum -= ONE_FP/cl->inv_w;
		cl->inv_w = 0; /* reset weight to avoid doing this twice */
	}
	return 0;
}

/* Calculate a mask to mimic what would be ffs_from(). */
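/* e.g. (illustrative) mask_from(0x16, 2) == 0x14: bits below 'from' are cleared. */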
static inline unsigned long
mask_from(unsigned long bitmap, int from)
{
	return bitmap & ~((1UL << from) - 1);
}

/*
 * The state computation relies on ER=0, IR=1, EB=2, IB=3
 * First compute eligibility comparing grp->S, q->V,
 * then check if someone is blocking us and possibly add EB
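 * (e.g. an eligible but blocked group ends up in state ER|EB == EB, while
 * an ineligible and blocked one ends up in IR|EB == IB).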
 */
static inline unsigned int
qfq_calc_state(struct qfq_sched *q, struct qfq_group *grp)
{
	/* if S > V we are not eligible */
	unsigned int state = qfq_gt(grp->S, q->V);
	unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (qfq_gt(grp->F, next->F))
			state |= EB;
	}

	return state;
}

/*
 * In principle
 *	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 *	q->bitmaps[src] &= ~mask;
 * but we should make sure that src != dst
 */
static inline void
qfq_move_groups(struct qfq_sched *q, unsigned long mask, int src, int dst)
{
	q->bitmaps[dst] |= q->bitmaps[src] & mask;
	q->bitmaps[src] &= ~mask;
}

static inline void
qfq_unblock_groups(struct qfq_sched *q, int index, uint64_t old_finish)
{
	unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (!qfq_gt(next->F, old_finish))
			return;
	}

	mask = (1UL << index) - 1;
	qfq_move_groups(q, mask, EB, ER);
	qfq_move_groups(q, mask, IB, IR);
}

/*
 * perhaps
 *
	old_V ^= q->V;
	old_V >>= QFQ_MIN_SLOT_SHIFT;
	if (old_V) {
		...
	}
 *
 */
static inline void
qfq_make_eligible(struct qfq_sched *q, uint64_t old_V)
{
	unsigned long mask, vslot, old_vslot;

	vslot = q->V >> QFQ_MIN_SLOT_SHIFT;
	old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT;

	if (vslot != old_vslot) {
		mask = (2UL << (__fls(vslot ^ old_vslot))) - 1;
		qfq_move_groups(q, mask, IR, ER);
		qfq_move_groups(q, mask, IB, EB);
	}
}

/*
 * XXX we should make sure that slot becomes less than 32.
 * This is guaranteed by the input values.
 * roundedS is always cl->S rounded on grp->slot_shift bits.
 */
static inline void
qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl, uint64_t roundedS)
{
	uint64_t slot = (roundedS - grp->S) >> grp->slot_shift;
	unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;

	cl->next = grp->slots[i];
	grp->slots[i] = cl;
	__set_bit(slot, &grp->full_slots);
}

/*
 * remove the entry from the slot
 */
static inline void
qfq_front_slot_remove(struct qfq_group *grp)
{
	struct qfq_class **h = &grp->slots[grp->front];

	*h = (*h)->next;
	if (!*h)
		__clear_bit(0, &grp->full_slots);
}

/*
 * Returns the first full queue in a group. As a side effect,
 * adjust the bucket list so the first non-empty bucket is at
 * position 0 in full_slots.
 */
static inline struct qfq_class *
qfq_slot_scan(struct qfq_group *grp)
{
	int i;

	ND("grp %d full %x", grp->index, grp->full_slots);
	if (!grp->full_slots)
		return NULL;

	i = ffs(grp->full_slots) - 1; // zero-based
	if (i > 0) {
		grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
		grp->full_slots >>= i;
	}

	return grp->slots[grp->front];
}

/*
 * adjust the bucket list. When the start time of a group decreases,
 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 * move the objects. The mask of occupied slots must be shifted
 * because we use ffs() to find the first non-empty slot.
 * This covers decreases in the group's start time, but what about
 * increases of the start time ?
 * Here too we should make sure that i is less than 32
 */
static inline void
qfq_slot_rotate(struct qfq_sched *q, struct qfq_group *grp, uint64_t roundedS)
{
	unsigned int i = (grp->S - roundedS) >> grp->slot_shift;

	grp->full_slots <<= i;
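	/* front and i are unsigned: since QFQ_MAX_SLOTS is a power of two,
	 * the wraparound of (grp->front - i) still yields the correct value
	 * modulo QFQ_MAX_SLOTS when i > grp->front.
	 */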
	grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
}


static inline void
qfq_update_eligible(struct qfq_sched *q, uint64_t old_V)
{
	bitmap ineligible;

	ineligible = q->bitmaps[IR] | q->bitmaps[IB];
	if (ineligible) {
		if (!q->bitmaps[ER]) {
			struct qfq_group *grp;
			grp = qfq_ffs(q, ineligible);
			if (qfq_gt(grp->S, q->V))
				q->V = grp->S;
		}
		qfq_make_eligible(q, old_V);
	}
}

/*
 * Updates the class, returns true if also the group needs to be updated.
 */
static inline int
qfq_update_class(struct qfq_sched *q, struct qfq_group *grp,
	    struct qfq_class *cl)
{

	cl->S = cl->F;
	if (cl->_q.mq.head == NULL) {
		qfq_front_slot_remove(grp);
	} else {
		unsigned int len;
		uint64_t roundedS;

		len = cl->_q.mq.head->m_pkthdr.len;
		cl->F = cl->S + (uint64_t)len * cl->inv_w;
		roundedS = qfq_round_down(cl->S, grp->slot_shift);
		if (roundedS == grp->S)
			return 0;

		qfq_front_slot_remove(grp);
		qfq_slot_insert(grp, cl, roundedS);
	}
	return 1;
}

static struct mbuf *
qfq_dequeue(struct dn_sch_inst *si)
{
	struct qfq_sched *q = (struct qfq_sched *)(si + 1);
	struct qfq_group *grp;
	struct qfq_class *cl;
	struct mbuf *m;
	uint64_t old_V;

	NO(q->loops++;)
	if (!q->bitmaps[ER]) {
		NO(if (q->queued)
			dump_sched(q, "start dequeue");)
		return NULL;
	}

	grp = qfq_ffs(q, q->bitmaps[ER]);

	cl = grp->slots[grp->front];
	/* extract from the first bucket in the bucket list */
	m = dn_dequeue(&cl->_q);

	if (!m) {
		D("BUG/* non-workconserving leaf */");
		return NULL;
	}
	NO(q->queued--;)
	old_V = q->V;
	q->V += (uint64_t)m->m_pkthdr.len * IWSUM;
	ND("m is %p F 0x%llx V now 0x%llx", m, cl->F, q->V);

	if (qfq_update_class(q, grp, cl)) {
		uint64_t old_F = grp->F;
		cl = qfq_slot_scan(grp);
		if (!cl) { /* group gone, remove from ER */
			__clear_bit(grp->index, &q->bitmaps[ER]);
			// grp->S = grp->F + 1; // XXX debugging only
		} else {
			uint64_t roundedS = qfq_round_down(cl->S, grp->slot_shift);
			unsigned int s;

			if (grp->S == roundedS)
				goto skip_unblock;
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			/* remove from ER and put in the new set */
			__clear_bit(grp->index, &q->bitmaps[ER]);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}
		/* we need to unblock even if the group has gone away */
		qfq_unblock_groups(q, grp->index, old_F);
	}

skip_unblock:
	qfq_update_eligible(q, old_V);
	NO(if (!q->bitmaps[ER] && q->queued)
		dump_sched(q, "end dequeue");)

	return m;
}

/*
 * Assign a reasonable start time for a new flow k in group i.
 * Admissible values for \hat(F) are multiples of \sigma_i
 * no greater than V+\sigma_i . Larger values mean that
 * we had a wraparound so we consider the timestamp to be stale.
 *
 * If F is not stale and F >= V then we set S = F.
 * Otherwise we should assign S = V, but this may violate
 * the ordering in ER. So, if we have groups in ER, set S to
 * the F_j of the first group j which would be blocking us.
 * We are guaranteed not to move S backward because
 * otherwise our group i would still be blocked.
 */
static inline void
qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
{
	unsigned long mask;
	uint64_t limit, roundedF;
	int slot_shift = cl->grp->slot_shift;

	roundedF = qfq_round_down(cl->F, slot_shift);
	limit = qfq_round_down(q->V, slot_shift) + (1UL << slot_shift);

	if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
		/* timestamp was stale */
		mask = mask_from(q->bitmaps[ER], cl->grp->index);
		if (mask) {
			struct qfq_group *next = qfq_ffs(q, mask);
			if (qfq_gt(roundedF, next->F)) {
				cl->S = next->F;
				return;
			}
		}
		cl->S = q->V;
	} else { /* timestamp is not stale */
		cl->S = cl->F;
	}
}

static int
qfq_enqueue(struct dn_sch_inst *si, struct dn_queue *_q, struct mbuf *m)
{
	struct qfq_sched *q = (struct qfq_sched *)(si + 1);
	struct qfq_group *grp;
	struct qfq_class *cl = (struct qfq_class *)_q;
	uint64_t roundedS;
	int s;

	NO(q->loops++;)
	DX(4, "len %d flow %p inv_w 0x%x grp %d", m->m_pkthdr.len,
		_q, cl->inv_w, cl->grp->index);
	/* XXX verify that the packet obeys the parameters */
	if (m != _q->mq.head) {
		if (dn_enqueue(_q, m, 0)) /* packet was dropped */
			return 1;
		NO(q->queued++;)
		if (m != _q->mq.head)
			return 0;
	}
	/* If we reach this point, queue q was idle */
	grp = cl->grp;
	qfq_update_start(q, cl); /* adjust start time */
	/* compute new finish time and rounded start. */
	cl->F = cl->S + (uint64_t)(m->m_pkthdr.len) * cl->inv_w;
	roundedS = qfq_round_down(cl->S, grp->slot_shift);

	/*
	 * insert cl in the correct bucket.
	 * If cl->S >= grp->S we don't need to adjust the
	 * bucket list and simply go to the insertion phase.
	 * Otherwise grp->S is decreasing, we must make room
	 * in the bucket list, and also recompute the group state.
	 * Finally, if there were no flows in this group and nobody
	 * was in ER make sure to adjust V.
	 */
	if (grp->full_slots) {
		if (!qfq_gt(grp->S, cl->S))
			goto skip_update;
		/* create a slot for this cl->S */
		qfq_slot_rotate(q, grp, roundedS);
		/* group was surely ineligible, remove */
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[IB]);
	} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
		q->V = roundedS;

	grp->S = roundedS;
	grp->F = roundedS + (2ULL << grp->slot_shift); // i.e. 2\sigma_i
	s = qfq_calc_state(q, grp);
	__set_bit(grp->index, &q->bitmaps[s]);
	ND("new state %d 0x%x", s, q->bitmaps[s]);
	ND("S %llx F %llx V %llx", cl->S, cl->F, q->V);
skip_update:
	qfq_slot_insert(grp, cl, roundedS);

	return 0;
}


#if 0
static inline void
qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
	struct qfq_class *cl, struct qfq_class **pprev)
{
	unsigned int i, offset;
	uint64_t roundedS;

	roundedS = qfq_round_down(cl->S, grp->slot_shift);
	offset = (roundedS - grp->S) >> grp->slot_shift;
	i = (grp->front + offset) % QFQ_MAX_SLOTS;

#ifdef notyet
	if (!pprev) {
		pprev = &grp->slots[i];
		while (*pprev && *pprev != cl)
			pprev = &(*pprev)->next;
	}
#endif

	*pprev = cl->next;
	if (!grp->slots[i])
		__clear_bit(offset, &grp->full_slots);
}

/*
 * called to forcibly destroy a queue.
 * If the queue is not in the front bucket, or if it has
 * other queues in the front bucket, we can simply remove
 * the queue with no other side effects.
 * Otherwise we must propagate the event up.
 * XXX description to be completed.
 */
static void
qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl,
				 struct qfq_class **pprev)
{
	struct qfq_group *grp = cl->grp;
	unsigned long mask;
	uint64_t roundedS;
	int s;

	cl->F = cl->S;	// not needed if the class goes away.
	qfq_slot_remove(q, grp, cl, pprev);

	if (!grp->full_slots) {
		/* nothing left in the group, remove from all sets.
		 * Do ER last because if we were blocking other groups
		 * we must unblock them.
		 */
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[EB]);
		__clear_bit(grp->index, &q->bitmaps[IB]);

		if (test_bit(grp->index, &q->bitmaps[ER]) &&
		    !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
			mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
			if (mask)
				mask = ~((1UL << __fls(mask)) - 1);
			else
				mask = ~0UL;
			qfq_move_groups(q, mask, EB, ER);
			qfq_move_groups(q, mask, IB, IR);
		}
		__clear_bit(grp->index, &q->bitmaps[ER]);
	} else if (!grp->slots[grp->front]) {
		cl = qfq_slot_scan(grp);
		roundedS = qfq_round_down(cl->S, grp->slot_shift);
		if (grp->S != roundedS) {
			__clear_bit(grp->index, &q->bitmaps[ER]);
			__clear_bit(grp->index, &q->bitmaps[IR]);
			__clear_bit(grp->index, &q->bitmaps[EB]);
			__clear_bit(grp->index, &q->bitmaps[IB]);
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}
	}
	qfq_update_eligible(q, q->V);
}
#endif

static int
qfq_new_fsk(struct dn_fsk *f)
{
	ipdn_bound_var(&f->fs.par[0], 1, 1, QFQ_MAX_WEIGHT, "qfq weight");
	ipdn_bound_var(&f->fs.par[1], 1500, 1, 2000, "qfq maxlen");
	ND("weight %d len %d\n", f->fs.par[0], f->fs.par[1]);
	return 0;
}

/*
 * initialize a new scheduler instance
 */
static int
qfq_new_sched(struct dn_sch_inst *si)
{
	struct qfq_sched *q = (struct qfq_sched *)(si + 1);
	struct qfq_group *grp;
	int i;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		grp->index = i;
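		/* group 0 gets QFQ_MIN_SLOT_SHIFT; each following group
		 * uses one more bit, i.e. doubles the slot size.
		 */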
		grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS -
					(QFQ_MAX_INDEX - i);
	}
	return 0;
}

/*
 * QFQ scheduler descriptor
 */
static struct dn_alg qfq_desc = {
	_SI( .type = ) DN_SCHED_QFQ,
	_SI( .name = ) "QFQ",
	_SI( .flags = ) DN_MULTIQUEUE,

	_SI( .schk_datalen = ) 0,
	_SI( .si_datalen = ) sizeof(struct qfq_sched),
	_SI( .q_datalen = ) sizeof(struct qfq_class) - sizeof(struct dn_queue),

	_SI( .enqueue = ) qfq_enqueue,
	_SI( .dequeue = ) qfq_dequeue,

	_SI( .config = )  NULL,
	_SI( .destroy = )  NULL,
	_SI( .new_sched = ) qfq_new_sched,
	_SI( .free_sched = )  NULL,
	_SI( .new_fsk = ) qfq_new_fsk,
	_SI( .free_fsk = )  NULL,
	_SI( .new_queue = ) qfq_new_queue,
	_SI( .free_queue = ) qfq_free_queue,
};

DECLARE_DNSCHED_MODULE(dn_qfq, &qfq_desc);

#ifdef QFQ_DEBUG
static void
dump_groups(struct qfq_sched *q, uint32_t mask)
{
	int i, j;

	for (i = 0; i < QFQ_MAX_INDEX + 1; i++) {
		struct qfq_group *g = &q->groups[i];

		if (0 == (mask & (1<<i)))
			continue;
		for (j = 0; j < QFQ_MAX_SLOTS; j++) {
			if (g->slots[j])
				D("    bucket %d %p", j, g->slots[j]);
		}
		D("full_slots 0x%x", g->full_slots);
		D("        %2d S 0x%20llx F 0x%llx %c", i,
			g->S, g->F,
			mask & (1<<i) ? '1' : '0');
	}
}

static void
dump_sched(struct qfq_sched *q, const char *msg)
{
	D("--- in %s: ---", msg);
	ND("loops %d queued %d V 0x%llx", q->loops, q->queued, q->V);
	D("    ER 0x%08x", q->bitmaps[ER]);
	D("    EB 0x%08x", q->bitmaps[EB]);
	D("    IR 0x%08x", q->bitmaps[IR]);
	D("    IB 0x%08x", q->bitmaps[IB]);
	dump_groups(q, 0xffffffff);
}
#endif /* QFQ_DEBUG */