/*-
 * Copyright (c) 2009-2010 Fabio Checconi
 * Copyright (c) 2009-2010 Luigi Rizzo, Universita` di Pisa
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $Id$
 * $FreeBSD: head/sys/geom/sched/gs_rr.c 206552 2010-04-13 09:56:17Z luigi $
 *
 * A round-robin (RR) anticipatory scheduler, with per-client queues.
 *
 * The goal of this implementation is to improve throughput compared
 * to the pure elevator algorithm, and to ensure some fairness among
 * clients.
 *
 * Requests coming from the same client are put in the same queue.
 * We use anticipation to help reduce seeks, and each queue
 * is never served continuously for more than a given amount of
 * time or data. Queues are then served in a round-robin fashion.
 *
 * Each queue can be in any of the following states:
 *     READY	immediately serve the first pending request;
 *     BUSY	one request is under service, wait for completion;
 *     IDLING	do not serve incoming requests immediately, unless
 * 		they are "eligible" as defined later.
 *
 * Scheduling decisions are made by looking at the status of all
 * queues, and the first one in round-robin order is privileged.
 */
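
/*
 * Usage sketch (illustrative, not part of the original file): the
 * scheduler is normally attached on top of an existing provider via
 * the gsched(8) utility, along the lines of
 *
 *	kldload gsched_rr
 *	geom sched insert -a rr ada0
 *
 * after which I/O to ada0 flows through this algorithm.  The module
 * and command names here are assumptions; see gsched(8) for the
 * authoritative syntax.
 */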

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include "gs_scheduler.h"

/* possible states of the scheduler */
enum g_rr_state {
	G_QUEUE_READY = 0,	/* Ready to dispatch. */
	G_QUEUE_BUSY,		/* Waiting for a completion. */
	G_QUEUE_IDLING		/* Waiting for a new request. */
};

/* possible queue flags */
enum g_rr_flags {
	G_FLAG_COMPLETED = 1,	/* Completed a req. in the current budget. */
};

struct g_rr_softc;

/*
 * Queue descriptor, containing a reference count, scheduling
 * state, a queue of pending requests, and configuration parameters.
 * Queues with pending request(s) and not under service are also
 * stored in a Round Robin (RR) list.
 */
struct g_rr_queue {
	struct g_rr_softc *q_sc;	/* link to the parent */

	enum g_rr_state	q_status;
	unsigned int	q_service;	/* service received so far */
	int		q_slice_end;	/* actual slice end in ticks */
	enum g_rr_flags	q_flags;	/* queue flags */
	struct bio_queue_head q_bioq;

	/* Scheduling parameters */
	unsigned int	q_budget;	/* slice size in bytes */
	unsigned int	q_slice_duration; /* slice size in ticks */
	unsigned int	q_wait_ticks;	/* wait time for anticipation */

	/* Stats to drive the various heuristics. */
	struct g_savg	q_thinktime;	/* Thinktime average. */
	struct g_savg	q_seekdist;	/* Seek distance average. */

	int		q_bionum;	/* Number of requests. */

	off_t		q_lastoff;	/* Last submitted req. offset. */
	int		q_lastsub;	/* Last submitted req. time. */

	/* Expiration deadline for an empty queue. */
	int		q_expire;

	TAILQ_ENTRY(g_rr_queue) q_tailq; /* RR list link field */
};

/* List types. */
TAILQ_HEAD(g_rr_tailq, g_rr_queue);

/* list of scheduler instances */
LIST_HEAD(g_scheds, g_rr_softc);

/* Default quantum for RR between queues. */
#define	G_RR_DEFAULT_BUDGET	0x00800000
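
/*
 * Note: 0x00800000 bytes = 8 MiB, matching the 8192 KiB default of
 * quantum_kb below; the per-queue q_budget is actually initialized
 * from quantum_kb in g_rr_init_class().
 */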

/*
 * Per device descriptor, holding the Round Robin list of queues
 * accessing the disk, a reference to the geom, and the timer.
 */
struct g_rr_softc {
	struct g_geom	*sc_geom;

	/*
	 * sc_active is the queue we are anticipating for.
	 * It is set only in g_rr_next(), and possibly cleared
	 * only in g_rr_next() or on a timeout.
	 * The active queue is never in the Round Robin list
	 * even if it has requests queued.
	 */
	struct g_rr_queue *sc_active;
	struct callout	sc_wait;	/* timer for sc_active */

	struct g_rr_tailq sc_rr_tailq;	/* the round-robin list */
	int		sc_nqueues;	/* number of queues */

	/* Statistics */
	int		sc_in_flight;	/* requests in the driver */

	LIST_ENTRY(g_rr_softc)	sc_next;
};

/* Descriptor for bounded values, min and max are constant. */
struct x_bound {
	const int	x_min;
	int		x_cur;
	const int	x_max;
};

/*
 * parameters, config and stats
 */
struct g_rr_params {
	int	queues;			/* total number of queues */
	int	w_anticipate;		/* anticipate writes */
	int	bypass;			/* bypass the scheduler */

	int	units;			/* how many instances */
	/* sc_head is used for debugging */
	struct g_scheds	sc_head;	/* first scheduler instance */

	struct x_bound queue_depth;	/* max parallel requests */
	struct x_bound wait_ms;		/* wait time, milliseconds */
	struct x_bound quantum_ms;	/* quantum size, milliseconds */
	struct x_bound quantum_kb;	/* quantum size, KiB (1024 bytes) */

	/* statistics */
	int	wait_hit;		/* success in anticipation */
	int	wait_miss;		/* failure in anticipation */
};

/*
 * Default parameters for the scheduler.  The quantum sizes target
 * an 80 MB/s disk; if the hw is faster or slower the minimum of the
 * two will take effect: the clients will still be isolated but
 * the fairness may be limited.  A complete solution would involve
 * the on-line measurement of the actual disk throughput to derive
 * these parameters.  Or we may just choose to ignore service domain
 * fairness and accept what can be achieved with time-only budgets.
 */
static struct g_rr_params me = {
	.sc_head = LIST_HEAD_INITIALIZER(&me.sc_head),
	.w_anticipate =	1,
	.queue_depth =	{ 1,	1,	50 },
	.wait_ms =	{ 1,	10,	30 },
	.quantum_ms =	{ 1,	100,	500 },
	.quantum_kb =	{ 16,	8192,	65536 },
};
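
/*
 * Worked example for the defaults above: with quantum_ms = 100 and a
 * disk doing 80 MB/s, a full time slice moves about 0.1 s * 80 MB/s =
 * 8 MB, which matches the 8192 KiB byte budget in quantum_kb.  On the
 * target disk the time and byte budgets thus expire together; a faster
 * disk hits the byte budget first, a slower one the time budget.
 */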

struct g_rr_params *gs_rr_me = &me;

SYSCTL_DECL(_kern_geom_sched);
SYSCTL_NODE(_kern_geom_sched, OID_AUTO, rr, CTLFLAG_RW, 0,
    "GEOM_SCHED ROUND ROBIN stuff");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, units, CTLFLAG_RD,
    &me.units, 0, "Scheduler instances");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, queues, CTLFLAG_RD,
    &me.queues, 0, "Total rr queues");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, wait_ms, CTLFLAG_RW,
    &me.wait_ms.x_cur, 0, "Wait time milliseconds");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, quantum_ms, CTLFLAG_RW,
    &me.quantum_ms.x_cur, 0, "Quantum size milliseconds");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, bypass, CTLFLAG_RW,
    &me.bypass, 0, "Bypass scheduler");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, w_anticipate, CTLFLAG_RW,
    &me.w_anticipate, 0, "Do anticipation on writes");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, quantum_kb, CTLFLAG_RW,
    &me.quantum_kb.x_cur, 0, "Quantum size Kbytes");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, queue_depth, CTLFLAG_RW,
    &me.queue_depth.x_cur, 0, "Maximum simultaneous requests");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, wait_hit, CTLFLAG_RW,
    &me.wait_hit, 0, "Hits in anticipation");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, wait_miss, CTLFLAG_RW,
    &me.wait_miss, 0, "Misses in anticipation");

#ifdef DEBUG_QUEUES
/* print the status of a queue */
static void
gs_rr_dump_q(struct g_rr_queue *qp, int index)
{
	int l = 0;
	struct bio *bp;

	TAILQ_FOREACH(bp, &(qp->q_bioq.queue), bio_queue) {
		l++;
	}
	printf("--- rr queue %d %p status %d len %d ---\n",
	    index, qp, qp->q_status, l);
}

/*
 * Dump the scheduler status when writing to this sysctl variable.
 * XXX the output is only meant for debugging.
 */
static int
gs_rr_sysctl_status(SYSCTL_HANDLER_ARGS)
{
	int error, val = 0;
	struct g_rr_softc *sc;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	printf("called %s\n", __func__);

	LIST_FOREACH(sc, &me.sc_head, sc_next) {
		struct g_rr_queue *qp;
		int tot = 0;

		printf("--- sc %p active %p nqueues %d "
		    "callout %d in_flight %d ---\n",
		    sc, sc->sc_active, sc->sc_nqueues,
		    callout_active(&sc->sc_wait),
		    sc->sc_in_flight);
		/*
		 * The softc keeps no private hash of queues (the old
		 * code here referenced one that no longer exists), so
		 * dump the queues on the RR list and the active one;
		 * idle queues outside the list are not reachable here.
		 */
		TAILQ_FOREACH(qp, &sc->sc_rr_tailq, q_tailq)
			gs_rr_dump_q(qp, tot++);
		if (sc->sc_active != NULL)
			gs_rr_dump_q(sc->sc_active, tot);
	}
	return (0);
}

SYSCTL_PROC(_kern_geom_sched_rr, OID_AUTO, status,
    CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), gs_rr_sysctl_status, "I", "status");

#endif	/* DEBUG_QUEUES */

/*
 * Get a bounded value, optionally convert to a min of t_min ticks.
 */
static int
get_bounded(struct x_bound *v, int t_min)
{
	int x;

	x = v->x_cur;
	if (x < v->x_min)
		x = v->x_min;
	else if (x > v->x_max)
		x = v->x_max;
	if (t_min) {
		x = x * hz / 1000;	/* convert to ticks */
		if (x < t_min)
			x = t_min;
	}
	return (x);
}
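
/*
 * Example: with the defaults above and hz = 1000, a call such as
 * get_bounded(&me.wait_ms, 2) clamps x_cur = 10 into [1, 30] and
 * converts it to 10 ticks; with hz = 100 the same call would yield
 * 1 tick, which is then raised to the t_min floor of 2 ticks.
 */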

/*
 * Get a reference to the queue for bp, using the generic
 * classification mechanism.
 */
static struct g_rr_queue *
g_rr_queue_get(struct g_rr_softc *sc, struct bio *bp)
{

	return (g_sched_get_class(sc->sc_geom, bp));
}

static int
g_rr_init_class(void *data, void *priv)
{
	struct g_rr_softc *sc = data;
	struct g_rr_queue *qp = priv;

	gs_bioq_init(&qp->q_bioq);

	/*
	 * Set the initial parameters for the client:
	 * slice size in bytes and ticks, and wait ticks.
	 * Right now these are constant, but we could have
	 * autoconfiguration code to adjust the values based on
	 * the actual workload.
	 */
	qp->q_budget = 1024 * get_bounded(&me.quantum_kb, 0);
	qp->q_slice_duration = get_bounded(&me.quantum_ms, 2);
	qp->q_wait_ticks = get_bounded(&me.wait_ms, 2);

	qp->q_sc = sc;		/* link to the parent */
	qp->q_sc->sc_nqueues++;
	me.queues++;

	return (0);
}

/*
 * Release a reference to the queue.
 */
static void
g_rr_queue_put(struct g_rr_queue *qp)
{

	g_sched_put_class(qp->q_sc->sc_geom, qp);
}

static void
g_rr_fini_class(void *data, void *priv)
{
	struct g_rr_queue *qp = priv;

	KASSERT(gs_bioq_first(&qp->q_bioq) == NULL,
	    ("released nonempty queue"));
	qp->q_sc->sc_nqueues--;
	me.queues--;
}

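/*
 * A queue's slice is over when it has either consumed its byte budget,
 * or completed at least one request in this slice and run past
 * q_slice_end.  The "ticks - q_slice_end >= 0" form is the usual
 * wraparound-safe way to compare tick counts.
 */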
static inline int
g_rr_queue_expired(struct g_rr_queue *qp)
{

	if (qp->q_service >= qp->q_budget)
		return (1);

	if ((qp->q_flags & G_FLAG_COMPLETED) &&
	    ticks - qp->q_slice_end >= 0)
		return (1);

	return (0);
}

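/*
 * Anticipation heuristic: idle-wait for the next request from this
 * queue only if the client looks sequential and prompt, i.e. its
 * average thinktime does not exceed the configured wait time and its
 * average seek distance stays within 8 KiB.  Writes are anticipated
 * only when w_anticipate is set.
 */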
static inline int
g_rr_should_anticipate(struct g_rr_queue *qp, struct bio *bp)
{
	int wait = get_bounded(&me.wait_ms, 2);

	if (!me.w_anticipate && (bp->bio_cmd & BIO_WRITE))
		return (0);

	if (g_savg_valid(&qp->q_thinktime) &&
	    g_savg_read(&qp->q_thinktime) > wait)
		return (0);

	if (g_savg_valid(&qp->q_seekdist) &&
	    g_savg_read(&qp->q_seekdist) > 8192)
		return (0);

	return (1);
}

/*
 * Called on a request arrival, timeout or completion.
 * Try to serve a request among those queued.
 */
static struct bio *
g_rr_next(void *data, int force)
{
	struct g_rr_softc *sc = data;
	struct g_rr_queue *qp;
	struct bio *bp, *next;
	int expired;

	qp = sc->sc_active;
	if (me.bypass == 0 && !force) {
		if (sc->sc_in_flight >= get_bounded(&me.queue_depth, 0))
			return (NULL);

		/* Try with the queue under service first. */
		if (qp != NULL && qp->q_status != G_QUEUE_READY) {
			/*
			 * Queue is anticipating, ignore request.
			 * We should check that we are not past
			 * the timeout, but in that case the timeout
			 * will fire immediately afterwards so we
			 * don't bother.
			 */
			return (NULL);
		}
	} else if (qp != NULL && qp->q_status != G_QUEUE_READY) {
		g_rr_queue_put(qp);
		sc->sc_active = qp = NULL;
	}

	/*
	 * No queue under service, look for the first in RR order.
	 * If we find it, select it as sc_active, clear service
	 * and record the end time of the slice.
	 */
	if (qp == NULL) {
		qp = TAILQ_FIRST(&sc->sc_rr_tailq);
		if (qp == NULL)
			return (NULL); /* no queues at all, return */
		/* otherwise select the new queue for service. */
		TAILQ_REMOVE(&sc->sc_rr_tailq, qp, q_tailq);
		sc->sc_active = qp;
		qp->q_service = 0;
		qp->q_flags &= ~G_FLAG_COMPLETED;
	}

	bp = gs_bioq_takefirst(&qp->q_bioq);	/* surely not NULL */
	qp->q_service += bp->bio_length;	/* charge the service */

	/*
	 * The request at the head of the active queue is always
	 * dispatched, and g_rr_next() will be called again
	 * immediately.
	 * We need to prepare for what to do next:
	 *
	 * 1. have we reached the end of the (time or service) slice ?
	 *    If so, clear sc_active and possibly requeue the previous
	 *    active queue if it has more requests pending;
	 * 2. do we have more requests in sc_active ?
	 *    If yes, do not anticipate, as g_rr_next() will run again;
	 *    if no, decide whether or not to anticipate depending
	 *    on reads or writes (e.g., anticipate only on reads).
	 */
	expired = g_rr_queue_expired(qp);	/* are we expired ? */
	next = gs_bioq_first(&qp->q_bioq);	/* do we have one more ? */
	if (expired) {
		sc->sc_active = NULL;
		/* Either requeue or release reference. */
		if (next != NULL)
			TAILQ_INSERT_TAIL(&sc->sc_rr_tailq, qp, q_tailq);
		else
			g_rr_queue_put(qp);
	} else if (next != NULL) {
		qp->q_status = G_QUEUE_READY;
	} else {
		if (!force && g_rr_should_anticipate(qp, bp)) {
			/* anticipate */
			qp->q_status = G_QUEUE_BUSY;
		} else {
			/* do not anticipate, release reference */
			g_rr_queue_put(qp);
			sc->sc_active = NULL;
		}
	}
	/* If sc_active != NULL, its q_status is always correct. */

	sc->sc_in_flight++;

	return (bp);
}

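/*
 * Update the average time between consecutive requests ("thinktime")
 * for the active queue.  Each sample is clamped at twice the
 * configured wait time, so a single long pause does not dominate the
 * average, and samples are accumulated only once the queue has built
 * some history (q_bionum > 7, see g_rr_start()).
 */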
static inline void
g_rr_update_thinktime(struct g_rr_queue *qp)
{
	int delta = ticks - qp->q_lastsub, wait = get_bounded(&me.wait_ms, 2);

	if (qp->q_sc->sc_active != qp)
		return;

	qp->q_lastsub = ticks;
	delta = (delta > 2 * wait) ? 2 * wait : delta;
	if (qp->q_bionum > 7)
		g_savg_add_sample(&qp->q_thinktime, delta);
}

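/*
 * Update the average distance between consecutive requests, measured
 * from the end of the previous request to the start of the current
 * one.  Samples are capped at 64 KiB (8192 * 8) so a single large
 * jump does not permanently spoil the average.
 */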
static inline void
g_rr_update_seekdist(struct g_rr_queue *qp, struct bio *bp)
{
	off_t dist;

	if (qp->q_lastoff > bp->bio_offset)
		dist = qp->q_lastoff - bp->bio_offset;
	else
		dist = bp->bio_offset - qp->q_lastoff;

	if (dist > (8192 * 8))
		dist = 8192 * 8;

	qp->q_lastoff = bp->bio_offset + bp->bio_length;

	if (qp->q_bionum > 7)
		g_savg_add_sample(&qp->q_seekdist, dist);
}

/*
 * Called when a real request for disk I/O arrives.
 * Locate the queue associated with the client.
 * If the queue is the one we are anticipating for, reset its timeout;
 * if the queue is not in the round robin list, insert it in the list.
 * On any error, do not queue the request and return -1; the caller
 * will take care of the request.
 */
static int
g_rr_start(void *data, struct bio *bp)
{
	struct g_rr_softc *sc = data;
	struct g_rr_queue *qp;

	if (me.bypass)
		return (-1);	/* bypass the scheduler */

	/* Get the queue for the request. */
	qp = g_rr_queue_get(sc, bp);
	if (qp == NULL)
		return (-1);	/* allocation failed, tell upstream */

	if (gs_bioq_first(&qp->q_bioq) == NULL) {
		/*
		 * We are inserting into an empty queue.
		 * Reset its state if it is sc_active,
		 * otherwise insert it in the RR list.
		 */
		if (qp == sc->sc_active) {
			qp->q_status = G_QUEUE_READY;
			callout_stop(&sc->sc_wait);
		} else {
			g_sched_priv_ref(qp);
			TAILQ_INSERT_TAIL(&sc->sc_rr_tailq, qp, q_tailq);
		}
	}

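	/*
	 * q_bionum is a decaying request counter: each arrival adds
	 * one while an eighth of the old value leaks away, so under a
	 * steady stream it converges to 8.  The sampling functions
	 * above only trust the averages once q_bionum > 7.
	 */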
	qp->q_bionum = 1 + qp->q_bionum - (qp->q_bionum >> 3);

	g_rr_update_thinktime(qp);
	g_rr_update_seekdist(qp, bp);

	/* Inherit the reference returned by g_rr_queue_get(). */
	bp->bio_caller1 = qp;
	gs_bioq_disksort(&qp->q_bioq, bp);

	return (0);
}

/*
 * Callout executed when a queue times out anticipating a new request.
 */
static void
g_rr_wait_timeout(void *data)
{
	struct g_rr_softc *sc = data;
	struct g_geom *geom = sc->sc_geom;

	g_sched_lock(geom);
	/*
	 * We can race with other events, so check if
	 * sc_active is still valid.
	 */
	if (sc->sc_active != NULL) {
		/* Release the reference to the queue. */
		g_rr_queue_put(sc->sc_active);
		sc->sc_active = NULL;
		me.wait_hit--;
		me.wait_miss++;	/* record the miss */
	}
	g_sched_dispatch(geom);
	g_sched_unlock(geom);
}

/*
 * Module glue: allocate descriptor, initialize its fields.
 */
static void *
g_rr_init(struct g_geom *geom)
{
	struct g_rr_softc *sc;

	/* XXX check whether we can sleep */
	sc = malloc(sizeof *sc, M_GEOM_SCHED, M_NOWAIT | M_ZERO);
	if (sc == NULL)
		return (NULL);	/* allocation failed */
	sc->sc_geom = geom;
	TAILQ_INIT(&sc->sc_rr_tailq);
	callout_init(&sc->sc_wait, CALLOUT_MPSAFE);
	LIST_INSERT_HEAD(&me.sc_head, sc, sc_next);
	me.units++;

	return (sc);
}

/*
 * Module glue -- drain the callout structure, remove the instance
 * from the list, and free the descriptor.
 */
static void
g_rr_fini(void *data)
{
	struct g_rr_softc *sc = data;

	callout_drain(&sc->sc_wait);
	KASSERT(sc->sc_active == NULL, ("still a queue under service"));
	KASSERT(TAILQ_EMPTY(&sc->sc_rr_tailq), ("still scheduled queues"));

	LIST_REMOVE(sc, sc_next);
	me.units--;
	free(sc, M_GEOM_SCHED);
}

/*
 * Called when the request under service terminates.
 * Start the anticipation timer if needed.
 */
static void
g_rr_done(void *data, struct bio *bp)
{
	struct g_rr_softc *sc = data;
	struct g_rr_queue *qp;

	sc->sc_in_flight--;

	qp = bp->bio_caller1;
	if (qp == sc->sc_active && qp->q_status == G_QUEUE_BUSY) {
		if (!(qp->q_flags & G_FLAG_COMPLETED)) {
			qp->q_flags |= G_FLAG_COMPLETED;
			/* in case we want to make the slice adaptive */
			qp->q_slice_duration = get_bounded(&me.quantum_ms, 2);
			qp->q_slice_end = ticks + qp->q_slice_duration;
		}

		/* The queue is trying anticipation, start the timer. */
		qp->q_status = G_QUEUE_IDLING;
		/* may make this adaptive */
		qp->q_wait_ticks = get_bounded(&me.wait_ms, 2);
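		/*
		 * Optimistically count this as a hit; if the timer
		 * fires instead, g_rr_wait_timeout() takes the hit
		 * back and records a miss.
		 */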
		me.wait_hit++;
		callout_reset(&sc->sc_wait, qp->q_wait_ticks,
		    g_rr_wait_timeout, sc);
	} else
		g_sched_dispatch(sc->sc_geom);

	/* Release a reference to the queue. */
	g_rr_queue_put(qp);
}

static void
g_rr_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	if (indent == NULL) {	/* plaintext */
		sbuf_printf(sb, " units %d queues %d",
		    me.units, me.queues);
	}
}

static struct g_gsched g_rr = {
	.gs_name = "rr",
	.gs_priv_size = sizeof(struct g_rr_queue),
	.gs_init = g_rr_init,
	.gs_fini = g_rr_fini,
	.gs_start = g_rr_start,
	.gs_done = g_rr_done,
	.gs_next = g_rr_next,
	.gs_dumpconf = g_rr_dumpconf,
	.gs_init_class = g_rr_init_class,
	.gs_fini_class = g_rr_fini_class,
};

DECLARE_GSCHED_MODULE(rr, &g_rr);