/*-
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/atomic.h>

SDT_PROVIDER_DEFINE(io);

SDT_PROBE_DEFINE2(io, , , start, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , done, "struct bio *", "struct devstat *");
SDT_PROBE_DEFINE2(io, , , wait__start, "struct bio *",
    "struct devstat *");
SDT_PROBE_DEFINE2(io, , , wait__done, "struct bio *",
    "struct devstat *");

#define	DTRACE_DEVSTAT_START()		SDT_PROBE2(io, , , start, NULL, ds)
#define	DTRACE_DEVSTAT_BIO_START()	SDT_PROBE2(io, , , start, bp, ds)
#define	DTRACE_DEVSTAT_DONE()		SDT_PROBE2(io, , , done, NULL, ds)
#define	DTRACE_DEVSTAT_BIO_DONE()	SDT_PROBE2(io, , , done, bp, ds)
#define	DTRACE_DEVSTAT_WAIT_START()	SDT_PROBE2(io, , , wait__start, NULL, ds)
#define	DTRACE_DEVSTAT_WAIT_DONE()	SDT_PROBE2(io, , , wait__done, NULL, ds)
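
/*
 * The SDT probes above surface in userland as the DTrace "io" provider.
 * As an illustrative example only (assuming a kernel with KDTRACE_HOOKS),
 * I/O starts can be counted per process with a one-liner such as:
 *
 *	dtrace -n 'io:::start { @[execname] = count(); }'
 */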

static int devstat_num_devs;
static long devstat_generation = 1;
static int devstat_version = DEVSTAT_VERSION;
static int devstat_current_devnumber;
static struct mtx devstat_mutex;
MTX_SYSINIT(devstat_mutex, &devstat_mutex, "devstat", MTX_DEF);

static struct devstatlist device_statq = STAILQ_HEAD_INITIALIZER(device_statq);
static struct devstat *devstat_alloc(void);
static void devstat_free(struct devstat *);
static void devstat_add_entry(struct devstat *ds, const void *dev_name,
		       int unit_number, uint32_t block_size,
		       devstat_support_flags flags,
		       devstat_type_flags device_type,
		       devstat_priority priority);

/*
 * Allocate a devstat and initialize it.
 */
struct devstat *
devstat_new_entry(const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstat *ds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	ds = devstat_alloc();
	mtx_lock(&devstat_mutex);
	if (unit_number == -1) {
		ds->unit_number = unit_number;
		ds->id = dev_name;
		binuptime(&ds->creation_time);
		devstat_generation++;
	} else {
		devstat_add_entry(ds, dev_name, unit_number, block_size,
				  flags, device_type, priority);
	}
	mtx_unlock(&devstat_mutex);
	return (ds);
}
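
/*
 * Illustrative usage sketch only ("foo" and its softc are hypothetical):
 * a disk driver would typically create its entry at attach time and
 * remove it at detach time:
 *
 *	sc->sc_devstat = devstat_new_entry("foo", device_get_unit(dev),
 *	    DEV_BSIZE, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
 *	    DEVSTAT_PRIORITY_DISK);
 *	...
 *	devstat_remove_entry(sc->sc_devstat);
 */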

/*
 * Take a malloced and zeroed devstat structure given to us, fill it in
 * and add it to the queue of devices.
 */
static void
devstat_add_entry(struct devstat *ds, const void *dev_name,
		  int unit_number, uint32_t block_size,
		  devstat_support_flags flags,
		  devstat_type_flags device_type,
		  devstat_priority priority)
{
	struct devstatlist *devstat_head;
	struct devstat *ds_tmp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	devstat_num_devs++;

	devstat_head = &device_statq;

	/*
	 * Priority sort.  Each driver passes in its priority when it adds
	 * its devstat entry.  Drivers are sorted first by priority, and
	 * then by probe order.
	 *
	 * For the first device, we just insert it, since the priority
	 * doesn't really matter yet.  Subsequent devices are inserted into
	 * the list using the order outlined above.
	 */
	if (devstat_num_devs == 1)
		STAILQ_INSERT_TAIL(devstat_head, ds, dev_links);
	else {
		STAILQ_FOREACH(ds_tmp, devstat_head, dev_links) {
			struct devstat *ds_next;

			ds_next = STAILQ_NEXT(ds_tmp, dev_links);

			/*
			 * If we find a break between higher and lower
			 * priority items, and if this item fits in the
			 * break, insert it.  This also applies if the
			 * "lower priority item" is the end of the list.
			 */
			if ((priority <= ds_tmp->priority)
			 && ((ds_next == NULL)
			   || (priority > ds_next->priority))) {
				STAILQ_INSERT_AFTER(devstat_head, ds_tmp, ds,
						    dev_links);
				break;
			} else if (priority > ds_tmp->priority) {
				/*
				 * If this is the case, we should be able
				 * to insert ourselves at the head of the
				 * list.  If we can't, something is wrong.
				 */
				if (ds_tmp == STAILQ_FIRST(devstat_head)) {
					STAILQ_INSERT_HEAD(devstat_head,
							   ds, dev_links);
					break;
				} else {
					STAILQ_INSERT_TAIL(devstat_head,
							   ds, dev_links);
					printf("devstat_add_entry: HELP! "
					       "sorting problem detected "
					       "for name %p unit %d\n",
					       dev_name, unit_number);
					break;
				}
			}
		}
	}

	ds->device_number = devstat_current_devnumber++;
	ds->unit_number = unit_number;
	strlcpy(ds->device_name, dev_name, DEVSTAT_NAME_LEN);
	ds->block_size = block_size;
	ds->flags = flags;
	ds->device_type = device_type;
	ds->priority = priority;
	binuptime(&ds->creation_time);
	devstat_generation++;
}
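
/*
 * For example, since the list is kept in descending priority order, a
 * disk added with DEVSTAT_PRIORITY_DISK ends up ahead of a passthrough
 * device added earlier with the lower DEVSTAT_PRIORITY_PASS, while two
 * disks keep their relative probe order.
 */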

/*
 * Remove a devstat structure from the list of devices.
 */
void
devstat_remove_entry(struct devstat *ds)
{
	struct devstatlist *devstat_head;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (ds == NULL)
		return;

	mtx_lock(&devstat_mutex);

	devstat_head = &device_statq;

	/* Remove this entry from the devstat queue */
	atomic_add_acq_int(&ds->sequence1, 1);
	if (ds->unit_number != -1) {
		devstat_num_devs--;
		STAILQ_REMOVE(devstat_head, ds, devstat, dev_links);
	}
	devstat_free(ds);
	devstat_generation++;
	mtx_unlock(&devstat_mutex);
}

/*
 * Record a transaction start.
 *
 * See comments for devstat_end_transaction().  Ordering is very important
 * here.
 */
void
devstat_start_transaction(struct devstat *ds, struct bintime *now)
{

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/* sanity check */
	if (ds == NULL)
		return;

	atomic_add_acq_int(&ds->sequence1, 1);
	/*
	 * We only want to set the start time when we are going from idle
	 * to busy.  The start time is really the start of the latest busy
	 * period.
	 */
	if (ds->start_count == ds->end_count) {
		if (now != NULL)
			ds->busy_from = *now;
		else
			binuptime(&ds->busy_from);
	}
	ds->start_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
	DTRACE_DEVSTAT_START();
}

void
devstat_start_transaction_bio(struct devstat *ds, struct bio *bp)
{

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/* sanity check */
	if (ds == NULL)
		return;

	binuptime(&bp->bio_t0);
	devstat_start_transaction(ds, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_START();
}

/*
 * Record the ending of a transaction, and increment the various counters.
 *
 * Ordering in this function and in devstat_start_transaction() is VERY
 * important.  The idea here is to run without locks, so we are very
 * careful to only modify some fields on the way "down" (i.e. at
 * transaction start) and some fields on the way "up" (i.e. at transaction
 * completion).  One exception is busy_from, which we only modify in
 * devstat_start_transaction() when there are no outstanding transactions,
 * and thus it can't be modified in devstat_end_transaction()
 * simultaneously.
 *
 * The sequence0 and sequence1 fields are provided to enable an application
 * spying on the structures with mmap(2) to tell when a structure is in a
 * consistent state or not.
 *
 * For this to work 100% reliably, it is important that the two fields
 * are at opposite ends of the structure and that they are incremented
 * in the opposite order of how a memcpy(3) in userland would copy them.
 * We assume that the copying happens front to back, but there is actually
 * no way short of writing your own memcpy(3) replacement to guarantee
 * this will be the case.
 *
 * In addition, since these fields act as a kind of lock, they must be
 * updated with atomic instructions using appropriate memory barriers.
 */
void
devstat_end_transaction(struct devstat *ds, uint32_t bytes,
			devstat_tag_type tag_type, devstat_trans_flags flags,
			struct bintime *now, struct bintime *then)
{
	struct bintime dt, lnow;

	/* sanity check */
	if (ds == NULL)
		return;

	if (now == NULL) {
		now = &lnow;
		binuptime(now);
	}

	atomic_add_acq_int(&ds->sequence1, 1);
	/* Update byte and operations counts */
	ds->bytes[flags] += bytes;
	ds->operations[flags]++;

	/*
	 * Keep a count of the various tag types sent.
	 */
	if ((ds->flags & DEVSTAT_NO_ORDERED_TAGS) == 0 &&
	    tag_type != DEVSTAT_TAG_NONE)
		ds->tag_types[tag_type]++;

	if (then != NULL) {
		/* Update duration of operations */
		dt = *now;
		bintime_sub(&dt, then);
		bintime_add(&ds->duration[flags], &dt);
	}

	/* Accumulate busy time */
	dt = *now;
	bintime_sub(&dt, &ds->busy_from);
	bintime_add(&ds->busy_time, &dt);
	ds->busy_from = *now;

	ds->end_count++;
	atomic_add_rel_int(&ds->sequence0, 1);
	DTRACE_DEVSTAT_DONE();
}
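
/*
 * A minimal sketch of the consistency check a userland consumer of the
 * mmap(2)'ed structures would perform, per the protocol described above
 * (names here are illustrative):
 *
 *	struct devstat snap;
 *
 *	do {
 *		memcpy(&snap, mapped_ds, sizeof(snap));
 *	} while (snap.sequence0 != snap.sequence1);
 *
 * Because the kernel bumps sequence1 (at the end of the structure) before
 * an update and sequence0 (at the front) after it, a front-to-back copy
 * that sees matching values must be a consistent snapshot.
 */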

void
devstat_end_transaction_bio(struct devstat *ds, struct bio *bp)
{

	devstat_end_transaction_bio_bt(ds, bp, NULL);
}

void
devstat_end_transaction_bio_bt(struct devstat *ds, struct bio *bp,
    struct bintime *now)
{
	devstat_trans_flags flg;

	/* sanity check */
	if (ds == NULL)
		return;

	if (bp->bio_cmd == BIO_DELETE)
		flg = DEVSTAT_FREE;
	else if ((bp->bio_cmd == BIO_READ)
	      || ((bp->bio_cmd == BIO_ZONE)
	       && (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)))
		flg = DEVSTAT_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		flg = DEVSTAT_WRITE;
	else
		flg = DEVSTAT_NO_DATA;

	devstat_end_transaction(ds, bp->bio_bcount - bp->bio_resid,
				DEVSTAT_TAG_SIMPLE, flg, now, &bp->bio_t0);
	DTRACE_DEVSTAT_BIO_DONE();
}
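
/*
 * Illustrative pairing (hypothetical "foo" driver): the bio wrappers are
 * meant to bracket each request in the driver's start and completion
 * paths:
 *
 *	foo_strategy:	devstat_start_transaction_bio(sc->sc_devstat, bp);
 *	foo_done:	devstat_end_transaction_bio(sc->sc_devstat, bp);
 *
 * devstat_start_transaction_bio() stamps bp->bio_t0, which the completion
 * side then passes to devstat_end_transaction() as the "then" time.
 */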

/*
 * This is the sysctl handler for the devstat package.  The data pushed out
 * on the kern.devstat.all sysctl variable consists of the current devstat
 * generation number, and then an array of devstat structures, one for each
 * device in the system.
 *
 * This is more cryptic than obvious, but basically we neither can nor
 * want to hold the devstat_mutex for any amount of time, so we grab it
 * only when we need to and keep an eye on devstat_generation all the time.
 */
static int
sysctl_devstat(SYSCTL_HANDLER_ARGS)
{
	int error;
	long mygen;
	struct devstat *nds;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);

	/*
	 * XXX devstat_generation should really be "volatile" but that
	 * XXX freaks out the sysctl macro below.  The places where we
	 * XXX change it and inspect it are bracketed in the mutex which
	 * XXX guarantees us proper write barriers.  I don't believe the
	 * XXX compiler is allowed to optimize mygen away across calls
	 * XXX to other functions, so the following is believed to be safe.
	 */
	mygen = devstat_generation;

	error = SYSCTL_OUT(req, &mygen, sizeof(mygen));

	if (devstat_num_devs == 0)
		return (0);

	if (error != 0)
		return (error);

	mtx_lock(&devstat_mutex);
	nds = STAILQ_FIRST(&device_statq);
	if (mygen != devstat_generation)
		error = EBUSY;
	mtx_unlock(&devstat_mutex);

	if (error != 0)
		return (error);

	while (nds != NULL) {
		error = SYSCTL_OUT(req, nds, sizeof(struct devstat));
		if (error != 0)
			return (error);
		mtx_lock(&devstat_mutex);
		if (mygen != devstat_generation)
			error = EBUSY;
		else
			nds = STAILQ_NEXT(nds, dev_links);
		mtx_unlock(&devstat_mutex);
		if (error != 0)
			return (error);
	}
	return (error);
}
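
/*
 * A minimal sketch (illustrative; error handling and retry loop abridged)
 * of a userland consumer of kern.devstat.all:
 *
 *	size_t len = 0;
 *
 *	sysctlbyname("kern.devstat.all", NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	if (sysctlbyname("kern.devstat.all", buf, &len, NULL, 0) == -1 &&
 *	    errno == EBUSY)
 *		... retry: the device list changed mid-read ...
 *	generation = *(long *)buf;
 *	devs = (struct devstat *)(buf + sizeof(long));
 */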

/*
 * Sysctl entries for devstat.  The first one is a node that all the rest
 * hang off of.
 */
static SYSCTL_NODE(_kern, OID_AUTO, devstat, CTLFLAG_RD, NULL,
    "Device Statistics");

SYSCTL_PROC(_kern_devstat, OID_AUTO, all, CTLFLAG_RD|CTLTYPE_OPAQUE,
    NULL, 0, sysctl_devstat, "S,devstat", "All devices in the devstat list");
/*
 * Export the number of devices in the system so that userland utilities
 * can determine how much memory to allocate to hold all the devices.
 */
SYSCTL_INT(_kern_devstat, OID_AUTO, numdevs, CTLFLAG_RD,
    &devstat_num_devs, 0, "Number of devices in the devstat list");
SYSCTL_LONG(_kern_devstat, OID_AUTO, generation, CTLFLAG_RD,
    &devstat_generation, 0, "Devstat list generation");
SYSCTL_INT(_kern_devstat, OID_AUTO, version, CTLFLAG_RD,
    &devstat_version, 0, "Devstat list version number");

/*
 * Allocator for struct devstat structures.  We sub-allocate these from pages
 * which we get from malloc.  These pages are exported for mmap(2)'ing through
 * a miniature device driver.
 */

#define statsperpage (PAGE_SIZE / sizeof(struct devstat))

static d_mmap_t devstat_mmap;

static struct cdevsw devstat_cdevsw = {
	.d_version =	D_VERSION,
	.d_mmap =	devstat_mmap,
	.d_name =	"devstat",
};

struct statspage {
	TAILQ_ENTRY(statspage)	list;
	struct devstat		*stat;
	u_int			nfree;
};

static TAILQ_HEAD(, statspage)	pagelist = TAILQ_HEAD_INITIALIZER(pagelist);
static MALLOC_DEFINE(M_DEVSTAT, "devstat", "Device statistics");

static int
devstat_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	struct statspage *spp;

	if (nprot != VM_PROT_READ)
		return (-1);
	mtx_lock(&devstat_mutex);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (offset == 0) {
			*paddr = vtophys(spp->stat);
			mtx_unlock(&devstat_mutex);
			return (0);
		}
		offset -= PAGE_SIZE;
	}
	mtx_unlock(&devstat_mutex);
	return (-1);
}
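
/*
 * Sketch of the matching userland side (illustrative only): the stats
 * pages can be mapped read-only straight out of the kernel, one page per
 * statspage in list order:
 *
 *	fd = open("/dev/" DEVSTAT_DEVICE_NAME, O_RDONLY);
 *	p = mmap(NULL, npages * PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
 *
 * Each page then holds "statsperpage" struct devstat slots.
 */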

static struct devstat *
devstat_alloc(void)
{
	struct devstat *dsp;
	struct statspage *spp, *spp2;
	u_int u;
	static int once;

	mtx_assert(&devstat_mutex, MA_NOTOWNED);
	if (!once) {
		make_dev_credf(MAKEDEV_ETERNAL | MAKEDEV_CHECKNAME,
		    &devstat_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0444,
		    DEVSTAT_DEVICE_NAME);
		once = 1;
	}
	spp2 = NULL;
	mtx_lock(&devstat_mutex);
	for (;;) {
		TAILQ_FOREACH(spp, &pagelist, list) {
			if (spp->nfree > 0)
				break;
		}
		if (spp != NULL)
			break;
		mtx_unlock(&devstat_mutex);
		spp2 = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK);
		spp2->nfree = statsperpage;

		/*
		 * If free statspages were added while the lock was released,
		 * just reuse them.
		 */
		mtx_lock(&devstat_mutex);
		TAILQ_FOREACH(spp, &pagelist, list)
			if (spp->nfree > 0)
				break;
		if (spp == NULL) {
			spp = spp2;

			/*
			 * It would make more sense to add the new page at
			 * the head, but the order of the list determines
			 * the sequence of the mapping, so we can't do that.
			 */
			TAILQ_INSERT_TAIL(&pagelist, spp, list);
		} else
			break;
	}
	dsp = spp->stat;
	for (u = 0; u < statsperpage; u++) {
		if (dsp->allocated == 0)
			break;
		dsp++;
	}
	spp->nfree--;
	dsp->allocated = 1;
	mtx_unlock(&devstat_mutex);
	if (spp2 != NULL && spp2 != spp) {
		free(spp2->stat, M_DEVSTAT);
		free(spp2, M_DEVSTAT);
	}
	return (dsp);
}

static void
devstat_free(struct devstat *dsp)
{
	struct statspage *spp;

	mtx_assert(&devstat_mutex, MA_OWNED);
	bzero(dsp, sizeof *dsp);
	TAILQ_FOREACH(spp, &pagelist, list) {
		if (dsp >= spp->stat && dsp < (spp->stat + statsperpage)) {
			spp->nfree++;
			return;
		}
	}
}

SYSCTL_INT(_debug_sizeof, OID_AUTO, devstat, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, sizeof(struct devstat), "sizeof(struct devstat)");