/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/dnode.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_zfetch.h>
#include <sys/dmu.h>
#include <sys/dbuf.h>
#include <sys/kstat.h>

/*
 * I'm against tunables, but these should probably exist as tweakable globals
 * until we can get this working the way we want it to.
 */

int zfs_prefetch_disable = 0;

/* max # of streams per zfetch */
uint32_t	zfetch_max_streams = 8;
/* min time before stream reclaim */
uint32_t	zfetch_min_sec_reap = 2;
/* max number of blocks to fetch at a time */
uint32_t	zfetch_block_cap = 256;
/* number of bytes in an array_read at which we stop prefetching (1MB) */
uint64_t	zfetch_array_rd_sz = 1024 * 1024;

SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, prefetch_disable, CTLFLAG_RW,
    &zfs_prefetch_disable, 0, "Disable prefetch");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH");
TUNABLE_INT("vfs.zfs.zfetch.max_streams", &zfetch_max_streams);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_streams, CTLFLAG_RW,
    &zfetch_max_streams, 0, "Max # of streams per zfetch");
TUNABLE_INT("vfs.zfs.zfetch.min_sec_reap", &zfetch_min_sec_reap);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, min_sec_reap, CTLFLAG_RDTUN,
    &zfetch_min_sec_reap, 0, "Min time before stream reclaim");
TUNABLE_INT("vfs.zfs.zfetch.block_cap", &zfetch_block_cap);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, block_cap, CTLFLAG_RDTUN,
    &zfetch_block_cap, 0, "Max number of blocks to fetch at a time");
TUNABLE_QUAD("vfs.zfs.zfetch.array_rd_sz", &zfetch_array_rd_sz);
SYSCTL_UQUAD(_vfs_zfs_zfetch, OID_AUTO, array_rd_sz, CTLFLAG_RDTUN,
    &zfetch_array_rd_sz, 0,
    "Number of bytes in an array_read at which we stop prefetching");

/* forward decls for static routines */
static boolean_t	dmu_zfetch_colinear(zfetch_t *, zstream_t *);
static void		dmu_zfetch_dofetch(zfetch_t *, zstream_t *);
static uint64_t		dmu_zfetch_fetch(dnode_t *, uint64_t, uint64_t);
static uint64_t		dmu_zfetch_fetchsz(dnode_t *, uint64_t, uint64_t);
static boolean_t	dmu_zfetch_find(zfetch_t *, zstream_t *, int);
static int		dmu_zfetch_stream_insert(zfetch_t *, zstream_t *);
static zstream_t	*dmu_zfetch_stream_reclaim(zfetch_t *);
static void		dmu_zfetch_stream_remove(zfetch_t *, zstream_t *);
static int		dmu_zfetch_streams_equal(zstream_t *, zstream_t *);

typedef struct zfetch_stats {
	kstat_named_t zfetchstat_hits;
	kstat_named_t zfetchstat_misses;
	kstat_named_t zfetchstat_colinear_hits;
	kstat_named_t zfetchstat_colinear_misses;
	kstat_named_t zfetchstat_stride_hits;
	kstat_named_t zfetchstat_stride_misses;
	kstat_named_t zfetchstat_reclaim_successes;
	kstat_named_t zfetchstat_reclaim_failures;
	kstat_named_t zfetchstat_stream_resets;
	kstat_named_t zfetchstat_stream_noresets;
	kstat_named_t zfetchstat_bogus_streams;
} zfetch_stats_t;

static zfetch_stats_t zfetch_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "colinear_hits",		KSTAT_DATA_UINT64 },
	{ "colinear_misses",		KSTAT_DATA_UINT64 },
	{ "stride_hits",		KSTAT_DATA_UINT64 },
	{ "stride_misses",		KSTAT_DATA_UINT64 },
	{ "reclaim_successes",		KSTAT_DATA_UINT64 },
	{ "reclaim_failures",		KSTAT_DATA_UINT64 },
	{ "streams_resets",		KSTAT_DATA_UINT64 },
	{ "streams_noresets",		KSTAT_DATA_UINT64 },
	{ "bogus_streams",		KSTAT_DATA_UINT64 },
};

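/*
 * The statistics above are updated via atomic_add_64() in the macros below,
 * so individual counters can be bumped without holding any zfetch lock.
 */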
#define	ZFETCHSTAT_INCR(stat, val) \
	atomic_add_64(&zfetch_stats.stat.value.ui64, (val));

#define	ZFETCHSTAT_BUMP(stat)		ZFETCHSTAT_INCR(stat, 1);

kstat_t		*zfetch_ksp;

/*
 * Given a zfetch structure and a zstream structure, determine whether the
 * blocks to be read are part of a co-linear pair of existing prefetch
 * streams.  If a set is found, coalesce the streams, removing one, and
 * configure the prefetch so it looks for a strided access pattern.
 *
 * In other words: if we find two sequential access streams that are
 * the same length and distance N apart, and this read is N from the
 * last stream, then we are probably in a strided access pattern.  So
 * combine the two sequential streams into a single strided stream.
 *
 * Returns whether co-linear streams were found.
 */
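/*
 * Illustrative example (hypothetical block numbers): two sequential streams
 * covering blocks [100, 103] and [110, 113] start 10 blocks apart; a new
 * read at block 120 is another 10 blocks on, so the two streams collapse
 * into a single stream at offset 120 with a stride of 10 blocks.
 */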
static boolean_t
dmu_zfetch_colinear(zfetch_t *zf, zstream_t *zh)
{
	zstream_t	*z_walk;
	zstream_t	*z_comp;

	if (! rw_tryenter(&zf->zf_rwlock, RW_WRITER))
		return (0);

	if (zh == NULL) {
		rw_exit(&zf->zf_rwlock);
		return (0);
	}

	for (z_walk = list_head(&zf->zf_stream); z_walk;
	    z_walk = list_next(&zf->zf_stream, z_walk)) {
		for (z_comp = list_next(&zf->zf_stream, z_walk); z_comp;
		    z_comp = list_next(&zf->zf_stream, z_comp)) {
			int64_t		diff;

			if (z_walk->zst_len != z_walk->zst_stride ||
			    z_comp->zst_len != z_comp->zst_stride) {
				continue;
			}

			diff = z_comp->zst_offset - z_walk->zst_offset;
			if (z_comp->zst_offset + diff == zh->zst_offset) {
				z_walk->zst_offset = zh->zst_offset;
				z_walk->zst_direction = diff < 0 ? -1 : 1;
				z_walk->zst_stride =
				    diff * z_walk->zst_direction;
				z_walk->zst_ph_offset =
				    zh->zst_offset + z_walk->zst_stride;
				dmu_zfetch_stream_remove(zf, z_comp);
				mutex_destroy(&z_comp->zst_lock);
				kmem_free(z_comp, sizeof (zstream_t));

				dmu_zfetch_dofetch(zf, z_walk);

				rw_exit(&zf->zf_rwlock);
				return (1);
			}

			diff = z_walk->zst_offset - z_comp->zst_offset;
			if (z_walk->zst_offset + diff == zh->zst_offset) {
				z_walk->zst_offset = zh->zst_offset;
				z_walk->zst_direction = diff < 0 ? -1 : 1;
				z_walk->zst_stride =
				    diff * z_walk->zst_direction;
				z_walk->zst_ph_offset =
				    zh->zst_offset + z_walk->zst_stride;
				dmu_zfetch_stream_remove(zf, z_comp);
				mutex_destroy(&z_comp->zst_lock);
				kmem_free(z_comp, sizeof (zstream_t));

				dmu_zfetch_dofetch(zf, z_walk);

				rw_exit(&zf->zf_rwlock);
				return (1);
			}
		}
	}

	rw_exit(&zf->zf_rwlock);
	return (0);
}

/*
 * Given a zstream_t, determine the bounds of the prefetch.  Then call the
 * routine that actually prefetches the individual blocks.
 */
static void
dmu_zfetch_dofetch(zfetch_t *zf, zstream_t *zs)
{
	uint64_t	prefetch_tail;
	uint64_t	prefetch_limit;
	uint64_t	prefetch_ofst;
	uint64_t	prefetch_len;
	uint64_t	blocks_fetched;

	zs->zst_stride = MAX((int64_t)zs->zst_stride, zs->zst_len);
	zs->zst_cap = MIN(zfetch_block_cap, 2 * zs->zst_cap);

	prefetch_tail = MAX((int64_t)zs->zst_ph_offset,
	    (int64_t)(zs->zst_offset + zs->zst_stride));
	/*
	 * XXX: use a faster division method?
	 */
	prefetch_limit = zs->zst_offset + zs->zst_len +
	    (zs->zst_cap * zs->zst_stride) / zs->zst_len;

	while (prefetch_tail < prefetch_limit) {
		prefetch_ofst = zs->zst_offset + zs->zst_direction *
		    (prefetch_tail - zs->zst_offset);

		prefetch_len = zs->zst_len;

		/*
		 * Don't prefetch before the start of the file when working
		 * backwards; the offset computation above underflows in
		 * that case.
		 */
		if ((zs->zst_direction == ZFETCH_BACKWARD) &&
		    (prefetch_ofst > prefetch_tail)) {
			prefetch_len += prefetch_ofst;
			prefetch_ofst = 0;
		}

		/* don't prefetch more than we're supposed to */
		if (prefetch_len > zs->zst_len)
			break;

		blocks_fetched = dmu_zfetch_fetch(zf->zf_dnode,
		    prefetch_ofst, zs->zst_len);

		prefetch_tail += zs->zst_stride;
		/* stop if we've run out of stuff to prefetch */
		if (blocks_fetched < zs->zst_len)
			break;
	}
	zs->zst_ph_offset = prefetch_tail;
	zs->zst_last = ddi_get_lbolt();
}

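/*
 * Create and install the "zfetchstats" kstat so the prefetch statistics
 * above can be inspected from userland (e.g. kstat(1M) on Solaris, or the
 * kstat.zfs.misc.zfetchstats sysctl tree on FreeBSD).
 */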
void
zfetch_init(void)
{
	zfetch_ksp = kstat_create("zfs", 0, "zfetchstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (zfetch_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (zfetch_ksp != NULL) {
		zfetch_ksp->ks_data = &zfetch_stats;
		kstat_install(zfetch_ksp);
	}
}

void
zfetch_fini(void)
{
	if (zfetch_ksp != NULL) {
		kstat_delete(zfetch_ksp);
		zfetch_ksp = NULL;
	}
}

/*
 * This takes a pointer to a zfetch structure and a dnode.  It performs the
 * necessary setup for the zfetch structure, grokking data from the
 * associated dnode.
 */
void
dmu_zfetch_init(zfetch_t *zf, dnode_t *dno)
{
	if (zf == NULL) {
		return;
	}

	zf->zf_dnode = dno;
	zf->zf_stream_cnt = 0;
	zf->zf_alloc_fail = 0;

	list_create(&zf->zf_stream, sizeof (zstream_t),
	    offsetof(zstream_t, zst_node));

	rw_init(&zf->zf_rwlock, NULL, RW_DEFAULT, NULL);
}

/*
 * This function computes the actual size, in blocks, that can be prefetched,
 * and fetches it.
 */
static uint64_t
dmu_zfetch_fetch(dnode_t *dn, uint64_t blkid, uint64_t nblks)
{
	uint64_t	fetchsz;
	uint64_t	i;

	fetchsz = dmu_zfetch_fetchsz(dn, blkid, nblks);

	for (i = 0; i < fetchsz; i++) {
		dbuf_prefetch(dn, blkid + i);
	}

	return (fetchsz);
}

/*
 * This function returns the number of blocks that would be prefetched, based
 * upon the supplied dnode, blockid, and nblks.  This is used so that we can
 * update streams in place, and then prefetch with their old value after the
 * fact.  This way, we can delay the prefetch, but subsequent accesses to the
 * stream won't result in the same data being prefetched multiple times.
 */
static uint64_t
dmu_zfetch_fetchsz(dnode_t *dn, uint64_t blkid, uint64_t nblks)
{
	uint64_t	fetchsz;

	if (blkid > dn->dn_maxblkid) {
		return (0);
	}

	/* compute fetch size */
	if (blkid + nblks + 1 > dn->dn_maxblkid) {
		fetchsz = (dn->dn_maxblkid - blkid) + 1;
		ASSERT(blkid + fetchsz - 1 <= dn->dn_maxblkid);
	} else {
		fetchsz = nblks;
	}

	return (fetchsz);
}

/*
 * Given a zfetch and a zstream structure, see if there is an associated
 * zstream for this block read.  If so, start a prefetch on the stream that
 * was located and return true; otherwise return false.
 */
static boolean_t
dmu_zfetch_find(zfetch_t *zf, zstream_t *zh, int prefetched)
{
	zstream_t	*zs;
	int64_t		diff;
	int		reset = !prefetched;
	int		rc = 0;

	if (zh == NULL)
		return (0);

	/*
	 * XXX: This locking strategy is a bit coarse; however, its impact has
	 * yet to be tested.  If this turns out to be an issue, it can be
	 * modified in a number of different ways.
	 */

	rw_enter(&zf->zf_rwlock, RW_READER);
top:

	for (zs = list_head(&zf->zf_stream); zs;
	    zs = list_next(&zf->zf_stream, zs)) {

		/*
		 * XXX - should this be an assert?
		 */
		if (zs->zst_len == 0) {
			/* bogus stream */
			ZFETCHSTAT_BUMP(zfetchstat_bogus_streams);
			continue;
		}

		/*
		 * We hit this case when we are in a strided prefetch stream:
		 * we will read "len" blocks before "striding".
		 */
		if (zh->zst_offset >= zs->zst_offset &&
		    zh->zst_offset < zs->zst_offset + zs->zst_len) {
			if (prefetched) {
				/* already fetched */
				ZFETCHSTAT_BUMP(zfetchstat_stride_hits);
				rc = 1;
				goto out;
			} else {
				ZFETCHSTAT_BUMP(zfetchstat_stride_misses);
			}
		}

		/*
		 * This is the forward sequential read case: we increment
		 * len by one each time we hit here, so we will enter this
		 * case on every read.
		 */
		if (zh->zst_offset == zs->zst_offset + zs->zst_len) {

			reset = !prefetched && zs->zst_len > 1;

			if (mutex_tryenter(&zs->zst_lock) == 0) {
				rc = 1;
				goto out;
			}

			if (zh->zst_offset != zs->zst_offset + zs->zst_len) {
				mutex_exit(&zs->zst_lock);
				goto top;
			}
			zs->zst_len += zh->zst_len;
			diff = zs->zst_len - zfetch_block_cap;
			if (diff > 0) {
				zs->zst_offset += diff;
				zs->zst_len = zs->zst_len > diff ?
				    zs->zst_len - diff : 0;
			}
			zs->zst_direction = ZFETCH_FORWARD;

			break;

		/*
		 * Same as above, but reading backwards through the file.
		 */
		} else if (zh->zst_offset == zs->zst_offset - zh->zst_len) {
			/* backwards sequential access */

			reset = !prefetched && zs->zst_len > 1;

			if (mutex_tryenter(&zs->zst_lock) == 0) {
				rc = 1;
				goto out;
			}

			if (zh->zst_offset != zs->zst_offset - zh->zst_len) {
				mutex_exit(&zs->zst_lock);
				goto top;
			}

			zs->zst_offset = zs->zst_offset > zh->zst_len ?
			    zs->zst_offset - zh->zst_len : 0;
			zs->zst_ph_offset = zs->zst_ph_offset > zh->zst_len ?
			    zs->zst_ph_offset - zh->zst_len : 0;
			zs->zst_len += zh->zst_len;

			diff = zs->zst_len - zfetch_block_cap;
			if (diff > 0) {
				zs->zst_ph_offset = zs->zst_ph_offset > diff ?
				    zs->zst_ph_offset - diff : 0;
				zs->zst_len = zs->zst_len > diff ?
				    zs->zst_len - diff : zs->zst_len;
			}
			zs->zst_direction = ZFETCH_BACKWARD;

			break;

		} else if ((zh->zst_offset - zs->zst_offset - zs->zst_stride <
		    zs->zst_len) && (zs->zst_len != zs->zst_stride)) {
			/* strided forward access */

			if (mutex_tryenter(&zs->zst_lock) == 0) {
				rc = 1;
				goto out;
			}

			if ((zh->zst_offset - zs->zst_offset - zs->zst_stride >=
			    zs->zst_len) || (zs->zst_len == zs->zst_stride)) {
				mutex_exit(&zs->zst_lock);
				goto top;
			}

			zs->zst_offset += zs->zst_stride;
			zs->zst_direction = ZFETCH_FORWARD;

			break;

		} else if ((zh->zst_offset - zs->zst_offset + zs->zst_stride <
		    zs->zst_len) && (zs->zst_len != zs->zst_stride)) {
			/* strided reverse access */

			if (mutex_tryenter(&zs->zst_lock) == 0) {
				rc = 1;
				goto out;
			}

			if ((zh->zst_offset - zs->zst_offset + zs->zst_stride >=
			    zs->zst_len) || (zs->zst_len == zs->zst_stride)) {
				mutex_exit(&zs->zst_lock);
				goto top;
			}

			zs->zst_offset = zs->zst_offset > zs->zst_stride ?
			    zs->zst_offset - zs->zst_stride : 0;
			zs->zst_ph_offset = (zs->zst_ph_offset >
			    (2 * zs->zst_stride)) ?
			    (zs->zst_ph_offset - (2 * zs->zst_stride)) : 0;
			zs->zst_direction = ZFETCH_BACKWARD;

			break;
		}
	}

	if (zs) {
		if (reset) {
			zstream_t *remove = zs;

			ZFETCHSTAT_BUMP(zfetchstat_stream_resets);
			rc = 0;
			mutex_exit(&zs->zst_lock);
			rw_exit(&zf->zf_rwlock);
			rw_enter(&zf->zf_rwlock, RW_WRITER);
			/*
			 * Relocate the stream, in case someone removes
			 * it while we were acquiring the WRITER lock.
			 */
			for (zs = list_head(&zf->zf_stream); zs;
			    zs = list_next(&zf->zf_stream, zs)) {
				if (zs == remove) {
					dmu_zfetch_stream_remove(zf, zs);
					mutex_destroy(&zs->zst_lock);
					kmem_free(zs, sizeof (zstream_t));
					break;
				}
			}
		} else {
			ZFETCHSTAT_BUMP(zfetchstat_stream_noresets);
			rc = 1;
			dmu_zfetch_dofetch(zf, zs);
			mutex_exit(&zs->zst_lock);
		}
	}
out:
	rw_exit(&zf->zf_rwlock);
	return (rc);
}

/*
 * Clean-up state associated with a zfetch structure.  This frees allocated
 * structure members, empties the zf_stream list, and generally makes things
 * nice.  This doesn't free the zfetch_t itself, that's left to the caller.
 */
void
dmu_zfetch_rele(zfetch_t *zf)
{
	zstream_t	*zs;
	zstream_t	*zs_next;

	ASSERT(!RW_LOCK_HELD(&zf->zf_rwlock));

	for (zs = list_head(&zf->zf_stream); zs; zs = zs_next) {
		zs_next = list_next(&zf->zf_stream, zs);

		list_remove(&zf->zf_stream, zs);
		mutex_destroy(&zs->zst_lock);
		kmem_free(zs, sizeof (zstream_t));
	}
	list_destroy(&zf->zf_stream);
	rw_destroy(&zf->zf_rwlock);

	zf->zf_dnode = NULL;
}

/*
 * Given a zfetch and zstream structure, insert the zstream structure into the
 * stream list contained within the zfetch structure.  Perform the appropriate
 * book-keeping.  It is possible that another thread has inserted a stream
 * which matches one that we are about to insert, so we must be sure to check
 * for this case.  If one is found, return failure, and let the caller clean
 * up the duplicates.
 */
static int
dmu_zfetch_stream_insert(zfetch_t *zf, zstream_t *zs)
{
	zstream_t	*zs_walk;
	zstream_t	*zs_next;

	ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));

	for (zs_walk = list_head(&zf->zf_stream); zs_walk; zs_walk = zs_next) {
		zs_next = list_next(&zf->zf_stream, zs_walk);

		if (dmu_zfetch_streams_equal(zs_walk, zs)) {
			return (0);
		}
	}

	list_insert_head(&zf->zf_stream, zs);
	zf->zf_stream_cnt++;
	return (1);
}

/*
 * Walk the list of zstreams in the given zfetch, find an old one (by time),
 * and reclaim it for use by the caller.
 */
static zstream_t *
dmu_zfetch_stream_reclaim(zfetch_t *zf)
{
	zstream_t	*zs;

	if (! rw_tryenter(&zf->zf_rwlock, RW_WRITER))
		return (NULL);

	for (zs = list_head(&zf->zf_stream); zs;
	    zs = list_next(&zf->zf_stream, zs)) {

		if (((ddi_get_lbolt() - zs->zst_last)/hz) > zfetch_min_sec_reap)
			break;
	}

	if (zs) {
		dmu_zfetch_stream_remove(zf, zs);
		mutex_destroy(&zs->zst_lock);
		bzero(zs, sizeof (zstream_t));
	} else {
		zf->zf_alloc_fail++;
	}
	rw_exit(&zf->zf_rwlock);

	return (zs);
}

/*
 * Given a zfetch and zstream structure, remove the zstream structure from its
 * container in the zfetch structure.  Perform the appropriate book-keeping.
 */
static void
dmu_zfetch_stream_remove(zfetch_t *zf, zstream_t *zs)
{
	ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));

	list_remove(&zf->zf_stream, zs);
	zf->zf_stream_cnt--;
}

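/*
 * Compare two zstreams field by field; return nonzero only if every field
 * (offset, length, stride, prefetch offset, cap, and direction) matches.
 * Used by dmu_zfetch_stream_insert() to detect duplicate streams.
 */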
static int
dmu_zfetch_streams_equal(zstream_t *zs1, zstream_t *zs2)
{
	if (zs1->zst_offset != zs2->zst_offset)
		return (0);

	if (zs1->zst_len != zs2->zst_len)
		return (0);

	if (zs1->zst_stride != zs2->zst_stride)
		return (0);

	if (zs1->zst_ph_offset != zs2->zst_ph_offset)
		return (0);

	if (zs1->zst_cap != zs2->zst_cap)
		return (0);

	if (zs1->zst_direction != zs2->zst_direction)
		return (0);

	return (1);
}

/*
 * This is the prefetch entry point.  It calls all of the other dmu_zfetch
 * routines to create, delete, find, or operate upon prefetch streams.
 */
void
dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
{
	zstream_t	zst;
	zstream_t	*newstream;
	boolean_t	fetched;
	int		inserted;
	unsigned int	blkshft;
	uint64_t	blksz;

	if (zfs_prefetch_disable)
		return;

	/* files that aren't ln2 blocksz are only one block -- nothing to do */
	if (!zf->zf_dnode->dn_datablkshift)
		return;

	/* convert offset and size into blockid and nblocks */
	blkshft = zf->zf_dnode->dn_datablkshift;
	blksz = (1 << blkshft);

	bzero(&zst, sizeof (zstream_t));
	zst.zst_offset = offset >> blkshft;
	zst.zst_len = (P2ROUNDUP(offset + size, blksz) -
	    P2ALIGN(offset, blksz)) >> blkshft;
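	/*
	 * Example (hypothetical numbers): with a 128K block size
	 * (blkshft == 17), a 4K read at byte offset 130K touches only
	 * block 1, so zst_offset == 1 and zst_len == 1; a 4K read at
	 * byte offset 127K straddles blocks 0 and 1, so zst_len == 2.
	 */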

	fetched = dmu_zfetch_find(zf, &zst, prefetched);
	if (fetched) {
		ZFETCHSTAT_BUMP(zfetchstat_hits);
	} else {
		ZFETCHSTAT_BUMP(zfetchstat_misses);
		fetched = dmu_zfetch_colinear(zf, &zst);
		if (fetched) {
			ZFETCHSTAT_BUMP(zfetchstat_colinear_hits);
		} else {
			ZFETCHSTAT_BUMP(zfetchstat_colinear_misses);
		}
	}

	if (!fetched) {
		newstream = dmu_zfetch_stream_reclaim(zf);

		/*
		 * If we couldn't reclaim an old stream, allocate a new one as
		 * long as we are still under the per-file stream limit.
		 * Otherwise, give up and go home.
		 */
		if (newstream) {
			ZFETCHSTAT_BUMP(zfetchstat_reclaim_successes);
		} else {
			uint64_t	maxblocks;
			uint32_t	max_streams;
			uint32_t	cur_streams;

			ZFETCHSTAT_BUMP(zfetchstat_reclaim_failures);
			cur_streams = zf->zf_stream_cnt;
			maxblocks = zf->zf_dnode->dn_maxblkid;

			max_streams = MIN(zfetch_max_streams,
			    (maxblocks / zfetch_block_cap));
			if (max_streams == 0) {
				max_streams++;
			}

			if (cur_streams >= max_streams) {
				return;
			}
			newstream = kmem_zalloc(sizeof (zstream_t), KM_SLEEP);
		}

		newstream->zst_offset = zst.zst_offset;
		newstream->zst_len = zst.zst_len;
		newstream->zst_stride = zst.zst_len;
		newstream->zst_ph_offset = zst.zst_len + zst.zst_offset;
		newstream->zst_cap = zst.zst_len;
		newstream->zst_direction = ZFETCH_FORWARD;
		newstream->zst_last = ddi_get_lbolt();

		mutex_init(&newstream->zst_lock, NULL, MUTEX_DEFAULT, NULL);

		rw_enter(&zf->zf_rwlock, RW_WRITER);
		inserted = dmu_zfetch_stream_insert(zf, newstream);
		rw_exit(&zf->zf_rwlock);

		if (!inserted) {
			mutex_destroy(&newstream->zst_lock);
			kmem_free(newstream, sizeof (zstream_t));
		}
	}
}