/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dnode.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_zfetch.h>
#include <sys/dmu.h>
#include <sys/dbuf.h>
#include <sys/kstat.h>

/*
 * This tunable disables predictive prefetch. Note that it leaves "prescient"
 * prefetch (e.g. prefetch for zfs send) intact. Unlike predictive prefetch,
 * prescient prefetch never issues i/os that end up not being needed,
 * so it can't hurt performance.
 */
boolean_t zfs_prefetch_disable = B_FALSE;

/* max # of streams per zfetch */
uint32_t zfetch_max_streams = 8;
/* min time before stream reclaim */
uint32_t zfetch_min_sec_reap = 2;
/* max bytes to prefetch per stream (default 8MB) */
uint32_t zfetch_max_distance = 8 * 1024 * 1024;
/* max bytes to prefetch indirects for per stream (default 64MB) */
uint32_t zfetch_max_idistance = 64 * 1024 * 1024;
/* max number of bytes in an array_read in which we allow prefetching (1MB) */
uint64_t zfetch_array_rd_sz = 1024 * 1024;

SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, prefetch_disable, CTLFLAG_RW,
    &zfs_prefetch_disable, 0, "Disable prefetch");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH");
TUNABLE_INT("vfs.zfs.zfetch.max_streams", &zfetch_max_streams);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_streams, CTLFLAG_RW,
    &zfetch_max_streams, 0, "Max # of streams per zfetch");
TUNABLE_INT("vfs.zfs.zfetch.min_sec_reap", &zfetch_min_sec_reap);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, min_sec_reap, CTLFLAG_RWTUN,
    &zfetch_min_sec_reap, 0, "Min time before stream reclaim");
TUNABLE_INT("vfs.zfs.zfetch.max_distance", &zfetch_max_distance);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_distance, CTLFLAG_RWTUN,
    &zfetch_max_distance, 0, "Max bytes to prefetch per stream");
TUNABLE_QUAD("vfs.zfs.zfetch.array_rd_sz", &zfetch_array_rd_sz);
SYSCTL_UQUAD(_vfs_zfs_zfetch, OID_AUTO, array_rd_sz, CTLFLAG_RWTUN,
    &zfetch_array_rd_sz, 0,
    "Number of bytes in an array_read at which we stop prefetching");
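
/*
 * zfetch_max_idistance has no tunable or sysctl above. A minimal sketch
 * following the file's existing pattern (the CTLFLAG_RWTUN flag and the
 * description string are assumptions, not taken from the original source):
 */
TUNABLE_INT("vfs.zfs.zfetch.max_idistance", &zfetch_max_idistance);
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_idistance, CTLFLAG_RWTUN,
    &zfetch_max_idistance, 0,
    "Max bytes to prefetch indirects for per stream");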

typedef struct zfetch_stats {
	kstat_named_t zfetchstat_hits;
	kstat_named_t zfetchstat_misses;
	kstat_named_t zfetchstat_max_streams;
} zfetch_stats_t;

static zfetch_stats_t zfetch_stats = {
	{ "hits", KSTAT_DATA_UINT64 },
	{ "misses", KSTAT_DATA_UINT64 },
	{ "max_streams", KSTAT_DATA_UINT64 },
};

#define	ZFETCHSTAT_BUMP(stat) \
	atomic_inc_64(&zfetch_stats.stat.value.ui64);

kstat_t *zfetch_ksp;

void
zfetch_init(void)
{
	zfetch_ksp = kstat_create("zfs", 0, "zfetchstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (zfetch_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (zfetch_ksp != NULL) {
		zfetch_ksp->ks_data = &zfetch_stats;
		kstat_install(zfetch_ksp);
	}
}

void
zfetch_fini(void)
{
	if (zfetch_ksp != NULL) {
		kstat_delete(zfetch_ksp);
		zfetch_ksp = NULL;
	}
}

/*
 * This takes a pointer to a zfetch structure and a dnode. It performs the
 * necessary setup for the zfetch structure, grokking data from the
 * associated dnode.
 */
void
dmu_zfetch_init(zfetch_t *zf, dnode_t *dno)
{
	if (zf == NULL)
		return;

	zf->zf_dnode = dno;

	list_create(&zf->zf_stream, sizeof (zstream_t),
	    offsetof(zstream_t, zs_node));

	rw_init(&zf->zf_rwlock, NULL, RW_DEFAULT, NULL);
}

static void
dmu_zfetch_stream_remove(zfetch_t *zf, zstream_t *zs)
{
	ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));
	list_remove(&zf->zf_stream, zs);
	mutex_destroy(&zs->zs_lock);
	kmem_free(zs, sizeof (*zs));
}

/*
 * Clean up state associated with a zfetch structure (e.g. destroy the
 * streams). This doesn't free the zfetch_t itself; that's left to the caller.
 */
void
dmu_zfetch_fini(zfetch_t *zf)
{
	zstream_t *zs;

	ASSERT(!RW_LOCK_HELD(&zf->zf_rwlock));

	rw_enter(&zf->zf_rwlock, RW_WRITER);
	while ((zs = list_head(&zf->zf_stream)) != NULL)
		dmu_zfetch_stream_remove(zf, zs);
	rw_exit(&zf->zf_rwlock);
	list_destroy(&zf->zf_stream);
	rw_destroy(&zf->zf_rwlock);

	zf->zf_dnode = NULL;
}

/*
 * If there aren't too many streams already, create a new stream.
 * The "blkid" argument is the next block that we expect this stream to access.
 * While we're here, clean up old streams (which haven't been
 * accessed for at least zfetch_min_sec_reap seconds).
 */
static void
dmu_zfetch_stream_create(zfetch_t *zf, uint64_t blkid)
{
	zstream_t *zs_next;
	int numstreams = 0;

	ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));

	/*
	 * Clean up old streams.
	 */
	for (zstream_t *zs = list_head(&zf->zf_stream);
	    zs != NULL; zs = zs_next) {
		zs_next = list_next(&zf->zf_stream, zs);
		if (((gethrtime() - zs->zs_atime) / NANOSEC) >
		    zfetch_min_sec_reap)
			dmu_zfetch_stream_remove(zf, zs);
		else
			numstreams++;
	}

	/*
	 * The maximum number of streams is normally zfetch_max_streams,
	 * but for small files we lower it such that it's at least possible
	 * for all the streams to be non-overlapping.
	 *
	 * If we are already at the maximum number of streams for this file,
	 * even after removing old streams, then don't create this stream.
	 */
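	/*
	 * Worked example (illustrative numbers, not from the original code):
	 * with the default 8MB zfetch_max_distance, a 2MB file of 128KB
	 * blocks (dn_maxblkid == 15) gives 15 * 131072 / 8388608 == 0, which
	 * the MAX(1, ...) below clamps to a single stream, while a 64MB file
	 * (dn_maxblkid == 511) gives 511 * 131072 / 8388608 == 7 streams.
	 */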
	uint32_t max_streams = MAX(1, MIN(zfetch_max_streams,
	    zf->zf_dnode->dn_maxblkid * zf->zf_dnode->dn_datablksz /
	    zfetch_max_distance));
	if (numstreams >= max_streams) {
		ZFETCHSTAT_BUMP(zfetchstat_max_streams);
		return;
	}

	zstream_t *zs = kmem_zalloc(sizeof (*zs), KM_SLEEP);
	zs->zs_blkid = blkid;
	zs->zs_pf_blkid = blkid;
	zs->zs_ipf_blkid = blkid;
	zs->zs_atime = gethrtime();
	mutex_init(&zs->zs_lock, NULL, MUTEX_DEFAULT, NULL);

	list_insert_head(&zf->zf_stream, zs);
}

/*
 * This is the predictive prefetch entry point. It associates the dnode
 * access specified by the blkid and nblks arguments with a prefetch stream,
 * predicts further accesses based on that stream's statistics, and initiates
 * speculative prefetch. The fetch_data argument specifies whether actual
 * data blocks should be fetched:
 * FALSE -- prefetch only indirect blocks for predicted data blocks;
 * TRUE -- prefetch predicted data blocks plus following indirect blocks.
 */
void
dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks, boolean_t fetch_data)
{
	zstream_t *zs;
	int64_t pf_start, ipf_start, ipf_istart, ipf_iend;
	int64_t pf_ahead_blks, max_blks;
	int epbs, max_dist_blks, pf_nblks, ipf_nblks;
	uint64_t end_of_access_blkid = blkid + nblks;

	if (zfs_prefetch_disable)
		return;

	/*
	 * As a fast path for small (single-block) files, ignore access
	 * to the first block.
	 */
	if (blkid == 0)
		return;

	rw_enter(&zf->zf_rwlock, RW_READER);

	for (zs = list_head(&zf->zf_stream); zs != NULL;
	    zs = list_next(&zf->zf_stream, zs)) {
		if (blkid == zs->zs_blkid) {
			mutex_enter(&zs->zs_lock);
			/*
			 * zs_blkid could have changed before we
			 * acquired zs_lock; re-check it here.
			 */
			if (blkid != zs->zs_blkid) {
				mutex_exit(&zs->zs_lock);
				continue;
			}
			break;
		}
	}

	if (zs == NULL) {
		/*
		 * This access is not part of any existing stream. Create
		 * a new stream for it.
		 */
		ZFETCHSTAT_BUMP(zfetchstat_misses);
		if (rw_tryupgrade(&zf->zf_rwlock))
			dmu_zfetch_stream_create(zf, end_of_access_blkid);
		rw_exit(&zf->zf_rwlock);
		return;
	}

	/*
	 * This access was to a block that we issued a prefetch for on
	 * behalf of this stream. Issue further prefetches for this stream.
	 *
	 * Normally, we start prefetching where we stopped
	 * prefetching last (zs_pf_blkid). But when we get our first
	 * hit on this stream (zs_pf_blkid == zs_blkid), we don't
	 * want to prefetch the block we just accessed. In this case,
	 * start just after the block we just accessed.
	 */
	pf_start = MAX(zs->zs_pf_blkid, end_of_access_blkid);

	/*
	 * Double our amount of prefetched data, but don't let the
	 * prefetch get further ahead than zfetch_max_distance.
	 */
	if (fetch_data) {
		max_dist_blks =
		    zfetch_max_distance >> zf->zf_dnode->dn_datablkshift;
		/*
		 * Previously, we were (zs_pf_blkid - blkid) ahead. We
		 * want to now be double that, so read that amount again,
		 * plus the amount we are catching up by (i.e. the amount
		 * read just now).
		 */
		pf_ahead_blks = zs->zs_pf_blkid - blkid + nblks;
		max_blks = max_dist_blks - (pf_start - end_of_access_blkid);
		pf_nblks = MIN(pf_ahead_blks, max_blks);
	} else {
		pf_nblks = 0;
	}

	zs->zs_pf_blkid = pf_start + pf_nblks;
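
	/*
	 * Worked example (illustrative numbers, not from the original code):
	 * suppose the reader hits blkid 10 with nblks 1 while the stream had
	 * prefetched up to zs_pf_blkid 14, i.e. 4 blocks ahead. Then
	 * pf_ahead_blks = 14 - 10 + 1 = 5 and pf_start = MAX(14, 11) = 14,
	 * so (distance cap permitting) zs_pf_blkid becomes 19, which is 8
	 * blocks ahead of end_of_access_blkid (11): the lead has doubled.
	 */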

	/*
	 * Do the same for indirects, starting from where we stopped last,
	 * or where we will stop reading data blocks (and the indirects
	 * that point to them).
	 */
	ipf_start = MAX(zs->zs_ipf_blkid, zs->zs_pf_blkid);
	max_dist_blks = zfetch_max_idistance >> zf->zf_dnode->dn_datablkshift;
	/*
	 * We want to double our distance ahead of the data prefetch
	 * (or reader, if we are not prefetching data). Previously, we
	 * were (zs_ipf_blkid - blkid) ahead. To double that, we read
	 * that amount again, plus the amount we are catching up by
	 * (i.e. the amount read now + the amount of data prefetched now).
	 */
	pf_ahead_blks = zs->zs_ipf_blkid - blkid + nblks + pf_nblks;
	max_blks = max_dist_blks - (ipf_start - end_of_access_blkid);
	ipf_nblks = MIN(pf_ahead_blks, max_blks);
	zs->zs_ipf_blkid = ipf_start + ipf_nblks;

	epbs = zf->zf_dnode->dn_indblkshift - SPA_BLKPTRSHIFT;
	ipf_istart = P2ROUNDUP(ipf_start, 1 << epbs) >> epbs;
	ipf_iend = P2ROUNDUP(zs->zs_ipf_blkid, 1 << epbs) >> epbs;

	zs->zs_atime = gethrtime();
	zs->zs_blkid = end_of_access_blkid;
	mutex_exit(&zs->zs_lock);
	rw_exit(&zf->zf_rwlock);

	/*
	 * dbuf_prefetch() is asynchronous (even when it needs to read
	 * indirect blocks), but we still prefer to drop our locks before
	 * calling it to reduce the time we hold them.
	 */

	for (int i = 0; i < pf_nblks; i++) {
		dbuf_prefetch(zf->zf_dnode, 0, pf_start + i,
		    ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH);
	}
	for (int64_t iblk = ipf_istart; iblk < ipf_iend; iblk++) {
		dbuf_prefetch(zf->zf_dnode, 1, iblk,
		    ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH);
	}
	ZFETCHSTAT_BUMP(zfetchstat_hits);
}
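
/*
 * For orientation, a sketch of how this API is typically used (descriptive
 * only; the actual call sites live elsewhere in the DMU, e.g. dnode setup
 * and the dmu_buf_hold_array paths, and are not shown in this file):
 *
 *	dmu_zfetch_init(&dn->dn_zfetch, dn);	-- when a dnode is set up
 *	dmu_zfetch(&dn->dn_zfetch, blkid,
 *	    nblks, B_TRUE);			-- on each read, to feed the
 *						--   predictor
 *	dmu_zfetch_fini(&dn->dn_zfetch);	-- when the dnode goes away
 */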