/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/kstat.h>

/*
 * Virtual device read-ahead caching.
 *
 * This file implements a simple LRU read-ahead cache.  When the DMU reads
 * a given block, it will often want other, nearby blocks soon thereafter.
 * We take advantage of this by reading a larger disk region and caching
 * the result.  In the best case, this can turn 128 back-to-back 512-byte
 * reads into a single 64k read followed by 127 cache hits; this reduces
 * latency dramatically.  In the worst case, it can turn an isolated 512-byte
 * read into a 64k read, which doesn't affect latency all that much but is
 * terribly wasteful of bandwidth.  A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region.  Currently, only
 * metadata I/O is inflated.  A further enhancement could take advantage of
 * more semantic information about the I/O.  And it could use something
 * faster than an AVL tree; that was chosen solely for convenience.
 *
 * There are five cache operations: allocate, fill, read, write, evict.
 *
 * (1) Allocate.  This reserves a cache entry for the specified region.
 *     We separate the allocate and fill operations so that multiple threads
 *     don't generate I/O for the same cache miss.
 *
 * (2) Fill.  When the I/O for a cache miss completes, the fill routine
 *     places the data in the previously allocated cache entry.
 *
 * (3) Read.  Read data from the cache.
 *
 * (4) Write.  Update cache contents after write completion.
 *
 * (5) Evict.  When allocating a new entry, we evict the oldest (LRU) entry
 *     if the total cache size exceeds zfs_vdev_cache_size.
 */

/*
 * These tunables are for performance analysis.
 */
/*
 * All I/Os smaller than zfs_vdev_cache_max will be turned into
 * 1<<zfs_vdev_cache_bshift byte reads by the vdev_cache (aka software
 * track buffer).  At most zfs_vdev_cache_size bytes will be kept in each
 * vdev's vdev_cache.
 */
int zfs_vdev_cache_max = 1<<14;			/* 16KB */
int zfs_vdev_cache_size = 10ULL << 20;		/* 10MB */
int zfs_vdev_cache_bshift = 16;

#define	VCBS (1 << zfs_vdev_cache_bshift)	/* 64KB */
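
/*
 * Worked example (illustrative; not part of the original source): with the
 * defaults above, VCBS = 1 << 16 = 64KB.  A 512-byte read at offset
 * 0x12345200 is aligned down to a cache line at P2ALIGN(0x12345200, VCBS) =
 * 0x12340000 and is satisfied from byte P2PHASE(0x12345200, VCBS) = 0x5200
 * of the cached 64KB buffer.  A minimal sketch, using the standard
 * OpenSolaris power-of-two macros from <sys/sysmacros.h>; the function name
 * is hypothetical:
 */
#if 0	/* illustration only */
static void
vdev_cache_example_mapping(uint64_t io_offset, uint64_t io_size)
{
	uint64_t cache_offset = P2ALIGN(io_offset, VCBS);	/* line base */
	uint64_t cache_phase = P2PHASE(io_offset, VCBS);	/* byte within */

	/* The inflated read covers [cache_offset, cache_offset + VCBS). */
	ASSERT(cache_phase + io_size <= VCBS);
}
#endif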

SYSCTL_DECL(_vfs_zfs_vdev);
SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, cache, CTLFLAG_RW, 0, "ZFS VDEV Cache");
TUNABLE_INT("vfs.zfs.vdev.cache.max", &zfs_vdev_cache_max);
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, max, CTLFLAG_RDTUN,
    &zfs_vdev_cache_max, 0, "Maximum I/O request size that increases read size");
TUNABLE_INT("vfs.zfs.vdev.cache.size", &zfs_vdev_cache_size);
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, size, CTLFLAG_RDTUN,
    &zfs_vdev_cache_size, 0, "Size of VDEV cache");
TUNABLE_INT("vfs.zfs.vdev.cache.bshift", &zfs_vdev_cache_bshift);
SYSCTL_INT(_vfs_zfs_vdev_cache, OID_AUTO, bshift, CTLFLAG_RDTUN,
    &zfs_vdev_cache_bshift, 0, "Turn too-small requests into reads of 1 << this value bytes");

kstat_t	*vdc_ksp = NULL;

typedef struct vdc_stats {
	kstat_named_t vdc_stat_delegations;
	kstat_named_t vdc_stat_hits;
	kstat_named_t vdc_stat_misses;
} vdc_stats_t;

static vdc_stats_t vdc_stats = {
	{ "delegations",	KSTAT_DATA_UINT64 },
	{ "hits",		KSTAT_DATA_UINT64 },
	{ "misses",		KSTAT_DATA_UINT64 }
};

#define	VDCSTAT_BUMP(stat)	atomic_add_64(&vdc_stats.stat.value.ui64, 1);

static int
vdev_cache_offset_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_offset < ve2->ve_offset)
		return (-1);
	if (ve1->ve_offset > ve2->ve_offset)
		return (1);
	return (0);
}

static int
vdev_cache_lastused_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_lastused < ve2->ve_lastused)
		return (-1);
	if (ve1->ve_lastused > ve2->ve_lastused)
		return (1);

	/*
	 * Among equally old entries, sort by offset to ensure uniqueness.
	 */
	return (vdev_cache_offset_compare(a1, a2));
}

/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);
	ASSERT(ve->ve_data != NULL);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	zio_buf_free(ve->ve_data, VCBS);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}

/*
 * Allocate an entry in the cache.  At this point we don't have the data,
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
	vdev_cache_entry_t *ve;

	ASSERT(MUTEX_HELD(&vc->vc_lock));

	if (zfs_vdev_cache_size == 0)
		return (NULL);

	/*
	 * If adding a new entry would exceed the cache size,
	 * evict the oldest entry (LRU).
	 */
	if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
	    zfs_vdev_cache_size) {
		ve = avl_first(&vc->vc_lastused_tree);
		if (ve->ve_fill_io != NULL)
			return (NULL);
		ASSERT(ve->ve_hits != 0);
		vdev_cache_evict(vc, ve);
	}

	ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve->ve_offset = offset;
	ve->ve_lastused = LBOLT;
	ve->ve_data = zio_buf_alloc(VCBS);

	avl_add(&vc->vc_offset_tree, ve);
	avl_add(&vc->vc_lastused_tree, ve);

	return (ve);
}
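
/*
 * Illustrative arithmetic (assuming the default tunables; not in the
 * original source): the eviction check above scales the entry count to
 * bytes by shifting.  With the defaults, zfs_vdev_cache_size >>
 * zfs_vdev_cache_bshift = (10 << 20) >> 16 = 160, so the LRU entry is
 * evicted once the cache holds more than 160 64KB lines, i.e. 10MB per
 * vdev.
 */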

static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);

	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);

	if (ve->ve_lastused != LBOLT) {
		avl_remove(&vc->vc_lastused_tree, ve);
		ve->ve_lastused = LBOLT;
		avl_add(&vc->vc_lastused_tree, ve);
	}

	ve->ve_hits++;
	bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size);
}

/*
 * Fill a previously allocated cache entry with data.
 */
static void
-vdev_cache_fill(zio_t *zio)
+vdev_cache_fill(zio_t *fio)
{
-	vdev_t *vd = zio->io_vd;
+	vdev_t *vd = fio->io_vd;
	vdev_cache_t *vc = &vd->vdev_cache;
-	vdev_cache_entry_t *ve = zio->io_private;
-	zio_t *dio;
+	vdev_cache_entry_t *ve = fio->io_private;
+	zio_t *pio;

-	ASSERT(zio->io_size == VCBS);
+	ASSERT(fio->io_size == VCBS);

	/*
	 * Add data to the cache.
	 */
	mutex_enter(&vc->vc_lock);

-	ASSERT(ve->ve_fill_io == zio);
-	ASSERT(ve->ve_offset == zio->io_offset);
-	ASSERT(ve->ve_data == zio->io_data);
+	ASSERT(ve->ve_fill_io == fio);
+	ASSERT(ve->ve_offset == fio->io_offset);
+	ASSERT(ve->ve_data == fio->io_data);

	ve->ve_fill_io = NULL;

	/*
	 * Even if this cache line was invalidated by a missed write update,
	 * any reads that were queued up before the missed update are still
	 * valid, so we can satisfy them from this line before we evict it.
	 */
-	for (dio = zio->io_delegate_list; dio; dio = dio->io_delegate_next)
-		vdev_cache_hit(vc, ve, dio);
+	while ((pio = zio_walk_parents(fio)) != NULL)
+		vdev_cache_hit(vc, ve, pio);

-	if (zio->io_error || ve->ve_missed_update)
+	if (fio->io_error || ve->ve_missed_update)
		vdev_cache_evict(vc, ve);

	mutex_exit(&vc->vc_lock);
-
-	while ((dio = zio->io_delegate_list) != NULL) {
-		zio->io_delegate_list = dio->io_delegate_next;
-		dio->io_delegate_next = NULL;
-		dio->io_error = zio->io_error;
-		zio_execute(dio);
-	}
}
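
/*
 * Note on the delegate-list removal: the old code chained waiting readers
 * on io_delegate_list and re-executed each one by hand after the fill
 * completed.  The new code expresses the same relationship through the zio
 * framework: vdev_cache_read() makes each waiting reader a parent of the
 * in-flight fill I/O with zio_add_child(), so once zio_walk_parents() has
 * satisfied every parent from the cache line, the pipeline completes those
 * parents itself when the fill I/O finishes.
 */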

/*
 * Read data from the cache.  Returns 0 if the caller's I/O was satisfied
 * by the cache or delegated to an in-flight fill, or an errno if the I/O
 * cannot be cached.
 */
int
vdev_cache_read(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);
	zio_t *fio;

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
		return (EINVAL);

	if (zio->io_size > zfs_vdev_cache_max)
		return (EOVERFLOW);

	/*
	 * If the I/O straddles two or more cache blocks, don't cache it.
	 */
	if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
		return (EXDEV);

	ASSERT(cache_phase + zio->io_size <= VCBS);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = cache_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, NULL);

	if (ve != NULL) {
		if (ve->ve_missed_update) {
			mutex_exit(&vc->vc_lock);
			return (ESTALE);
		}

		if ((fio = ve->ve_fill_io) != NULL) {
-			zio->io_delegate_next = fio->io_delegate_list;
-			fio->io_delegate_list = zio;
			zio_vdev_io_bypass(zio);
+			zio_add_child(zio, fio);
			mutex_exit(&vc->vc_lock);
			VDCSTAT_BUMP(vdc_stat_delegations);
			return (0);
		}

		vdev_cache_hit(vc, ve, zio);
		zio_vdev_io_bypass(zio);

		mutex_exit(&vc->vc_lock);
-		zio_execute(zio);
		VDCSTAT_BUMP(vdc_stat_hits);
		return (0);
	}

	ve = vdev_cache_allocate(zio);

	if (ve == NULL) {
		mutex_exit(&vc->vc_lock);
		return (ENOMEM);
	}

	fio = zio_vdev_delegated_io(zio->io_vd, cache_offset,
	    ve->ve_data, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_CACHE_FILL,
	    ZIO_FLAG_DONT_CACHE, vdev_cache_fill, ve);

	ve->ve_fill_io = fio;
-	fio->io_delegate_list = zio;
	zio_vdev_io_bypass(zio);
+	zio_add_child(zio, fio);

	mutex_exit(&vc->vc_lock);
	zio_nowait(fio);
	VDCSTAT_BUMP(vdc_stat_misses);

	return (0);
}

/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t io_start = zio->io_offset;
	uint64_t io_end = io_start + zio->io_size;
	uint64_t min_offset = P2ALIGN(io_start, VCBS);
	uint64_t max_offset = P2ROUNDUP(io_end, VCBS);
	avl_index_t where;

	ASSERT(zio->io_type == ZIO_TYPE_WRITE);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = min_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

	if (ve == NULL)
		ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);

	while (ve != NULL && ve->ve_offset < max_offset) {
		uint64_t start = MAX(ve->ve_offset, io_start);
		uint64_t end = MIN(ve->ve_offset + VCBS, io_end);

		if (ve->ve_fill_io != NULL) {
			ve->ve_missed_update = 1;
		} else {
			bcopy((char *)zio->io_data + start - io_start,
			    ve->ve_data + start - ve->ve_offset, end - start);
		}
		ve = AVL_NEXT(&vc->vc_offset_tree, ve);
	}
	mutex_exit(&vc->vc_lock);
}
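
/*
 * Worked overlap example (illustrative; not part of the original source):
 * with VCBS = 64KB, a cached line at ve_offset = 0x10000, and a write of
 * 0x3000 bytes at io_start = 0x1e000 (so io_end = 0x21000), the loop above
 * computes start = MAX(0x10000, 0x1e000) = 0x1e000 and end =
 * MIN(0x10000 + 0x10000, 0x21000) = 0x20000.  Only the 0x2000 bytes that
 * fall inside this line are copied; the remaining 0x1000 bytes belong to
 * the next line and are handled on the next iteration, if that line is
 * cached.
 */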

void
vdev_cache_purge(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve;

	mutex_enter(&vc->vc_lock);
	while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
		vdev_cache_evict(vc, ve);
	mutex_exit(&vc->vc_lock);
}

void
vdev_cache_init(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_offset_node));

	avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_lastused_node));
}

void
vdev_cache_fini(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	vdev_cache_purge(vd);

	avl_destroy(&vc->vc_offset_tree);
	avl_destroy(&vc->vc_lastused_tree);

	mutex_destroy(&vc->vc_lock);
}

void
vdev_cache_stat_init(void)
{
	vdc_ksp = kstat_create("zfs", 0, "vdev_cache_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (vdc_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (vdc_ksp != NULL) {
		vdc_ksp->ks_data = &vdc_stats;
		kstat_install(vdc_ksp);
	}
}

void
vdev_cache_stat_fini(void)
{
	if (vdc_ksp != NULL) {
		kstat_delete(vdc_ksp);
		vdc_ksp = NULL;
	}
}
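
/*
 * Usage note (illustrative; assumes an OpenSolaris environment): the kstats
 * registered above are visible from userland, e.g.:
 *
 *	$ kstat -p zfs:0:vdev_cache_stats
 *	zfs:0:vdev_cache_stats:delegations	...
 *	zfs:0:vdev_cache_stats:hits		...
 *	zfs:0:vdev_cache_stats:misses		...
 */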