/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic:  we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it is simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes; rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use: mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#ifdef _KERNEL
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <zfs_fletcher.h>
#include <sys/sdt.h>

#include <vm/vm_pageout.h>

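/*
 * A rough sketch of the deadlock-avoidance pattern required by the locking
 * comment above: when a hash table lock must be taken while an arc list lock
 * is already held (as the eviction scan does), the hash lock is tried rather
 * than blocked on, and the buffer is skipped on contention.  Names such as
 * HDR_LOCK() and arcstat_mutex_miss are defined further down in this file;
 * the snippet is illustrative only, not a copy of the eviction code:
 *
 *	kmutex_t *hash_lock = HDR_LOCK(ab);
 *	if (!mutex_tryenter(hash_lock)) {
 *		ARCSTAT_BUMP(arcstat_mutex_miss);
 *		continue;	// inside the list scan loop: skip this buffer
 *	}
 *	// ... evict or move the buffer ...
 *	mutex_exit(hash_lock);
 */
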
#ifdef illumos
#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
int arc_procfd;
#endif
#endif /* illumos */

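/*
 * Usage note (a sketch of the intent, not of the plumbing, which lives
 * elsewhere): in userland builds (e.g. under ztest/libzpool), running with
 * ZFS_DEBUG=watch in the environment is expected to set arc_watch above, so
 * that frozen buffers are covered by watchpoints and a stray modification
 * faults immediately instead of tripping a later "modified while frozen"
 * checksum panic.
 */
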
static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/* shift of arc_c for calculating both min and max arc_p */
static int		arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int		arc_shrink_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;
extern int zfs_prefetch_disable;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;

TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
SYSCTL_DECL(_vfs_zfs);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
    "Maximum ARC size");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
    "Minimum ARC size");

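/*
 * The TUNABLE_QUAD/CTLFLAG_RDTUN knobs above are read-only boot-time tunables
 * on FreeBSD, so they would normally be set from loader.conf rather than with
 * sysctl(8) on a running system.  An illustrative example with hypothetical
 * byte values:
 *
 *	# /boot/loader.conf
 *	vfs.zfs.arc_max="4294967296"	# cap the ARC at 4 GB
 *	vfs.zfs.arc_min="536870912"	# never shrink below 512 MB
 *
 * The effective values can then be read back, e.g. "sysctl vfs.zfs.arc_max".
 */
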
/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, it is linked onto
 * a list in one of these arc states.  These are the only buffers that
 * can be evicted or deleted.  Within each state there are multiple
 * lists, one for meta-data and one for non-meta-data.  Meta-data
 * (indirect blocks, blocks of dnodes, etc.) is tracked separately
 * so that it can be managed more explicitly: favored over data,
 * limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */

#define	ARCS_LOCK_PAD		CACHE_LINE_SIZE
struct arcs_lock {
	kmutex_t	arcs_lock;
#ifdef _KERNEL
	unsigned char	pad[(ARCS_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

/*
 * must be power of two for mask use to work
 */
#define	ARC_BUFC_NUMDATALISTS		16
#define	ARC_BUFC_NUMMETADATALISTS	16
#define	ARC_BUFC_NUMLISTS	(ARC_BUFC_NUMMETADATALISTS + ARC_BUFC_NUMDATALISTS)

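/*
 * Keeping these list counts a power of two means a list index can be derived
 * from a hash with a simple mask instead of a modulo.  For instance, with
 * ARC_BUFC_NUMDATALISTS == 16, something like
 *
 *	idx = some_hash & (ARC_BUFC_NUMDATALISTS - 1);	// yields 0..15
 *
 * is equivalent to "some_hash % 16" only because 16 is a power of two (the
 * mask is 0xf); the same trick is relied on below for BUF_LOCKS.
 */
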
typedef struct arc_state {
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	list_t	arcs_lists[ARC_BUFC_NUMLISTS]; /* list of evictable buffers */
	struct arcs_lock arcs_locks[ARC_BUFC_NUMLISTS] __aligned(CACHE_LINE_SIZE);
} arc_state_t;

#define	ARCS_LOCK(s, i)	(&((s)->arcs_locks[(i)].arcs_lock))

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_allocated;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_stolen;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_evict_l2_cached;
	kstat_named_t arcstat_evict_l2_eligible;
	kstat_named_t arcstat_evict_l2_ineligible;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_data_size;
	kstat_named_t arcstat_other_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_read_bytes;
	kstat_named_t arcstat_l2_write_bytes;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_memory_throttle_count;
	kstat_named_t arcstat_l2_write_trylock_fail;
	kstat_named_t arcstat_l2_write_passed_headroom;
	kstat_named_t arcstat_l2_write_spa_mismatch;
	kstat_named_t arcstat_l2_write_in_l2;
	kstat_named_t arcstat_l2_write_hdr_io_in_progress;
	kstat_named_t arcstat_l2_write_not_cacheable;
	kstat_named_t arcstat_l2_write_full;
	kstat_named_t arcstat_l2_write_buffer_iter;
	kstat_named_t arcstat_l2_write_pios;
	kstat_named_t arcstat_l2_write_buffer_bytes_scanned;
	kstat_named_t arcstat_l2_write_buffer_list_iter;
	kstat_named_t arcstat_l2_write_buffer_list_null_iter;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "allocated",			KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "stolen",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "data_size",			KSTAT_DATA_UINT64 },
	{ "other_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 },
	{ "l2_write_trylock_fail",	KSTAT_DATA_UINT64 },
	{ "l2_write_passed_headroom",	KSTAT_DATA_UINT64 },
	{ "l2_write_spa_mismatch",	KSTAT_DATA_UINT64 },
	{ "l2_write_in_l2",		KSTAT_DATA_UINT64 },
	{ "l2_write_io_in_progress",	KSTAT_DATA_UINT64 },
	{ "l2_write_not_cacheable",	KSTAT_DATA_UINT64 },
	{ "l2_write_full",		KSTAT_DATA_UINT64 },
	{ "l2_write_buffer_iter",	KSTAT_DATA_UINT64 },
	{ "l2_write_pios",		KSTAT_DATA_UINT64 },
	{ "l2_write_buffer_bytes_scanned",	KSTAT_DATA_UINT64 },
	{ "l2_write_buffer_list_iter",	KSTAT_DATA_UINT64 },
	{ "l2_write_buffer_list_null_iter",	KSTAT_DATA_UINT64 }
};

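/*
 * These named counters are exported through the kstat framework (arc_ksp,
 * declared below, is wired up in arc_init() later in this file).  As a usage
 * note -- exact paths are platform dependent -- they are typically inspected
 * from userland with something like "kstat -m zfs -n arcstats" on illumos,
 * or via the kstat sysctl mirror on FreeBSD (e.g.
 * "sysctl kstat.zfs.misc.arcstats.size"); the field names match the strings
 * in the initializer above.
 */
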
#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}

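/*
 * As an illustration of ARCSTAT_CONDSTAT: a hit can be classified by
 * demand-vs-prefetch and data-vs-metadata in one shot.  A sketch of a call
 * (the argument expressions here are illustrative, not lifted from the
 * callers later in this file):
 *
 *	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch,
 *	    hdr->b_type != ARC_BUFC_METADATA, data, metadata, hits);
 *
 * With both conditions true this expands to
 * ARCSTAT_BUMP(arcstat_demand_data_hits); the other three combinations bump
 * the corresponding demand/prefetch x data/metadata counters.
 */
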
kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_loaned_bytes;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RDTUN,
    &arc_meta_used, 0, "ARC metadata used");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RDTUN,
    &arc_meta_limit, 0, "ARC metadata limit");

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;
	void			*b_thawed;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	uint64_t		b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);

#ifdef illumos
static void arc_buf_watch(arc_buf_t *buf);
#endif /* illumos */

static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_PREFETCH)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Other sizes
 */

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	CACHE_LINE_SIZE

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE);
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(hdr) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))

uint64_t zfs_crc64_table[256];

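/*
 * The hash table is protected by an array of BUF_LOCKS (256) mutexes rather
 * than a single lock; BUF_HASH_LOCK_NTRY() folds a bucket index onto one of
 * those locks with the usual power-of-two mask.  A rough sketch of the
 * pattern for touching a header's hash-protected fields:
 *
 *	kmutex_t *hash_lock = HDR_LOCK(hdr);
 *	mutex_enter(hash_lock);
 *	// ... examine or update fields marked "protected by hash lock" ...
 *	mutex_exit(hash_lock);
 *
 * buf_hash_find() below returns with the appropriate lock already held.
 */
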
/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2		/* num of writes */
#define	L2ARC_FEED_SECS		1		/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
    &l2arc_write_max, 0, "max write size");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW,
    &l2arc_write_boost, 0, "extra write during warmup");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW,
    &l2arc_headroom, 0, "number of dev writes");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW,
    &l2arc_feed_secs, 0, "interval seconds");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW,
    &l2arc_feed_min_ms, 0, "min interval milliseconds");

SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW,
    &l2arc_noprefetch, 0, "don't cache prefetch bufs");
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW,
    &l2arc_feed_again, 0, "turbo warmup");
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW,
    &l2arc_norw, 0, "no reads during writes");

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
    &ARC_anon.arcs_size, 0, "size of anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_lsize, CTLFLAG_RD,
    &ARC_anon.arcs_lsize[ARC_BUFC_METADATA], 0, "size of anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_lsize, CTLFLAG_RD,
    &ARC_anon.arcs_lsize[ARC_BUFC_DATA], 0, "size of anonymous state");

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
    &ARC_mru.arcs_size, 0, "size of mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_lsize, CTLFLAG_RD,
    &ARC_mru.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_lsize, CTLFLAG_RD,
    &ARC_mru.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mru state");

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
    &ARC_mru_ghost.arcs_size, 0, "size of mru ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_lsize, CTLFLAG_RD,
    &ARC_mru_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
    "size of metadata in mru ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_lsize, CTLFLAG_RD,
    &ARC_mru_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
    "size of data in mru ghost state");

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
    &ARC_mfu.arcs_size, 0, "size of mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_lsize, CTLFLAG_RD,
    &ARC_mfu.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD,
    &ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state");

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
    &ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD,
    &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
    "size of metadata in mfu ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_lsize, CTLFLAG_RD,
    &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
    "size of data in mfu ghost state");

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
    &ARC_l2c_only.arcs_size, 0, "size of l2c_only state");

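/*
 * Unlike the boot-time ARC size tunables above, these L2ARC knobs are
 * CTLFLAG_RW, so they can be adjusted on a running system.  Illustrative
 * usage from userland, with hypothetical values:
 *
 *	# sysctl vfs.zfs.l2arc_write_max=16777216    (16 MB per feed interval)
 *	# sysctl vfs.zfs.l2arc_noprefetch=0          (also cache prefetch bufs)
 */
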
/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t		*l2ad_vdev;	/* vdev */
	spa_t		*l2ad_spa;	/* spa */
	uint64_t	l2ad_hand;	/* next write location */
	uint64_t	l2ad_write;	/* desired write size, bytes */
	uint64_t	l2ad_boost;	/* warmup write boost, bytes */
	uint64_t	l2ad_start;	/* first addr on device */
	uint64_t	l2ad_end;	/* last addr on device */
	uint64_t	l2ad_evict;	/* last addr eviction reached */
	boolean_t	l2ad_first;	/* first sweep through */
	boolean_t	l2ad_writing;	/* currently writing */
	list_t		*l2ad_buflist;	/* buffer list */
	list_node_t	l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	uint64_t	b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spa>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
	hdr->b_dva.dva_word[0] = 0;
	hdr->b_dva.dva_word[1] = 0;
	hdr->b_birth = 0;
	hdr->b_cksum0 = 0;
}

static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

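/*
 * Taken together, buf_hash_find() and buf_hash_insert() support the usual
 * lookup-or-insert idiom used by the read path: look the block up first and,
 * on a miss, insert a freshly allocated header, falling back to whatever
 * header raced us in.  A condensed sketch (error handling and state
 * transitions omitted; the argument expressions are illustrative):
 *
 *	hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
 *	    &hash_lock);
 *	if (hdr == NULL) {
 *		hdr = ...allocate and fill in a new arc_buf_hdr_t...;
 *		exists = buf_hash_insert(hdr, &hash_lock);
 *		if (exists != NULL) {
 *			// somebody beat us to it; use the existing header
 *		}
 *	}
 *	// hash_lock is held here in either case
 *	mutex_exit(hash_lock);
 */
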
/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);

	return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&buf->b_data_lock, NULL, RW_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);

	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	ASSERT(BUF_EMPTY(buf));
	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);
	arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
}

/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	mutex_destroy(&buf->b_evict_lock);
	rw_destroy(&buf->b_data_lock);
	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < (uint64_t)physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

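/*
 * Worked example of the sizing loop in buf_init() above (the memory size is
 * just an illustration): with 8 GB of physical memory, physmem * PAGESIZE is
 * 2^33, and hsize doubles until hsize * 64K reaches that, giving
 * hsize = 2^17 = 131072 buckets.  At 8 bytes per bucket pointer the table is
 * 1 MB, which matches the "128KB per GB with 8-byte pointers" estimate in
 * the comment.
 */
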
993 */ 994 while (hsize * 65536 < (uint64_t)physmem * PAGESIZE) 995 hsize <<= 1; 996retry: 997 buf_hash_table.ht_mask = hsize - 1; 998 buf_hash_table.ht_table = 999 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 1000 if (buf_hash_table.ht_table == NULL) { 1001 ASSERT(hsize > (1ULL << 8)); 1002 hsize >>= 1; 1003 goto retry; 1004 } 1005 1006 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 1007 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 1008 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 1009 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); 1010 1011 for (i = 0; i < 256; i++) 1012 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 1013 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 1014 1015 for (i = 0; i < BUF_LOCKS; i++) { 1016 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 1017 NULL, MUTEX_DEFAULT, NULL); 1018 } 1019} 1020 1021#define ARC_MINTIME (hz>>4) /* 62 ms */ 1022 1023static void 1024arc_cksum_verify(arc_buf_t *buf) 1025{ 1026 zio_cksum_t zc; 1027 1028 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1029 return; 1030 1031 mutex_enter(&buf->b_hdr->b_freeze_lock); 1032 if (buf->b_hdr->b_freeze_cksum == NULL || 1033 (buf->b_hdr->b_flags & ARC_IO_ERROR)) { 1034 mutex_exit(&buf->b_hdr->b_freeze_lock); 1035 return; 1036 } 1037 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 1038 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) 1039 panic("buffer modified while frozen!"); 1040 mutex_exit(&buf->b_hdr->b_freeze_lock); 1041} 1042 1043static int 1044arc_cksum_equal(arc_buf_t *buf) 1045{ 1046 zio_cksum_t zc; 1047 int equal; 1048 1049 mutex_enter(&buf->b_hdr->b_freeze_lock); 1050 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 1051 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); 1052 mutex_exit(&buf->b_hdr->b_freeze_lock); 1053 1054 return (equal); 1055} 1056 1057static void 1058arc_cksum_compute(arc_buf_t *buf, boolean_t force) 1059{ 1060 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) 1061 return; 1062 1063 mutex_enter(&buf->b_hdr->b_freeze_lock); 1064 if (buf->b_hdr->b_freeze_cksum != NULL) { 1065 mutex_exit(&buf->b_hdr->b_freeze_lock); 1066 return; 1067 } 1068 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 1069 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, 1070 buf->b_hdr->b_freeze_cksum); 1071 mutex_exit(&buf->b_hdr->b_freeze_lock);
| 548 549static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab); 550 551#define GHOST_STATE(state) \ 552 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ 553 (state) == arc_l2c_only) 554 555/* 556 * Private ARC flags. These flags are private ARC only flags that will show up 557 * in b_flags in the arc_hdr_buf_t. Some flags are publicly declared, and can 558 * be passed in as arc_flags in things like arc_read. However, these flags 559 * should never be passed and should only be set by ARC code. When adding new 560 * public flags, make sure not to smash the private ones. 561 */ 562 563#define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ 564#define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ 565#define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ 566#define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ 567#define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ 568#define ARC_INDIRECT (1 << 14) /* this is an indirect block */ 569#define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */ 570#define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */ 571#define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */ 572#define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */ 573 574#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) 575#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) 576#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) 577#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH) 578#define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) 579#define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) 580#define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS) 581#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE) 582#define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \ 583 (hdr)->b_l2hdr != NULL) 584#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING) 585#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED) 586#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD) 587 588/* 589 * Other sizes 590 */ 591 592#define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) 593#define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t)) 594 595/* 596 * Hash table routines 597 */ 598 599#define HT_LOCK_PAD CACHE_LINE_SIZE 600 601struct ht_lock { 602 kmutex_t ht_lock; 603#ifdef _KERNEL 604 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 605#endif 606}; 607 608#define BUF_LOCKS 256 609typedef struct buf_hash_table { 610 uint64_t ht_mask; 611 arc_buf_hdr_t **ht_table; 612 struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE); 613} buf_hash_table_t; 614 615static buf_hash_table_t buf_hash_table; 616 617#define BUF_HASH_INDEX(spa, dva, birth) \ 618 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 619#define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 620#define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 621#define HDR_LOCK(hdr) \ 622 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth))) 623 624uint64_t zfs_crc64_table[256]; 625 626/* 627 * Level 2 ARC 628 */ 629 630#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ 631#define L2ARC_HEADROOM 2 /* num of writes */ 632#define L2ARC_FEED_SECS 1 /* caching interval secs */ 633#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */ 634 635#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) 636#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) 
637 638/* 639 * L2ARC Performance Tunables 640 */ 641uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ 642uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ 643uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ 644uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ 645uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */ 646boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ 647boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */ 648boolean_t l2arc_norw = B_TRUE; /* no reads during writes */ 649 650SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW, 651 &l2arc_write_max, 0, "max write size"); 652SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW, 653 &l2arc_write_boost, 0, "extra write during warmup"); 654SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW, 655 &l2arc_headroom, 0, "number of dev writes"); 656SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW, 657 &l2arc_feed_secs, 0, "interval seconds"); 658SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW, 659 &l2arc_feed_min_ms, 0, "min interval milliseconds"); 660 661SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW, 662 &l2arc_noprefetch, 0, "don't cache prefetch bufs"); 663SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW, 664 &l2arc_feed_again, 0, "turbo warmup"); 665SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW, 666 &l2arc_norw, 0, "no reads during writes"); 667 668SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD, 669 &ARC_anon.arcs_size, 0, "size of anonymous state"); 670SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_lsize, CTLFLAG_RD, 671 &ARC_anon.arcs_lsize[ARC_BUFC_METADATA], 0, "size of anonymous state"); 672SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_lsize, CTLFLAG_RD, 673 &ARC_anon.arcs_lsize[ARC_BUFC_DATA], 0, "size of anonymous state"); 674 675SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD, 676 &ARC_mru.arcs_size, 0, "size of mru state"); 677SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_lsize, CTLFLAG_RD, 678 &ARC_mru.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mru state"); 679SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_lsize, CTLFLAG_RD, 680 &ARC_mru.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mru state"); 681 682SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD, 683 &ARC_mru_ghost.arcs_size, 0, "size of mru ghost state"); 684SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_lsize, CTLFLAG_RD, 685 &ARC_mru_ghost.arcs_lsize[ARC_BUFC_METADATA], 0, 686 "size of metadata in mru ghost state"); 687SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_lsize, CTLFLAG_RD, 688 &ARC_mru_ghost.arcs_lsize[ARC_BUFC_DATA], 0, 689 "size of data in mru ghost state"); 690 691SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD, 692 &ARC_mfu.arcs_size, 0, "size of mfu state"); 693SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_lsize, CTLFLAG_RD, 694 &ARC_mfu.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mfu state"); 695SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD, 696 &ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state"); 697 698SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD, 699 &ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state"); 700SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD, 701 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0, 702 "size of metadata in mfu ghost state"); 703SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, 
mfu_ghost_data_lsize, CTLFLAG_RD, 704 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_DATA], 0, 705 "size of data in mfu ghost state"); 706 707SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD, 708 &ARC_l2c_only.arcs_size, 0, "size of mru state"); 709 710/* 711 * L2ARC Internals 712 */ 713typedef struct l2arc_dev { 714 vdev_t *l2ad_vdev; /* vdev */ 715 spa_t *l2ad_spa; /* spa */ 716 uint64_t l2ad_hand; /* next write location */ 717 uint64_t l2ad_write; /* desired write size, bytes */ 718 uint64_t l2ad_boost; /* warmup write boost, bytes */ 719 uint64_t l2ad_start; /* first addr on device */ 720 uint64_t l2ad_end; /* last addr on device */ 721 uint64_t l2ad_evict; /* last addr eviction reached */ 722 boolean_t l2ad_first; /* first sweep through */ 723 boolean_t l2ad_writing; /* currently writing */ 724 list_t *l2ad_buflist; /* buffer list */ 725 list_node_t l2ad_node; /* device list node */ 726} l2arc_dev_t; 727 728static list_t L2ARC_dev_list; /* device list */ 729static list_t *l2arc_dev_list; /* device list pointer */ 730static kmutex_t l2arc_dev_mtx; /* device list mutex */ 731static l2arc_dev_t *l2arc_dev_last; /* last device used */ 732static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */ 733static list_t L2ARC_free_on_write; /* free after write buf list */ 734static list_t *l2arc_free_on_write; /* free after write list ptr */ 735static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 736static uint64_t l2arc_ndev; /* number of devices */ 737 738typedef struct l2arc_read_callback { 739 arc_buf_t *l2rcb_buf; /* read buffer */ 740 spa_t *l2rcb_spa; /* spa */ 741 blkptr_t l2rcb_bp; /* original blkptr */ 742 zbookmark_t l2rcb_zb; /* original bookmark */ 743 int l2rcb_flags; /* original flags */ 744} l2arc_read_callback_t; 745 746typedef struct l2arc_write_callback { 747 l2arc_dev_t *l2wcb_dev; /* device info */ 748 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 749} l2arc_write_callback_t; 750 751struct l2arc_buf_hdr { 752 /* protected by arc_buf_hdr mutex */ 753 l2arc_dev_t *b_dev; /* L2ARC device */ 754 uint64_t b_daddr; /* disk address, offset byte */ 755}; 756 757typedef struct l2arc_data_free { 758 /* protected by l2arc_free_on_write_mtx */ 759 void *l2df_data; 760 size_t l2df_size; 761 void (*l2df_func)(void *, size_t); 762 list_node_t l2df_list_node; 763} l2arc_data_free_t; 764 765static kmutex_t l2arc_feed_thr_lock; 766static kcondvar_t l2arc_feed_thr_cv; 767static uint8_t l2arc_thread_exit; 768 769static void l2arc_read_done(zio_t *zio); 770static void l2arc_hdr_stat_add(void); 771static void l2arc_hdr_stat_remove(void); 772 773static uint64_t 774buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth) 775{ 776 uint8_t *vdva = (uint8_t *)dva; 777 uint64_t crc = -1ULL; 778 int i; 779 780 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 781 782 for (i = 0; i < sizeof (dva_t); i++) 783 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 784 785 crc ^= (spa>>8) ^ birth; 786 787 return (crc); 788} 789 790#define BUF_EMPTY(buf) \ 791 ((buf)->b_dva.dva_word[0] == 0 && \ 792 (buf)->b_dva.dva_word[1] == 0 && \ 793 (buf)->b_birth == 0) 794 795#define BUF_EQUAL(spa, dva, birth, buf) \ 796 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 797 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 798 ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 799 800static void 801buf_discard_identity(arc_buf_hdr_t *hdr) 802{ 803 hdr->b_dva.dva_word[0] = 0; 804 hdr->b_dva.dva_word[1] = 0; 805 hdr->b_birth = 0; 806 hdr->b_cksum0 = 0; 807} 808 809static arc_buf_hdr_t * 
810buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp) 811{ 812 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 813 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 814 arc_buf_hdr_t *buf; 815 816 mutex_enter(hash_lock); 817 for (buf = buf_hash_table.ht_table[idx]; buf != NULL; 818 buf = buf->b_hash_next) { 819 if (BUF_EQUAL(spa, dva, birth, buf)) { 820 *lockp = hash_lock; 821 return (buf); 822 } 823 } 824 mutex_exit(hash_lock); 825 *lockp = NULL; 826 return (NULL); 827} 828 829/* 830 * Insert an entry into the hash table. If there is already an element 831 * equal to elem in the hash table, then the already existing element 832 * will be returned and the new element will not be inserted. 833 * Otherwise returns NULL. 834 */ 835static arc_buf_hdr_t * 836buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) 837{ 838 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 839 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 840 arc_buf_hdr_t *fbuf; 841 uint32_t i; 842 843 ASSERT(!HDR_IN_HASH_TABLE(buf)); 844 *lockp = hash_lock; 845 mutex_enter(hash_lock); 846 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; 847 fbuf = fbuf->b_hash_next, i++) { 848 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) 849 return (fbuf); 850 } 851 852 buf->b_hash_next = buf_hash_table.ht_table[idx]; 853 buf_hash_table.ht_table[idx] = buf; 854 buf->b_flags |= ARC_IN_HASH_TABLE; 855 856 /* collect some hash table performance data */ 857 if (i > 0) { 858 ARCSTAT_BUMP(arcstat_hash_collisions); 859 if (i == 1) 860 ARCSTAT_BUMP(arcstat_hash_chains); 861 862 ARCSTAT_MAX(arcstat_hash_chain_max, i); 863 } 864 865 ARCSTAT_BUMP(arcstat_hash_elements); 866 ARCSTAT_MAXSTAT(arcstat_hash_elements); 867 868 return (NULL); 869} 870 871static void 872buf_hash_remove(arc_buf_hdr_t *buf) 873{ 874 arc_buf_hdr_t *fbuf, **bufp; 875 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 876 877 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 878 ASSERT(HDR_IN_HASH_TABLE(buf)); 879 880 bufp = &buf_hash_table.ht_table[idx]; 881 while ((fbuf = *bufp) != buf) { 882 ASSERT(fbuf != NULL); 883 bufp = &fbuf->b_hash_next; 884 } 885 *bufp = buf->b_hash_next; 886 buf->b_hash_next = NULL; 887 buf->b_flags &= ~ARC_IN_HASH_TABLE; 888 889 /* collect some hash table performance data */ 890 ARCSTAT_BUMPDOWN(arcstat_hash_elements); 891 892 if (buf_hash_table.ht_table[idx] && 893 buf_hash_table.ht_table[idx]->b_hash_next == NULL) 894 ARCSTAT_BUMPDOWN(arcstat_hash_chains); 895} 896 897/* 898 * Global data structures and functions for the buf kmem cache. 899 */ 900static kmem_cache_t *hdr_cache; 901static kmem_cache_t *buf_cache; 902 903static void 904buf_fini(void) 905{ 906 int i; 907 908 kmem_free(buf_hash_table.ht_table, 909 (buf_hash_table.ht_mask + 1) * sizeof (void *)); 910 for (i = 0; i < BUF_LOCKS; i++) 911 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 912 kmem_cache_destroy(hdr_cache); 913 kmem_cache_destroy(buf_cache); 914} 915 916/* 917 * Constructor callback - called when the cache is empty 918 * and a new buf is requested. 
919 */ 920/* ARGSUSED */ 921static int 922hdr_cons(void *vbuf, void *unused, int kmflag) 923{ 924 arc_buf_hdr_t *buf = vbuf; 925 926 bzero(buf, sizeof (arc_buf_hdr_t)); 927 refcount_create(&buf->b_refcnt); 928 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); 929 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); 930 arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS); 931 932 return (0); 933} 934 935/* ARGSUSED */ 936static int 937buf_cons(void *vbuf, void *unused, int kmflag) 938{ 939 arc_buf_t *buf = vbuf; 940 941 bzero(buf, sizeof (arc_buf_t)); 942 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL); 943 rw_init(&buf->b_data_lock, NULL, RW_DEFAULT, NULL); 944 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS); 945 946 return (0); 947} 948 949/* 950 * Destructor callback - called when a cached buf is 951 * no longer required. 952 */ 953/* ARGSUSED */ 954static void 955hdr_dest(void *vbuf, void *unused) 956{ 957 arc_buf_hdr_t *buf = vbuf; 958 959 ASSERT(BUF_EMPTY(buf)); 960 refcount_destroy(&buf->b_refcnt); 961 cv_destroy(&buf->b_cv); 962 mutex_destroy(&buf->b_freeze_lock); 963 arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS); 964} 965 966/* ARGSUSED */ 967static void 968buf_dest(void *vbuf, void *unused) 969{ 970 arc_buf_t *buf = vbuf; 971 972 mutex_destroy(&buf->b_evict_lock); 973 rw_destroy(&buf->b_data_lock); 974 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS); 975} 976 977/* 978 * Reclaim callback -- invoked when memory is low. 979 */ 980/* ARGSUSED */ 981static void 982hdr_recl(void *unused) 983{ 984 dprintf("hdr_recl called\n"); 985 /* 986 * umem calls the reclaim func when we destroy the buf cache, 987 * which is after we do arc_fini(). 988 */ 989 if (!arc_dead) 990 cv_signal(&arc_reclaim_thr_cv); 991} 992 993static void 994buf_init(void) 995{ 996 uint64_t *ct; 997 uint64_t hsize = 1ULL << 12; 998 int i, j; 999 1000 /* 1001 * The hash table is big enough to fill all of physical memory 1002 * with an average 64K block size. The table will take up 1003 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). 
1004 */ 1005 while (hsize * 65536 < (uint64_t)physmem * PAGESIZE) 1006 hsize <<= 1; 1007retry: 1008 buf_hash_table.ht_mask = hsize - 1; 1009 buf_hash_table.ht_table = 1010 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 1011 if (buf_hash_table.ht_table == NULL) { 1012 ASSERT(hsize > (1ULL << 8)); 1013 hsize >>= 1; 1014 goto retry; 1015 } 1016 1017 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 1018 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 1019 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 1020 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); 1021 1022 for (i = 0; i < 256; i++) 1023 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 1024 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 1025 1026 for (i = 0; i < BUF_LOCKS; i++) { 1027 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 1028 NULL, MUTEX_DEFAULT, NULL); 1029 } 1030} 1031 1032#define ARC_MINTIME (hz>>4) /* 62 ms */ 1033 1034static void 1035arc_cksum_verify(arc_buf_t *buf) 1036{ 1037 zio_cksum_t zc; 1038 1039 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1040 return; 1041 1042 mutex_enter(&buf->b_hdr->b_freeze_lock); 1043 if (buf->b_hdr->b_freeze_cksum == NULL || 1044 (buf->b_hdr->b_flags & ARC_IO_ERROR)) { 1045 mutex_exit(&buf->b_hdr->b_freeze_lock); 1046 return; 1047 } 1048 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 1049 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) 1050 panic("buffer modified while frozen!"); 1051 mutex_exit(&buf->b_hdr->b_freeze_lock); 1052} 1053 1054static int 1055arc_cksum_equal(arc_buf_t *buf) 1056{ 1057 zio_cksum_t zc; 1058 int equal; 1059 1060 mutex_enter(&buf->b_hdr->b_freeze_lock); 1061 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 1062 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); 1063 mutex_exit(&buf->b_hdr->b_freeze_lock); 1064 1065 return (equal); 1066} 1067 1068static void 1069arc_cksum_compute(arc_buf_t *buf, boolean_t force) 1070{ 1071 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) 1072 return; 1073 1074 mutex_enter(&buf->b_hdr->b_freeze_lock); 1075 if (buf->b_hdr->b_freeze_cksum != NULL) { 1076 mutex_exit(&buf->b_hdr->b_freeze_lock); 1077 return; 1078 } 1079 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 1080 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, 1081 buf->b_hdr->b_freeze_cksum); 1082 mutex_exit(&buf->b_hdr->b_freeze_lock);
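/*
 * With the frozen checksum in place, arm the (illumos, userland-only)
 * write watchpoint over this buffer so that any later modification is
 * caught at the offending store.
 */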
1083#ifdef illumos 1084 arc_buf_watch(buf); 1085#endif /* illumos */
1072} 1073
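/*
 * arc_buf_watch() and arc_buf_unwatch() below are a debugging aid for
 * illumos userland builds (e.g. ztest/libzpool): when arc_watch is set,
 * a PCWATCH request is written to arc_procfd to place a WA_WRITE
 * watched area over a buffer's data once its checksum has been frozen,
 * and to clear that area (pr_size of 0) before the buffer is thawed or
 * its data freed.  A stray write to a frozen buffer then faults
 * immediately instead of only being detected later by arc_cksum_verify().
 */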
| 1088#ifdef illumos 1089#ifndef _KERNEL 1090typedef struct procctl { 1091 long cmd; 1092 prwatch_t prwatch; 1093} procctl_t; 1094#endif 1095 1096/* ARGSUSED */ 1097static void 1098arc_buf_unwatch(arc_buf_t *buf) 1099{ 1100#ifndef _KERNEL 1101 if (arc_watch) { 1102 int result; 1103 procctl_t ctl; 1104 ctl.cmd = PCWATCH; 1105 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 1106 ctl.prwatch.pr_size = 0; 1107 ctl.prwatch.pr_wflags = 0; 1108 result = write(arc_procfd, &ctl, sizeof (ctl)); 1109 ASSERT3U(result, ==, sizeof (ctl)); 1110 } 1111#endif 1112} 1113 1114/* ARGSUSED */ 1115static void 1116arc_buf_watch(arc_buf_t *buf) 1117{ 1118#ifndef _KERNEL 1119 if (arc_watch) { 1120 int result; 1121 procctl_t ctl; 1122 ctl.cmd = PCWATCH; 1123 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 1124 ctl.prwatch.pr_size = buf->b_hdr->b_size; 1125 ctl.prwatch.pr_wflags = WA_WRITE; 1126 result = write(arc_procfd, &ctl, sizeof (ctl)); 1127 ASSERT3U(result, ==, sizeof (ctl)); 1128 } 1129#endif 1130} 1131#endif /* illumos */ 1132
1074void 1075arc_buf_thaw(arc_buf_t *buf) 1076{ 1077 if (zfs_flags & ZFS_DEBUG_MODIFY) { 1078 if (buf->b_hdr->b_state != arc_anon) 1079 panic("modifying non-anon buffer!"); 1080 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS) 1081 panic("modifying buffer while i/o in progress!"); 1082 arc_cksum_verify(buf); 1083 } 1084 1085 mutex_enter(&buf->b_hdr->b_freeze_lock); 1086 if (buf->b_hdr->b_freeze_cksum != NULL) { 1087 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1088 buf->b_hdr->b_freeze_cksum = NULL; 1089 } 1090 1091 if (zfs_flags & ZFS_DEBUG_MODIFY) { 1092 if (buf->b_hdr->b_thawed) 1093 kmem_free(buf->b_hdr->b_thawed, 1); 1094 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP); 1095 } 1096 1097 mutex_exit(&buf->b_hdr->b_freeze_lock);
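/*
 * The buffer may legitimately be modified once it is thawed, so drop
 * any userland write watchpoint that was covering its data.
 */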
1157 1158#ifdef illumos 1159 arc_buf_unwatch(buf); 1160#endif /* illumos */
1098} 1099 1100void 1101arc_buf_freeze(arc_buf_t *buf) 1102{ 1103 kmutex_t *hash_lock; 1104 1105 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1106 return; 1107 1108 hash_lock = HDR_LOCK(buf->b_hdr); 1109 mutex_enter(hash_lock); 1110 1111 ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 1112 buf->b_hdr->b_state == arc_anon); 1113 arc_cksum_compute(buf, B_FALSE); 1114 mutex_exit(hash_lock);
1115} 1116 1117static void 1118get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list, kmutex_t **lock) 1119{ 1120 uint64_t buf_hashid = buf_hash(ab->b_spa, &ab->b_dva, ab->b_birth); 1121 1122 if (ab->b_type == ARC_BUFC_METADATA) 1123 buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1); 1124 else { 1125 buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1); 1126 buf_hashid += ARC_BUFC_NUMMETADATALISTS; 1127 } 1128 1129 *list = &state->arcs_lists[buf_hashid]; 1130 *lock = ARCS_LOCK(state, buf_hashid); 1131} 1132 1133 1134static void 1135add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 1136{ 1137 ASSERT(MUTEX_HELD(hash_lock)); 1138 1139 if ((refcount_add(&ab->b_refcnt, tag) == 1) && 1140 (ab->b_state != arc_anon)) { 1141 uint64_t delta = ab->b_size * ab->b_datacnt; 1142 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type]; 1143 list_t *list; 1144 kmutex_t *lock; 1145 1146 get_buf_info(ab, ab->b_state, &list, &lock); 1147 ASSERT(!MUTEX_HELD(lock)); 1148 mutex_enter(lock); 1149 ASSERT(list_link_active(&ab->b_arc_node)); 1150 list_remove(list, ab); 1151 if (GHOST_STATE(ab->b_state)) {
1216 ASSERT0(ab->b_datacnt);
1153 ASSERT3P(ab->b_buf, ==, NULL); 1154 delta = ab->b_size; 1155 } 1156 ASSERT(delta > 0); 1157 ASSERT3U(*size, >=, delta); 1158 atomic_add_64(size, -delta); 1159 mutex_exit(lock); 1160 /* remove the prefetch flag if we get a reference */ 1161 if (ab->b_flags & ARC_PREFETCH) 1162 ab->b_flags &= ~ARC_PREFETCH; 1163 } 1164} 1165 1166static int 1167remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 1168{ 1169 int cnt; 1170 arc_state_t *state = ab->b_state; 1171 1172 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 1173 ASSERT(!GHOST_STATE(state)); 1174 1175 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 1176 (state != arc_anon)) { 1177 uint64_t *size = &state->arcs_lsize[ab->b_type]; 1178 list_t *list; 1179 kmutex_t *lock; 1180 1181 get_buf_info(ab, state, &list, &lock); 1182 ASSERT(!MUTEX_HELD(lock)); 1183 mutex_enter(lock); 1184 ASSERT(!list_link_active(&ab->b_arc_node)); 1185 list_insert_head(list, ab); 1186 ASSERT(ab->b_datacnt > 0); 1187 atomic_add_64(size, ab->b_size * ab->b_datacnt); 1188 mutex_exit(lock); 1189 } 1190 return (cnt); 1191} 1192 1193/* 1194 * Move the supplied buffer to the indicated state. The mutex 1195 * for the buffer must be held by the caller. 1196 */ 1197static void 1198arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) 1199{ 1200 arc_state_t *old_state = ab->b_state; 1201 int64_t refcnt = refcount_count(&ab->b_refcnt); 1202 uint64_t from_delta, to_delta; 1203 list_t *list; 1204 kmutex_t *lock; 1205 1206 ASSERT(MUTEX_HELD(hash_lock)); 1207 ASSERT(new_state != old_state); 1208 ASSERT(refcnt == 0 || ab->b_datacnt > 0); 1209 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); 1210 ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon); 1211 1212 from_delta = to_delta = ab->b_datacnt * ab->b_size; 1213 1214 /* 1215 * If this buffer is evictable, transfer it from the 1216 * old state list to the new state list. 1217 */ 1218 if (refcnt == 0) { 1219 if (old_state != arc_anon) { 1220 int use_mutex; 1221 uint64_t *size = &old_state->arcs_lsize[ab->b_type]; 1222 1223 get_buf_info(ab, old_state, &list, &lock); 1224 use_mutex = !MUTEX_HELD(lock); 1225 if (use_mutex) 1226 mutex_enter(lock); 1227 1228 ASSERT(list_link_active(&ab->b_arc_node)); 1229 list_remove(list, ab); 1230 1231 /* 1232 * If prefetching out of the ghost cache, 1233 * we will have a non-zero datacnt. 
1234 */ 1235 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { 1236 /* ghost elements have a ghost size */ 1237 ASSERT(ab->b_buf == NULL); 1238 from_delta = ab->b_size; 1239 } 1240 ASSERT3U(*size, >=, from_delta); 1241 atomic_add_64(size, -from_delta); 1242 1243 if (use_mutex) 1244 mutex_exit(lock); 1245 } 1246 if (new_state != arc_anon) { 1247 int use_mutex; 1248 uint64_t *size = &new_state->arcs_lsize[ab->b_type]; 1249 1250 get_buf_info(ab, new_state, &list, &lock); 1251 use_mutex = !MUTEX_HELD(lock); 1252 if (use_mutex) 1253 mutex_enter(lock); 1254 1255 list_insert_head(list, ab); 1256 1257 /* ghost elements have a ghost size */ 1258 if (GHOST_STATE(new_state)) { 1259 ASSERT(ab->b_datacnt == 0); 1260 ASSERT(ab->b_buf == NULL); 1261 to_delta = ab->b_size; 1262 } 1263 atomic_add_64(size, to_delta); 1264 1265 if (use_mutex) 1266 mutex_exit(lock); 1267 } 1268 } 1269 1270 ASSERT(!BUF_EMPTY(ab)); 1271 if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab)) 1272 buf_hash_remove(ab); 1273 1274 /* adjust state sizes */ 1275 if (to_delta) 1276 atomic_add_64(&new_state->arcs_size, to_delta); 1277 if (from_delta) { 1278 ASSERT3U(old_state->arcs_size, >=, from_delta); 1279 atomic_add_64(&old_state->arcs_size, -from_delta); 1280 } 1281 ab->b_state = new_state; 1282 1283 /* adjust l2arc hdr stats */ 1284 if (new_state == arc_l2c_only) 1285 l2arc_hdr_stat_add(); 1286 else if (old_state == arc_l2c_only) 1287 l2arc_hdr_stat_remove(); 1288} 1289 1290void 1291arc_space_consume(uint64_t space, arc_space_type_t type) 1292{ 1293 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 1294 1295 switch (type) { 1296 case ARC_SPACE_DATA: 1297 ARCSTAT_INCR(arcstat_data_size, space); 1298 break; 1299 case ARC_SPACE_OTHER: 1300 ARCSTAT_INCR(arcstat_other_size, space); 1301 break; 1302 case ARC_SPACE_HDRS: 1303 ARCSTAT_INCR(arcstat_hdr_size, space); 1304 break; 1305 case ARC_SPACE_L2HDRS: 1306 ARCSTAT_INCR(arcstat_l2_hdr_size, space); 1307 break; 1308 } 1309 1310 atomic_add_64(&arc_meta_used, space); 1311 atomic_add_64(&arc_size, space); 1312} 1313 1314void 1315arc_space_return(uint64_t space, arc_space_type_t type) 1316{ 1317 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 1318 1319 switch (type) { 1320 case ARC_SPACE_DATA: 1321 ARCSTAT_INCR(arcstat_data_size, -space); 1322 break; 1323 case ARC_SPACE_OTHER: 1324 ARCSTAT_INCR(arcstat_other_size, -space); 1325 break; 1326 case ARC_SPACE_HDRS: 1327 ARCSTAT_INCR(arcstat_hdr_size, -space); 1328 break; 1329 case ARC_SPACE_L2HDRS: 1330 ARCSTAT_INCR(arcstat_l2_hdr_size, -space); 1331 break; 1332 } 1333 1334 ASSERT(arc_meta_used >= space); 1335 if (arc_meta_max < arc_meta_used) 1336 arc_meta_max = arc_meta_used; 1337 atomic_add_64(&arc_meta_used, -space); 1338 ASSERT(arc_size >= space); 1339 atomic_add_64(&arc_size, -space); 1340} 1341 1342void * 1343arc_data_buf_alloc(uint64_t size) 1344{ 1345 if (arc_evict_needed(ARC_BUFC_DATA)) 1346 cv_signal(&arc_reclaim_thr_cv); 1347 atomic_add_64(&arc_size, size); 1348 return (zio_data_buf_alloc(size)); 1349} 1350 1351void 1352arc_data_buf_free(void *buf, uint64_t size) 1353{ 1354 zio_data_buf_free(buf, size); 1355 ASSERT(arc_size >= size); 1356 atomic_add_64(&arc_size, -size); 1357} 1358 1359arc_buf_t * 1360arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) 1361{ 1362 arc_buf_hdr_t *hdr; 1363 arc_buf_t *buf; 1364 1365 ASSERT3U(size, >, 0); 1366 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 1367 ASSERT(BUF_EMPTY(hdr)); 1368 hdr->b_size = size; 1369 hdr->b_type = type; 1370 hdr->b_spa = spa_load_guid(spa); 1371 
hdr->b_state = arc_anon; 1372 hdr->b_arc_access = 0; 1373 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1374 buf->b_hdr = hdr; 1375 buf->b_data = NULL; 1376 buf->b_efunc = NULL; 1377 buf->b_private = NULL; 1378 buf->b_next = NULL; 1379 hdr->b_buf = buf; 1380 arc_get_data_buf(buf); 1381 hdr->b_datacnt = 1; 1382 hdr->b_flags = 0; 1383 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1384 (void) refcount_add(&hdr->b_refcnt, tag); 1385 1386 return (buf); 1387} 1388 1389static char *arc_onloan_tag = "onloan"; 1390 1391/* 1392 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in 1393 * flight data by arc_tempreserve_space() until they are "returned". Loaned 1394 * buffers must be returned to the arc before they can be used by the DMU or 1395 * freed. 1396 */ 1397arc_buf_t * 1398arc_loan_buf(spa_t *spa, int size) 1399{ 1400 arc_buf_t *buf; 1401 1402 buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA); 1403 1404 atomic_add_64(&arc_loaned_bytes, size); 1405 return (buf); 1406} 1407 1408/* 1409 * Return a loaned arc buffer to the arc. 1410 */ 1411void 1412arc_return_buf(arc_buf_t *buf, void *tag) 1413{ 1414 arc_buf_hdr_t *hdr = buf->b_hdr; 1415 1416 ASSERT(buf->b_data != NULL); 1417 (void) refcount_add(&hdr->b_refcnt, tag); 1418 (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag); 1419 1420 atomic_add_64(&arc_loaned_bytes, -hdr->b_size); 1421} 1422 1423/* Detach an arc_buf from a dbuf (tag) */ 1424void 1425arc_loan_inuse_buf(arc_buf_t *buf, void *tag) 1426{ 1427 arc_buf_hdr_t *hdr; 1428 1429 ASSERT(buf->b_data != NULL); 1430 hdr = buf->b_hdr; 1431 (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag); 1432 (void) refcount_remove(&hdr->b_refcnt, tag); 1433 buf->b_efunc = NULL; 1434 buf->b_private = NULL; 1435 1436 atomic_add_64(&arc_loaned_bytes, hdr->b_size); 1437} 1438 1439static arc_buf_t * 1440arc_buf_clone(arc_buf_t *from) 1441{ 1442 arc_buf_t *buf; 1443 arc_buf_hdr_t *hdr = from->b_hdr; 1444 uint64_t size = hdr->b_size; 1445 1446 ASSERT(hdr->b_state != arc_anon); 1447 1448 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1449 buf->b_hdr = hdr; 1450 buf->b_data = NULL; 1451 buf->b_efunc = NULL; 1452 buf->b_private = NULL; 1453 buf->b_next = hdr->b_buf; 1454 hdr->b_buf = buf; 1455 arc_get_data_buf(buf); 1456 bcopy(from->b_data, buf->b_data, size); 1457 hdr->b_datacnt += 1; 1458 return (buf); 1459} 1460 1461void 1462arc_buf_add_ref(arc_buf_t *buf, void* tag) 1463{ 1464 arc_buf_hdr_t *hdr; 1465 kmutex_t *hash_lock; 1466 1467 /* 1468 * Check to see if this buffer is evicted. Callers 1469 * must verify b_data != NULL to know if the add_ref 1470 * was successful. 1471 */ 1472 mutex_enter(&buf->b_evict_lock); 1473 if (buf->b_data == NULL) { 1474 mutex_exit(&buf->b_evict_lock); 1475 return; 1476 } 1477 hash_lock = HDR_LOCK(buf->b_hdr); 1478 mutex_enter(hash_lock); 1479 hdr = buf->b_hdr; 1480 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1481 mutex_exit(&buf->b_evict_lock); 1482 1483 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 1484 add_reference(hdr, hash_lock, tag); 1485 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 1486 arc_access(hdr, hash_lock); 1487 mutex_exit(hash_lock); 1488 ARCSTAT_BUMP(arcstat_hits); 1489 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 1490 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 1491 data, metadata, hits); 1492} 1493 1494/* 1495 * Free the arc data buffer. If it is an l2arc write in progress, 1496 * the buffer is placed on l2arc_free_on_write to be freed later. 1497 */ 1498static void
1563arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
1501{
1565 arc_buf_hdr_t *hdr = buf->b_hdr; 1566
1502 if (HDR_L2_WRITING(hdr)) { 1503 l2arc_data_free_t *df; 1504 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1570 df->l2df_data = buf->b_data; 1571 df->l2df_size = hdr->b_size;
1507 df->l2df_func = free_func; 1508 mutex_enter(&l2arc_free_on_write_mtx); 1509 list_insert_head(l2arc_free_on_write, df); 1510 mutex_exit(&l2arc_free_on_write_mtx); 1511 ARCSTAT_BUMP(arcstat_l2_free_on_write); 1512 } else {
1578 free_func(buf->b_data, hdr->b_size);
1514 } 1515} 1516 1517static void 1518arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) 1519{ 1520 arc_buf_t **bufp; 1521 1522 /* free up data associated with the buf */ 1523 if (buf->b_data) { 1524 arc_state_t *state = buf->b_hdr->b_state; 1525 uint64_t size = buf->b_hdr->b_size; 1526 arc_buf_contents_t type = buf->b_hdr->b_type; 1527 1528 arc_cksum_verify(buf);
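/*
 * The data block is about to be freed (or recycled), so clear any
 * userland write watchpoint before the memory is released or reused.
 */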
1594#ifdef illumos 1595 arc_buf_unwatch(buf); 1596#endif /* illumos */
1529 1530 if (!recycle) { 1531 if (type == ARC_BUFC_METADATA) {
1600 arc_buf_data_free(buf, zio_buf_free);
1534 arc_space_return(size, ARC_SPACE_DATA); 1535 } else { 1536 ASSERT(type == ARC_BUFC_DATA);
1604 arc_buf_data_free(buf, zio_data_buf_free);
1539 ARCSTAT_INCR(arcstat_data_size, -size); 1540 atomic_add_64(&arc_size, -size); 1541 } 1542 } 1543 if (list_link_active(&buf->b_hdr->b_arc_node)) { 1544 uint64_t *cnt = &state->arcs_lsize[type]; 1545 1546 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 1547 ASSERT(state != arc_anon); 1548 1549 ASSERT3U(*cnt, >=, size); 1550 atomic_add_64(cnt, -size); 1551 } 1552 ASSERT3U(state->arcs_size, >=, size); 1553 atomic_add_64(&state->arcs_size, -size); 1554 buf->b_data = NULL; 1555 ASSERT(buf->b_hdr->b_datacnt > 0); 1556 buf->b_hdr->b_datacnt -= 1; 1557 } 1558 1559 /* only remove the buf if requested */ 1560 if (!all) 1561 return; 1562 1563 /* remove the buf from the hdr list */ 1564 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 1565 continue; 1566 *bufp = buf->b_next; 1567 buf->b_next = NULL; 1568 1569 ASSERT(buf->b_efunc == NULL); 1570 1571 /* clean up the buf */ 1572 buf->b_hdr = NULL; 1573 kmem_cache_free(buf_cache, buf); 1574} 1575 1576static void 1577arc_hdr_destroy(arc_buf_hdr_t *hdr) 1578{ 1579 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1580 ASSERT3P(hdr->b_state, ==, arc_anon); 1581 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1582 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr; 1583 1584 if (l2hdr != NULL) { 1585 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx); 1586 /* 1587 * To prevent arc_free() and l2arc_evict() from 1588 * attempting to free the same buffer at the same time, 1589 * a FREE_IN_PROGRESS flag is given to arc_free() to 1590 * give it priority. l2arc_evict() can't destroy this 1591 * header while we are waiting on l2arc_buflist_mtx. 1592 * 1593 * The hdr may be removed from l2ad_buflist before we 1594 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked. 1595 */ 1596 if (!buflist_held) { 1597 mutex_enter(&l2arc_buflist_mtx); 1598 l2hdr = hdr->b_l2hdr; 1599 } 1600 1601 if (l2hdr != NULL) { 1602 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 1603 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 1604 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 1605 if (hdr->b_state == arc_l2c_only) 1606 l2arc_hdr_stat_remove(); 1607 hdr->b_l2hdr = NULL; 1608 } 1609 1610 if (!buflist_held) 1611 mutex_exit(&l2arc_buflist_mtx); 1612 } 1613 1614 if (!BUF_EMPTY(hdr)) { 1615 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1616 buf_discard_identity(hdr); 1617 } 1618 while (hdr->b_buf) { 1619 arc_buf_t *buf = hdr->b_buf; 1620 1621 if (buf->b_efunc) { 1622 mutex_enter(&arc_eviction_mtx); 1623 mutex_enter(&buf->b_evict_lock); 1624 ASSERT(buf->b_hdr != NULL); 1625 arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1626 hdr->b_buf = buf->b_next; 1627 buf->b_hdr = &arc_eviction_hdr; 1628 buf->b_next = arc_eviction_list; 1629 arc_eviction_list = buf; 1630 mutex_exit(&buf->b_evict_lock); 1631 mutex_exit(&arc_eviction_mtx); 1632 } else { 1633 arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1634 } 1635 } 1636 if (hdr->b_freeze_cksum != NULL) { 1637 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1638 hdr->b_freeze_cksum = NULL; 1639 } 1640 if (hdr->b_thawed) { 1641 kmem_free(hdr->b_thawed, 1); 1642 hdr->b_thawed = NULL; 1643 } 1644 1645 ASSERT(!list_link_active(&hdr->b_arc_node)); 1646 ASSERT3P(hdr->b_hash_next, ==, NULL); 1647 ASSERT3P(hdr->b_acb, ==, NULL); 1648 kmem_cache_free(hdr_cache, hdr); 1649} 1650 1651void 1652arc_buf_free(arc_buf_t *buf, void *tag) 1653{ 1654 arc_buf_hdr_t *hdr = buf->b_hdr; 1655 int hashed = hdr->b_state != arc_anon; 1656 1657 ASSERT(buf->b_efunc == NULL); 1658 ASSERT(buf->b_data != NULL); 1659 1660 if (hashed) { 1661 kmutex_t *hash_lock = HDR_LOCK(hdr); 1662 1663 mutex_enter(hash_lock); 1664 
hdr = buf->b_hdr; 1665 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1666 1667 (void) remove_reference(hdr, hash_lock, tag); 1668 if (hdr->b_datacnt > 1) { 1669 arc_buf_destroy(buf, FALSE, TRUE); 1670 } else { 1671 ASSERT(buf == hdr->b_buf); 1672 ASSERT(buf->b_efunc == NULL); 1673 hdr->b_flags |= ARC_BUF_AVAILABLE; 1674 } 1675 mutex_exit(hash_lock); 1676 } else if (HDR_IO_IN_PROGRESS(hdr)) { 1677 int destroy_hdr; 1678 /* 1679 * We are in the middle of an async write. Don't destroy 1680 * this buffer unless the write completes before we finish 1681 * decrementing the reference count. 1682 */ 1683 mutex_enter(&arc_eviction_mtx); 1684 (void) remove_reference(hdr, NULL, tag); 1685 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1686 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1687 mutex_exit(&arc_eviction_mtx); 1688 if (destroy_hdr) 1689 arc_hdr_destroy(hdr); 1690 } else { 1691 if (remove_reference(hdr, NULL, tag) > 0) 1692 arc_buf_destroy(buf, FALSE, TRUE); 1693 else 1694 arc_hdr_destroy(hdr); 1695 } 1696} 1697 1698int 1699arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1700{ 1701 arc_buf_hdr_t *hdr = buf->b_hdr; 1702 kmutex_t *hash_lock = HDR_LOCK(hdr); 1703 int no_callback = (buf->b_efunc == NULL); 1704 1705 if (hdr->b_state == arc_anon) { 1706 ASSERT(hdr->b_datacnt == 1); 1707 arc_buf_free(buf, tag); 1708 return (no_callback); 1709 } 1710 1711 mutex_enter(hash_lock); 1712 hdr = buf->b_hdr; 1713 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1714 ASSERT(hdr->b_state != arc_anon); 1715 ASSERT(buf->b_data != NULL); 1716 1717 (void) remove_reference(hdr, hash_lock, tag); 1718 if (hdr->b_datacnt > 1) { 1719 if (no_callback) 1720 arc_buf_destroy(buf, FALSE, TRUE); 1721 } else if (no_callback) { 1722 ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1723 ASSERT(buf->b_efunc == NULL); 1724 hdr->b_flags |= ARC_BUF_AVAILABLE; 1725 } 1726 ASSERT(no_callback || hdr->b_datacnt > 1 || 1727 refcount_is_zero(&hdr->b_refcnt)); 1728 mutex_exit(hash_lock); 1729 return (no_callback); 1730} 1731 1732int 1733arc_buf_size(arc_buf_t *buf) 1734{ 1735 return (buf->b_hdr->b_size); 1736} 1737 1738/* 1739 * Evict buffers from list until we've removed the specified number of 1740 * bytes. Move the removed buffers to the appropriate evict state. 1741 * If the recycle flag is set, then attempt to "recycle" a buffer: 1742 * - look for a buffer to evict that is `bytes' long. 1743 * - return the data block from this buffer rather than freeing it. 1744 * This flag is used by callers that are trying to make space for a 1745 * new buffer in a full arc cache. 1746 * 1747 * This function makes a "best effort". It skips over any buffers 1748 * it can't get a hash_lock on, and so may not catch all candidates. 1749 * It may also return without evicting as much space as requested. 1750 */ 1751static void * 1752arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle, 1753 arc_buf_contents_t type) 1754{ 1755 arc_state_t *evicted_state; 1756 uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1757 int64_t bytes_remaining; 1758 arc_buf_hdr_t *ab, *ab_prev = NULL; 1759 list_t *evicted_list, *list, *evicted_list_start, *list_start; 1760 kmutex_t *lock, *evicted_lock; 1761 kmutex_t *hash_lock; 1762 boolean_t have_lock; 1763 void *stolen = NULL; 1764 static int evict_metadata_offset, evict_data_offset; 1765 int i, idx, offset, list_count, count; 1766 1767 ASSERT(state == arc_mru || state == arc_mfu); 1768 1769 evicted_state = (state == arc_mru) ? 
arc_mru_ghost : arc_mfu_ghost; 1770 1771 if (type == ARC_BUFC_METADATA) { 1772 offset = 0; 1773 list_count = ARC_BUFC_NUMMETADATALISTS; 1774 list_start = &state->arcs_lists[0]; 1775 evicted_list_start = &evicted_state->arcs_lists[0]; 1776 idx = evict_metadata_offset; 1777 } else { 1778 offset = ARC_BUFC_NUMMETADATALISTS; 1779 list_start = &state->arcs_lists[offset]; 1780 evicted_list_start = &evicted_state->arcs_lists[offset]; 1781 list_count = ARC_BUFC_NUMDATALISTS; 1782 idx = evict_data_offset; 1783 } 1784 bytes_remaining = evicted_state->arcs_lsize[type]; 1785 count = 0; 1786 1787evict_start: 1788 list = &list_start[idx]; 1789 evicted_list = &evicted_list_start[idx]; 1790 lock = ARCS_LOCK(state, (offset + idx)); 1791 evicted_lock = ARCS_LOCK(evicted_state, (offset + idx)); 1792 1793 mutex_enter(lock); 1794 mutex_enter(evicted_lock); 1795 1796 for (ab = list_tail(list); ab; ab = ab_prev) { 1797 ab_prev = list_prev(list, ab); 1798 bytes_remaining -= (ab->b_size * ab->b_datacnt); 1799 /* prefetch buffers have a minimum lifespan */ 1800 if (HDR_IO_IN_PROGRESS(ab) || 1801 (spa && ab->b_spa != spa) || 1802 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 1803 ddi_get_lbolt() - ab->b_arc_access < 1804 arc_min_prefetch_lifespan)) { 1805 skipped++; 1806 continue; 1807 } 1808 /* "lookahead" for better eviction candidate */ 1809 if (recycle && ab->b_size != bytes && 1810 ab_prev && ab_prev->b_size == bytes) 1811 continue; 1812 hash_lock = HDR_LOCK(ab); 1813 have_lock = MUTEX_HELD(hash_lock); 1814 if (have_lock || mutex_tryenter(hash_lock)) {
| 1605 ARCSTAT_INCR(arcstat_data_size, -size); 1606 atomic_add_64(&arc_size, -size); 1607 } 1608 } 1609 if (list_link_active(&buf->b_hdr->b_arc_node)) { 1610 uint64_t *cnt = &state->arcs_lsize[type]; 1611 1612 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 1613 ASSERT(state != arc_anon); 1614 1615 ASSERT3U(*cnt, >=, size); 1616 atomic_add_64(cnt, -size); 1617 } 1618 ASSERT3U(state->arcs_size, >=, size); 1619 atomic_add_64(&state->arcs_size, -size); 1620 buf->b_data = NULL; 1621 ASSERT(buf->b_hdr->b_datacnt > 0); 1622 buf->b_hdr->b_datacnt -= 1; 1623 } 1624 1625 /* only remove the buf if requested */ 1626 if (!all) 1627 return; 1628 1629 /* remove the buf from the hdr list */ 1630 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 1631 continue; 1632 *bufp = buf->b_next; 1633 buf->b_next = NULL; 1634 1635 ASSERT(buf->b_efunc == NULL); 1636 1637 /* clean up the buf */ 1638 buf->b_hdr = NULL; 1639 kmem_cache_free(buf_cache, buf); 1640} 1641 1642static void 1643arc_hdr_destroy(arc_buf_hdr_t *hdr) 1644{ 1645 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1646 ASSERT3P(hdr->b_state, ==, arc_anon); 1647 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1648 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr; 1649 1650 if (l2hdr != NULL) { 1651 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx); 1652 /* 1653 * To prevent arc_free() and l2arc_evict() from 1654 * attempting to free the same buffer at the same time, 1655 * a FREE_IN_PROGRESS flag is given to arc_free() to 1656 * give it priority. l2arc_evict() can't destroy this 1657 * header while we are waiting on l2arc_buflist_mtx. 1658 * 1659 * The hdr may be removed from l2ad_buflist before we 1660 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked. 1661 */ 1662 if (!buflist_held) { 1663 mutex_enter(&l2arc_buflist_mtx); 1664 l2hdr = hdr->b_l2hdr; 1665 } 1666 1667 if (l2hdr != NULL) { 1668 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 1669 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 1670 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 1671 if (hdr->b_state == arc_l2c_only) 1672 l2arc_hdr_stat_remove(); 1673 hdr->b_l2hdr = NULL; 1674 } 1675 1676 if (!buflist_held) 1677 mutex_exit(&l2arc_buflist_mtx); 1678 } 1679 1680 if (!BUF_EMPTY(hdr)) { 1681 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1682 buf_discard_identity(hdr); 1683 } 1684 while (hdr->b_buf) { 1685 arc_buf_t *buf = hdr->b_buf; 1686 1687 if (buf->b_efunc) { 1688 mutex_enter(&arc_eviction_mtx); 1689 mutex_enter(&buf->b_evict_lock); 1690 ASSERT(buf->b_hdr != NULL); 1691 arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1692 hdr->b_buf = buf->b_next; 1693 buf->b_hdr = &arc_eviction_hdr; 1694 buf->b_next = arc_eviction_list; 1695 arc_eviction_list = buf; 1696 mutex_exit(&buf->b_evict_lock); 1697 mutex_exit(&arc_eviction_mtx); 1698 } else { 1699 arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1700 } 1701 } 1702 if (hdr->b_freeze_cksum != NULL) { 1703 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1704 hdr->b_freeze_cksum = NULL; 1705 } 1706 if (hdr->b_thawed) { 1707 kmem_free(hdr->b_thawed, 1); 1708 hdr->b_thawed = NULL; 1709 } 1710 1711 ASSERT(!list_link_active(&hdr->b_arc_node)); 1712 ASSERT3P(hdr->b_hash_next, ==, NULL); 1713 ASSERT3P(hdr->b_acb, ==, NULL); 1714 kmem_cache_free(hdr_cache, hdr); 1715} 1716 1717void 1718arc_buf_free(arc_buf_t *buf, void *tag) 1719{ 1720 arc_buf_hdr_t *hdr = buf->b_hdr; 1721 int hashed = hdr->b_state != arc_anon; 1722 1723 ASSERT(buf->b_efunc == NULL); 1724 ASSERT(buf->b_data != NULL); 1725 1726 if (hashed) { 1727 kmutex_t *hash_lock = HDR_LOCK(hdr); 1728 1729 mutex_enter(hash_lock); 1730 
hdr = buf->b_hdr; 1731 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1732 1733 (void) remove_reference(hdr, hash_lock, tag); 1734 if (hdr->b_datacnt > 1) { 1735 arc_buf_destroy(buf, FALSE, TRUE); 1736 } else { 1737 ASSERT(buf == hdr->b_buf); 1738 ASSERT(buf->b_efunc == NULL); 1739 hdr->b_flags |= ARC_BUF_AVAILABLE; 1740 } 1741 mutex_exit(hash_lock); 1742 } else if (HDR_IO_IN_PROGRESS(hdr)) { 1743 int destroy_hdr; 1744 /* 1745 * We are in the middle of an async write. Don't destroy 1746 * this buffer unless the write completes before we finish 1747 * decrementing the reference count. 1748 */ 1749 mutex_enter(&arc_eviction_mtx); 1750 (void) remove_reference(hdr, NULL, tag); 1751 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1752 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1753 mutex_exit(&arc_eviction_mtx); 1754 if (destroy_hdr) 1755 arc_hdr_destroy(hdr); 1756 } else { 1757 if (remove_reference(hdr, NULL, tag) > 0) 1758 arc_buf_destroy(buf, FALSE, TRUE); 1759 else 1760 arc_hdr_destroy(hdr); 1761 } 1762} 1763 1764int 1765arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1766{ 1767 arc_buf_hdr_t *hdr = buf->b_hdr; 1768 kmutex_t *hash_lock = HDR_LOCK(hdr); 1769 int no_callback = (buf->b_efunc == NULL); 1770 1771 if (hdr->b_state == arc_anon) { 1772 ASSERT(hdr->b_datacnt == 1); 1773 arc_buf_free(buf, tag); 1774 return (no_callback); 1775 } 1776 1777 mutex_enter(hash_lock); 1778 hdr = buf->b_hdr; 1779 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1780 ASSERT(hdr->b_state != arc_anon); 1781 ASSERT(buf->b_data != NULL); 1782 1783 (void) remove_reference(hdr, hash_lock, tag); 1784 if (hdr->b_datacnt > 1) { 1785 if (no_callback) 1786 arc_buf_destroy(buf, FALSE, TRUE); 1787 } else if (no_callback) { 1788 ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1789 ASSERT(buf->b_efunc == NULL); 1790 hdr->b_flags |= ARC_BUF_AVAILABLE; 1791 } 1792 ASSERT(no_callback || hdr->b_datacnt > 1 || 1793 refcount_is_zero(&hdr->b_refcnt)); 1794 mutex_exit(hash_lock); 1795 return (no_callback); 1796} 1797 1798int 1799arc_buf_size(arc_buf_t *buf) 1800{ 1801 return (buf->b_hdr->b_size); 1802} 1803 1804/* 1805 * Evict buffers from list until we've removed the specified number of 1806 * bytes. Move the removed buffers to the appropriate evict state. 1807 * If the recycle flag is set, then attempt to "recycle" a buffer: 1808 * - look for a buffer to evict that is `bytes' long. 1809 * - return the data block from this buffer rather than freeing it. 1810 * This flag is used by callers that are trying to make space for a 1811 * new buffer in a full arc cache. 1812 * 1813 * This function makes a "best effort". It skips over any buffers 1814 * it can't get a hash_lock on, and so may not catch all candidates. 1815 * It may also return without evicting as much space as requested. 1816 */ 1817static void * 1818arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle, 1819 arc_buf_contents_t type) 1820{ 1821 arc_state_t *evicted_state; 1822 uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1823 int64_t bytes_remaining; 1824 arc_buf_hdr_t *ab, *ab_prev = NULL; 1825 list_t *evicted_list, *list, *evicted_list_start, *list_start; 1826 kmutex_t *lock, *evicted_lock; 1827 kmutex_t *hash_lock; 1828 boolean_t have_lock; 1829 void *stolen = NULL; 1830 static int evict_metadata_offset, evict_data_offset; 1831 int i, idx, offset, list_count, count; 1832 1833 ASSERT(state == arc_mru || state == arc_mfu); 1834 1835 evicted_state = (state == arc_mru) ? 
arc_mru_ghost : arc_mfu_ghost; 1836 1837 if (type == ARC_BUFC_METADATA) { 1838 offset = 0; 1839 list_count = ARC_BUFC_NUMMETADATALISTS; 1840 list_start = &state->arcs_lists[0]; 1841 evicted_list_start = &evicted_state->arcs_lists[0]; 1842 idx = evict_metadata_offset; 1843 } else { 1844 offset = ARC_BUFC_NUMMETADATALISTS; 1845 list_start = &state->arcs_lists[offset]; 1846 evicted_list_start = &evicted_state->arcs_lists[offset]; 1847 list_count = ARC_BUFC_NUMDATALISTS; 1848 idx = evict_data_offset; 1849 } 1850 bytes_remaining = evicted_state->arcs_lsize[type]; 1851 count = 0; 1852 1853evict_start: 1854 list = &list_start[idx]; 1855 evicted_list = &evicted_list_start[idx]; 1856 lock = ARCS_LOCK(state, (offset + idx)); 1857 evicted_lock = ARCS_LOCK(evicted_state, (offset + idx)); 1858 1859 mutex_enter(lock); 1860 mutex_enter(evicted_lock); 1861 1862 for (ab = list_tail(list); ab; ab = ab_prev) { 1863 ab_prev = list_prev(list, ab); 1864 bytes_remaining -= (ab->b_size * ab->b_datacnt); 1865 /* prefetch buffers have a minimum lifespan */ 1866 if (HDR_IO_IN_PROGRESS(ab) || 1867 (spa && ab->b_spa != spa) || 1868 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 1869 ddi_get_lbolt() - ab->b_arc_access < 1870 arc_min_prefetch_lifespan)) { 1871 skipped++; 1872 continue; 1873 } 1874 /* "lookahead" for better eviction candidate */ 1875 if (recycle && ab->b_size != bytes && 1876 ab_prev && ab_prev->b_size == bytes) 1877 continue; 1878 hash_lock = HDR_LOCK(ab); 1879 have_lock = MUTEX_HELD(hash_lock); 1880 if (have_lock || mutex_tryenter(hash_lock)) {
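/* A header sitting on an evictable list holds no active references; the assert below relies on this. */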
1881			ASSERT0(refcount_count(&ab->b_refcnt));
| 1882 ASSERT(ab->b_datacnt > 0); 1883 while (ab->b_buf) { 1884 arc_buf_t *buf = ab->b_buf; 1885 if (!mutex_tryenter(&buf->b_evict_lock)) { 1886 missed += 1; 1887 break; 1888 } 1889 if (buf->b_data) { 1890 bytes_evicted += ab->b_size; 1891 if (recycle && ab->b_type == type && 1892 ab->b_size == bytes && 1893 !HDR_L2_WRITING(ab)) { 1894 stolen = buf->b_data; 1895 recycle = FALSE; 1896 } 1897 } 1898 if (buf->b_efunc) { 1899 mutex_enter(&arc_eviction_mtx); 1900 arc_buf_destroy(buf, 1901 buf->b_data == stolen, FALSE); 1902 ab->b_buf = buf->b_next; 1903 buf->b_hdr = &arc_eviction_hdr; 1904 buf->b_next = arc_eviction_list; 1905 arc_eviction_list = buf; 1906 mutex_exit(&arc_eviction_mtx); 1907 mutex_exit(&buf->b_evict_lock); 1908 } else { 1909 mutex_exit(&buf->b_evict_lock); 1910 arc_buf_destroy(buf, 1911 buf->b_data == stolen, TRUE); 1912 } 1913 } 1914 1915 if (ab->b_l2hdr) { 1916 ARCSTAT_INCR(arcstat_evict_l2_cached, 1917 ab->b_size); 1918 } else { 1919 if (l2arc_write_eligible(ab->b_spa, ab)) { 1920 ARCSTAT_INCR(arcstat_evict_l2_eligible, 1921 ab->b_size); 1922 } else { 1923 ARCSTAT_INCR( 1924 arcstat_evict_l2_ineligible, 1925 ab->b_size); 1926 } 1927 } 1928 1929 if (ab->b_datacnt == 0) { 1930 arc_change_state(evicted_state, ab, hash_lock); 1931 ASSERT(HDR_IN_HASH_TABLE(ab)); 1932 ab->b_flags |= ARC_IN_HASH_TABLE; 1933 ab->b_flags &= ~ARC_BUF_AVAILABLE; 1934 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 1935 } 1936 if (!have_lock) 1937 mutex_exit(hash_lock); 1938 if (bytes >= 0 && bytes_evicted >= bytes) 1939 break; 1940 if (bytes_remaining > 0) { 1941 mutex_exit(evicted_lock); 1942 mutex_exit(lock); 1943 idx = ((idx + 1) & (list_count - 1)); 1944 count++; 1945 goto evict_start; 1946 } 1947 } else { 1948 missed += 1; 1949 } 1950 } 1951 1952 mutex_exit(evicted_lock); 1953 mutex_exit(lock); 1954 1955 idx = ((idx + 1) & (list_count - 1)); 1956 count++; 1957 1958 if (bytes_evicted < bytes) { 1959 if (count < list_count) 1960 goto evict_start; 1961 else 1962 dprintf("only evicted %lld bytes from %x", 1963 (longlong_t)bytes_evicted, state); 1964 } 1965 if (type == ARC_BUFC_METADATA) 1966 evict_metadata_offset = idx; 1967 else 1968 evict_data_offset = idx; 1969 1970 if (skipped) 1971 ARCSTAT_INCR(arcstat_evict_skip, skipped); 1972 1973 if (missed) 1974 ARCSTAT_INCR(arcstat_mutex_miss, missed); 1975 1976 /* 1977 * We have just evicted some date into the ghost state, make 1978 * sure we also adjust the ghost state size if necessary. 1979 */ 1980 if (arc_no_grow && 1981 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { 1982 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + 1983 arc_mru_ghost->arcs_size - arc_c; 1984 1985 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { 1986 int64_t todelete = 1987 MIN(arc_mru_ghost->arcs_lsize[type], mru_over); 1988 arc_evict_ghost(arc_mru_ghost, 0, todelete); 1989 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { 1990 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], 1991 arc_mru_ghost->arcs_size + 1992 arc_mfu_ghost->arcs_size - arc_c); 1993 arc_evict_ghost(arc_mfu_ghost, 0, todelete); 1994 } 1995 } 1996 if (stolen) 1997 ARCSTAT_BUMP(arcstat_stolen); 1998 1999 return (stolen); 2000} 2001 2002/* 2003 * Remove buffers from list until we've removed the specified number of 2004 * bytes. Destroy the buffers that are removed. 
2005 */ 2006static void 2007arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes) 2008{ 2009 arc_buf_hdr_t *ab, *ab_prev; 2010 arc_buf_hdr_t marker = { 0 }; 2011 list_t *list, *list_start; 2012 kmutex_t *hash_lock, *lock; 2013 uint64_t bytes_deleted = 0; 2014 uint64_t bufs_skipped = 0; 2015 static int evict_offset; 2016 int list_count, idx = evict_offset; 2017 int offset, count = 0; 2018 2019 ASSERT(GHOST_STATE(state)); 2020 2021 /* 2022 * data lists come after metadata lists 2023 */ 2024 list_start = &state->arcs_lists[ARC_BUFC_NUMMETADATALISTS]; 2025 list_count = ARC_BUFC_NUMDATALISTS; 2026 offset = ARC_BUFC_NUMMETADATALISTS; 2027 2028evict_start: 2029 list = &list_start[idx]; 2030 lock = ARCS_LOCK(state, idx + offset); 2031 2032 mutex_enter(lock); 2033 for (ab = list_tail(list); ab; ab = ab_prev) { 2034 ab_prev = list_prev(list, ab); 2035 if (spa && ab->b_spa != spa) 2036 continue; 2037 2038 /* ignore markers */ 2039 if (ab->b_spa == 0) 2040 continue; 2041 2042 hash_lock = HDR_LOCK(ab); 2043 /* caller may be trying to modify this buffer, skip it */ 2044 if (MUTEX_HELD(hash_lock)) 2045 continue; 2046 if (mutex_tryenter(hash_lock)) { 2047 ASSERT(!HDR_IO_IN_PROGRESS(ab)); 2048 ASSERT(ab->b_buf == NULL); 2049 ARCSTAT_BUMP(arcstat_deleted); 2050 bytes_deleted += ab->b_size; 2051 2052 if (ab->b_l2hdr != NULL) { 2053 /* 2054 * This buffer is cached on the 2nd Level ARC; 2055 * don't destroy the header. 2056 */ 2057 arc_change_state(arc_l2c_only, ab, hash_lock); 2058 mutex_exit(hash_lock); 2059 } else { 2060 arc_change_state(arc_anon, ab, hash_lock); 2061 mutex_exit(hash_lock); 2062 arc_hdr_destroy(ab); 2063 } 2064 2065 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 2066 if (bytes >= 0 && bytes_deleted >= bytes) 2067 break; 2068 } else if (bytes < 0) { 2069 /* 2070 * Insert a list marker and then wait for the 2071 * hash lock to become available. Once its 2072 * available, restart from where we left off. 
2073 */ 2074 list_insert_after(list, ab, &marker); 2075 mutex_exit(lock); 2076 mutex_enter(hash_lock); 2077 mutex_exit(hash_lock); 2078 mutex_enter(lock); 2079 ab_prev = list_prev(list, &marker); 2080 list_remove(list, &marker); 2081 } else 2082 bufs_skipped += 1; 2083 } 2084 mutex_exit(lock); 2085 idx = ((idx + 1) & (ARC_BUFC_NUMDATALISTS - 1)); 2086 count++; 2087 2088 if (count < list_count) 2089 goto evict_start; 2090 2091 evict_offset = idx; 2092 if ((uintptr_t)list > (uintptr_t)&state->arcs_lists[ARC_BUFC_NUMMETADATALISTS] && 2093 (bytes < 0 || bytes_deleted < bytes)) { 2094 list_start = &state->arcs_lists[0]; 2095 list_count = ARC_BUFC_NUMMETADATALISTS; 2096 offset = count = 0; 2097 goto evict_start; 2098 } 2099 2100 if (bufs_skipped) { 2101 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 2102 ASSERT(bytes >= 0); 2103 } 2104 2105 if (bytes_deleted < bytes) 2106 dprintf("only deleted %lld bytes from %p", 2107 (longlong_t)bytes_deleted, state); 2108} 2109 2110static void 2111arc_adjust(void) 2112{ 2113 int64_t adjustment, delta; 2114 2115 /* 2116 * Adjust MRU size 2117 */ 2118 2119 adjustment = MIN((int64_t)(arc_size - arc_c), 2120 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used - 2121 arc_p)); 2122 2123 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 2124 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment); 2125 (void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA); 2126 adjustment -= delta; 2127 } 2128 2129 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 2130 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment); 2131 (void) arc_evict(arc_mru, 0, delta, FALSE, 2132 ARC_BUFC_METADATA); 2133 } 2134 2135 /* 2136 * Adjust MFU size 2137 */ 2138 2139 adjustment = arc_size - arc_c; 2140 2141 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 2142 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]); 2143 (void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA); 2144 adjustment -= delta; 2145 } 2146 2147 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 2148 int64_t delta = MIN(adjustment, 2149 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]); 2150 (void) arc_evict(arc_mfu, 0, delta, FALSE, 2151 ARC_BUFC_METADATA); 2152 } 2153 2154 /* 2155 * Adjust ghost lists 2156 */ 2157 2158 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c; 2159 2160 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) { 2161 delta = MIN(arc_mru_ghost->arcs_size, adjustment); 2162 arc_evict_ghost(arc_mru_ghost, 0, delta); 2163 } 2164 2165 adjustment = 2166 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c; 2167 2168 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) { 2169 delta = MIN(arc_mfu_ghost->arcs_size, adjustment); 2170 arc_evict_ghost(arc_mfu_ghost, 0, delta); 2171 } 2172} 2173 2174static void 2175arc_do_user_evicts(void) 2176{ 2177 static arc_buf_t *tmp_arc_eviction_list; 2178 2179 /* 2180 * Move list over to avoid LOR 2181 */ 2182restart: 2183 mutex_enter(&arc_eviction_mtx); 2184 tmp_arc_eviction_list = arc_eviction_list; 2185 arc_eviction_list = NULL; 2186 mutex_exit(&arc_eviction_mtx); 2187 2188 while (tmp_arc_eviction_list != NULL) { 2189 arc_buf_t *buf = tmp_arc_eviction_list; 2190 tmp_arc_eviction_list = buf->b_next; 2191 mutex_enter(&buf->b_evict_lock); 2192 buf->b_hdr = NULL; 2193 mutex_exit(&buf->b_evict_lock); 2194 2195 if (buf->b_efunc != NULL) 2196 VERIFY(buf->b_efunc(buf) == 0); 2197 2198 buf->b_efunc = NULL; 2199 buf->b_private = NULL; 2200 
kmem_cache_free(buf_cache, buf); 2201 } 2202 2203 if (arc_eviction_list != NULL) 2204 goto restart; 2205} 2206 2207/* 2208 * Flush all *evictable* data from the cache for the given spa. 2209 * NOTE: this will not touch "active" (i.e. referenced) data. 2210 */ 2211void 2212arc_flush(spa_t *spa) 2213{ 2214 uint64_t guid = 0; 2215 2216 if (spa) 2217 guid = spa_load_guid(spa); 2218 2219 while (arc_mru->arcs_lsize[ARC_BUFC_DATA]) { 2220 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA); 2221 if (spa) 2222 break; 2223 } 2224 while (arc_mru->arcs_lsize[ARC_BUFC_METADATA]) { 2225 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA); 2226 if (spa) 2227 break; 2228 } 2229 while (arc_mfu->arcs_lsize[ARC_BUFC_DATA]) { 2230 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA); 2231 if (spa) 2232 break; 2233 } 2234 while (arc_mfu->arcs_lsize[ARC_BUFC_METADATA]) { 2235 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA); 2236 if (spa) 2237 break; 2238 } 2239 2240 arc_evict_ghost(arc_mru_ghost, guid, -1); 2241 arc_evict_ghost(arc_mfu_ghost, guid, -1); 2242 2243 mutex_enter(&arc_reclaim_thr_lock); 2244 arc_do_user_evicts(); 2245 mutex_exit(&arc_reclaim_thr_lock); 2246 ASSERT(spa || arc_eviction_list == NULL); 2247} 2248 2249void 2250arc_shrink(void) 2251{ 2252 if (arc_c > arc_c_min) { 2253 uint64_t to_free; 2254 2255#ifdef _KERNEL 2256 to_free = arc_c >> arc_shrink_shift; 2257#else 2258 to_free = arc_c >> arc_shrink_shift; 2259#endif 2260 if (arc_c > arc_c_min + to_free) 2261 atomic_add_64(&arc_c, -to_free); 2262 else 2263 arc_c = arc_c_min; 2264 2265 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 2266 if (arc_c > arc_size) 2267 arc_c = MAX(arc_size, arc_c_min); 2268 if (arc_p > arc_c) 2269 arc_p = (arc_c >> 1); 2270 ASSERT(arc_c >= arc_c_min); 2271 ASSERT((int64_t)arc_p >= 0); 2272 } 2273 2274 if (arc_size > arc_c) 2275 arc_adjust(); 2276} 2277 2278static int needfree = 0; 2279 2280static int 2281arc_reclaim_needed(void) 2282{ 2283 2284#ifdef _KERNEL 2285 2286 if (needfree) 2287 return (1); 2288 2289 /* 2290 * Cooperate with pagedaemon when it's time for it to scan 2291 * and reclaim some pages. 2292 */ 2293 if (vm_paging_needed()) 2294 return (1); 2295 2296#ifdef sun 2297 /* 2298 * take 'desfree' extra pages, so we reclaim sooner, rather than later 2299 */ 2300 extra = desfree; 2301 2302 /* 2303 * check that we're out of range of the pageout scanner. It starts to 2304 * schedule paging if freemem is less than lotsfree and needfree. 2305 * lotsfree is the high-water mark for pageout, and needfree is the 2306 * number of needed free pages. We add extra pages here to make sure 2307 * the scanner doesn't start up while we're freeing memory. 2308 */ 2309 if (freemem < lotsfree + needfree + extra) 2310 return (1); 2311 2312 /* 2313 * check to make sure that swapfs has enough space so that anon 2314 * reservations can still succeed. anon_resvmem() checks that the 2315 * availrmem is greater than swapfs_minfree, and the number of reserved 2316 * swap pages. We also add a bit of extra here just to prevent 2317 * circumstances from getting really dire. 2318 */ 2319 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 2320 return (1); 2321 2322#if defined(__i386) 2323 /* 2324 * If we're on an i386 platform, it's possible that we'll exhaust the 2325 * kernel heap space before we ever run out of available physical 2326 * memory. 
Most checks of the size of the heap_area compare against 2327 * tune.t_minarmem, which is the minimum available real memory that we 2328 * can have in the system. However, this is generally fixed at 25 pages 2329 * which is so low that it's useless. In this comparison, we seek to 2330 * calculate the total heap-size, and reclaim if more than 3/4ths of the 2331 * heap is allocated. (Or, in the calculation, if less than 1/4th is 2332 * free) 2333 */ 2334 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 2335 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 2336 return (1); 2337#endif 2338#else /* !sun */ 2339 if (kmem_used() > (kmem_size() * 3) / 4) 2340 return (1); 2341#endif /* sun */ 2342 2343#else 2344 if (spa_get_random(100) == 0) 2345 return (1); 2346#endif 2347 return (0); 2348} 2349 2350extern kmem_cache_t *zio_buf_cache[]; 2351extern kmem_cache_t *zio_data_buf_cache[]; 2352 2353static void 2354arc_kmem_reap_now(arc_reclaim_strategy_t strat) 2355{ 2356 size_t i; 2357 kmem_cache_t *prev_cache = NULL; 2358 kmem_cache_t *prev_data_cache = NULL; 2359 2360#ifdef _KERNEL 2361 if (arc_meta_used >= arc_meta_limit) { 2362 /* 2363 * We are exceeding our meta-data cache limit. 2364 * Purge some DNLC entries to release holds on meta-data. 2365 */ 2366 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 2367 } 2368#if defined(__i386) 2369 /* 2370 * Reclaim unused memory from all kmem caches. 2371 */ 2372 kmem_reap(); 2373#endif 2374#endif 2375 2376 /* 2377 * An aggressive reclamation will shrink the cache size as well as 2378 * reap free buffers from the arc kmem caches. 2379 */ 2380 if (strat == ARC_RECLAIM_AGGR) 2381 arc_shrink(); 2382 2383 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 2384 if (zio_buf_cache[i] != prev_cache) { 2385 prev_cache = zio_buf_cache[i]; 2386 kmem_cache_reap_now(zio_buf_cache[i]); 2387 } 2388 if (zio_data_buf_cache[i] != prev_data_cache) { 2389 prev_data_cache = zio_data_buf_cache[i]; 2390 kmem_cache_reap_now(zio_data_buf_cache[i]); 2391 } 2392 } 2393 kmem_cache_reap_now(buf_cache); 2394 kmem_cache_reap_now(hdr_cache); 2395} 2396 2397static void 2398arc_reclaim_thread(void *dummy __unused) 2399{ 2400 clock_t growtime = 0; 2401 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 2402 callb_cpr_t cpr; 2403 2404 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 2405 2406 mutex_enter(&arc_reclaim_thr_lock); 2407 while (arc_thread_exit == 0) { 2408 if (arc_reclaim_needed()) { 2409 2410 if (arc_no_grow) { 2411 if (last_reclaim == ARC_RECLAIM_CONS) { 2412 last_reclaim = ARC_RECLAIM_AGGR; 2413 } else { 2414 last_reclaim = ARC_RECLAIM_CONS; 2415 } 2416 } else { 2417 arc_no_grow = TRUE; 2418 last_reclaim = ARC_RECLAIM_AGGR; 2419 membar_producer(); 2420 } 2421 2422 /* reset the growth delay for every reclaim */ 2423 growtime = ddi_get_lbolt() + (arc_grow_retry * hz); 2424 2425 if (needfree && last_reclaim == ARC_RECLAIM_CONS) { 2426 /* 2427 * If needfree is TRUE our vm_lowmem hook 2428 * was called and in that case we must free some 2429 * memory, so switch to aggressive mode. 
2430 */ 2431 arc_no_grow = TRUE; 2432 last_reclaim = ARC_RECLAIM_AGGR; 2433 } 2434 arc_kmem_reap_now(last_reclaim); 2435 arc_warm = B_TRUE; 2436 2437 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) { 2438 arc_no_grow = FALSE; 2439 } 2440 2441 arc_adjust(); 2442 2443 if (arc_eviction_list != NULL) 2444 arc_do_user_evicts(); 2445 2446#ifdef _KERNEL 2447 if (needfree) { 2448 needfree = 0; 2449 wakeup(&needfree); 2450 } 2451#endif 2452 2453 /* block until needed, or one second, whichever is shorter */ 2454 CALLB_CPR_SAFE_BEGIN(&cpr); 2455 (void) cv_timedwait(&arc_reclaim_thr_cv, 2456 &arc_reclaim_thr_lock, hz); 2457 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 2458 } 2459 2460 arc_thread_exit = 0; 2461 cv_broadcast(&arc_reclaim_thr_cv); 2462 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 2463 thread_exit(); 2464} 2465 2466/* 2467 * Adapt arc info given the number of bytes we are trying to add and 2468 * the state that we are comming from. This function is only called 2469 * when we are adding new content to the cache. 2470 */ 2471static void 2472arc_adapt(int bytes, arc_state_t *state) 2473{ 2474 int mult; 2475 uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 2476 2477 if (state == arc_l2c_only) 2478 return; 2479 2480 ASSERT(bytes > 0); 2481 /* 2482 * Adapt the target size of the MRU list: 2483 * - if we just hit in the MRU ghost list, then increase 2484 * the target size of the MRU list. 2485 * - if we just hit in the MFU ghost list, then increase 2486 * the target size of the MFU list by decreasing the 2487 * target size of the MRU list. 2488 */ 2489 if (state == arc_mru_ghost) { 2490 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 2491 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 2492 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ 2493 2494 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 2495 } else if (state == arc_mfu_ghost) { 2496 uint64_t delta; 2497 2498 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 2499 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 2500 mult = MIN(mult, 10); 2501 2502 delta = MIN(bytes * mult, arc_p); 2503 arc_p = MAX(arc_p_min, arc_p - delta); 2504 } 2505 ASSERT((int64_t)arc_p >= 0); 2506 2507 if (arc_reclaim_needed()) { 2508 cv_signal(&arc_reclaim_thr_cv); 2509 return; 2510 } 2511 2512 if (arc_no_grow) 2513 return; 2514 2515 if (arc_c >= arc_c_max) 2516 return; 2517 2518 /* 2519 * If we're within (2 * maxblocksize) bytes of the target 2520 * cache size, increment the target cache size 2521 */ 2522 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 2523 atomic_add_64(&arc_c, (int64_t)bytes); 2524 if (arc_c > arc_c_max) 2525 arc_c = arc_c_max; 2526 else if (state == arc_anon) 2527 atomic_add_64(&arc_p, (int64_t)bytes); 2528 if (arc_p > arc_c) 2529 arc_p = arc_c; 2530 } 2531 ASSERT((int64_t)arc_p >= 0); 2532} 2533 2534/* 2535 * Check if the cache has reached its limits and eviction is required 2536 * prior to insert. 2537 */ 2538static int 2539arc_evict_needed(arc_buf_contents_t type) 2540{ 2541 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 2542 return (1); 2543 2544#ifdef sun 2545#ifdef _KERNEL 2546 /* 2547 * If zio data pages are being allocated out of a separate heap segment, 2548 * then enforce that the size of available vmem for this area remains 2549 * above about 1/32nd free. 
2550 */ 2551 if (type == ARC_BUFC_DATA && zio_arena != NULL && 2552 vmem_size(zio_arena, VMEM_FREE) < 2553 (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 2554 return (1); 2555#endif 2556#endif /* sun */ 2557 2558 if (arc_reclaim_needed()) 2559 return (1); 2560 2561 return (arc_size > arc_c); 2562} 2563 2564/* 2565 * The buffer, supplied as the first argument, needs a data block. 2566 * So, if we are at cache max, determine which cache should be victimized. 2567 * We have the following cases: 2568 * 2569 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2570 * In this situation if we're out of space, but the resident size of the MFU is 2571 * under the limit, victimize the MFU cache to satisfy this insertion request. 2572 * 2573 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2574 * Here, we've used up all of the available space for the MRU, so we need to 2575 * evict from our own cache instead. Evict from the set of resident MRU 2576 * entries. 2577 * 2578 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2579 * c minus p represents the MFU space in the cache, since p is the size of the 2580 * cache that is dedicated to the MRU. In this situation there's still space on 2581 * the MFU side, so the MRU side needs to be victimized. 2582 * 2583 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2584 * MFU's resident set is consuming more space than it has been allotted. In 2585 * this situation, we must victimize our own cache, the MFU, for this insertion. 2586 */ 2587static void 2588arc_get_data_buf(arc_buf_t *buf) 2589{ 2590 arc_state_t *state = buf->b_hdr->b_state; 2591 uint64_t size = buf->b_hdr->b_size; 2592 arc_buf_contents_t type = buf->b_hdr->b_type; 2593 2594 arc_adapt(size, state); 2595 2596 /* 2597 * We have not yet reached cache maximum size, 2598 * just allocate a new buffer. 2599 */ 2600 if (!arc_evict_needed(type)) { 2601 if (type == ARC_BUFC_METADATA) { 2602 buf->b_data = zio_buf_alloc(size); 2603 arc_space_consume(size, ARC_SPACE_DATA); 2604 } else { 2605 ASSERT(type == ARC_BUFC_DATA); 2606 buf->b_data = zio_data_buf_alloc(size); 2607 ARCSTAT_INCR(arcstat_data_size, size); 2608 atomic_add_64(&arc_size, size); 2609 } 2610 goto out; 2611 } 2612 2613 /* 2614 * If we are prefetching from the mfu ghost list, this buffer 2615 * will end up on the mru list; so steal space from there. 2616 */ 2617 if (state == arc_mfu_ghost) 2618 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 2619 else if (state == arc_mru_ghost) 2620 state = arc_mru; 2621 2622 if (state == arc_mru || state == arc_anon) { 2623 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2624 state = (arc_mfu->arcs_lsize[type] >= size && 2625 arc_p > mru_used) ? arc_mfu : arc_mru; 2626 } else { 2627 /* MFU cases */ 2628 uint64_t mfu_space = arc_c - arc_p; 2629 state = (arc_mru->arcs_lsize[type] >= size && 2630 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2631 } 2632 if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) { 2633 if (type == ARC_BUFC_METADATA) { 2634 buf->b_data = zio_buf_alloc(size); 2635 arc_space_consume(size, ARC_SPACE_DATA); 2636 } else { 2637 ASSERT(type == ARC_BUFC_DATA); 2638 buf->b_data = zio_data_buf_alloc(size); 2639 ARCSTAT_INCR(arcstat_data_size, size); 2640 atomic_add_64(&arc_size, size); 2641 } 2642 ARCSTAT_BUMP(arcstat_recycle_miss); 2643 } 2644 ASSERT(buf->b_data != NULL); 2645out: 2646 /* 2647 * Update the state size. Note that ghost states have a 2648 * "ghost size" and so don't need to be updated. 
2649 */ 2650 if (!GHOST_STATE(buf->b_hdr->b_state)) { 2651 arc_buf_hdr_t *hdr = buf->b_hdr; 2652 2653 atomic_add_64(&hdr->b_state->arcs_size, size); 2654 if (list_link_active(&hdr->b_arc_node)) { 2655 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2656 atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2657 } 2658 /* 2659 * If we are growing the cache, and we are adding anonymous 2660 * data, and we have outgrown arc_p, update arc_p 2661 */ 2662 if (arc_size < arc_c && hdr->b_state == arc_anon && 2663 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2664 arc_p = MIN(arc_c, arc_p + size); 2665 } 2666 ARCSTAT_BUMP(arcstat_allocated); 2667} 2668 2669/* 2670 * This routine is called whenever a buffer is accessed. 2671 * NOTE: the hash lock is dropped in this function. 2672 */ 2673static void 2674arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2675{ 2676 clock_t now; 2677 2678 ASSERT(MUTEX_HELD(hash_lock)); 2679 2680 if (buf->b_state == arc_anon) { 2681 /* 2682 * This buffer is not in the cache, and does not 2683 * appear in our "ghost" list. Add the new buffer 2684 * to the MRU state. 2685 */ 2686 2687 ASSERT(buf->b_arc_access == 0); 2688 buf->b_arc_access = ddi_get_lbolt(); 2689 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2690 arc_change_state(arc_mru, buf, hash_lock); 2691 2692 } else if (buf->b_state == arc_mru) { 2693 now = ddi_get_lbolt(); 2694 2695 /* 2696 * If this buffer is here because of a prefetch, then either: 2697 * - clear the flag if this is a "referencing" read 2698 * (any subsequent access will bump this into the MFU state). 2699 * or 2700 * - move the buffer to the head of the list if this is 2701 * another prefetch (to make it less likely to be evicted). 2702 */ 2703 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2704 if (refcount_count(&buf->b_refcnt) == 0) { 2705 ASSERT(list_link_active(&buf->b_arc_node)); 2706 } else { 2707 buf->b_flags &= ~ARC_PREFETCH; 2708 ARCSTAT_BUMP(arcstat_mru_hits); 2709 } 2710 buf->b_arc_access = now; 2711 return; 2712 } 2713 2714 /* 2715 * This buffer has been "accessed" only once so far, 2716 * but it is still in the cache. Move it to the MFU 2717 * state. 2718 */ 2719 if (now > buf->b_arc_access + ARC_MINTIME) { 2720 /* 2721 * More than 125ms have passed since we 2722 * instantiated this buffer. Move it to the 2723 * most frequently used state. 2724 */ 2725 buf->b_arc_access = now; 2726 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2727 arc_change_state(arc_mfu, buf, hash_lock); 2728 } 2729 ARCSTAT_BUMP(arcstat_mru_hits); 2730 } else if (buf->b_state == arc_mru_ghost) { 2731 arc_state_t *new_state; 2732 /* 2733 * This buffer has been "accessed" recently, but 2734 * was evicted from the cache. Move it to the 2735 * MFU state. 2736 */ 2737 2738 if (buf->b_flags & ARC_PREFETCH) { 2739 new_state = arc_mru; 2740 if (refcount_count(&buf->b_refcnt) > 0) 2741 buf->b_flags &= ~ARC_PREFETCH; 2742 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2743 } else { 2744 new_state = arc_mfu; 2745 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2746 } 2747 2748 buf->b_arc_access = ddi_get_lbolt(); 2749 arc_change_state(new_state, buf, hash_lock); 2750 2751 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2752 } else if (buf->b_state == arc_mfu) { 2753 /* 2754 * This buffer has been accessed more than once and is 2755 * still in the cache. Keep it in the MFU state. 2756 * 2757 * NOTE: an add_reference() that occurred when we did 2758 * the arc_read() will have kicked this off the list. 
2759 * If it was a prefetch, we will explicitly move it to 2760 * the head of the list now. 2761 */ 2762 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2763 ASSERT(refcount_count(&buf->b_refcnt) == 0); 2764 ASSERT(list_link_active(&buf->b_arc_node)); 2765 } 2766 ARCSTAT_BUMP(arcstat_mfu_hits); 2767 buf->b_arc_access = ddi_get_lbolt(); 2768 } else if (buf->b_state == arc_mfu_ghost) { 2769 arc_state_t *new_state = arc_mfu; 2770 /* 2771 * This buffer has been accessed more than once but has 2772 * been evicted from the cache. Move it back to the 2773 * MFU state. 2774 */ 2775 2776 if (buf->b_flags & ARC_PREFETCH) { 2777 /* 2778 * This is a prefetch access... 2779 * move this block back to the MRU state. 2780 */
2781				ASSERT0(refcount_count(&buf->b_refcnt));
2716 new_state = arc_mru; 2717 } 2718 2719 buf->b_arc_access = ddi_get_lbolt(); 2720 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2721 arc_change_state(new_state, buf, hash_lock); 2722 2723 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2724 } else if (buf->b_state == arc_l2c_only) { 2725 /* 2726 * This buffer is on the 2nd Level ARC. 2727 */ 2728 2729 buf->b_arc_access = ddi_get_lbolt(); 2730 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2731 arc_change_state(arc_mfu, buf, hash_lock); 2732 } else { 2733 ASSERT(!"invalid arc state"); 2734 } 2735} 2736 2737/* a generic arc_done_func_t which you can use */ 2738/* ARGSUSED */ 2739void 2740arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2741{ 2742 if (zio == NULL || zio->io_error == 0) 2743 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2744 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2745} 2746 2747/* a generic arc_done_func_t */ 2748void 2749arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2750{ 2751 arc_buf_t **bufp = arg; 2752 if (zio && zio->io_error) { 2753 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2754 *bufp = NULL; 2755 } else { 2756 *bufp = buf; 2757 ASSERT(buf->b_data); 2758 } 2759} 2760 2761static void 2762arc_read_done(zio_t *zio) 2763{ 2764 arc_buf_hdr_t *hdr, *found; 2765 arc_buf_t *buf; 2766 arc_buf_t *abuf; /* buffer we're assigning to callback */ 2767 kmutex_t *hash_lock; 2768 arc_callback_t *callback_list, *acb; 2769 int freeable = FALSE; 2770 2771 buf = zio->io_private; 2772 hdr = buf->b_hdr; 2773 2774 /* 2775 * The hdr was inserted into hash-table and removed from lists 2776 * prior to starting I/O. We should find this header, since 2777 * it's in the hash table, and it should be legit since it's 2778 * not possible to evict it during the I/O. The only possible 2779 * reason for it not to be found is if we were freed during the 2780 * read. 2781 */ 2782 found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth, 2783 &hash_lock); 2784 2785 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 2786 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 2787 (found == hdr && HDR_L2_READING(hdr))); 2788 2789 hdr->b_flags &= ~ARC_L2_EVICTED; 2790 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 2791 hdr->b_flags &= ~ARC_L2CACHE; 2792 2793 /* byteswap if necessary */ 2794 callback_list = hdr->b_acb; 2795 ASSERT(callback_list != NULL); 2796 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
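		/*
		 * Pick the byteswap routine: indirect blocks (level > 0) are
		 * arrays of block pointers and swap as uint64_t arrays, while
		 * level-0 blocks use the function registered for their object
		 * type's byteswap class in dmu_ot_byteswap[].
		 */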
2863		dmu_object_byteswap_t bswap = 2864		    DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
2865		arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? 2866		    byteswap_uint64_array :
2867		    dmu_ot_byteswap[bswap].ob_func;
2868		func(buf->b_data, hdr->b_size); 2869	} 2870 2871	arc_cksum_compute(buf, B_FALSE);
2872#ifdef illumos 2873	arc_buf_watch(buf); 2874#endif /* illumos */
2804 2805 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) { 2806 /* 2807 * Only call arc_access on anonymous buffers. This is because 2808 * if we've issued an I/O for an evicted buffer, we've already 2809 * called arc_access (to prevent any simultaneous readers from 2810 * getting confused). 2811 */ 2812 arc_access(hdr, hash_lock); 2813 } 2814 2815 /* create copies of the data buffer for the callers */ 2816 abuf = buf; 2817 for (acb = callback_list; acb; acb = acb->acb_next) { 2818 if (acb->acb_done) { 2819 if (abuf == NULL) 2820 abuf = arc_buf_clone(buf); 2821 acb->acb_buf = abuf; 2822 abuf = NULL; 2823 } 2824 } 2825 hdr->b_acb = NULL; 2826 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2827 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 2828 if (abuf == buf) { 2829 ASSERT(buf->b_efunc == NULL); 2830 ASSERT(hdr->b_datacnt == 1); 2831 hdr->b_flags |= ARC_BUF_AVAILABLE; 2832 } 2833 2834 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2835 2836 if (zio->io_error != 0) { 2837 hdr->b_flags |= ARC_IO_ERROR; 2838 if (hdr->b_state != arc_anon) 2839 arc_change_state(arc_anon, hdr, hash_lock); 2840 if (HDR_IN_HASH_TABLE(hdr)) 2841 buf_hash_remove(hdr); 2842 freeable = refcount_is_zero(&hdr->b_refcnt); 2843 } 2844 2845 /* 2846 * Broadcast before we drop the hash_lock to avoid the possibility 2847 * that the hdr (and hence the cv) might be freed before we get to 2848 * the cv_broadcast(). 2849 */ 2850 cv_broadcast(&hdr->b_cv); 2851 2852 if (hash_lock) { 2853 mutex_exit(hash_lock); 2854 } else { 2855 /* 2856 * This block was freed while we waited for the read to 2857 * complete. It has been removed from the hash table and 2858 * moved to the anonymous state (so that it won't show up 2859 * in the cache). 2860 */ 2861 ASSERT3P(hdr->b_state, ==, arc_anon); 2862 freeable = refcount_is_zero(&hdr->b_refcnt); 2863 } 2864 2865 /* execute each callback and free its structure */ 2866 while ((acb = callback_list) != NULL) { 2867 if (acb->acb_done) 2868 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2869 2870 if (acb->acb_zio_dummy != NULL) { 2871 acb->acb_zio_dummy->io_error = zio->io_error; 2872 zio_nowait(acb->acb_zio_dummy); 2873 } 2874 2875 callback_list = acb->acb_next; 2876 kmem_free(acb, sizeof (arc_callback_t)); 2877 } 2878 2879 if (freeable) 2880 arc_hdr_destroy(hdr); 2881} 2882 2883/* 2884 * "Read" the block block at the specified DVA (in bp) via the 2885 * cache. If the block is found in the cache, invoke the provided 2886 * callback immediately and return. Note that the `zio' parameter 2887 * in the callback will be NULL in this case, since no IO was 2888 * required. If the block is not in the cache pass the read request 2889 * on to the spa with a substitute callback function, so that the 2890 * requested block will be added to the cache. 2891 * 2892 * If a read request arrives for a block that has a read in-progress, 2893 * either wait for the in-progress read to complete (and return the 2894 * results); or, if this is a read with a "done" func, add a record 2895 * to the read to invoke the "done" func when the read completes, 2896 * and return; or just return. 2897 * 2898 * arc_read_done() will invoke all the requested "done" functions 2899 * for readers of this block. 2900 * 2901 * Normal callers should use arc_read and pass the arc buffer and offset 2902 * for the bp. But if you know you don't need locking, you can use 2903 * arc_read_nolock. 
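 *
 * As a rough usage sketch (not lifted from any particular caller, and with
 * priority/flags chosen only for illustration), a synchronous cached read
 * that wants the arc buffer handed back might look like:
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *	int error = arc_read(NULL, spa, bp, pbuf, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *
 * arc_getbuf_func() and arc_bcopy_func(), defined above, are generic
 * "done" callbacks provided for exactly this kind of use.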
2904 */ 2905int 2906arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_buf_t *pbuf, 2907 arc_done_func_t *done, void *private, int priority, int zio_flags, 2908 uint32_t *arc_flags, const zbookmark_t *zb) 2909{ 2910 int err; 2911 2912 if (pbuf == NULL) { 2913 /* 2914 * XXX This happens from traverse callback funcs, for 2915 * the objset_phys_t block. 2916 */ 2917 return (arc_read_nolock(pio, spa, bp, done, private, priority, 2918 zio_flags, arc_flags, zb)); 2919 } 2920 2921 ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt)); 2922 ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size); 2923 rw_enter(&pbuf->b_data_lock, RW_READER); 2924 2925 err = arc_read_nolock(pio, spa, bp, done, private, priority, 2926 zio_flags, arc_flags, zb); 2927 rw_exit(&pbuf->b_data_lock); 2928 2929 return (err); 2930} 2931 2932int 2933arc_read_nolock(zio_t *pio, spa_t *spa, const blkptr_t *bp, 2934 arc_done_func_t *done, void *private, int priority, int zio_flags, 2935 uint32_t *arc_flags, const zbookmark_t *zb) 2936{ 2937 arc_buf_hdr_t *hdr; 2938 arc_buf_t *buf; 2939 kmutex_t *hash_lock; 2940 zio_t *rzio; 2941 uint64_t guid = spa_load_guid(spa); 2942 2943top: 2944 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp), 2945 &hash_lock); 2946 if (hdr && hdr->b_datacnt > 0) { 2947 2948 *arc_flags |= ARC_CACHED; 2949 2950 if (HDR_IO_IN_PROGRESS(hdr)) { 2951 2952 if (*arc_flags & ARC_WAIT) { 2953 cv_wait(&hdr->b_cv, hash_lock); 2954 mutex_exit(hash_lock); 2955 goto top; 2956 } 2957 ASSERT(*arc_flags & ARC_NOWAIT); 2958 2959 if (done) { 2960 arc_callback_t *acb = NULL; 2961 2962 acb = kmem_zalloc(sizeof (arc_callback_t), 2963 KM_SLEEP); 2964 acb->acb_done = done; 2965 acb->acb_private = private; 2966 if (pio != NULL) 2967 acb->acb_zio_dummy = zio_null(pio, 2968 spa, NULL, NULL, NULL, zio_flags); 2969 2970 ASSERT(acb->acb_done != NULL); 2971 acb->acb_next = hdr->b_acb; 2972 hdr->b_acb = acb; 2973 add_reference(hdr, hash_lock, private); 2974 mutex_exit(hash_lock); 2975 return (0); 2976 } 2977 mutex_exit(hash_lock); 2978 return (0); 2979 } 2980 2981 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2982 2983 if (done) { 2984 add_reference(hdr, hash_lock, private); 2985 /* 2986 * If this block is already in use, create a new 2987 * copy of the data so that we will be guaranteed 2988 * that arc_release() will always succeed. 
2989 */ 2990 buf = hdr->b_buf; 2991 ASSERT(buf); 2992 ASSERT(buf->b_data); 2993 if (HDR_BUF_AVAILABLE(hdr)) { 2994 ASSERT(buf->b_efunc == NULL); 2995 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2996 } else { 2997 buf = arc_buf_clone(buf); 2998 } 2999 3000 } else if (*arc_flags & ARC_PREFETCH && 3001 refcount_count(&hdr->b_refcnt) == 0) { 3002 hdr->b_flags |= ARC_PREFETCH; 3003 } 3004 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 3005 arc_access(hdr, hash_lock); 3006 if (*arc_flags & ARC_L2CACHE) 3007 hdr->b_flags |= ARC_L2CACHE; 3008 mutex_exit(hash_lock); 3009 ARCSTAT_BUMP(arcstat_hits); 3010 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 3011 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 3012 data, metadata, hits); 3013 3014 if (done) 3015 done(NULL, buf, private); 3016 } else { 3017 uint64_t size = BP_GET_LSIZE(bp); 3018 arc_callback_t *acb; 3019 vdev_t *vd = NULL; 3020 uint64_t addr; 3021 boolean_t devw = B_FALSE; 3022 3023 if (hdr == NULL) { 3024 /* this block is not in the cache */ 3025 arc_buf_hdr_t *exists; 3026 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 3027 buf = arc_buf_alloc(spa, size, private, type); 3028 hdr = buf->b_hdr; 3029 hdr->b_dva = *BP_IDENTITY(bp); 3030 hdr->b_birth = BP_PHYSICAL_BIRTH(bp); 3031 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 3032 exists = buf_hash_insert(hdr, &hash_lock); 3033 if (exists) { 3034 /* somebody beat us to the hash insert */ 3035 mutex_exit(hash_lock); 3036 buf_discard_identity(hdr); 3037 (void) arc_buf_remove_ref(buf, private); 3038 goto top; /* restart the IO request */ 3039 } 3040 /* if this is a prefetch, we don't have a reference */ 3041 if (*arc_flags & ARC_PREFETCH) { 3042 (void) remove_reference(hdr, hash_lock, 3043 private); 3044 hdr->b_flags |= ARC_PREFETCH; 3045 } 3046 if (*arc_flags & ARC_L2CACHE) 3047 hdr->b_flags |= ARC_L2CACHE; 3048 if (BP_GET_LEVEL(bp) > 0) 3049 hdr->b_flags |= ARC_INDIRECT; 3050 } else { 3051 /* this block is in the ghost cache */ 3052 ASSERT(GHOST_STATE(hdr->b_state)); 3053 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3125			ASSERT0(refcount_count(&hdr->b_refcnt));
3055 ASSERT(hdr->b_buf == NULL); 3056 3057 /* if this is a prefetch, we don't have a reference */ 3058 if (*arc_flags & ARC_PREFETCH) 3059 hdr->b_flags |= ARC_PREFETCH; 3060 else 3061 add_reference(hdr, hash_lock, private); 3062 if (*arc_flags & ARC_L2CACHE) 3063 hdr->b_flags |= ARC_L2CACHE; 3064 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 3065 buf->b_hdr = hdr; 3066 buf->b_data = NULL; 3067 buf->b_efunc = NULL; 3068 buf->b_private = NULL; 3069 buf->b_next = NULL; 3070 hdr->b_buf = buf; 3071 ASSERT(hdr->b_datacnt == 0); 3072 hdr->b_datacnt = 1; 3073 arc_get_data_buf(buf); 3074 arc_access(hdr, hash_lock); 3075 } 3076 3077 ASSERT(!GHOST_STATE(hdr->b_state)); 3078 3079 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 3080 acb->acb_done = done; 3081 acb->acb_private = private; 3082 3083 ASSERT(hdr->b_acb == NULL); 3084 hdr->b_acb = acb; 3085 hdr->b_flags |= ARC_IO_IN_PROGRESS; 3086 3087 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL && 3088 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { 3089 devw = hdr->b_l2hdr->b_dev->l2ad_writing; 3090 addr = hdr->b_l2hdr->b_daddr; 3091 /* 3092 * Lock out device removal. 3093 */ 3094 if (vdev_is_dead(vd) || 3095 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 3096 vd = NULL; 3097 } 3098 3099 mutex_exit(hash_lock); 3100 3101 ASSERT3U(hdr->b_size, ==, size); 3102 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, 3103 uint64_t, size, zbookmark_t *, zb); 3104 ARCSTAT_BUMP(arcstat_misses); 3105 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 3106 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 3107 data, metadata, misses); 3108#ifdef _KERNEL 3109 curthread->td_ru.ru_inblock++; 3110#endif 3111 3112 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 3113 /* 3114 * Read from the L2ARC if the following are true: 3115 * 1. The L2ARC vdev was previously cached. 3116 * 2. This buffer still has L2ARC metadata. 3117 * 3. This buffer isn't currently writing to the L2ARC. 3118 * 4. The L2ARC entry wasn't evicted, which may 3119 * also have invalidated the vdev. 3120 * 5. This isn't prefetch and l2arc_noprefetch is set. 3121 */ 3122 if (hdr->b_l2hdr != NULL && 3123 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 3124 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 3125 l2arc_read_callback_t *cb; 3126 3127 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 3128 ARCSTAT_BUMP(arcstat_l2_hits); 3129 3130 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 3131 KM_SLEEP); 3132 cb->l2rcb_buf = buf; 3133 cb->l2rcb_spa = spa; 3134 cb->l2rcb_bp = *bp; 3135 cb->l2rcb_zb = *zb; 3136 cb->l2rcb_flags = zio_flags; 3137 3138 /* 3139 * l2arc read. The SCL_L2ARC lock will be 3140 * released by l2arc_read_done(). 
3141 */ 3142 rzio = zio_read_phys(pio, vd, addr, size, 3143 buf->b_data, ZIO_CHECKSUM_OFF, 3144 l2arc_read_done, cb, priority, zio_flags | 3145 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | 3146 ZIO_FLAG_DONT_PROPAGATE | 3147 ZIO_FLAG_DONT_RETRY, B_FALSE); 3148 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 3149 zio_t *, rzio); 3150 ARCSTAT_INCR(arcstat_l2_read_bytes, size); 3151 3152 if (*arc_flags & ARC_NOWAIT) { 3153 zio_nowait(rzio); 3154 return (0); 3155 } 3156 3157 ASSERT(*arc_flags & ARC_WAIT); 3158 if (zio_wait(rzio) == 0) 3159 return (0); 3160 3161 /* l2arc read error; goto zio_read() */ 3162 } else { 3163 DTRACE_PROBE1(l2arc__miss, 3164 arc_buf_hdr_t *, hdr); 3165 ARCSTAT_BUMP(arcstat_l2_misses); 3166 if (HDR_L2_WRITING(hdr)) 3167 ARCSTAT_BUMP(arcstat_l2_rw_clash); 3168 spa_config_exit(spa, SCL_L2ARC, vd); 3169 } 3170 } else { 3171 if (vd != NULL) 3172 spa_config_exit(spa, SCL_L2ARC, vd); 3173 if (l2arc_ndev != 0) { 3174 DTRACE_PROBE1(l2arc__miss, 3175 arc_buf_hdr_t *, hdr); 3176 ARCSTAT_BUMP(arcstat_l2_misses); 3177 } 3178 } 3179 3180 rzio = zio_read(pio, spa, bp, buf->b_data, size, 3181 arc_read_done, buf, priority, zio_flags, zb); 3182 3183 if (*arc_flags & ARC_WAIT) 3184 return (zio_wait(rzio)); 3185 3186 ASSERT(*arc_flags & ARC_NOWAIT); 3187 zio_nowait(rzio); 3188 } 3189 return (0); 3190} 3191 3192void 3193arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 3194{ 3195 ASSERT(buf->b_hdr != NULL); 3196 ASSERT(buf->b_hdr->b_state != arc_anon); 3197 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 3198 ASSERT(buf->b_efunc == NULL); 3199 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr)); 3200 3201 buf->b_efunc = func; 3202 buf->b_private = private; 3203} 3204 3205/* 3206 * This is used by the DMU to let the ARC know that a buffer is 3207 * being evicted, so the ARC should clean up. If this arc buf 3208 * is not yet in the evicted state, it will be put there. 3209 */ 3210int 3211arc_buf_evict(arc_buf_t *buf) 3212{ 3213 arc_buf_hdr_t *hdr; 3214 kmutex_t *hash_lock; 3215 arc_buf_t **bufp; 3216 list_t *list, *evicted_list; 3217 kmutex_t *lock, *evicted_lock; 3218 3219 mutex_enter(&buf->b_evict_lock); 3220 hdr = buf->b_hdr; 3221 if (hdr == NULL) { 3222 /* 3223 * We are in arc_do_user_evicts(). 3224 */ 3225 ASSERT(buf->b_data == NULL); 3226 mutex_exit(&buf->b_evict_lock); 3227 return (0); 3228 } else if (buf->b_data == NULL) { 3229 arc_buf_t copy = *buf; /* structure assignment */ 3230 /* 3231 * We are on the eviction list; process this buffer now 3232 * but let arc_do_user_evicts() do the reaping. 3233 */ 3234 buf->b_efunc = NULL; 3235 mutex_exit(&buf->b_evict_lock); 3236 VERIFY(copy.b_efunc(©) == 0); 3237 return (1); 3238 } 3239 hash_lock = HDR_LOCK(hdr); 3240 mutex_enter(hash_lock); 3241 hdr = buf->b_hdr; 3242 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 3243 3244 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 3245 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 3246 3247 /* 3248 * Pull this buffer off of the hdr 3249 */ 3250 bufp = &hdr->b_buf; 3251 while (*bufp != buf) 3252 bufp = &(*bufp)->b_next; 3253 *bufp = buf->b_next; 3254 3255 ASSERT(buf->b_data != NULL); 3256 arc_buf_destroy(buf, FALSE, FALSE); 3257 3258 if (hdr->b_datacnt == 0) { 3259 arc_state_t *old_state = hdr->b_state; 3260 arc_state_t *evicted_state; 3261 3262 ASSERT(hdr->b_buf == NULL); 3263 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 3264 3265 evicted_state = 3266 (old_state == arc_mru) ? 
arc_mru_ghost : arc_mfu_ghost; 3267 3268 get_buf_info(hdr, old_state, &list, &lock); 3269 get_buf_info(hdr, evicted_state, &evicted_list, &evicted_lock); 3270 mutex_enter(lock); 3271 mutex_enter(evicted_lock); 3272 3273 arc_change_state(evicted_state, hdr, hash_lock); 3274 ASSERT(HDR_IN_HASH_TABLE(hdr)); 3275 hdr->b_flags |= ARC_IN_HASH_TABLE; 3276 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 3277 3278 mutex_exit(evicted_lock); 3279 mutex_exit(lock); 3280 } 3281 mutex_exit(hash_lock); 3282 mutex_exit(&buf->b_evict_lock); 3283 3284 VERIFY(buf->b_efunc(buf) == 0); 3285 buf->b_efunc = NULL; 3286 buf->b_private = NULL; 3287 buf->b_hdr = NULL; 3288 buf->b_next = NULL; 3289 kmem_cache_free(buf_cache, buf); 3290 return (1); 3291} 3292 3293/* 3294 * Release this buffer from the cache. This must be done 3295 * after a read and prior to modifying the buffer contents. 3296 * If the buffer has more than one reference, we must make 3297 * a new hdr for the buffer. 3298 */ 3299void 3300arc_release(arc_buf_t *buf, void *tag) 3301{ 3302 arc_buf_hdr_t *hdr; 3303 kmutex_t *hash_lock = NULL; 3304 l2arc_buf_hdr_t *l2hdr; 3305 uint64_t buf_size; 3306 3307 /* 3308 * It would be nice to assert that if it's DMU metadata (level > 3309 * 0 || it's the dnode file), then it must be syncing context. 3310 * But we don't know that information at this level. 3311 */ 3312 3313 mutex_enter(&buf->b_evict_lock); 3314 hdr = buf->b_hdr; 3315 3316 /* this buffer is not on any list */ 3317 ASSERT(refcount_count(&hdr->b_refcnt) > 0); 3318 3319 if (hdr->b_state == arc_anon) { 3320 /* this buffer is already released */ 3321 ASSERT(buf->b_efunc == NULL); 3322 } else { 3323 hash_lock = HDR_LOCK(hdr); 3324 mutex_enter(hash_lock); 3325 hdr = buf->b_hdr; 3326 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 3327 } 3328 3329 l2hdr = hdr->b_l2hdr; 3330 if (l2hdr) { 3331 mutex_enter(&l2arc_buflist_mtx); 3332 hdr->b_l2hdr = NULL; 3333 buf_size = hdr->b_size; 3334 } 3335 3336 /* 3337 * Do we have more than one buf? 3338 */ 3339 if (hdr->b_datacnt > 1) { 3340 arc_buf_hdr_t *nhdr; 3341 arc_buf_t **bufp; 3342 uint64_t blksz = hdr->b_size; 3343 uint64_t spa = hdr->b_spa; 3344 arc_buf_contents_t type = hdr->b_type; 3345 uint32_t flags = hdr->b_flags; 3346 3347 ASSERT(hdr->b_buf != buf || buf->b_next != NULL); 3348 /* 3349 * Pull the data off of this hdr and attach it to 3350 * a new anonymous hdr. 3351 */ 3352 (void) remove_reference(hdr, hash_lock, tag); 3353 bufp = &hdr->b_buf; 3354 while (*bufp != buf) 3355 bufp = &(*bufp)->b_next; 3356 *bufp = buf->b_next; 3357 buf->b_next = NULL; 3358 3359 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 3360 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 3361 if (refcount_is_zero(&hdr->b_refcnt)) { 3362 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 3363 ASSERT3U(*size, >=, hdr->b_size); 3364 atomic_add_64(size, -hdr->b_size); 3365 } 3366 hdr->b_datacnt -= 1; 3367 arc_cksum_verify(buf);
3439#ifdef illumos 3440		arc_buf_unwatch(buf); 3441#endif /* illumos */
3368 3369 mutex_exit(hash_lock); 3370 3371 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 3372 nhdr->b_size = blksz; 3373 nhdr->b_spa = spa; 3374 nhdr->b_type = type; 3375 nhdr->b_buf = buf; 3376 nhdr->b_state = arc_anon; 3377 nhdr->b_arc_access = 0; 3378 nhdr->b_flags = flags & ARC_L2_WRITING; 3379 nhdr->b_l2hdr = NULL; 3380 nhdr->b_datacnt = 1; 3381 nhdr->b_freeze_cksum = NULL; 3382 (void) refcount_add(&nhdr->b_refcnt, tag); 3383 buf->b_hdr = nhdr; 3384 mutex_exit(&buf->b_evict_lock); 3385 atomic_add_64(&arc_anon->arcs_size, blksz); 3386 } else { 3387 mutex_exit(&buf->b_evict_lock); 3388 ASSERT(refcount_count(&hdr->b_refcnt) == 1); 3389 ASSERT(!list_link_active(&hdr->b_arc_node)); 3390 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3391 if (hdr->b_state != arc_anon) 3392 arc_change_state(arc_anon, hdr, hash_lock); 3393 hdr->b_arc_access = 0; 3394 if (hash_lock) 3395 mutex_exit(hash_lock); 3396 3397 buf_discard_identity(hdr); 3398 arc_buf_thaw(buf); 3399 } 3400 buf->b_efunc = NULL; 3401 buf->b_private = NULL; 3402 3403 if (l2hdr) { 3404 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 3405 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 3406 ARCSTAT_INCR(arcstat_l2_size, -buf_size); 3407 mutex_exit(&l2arc_buflist_mtx); 3408 } 3409} 3410 3411/* 3412 * Release this buffer. If it does not match the provided BP, fill it 3413 * with that block's contents. 3414 */ 3415/* ARGSUSED */ 3416int 3417arc_release_bp(arc_buf_t *buf, void *tag, blkptr_t *bp, spa_t *spa, 3418 zbookmark_t *zb) 3419{ 3420 arc_release(buf, tag); 3421 return (0); 3422} 3423 3424int 3425arc_released(arc_buf_t *buf) 3426{ 3427 int released; 3428 3429 mutex_enter(&buf->b_evict_lock); 3430 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 3431 mutex_exit(&buf->b_evict_lock); 3432 return (released); 3433} 3434 3435int 3436arc_has_callback(arc_buf_t *buf) 3437{ 3438 int callback; 3439 3440 mutex_enter(&buf->b_evict_lock); 3441 callback = (buf->b_efunc != NULL); 3442 mutex_exit(&buf->b_evict_lock); 3443 return (callback); 3444} 3445 3446#ifdef ZFS_DEBUG 3447int 3448arc_referenced(arc_buf_t *buf) 3449{ 3450 int referenced; 3451 3452 mutex_enter(&buf->b_evict_lock); 3453 referenced = (refcount_count(&buf->b_hdr->b_refcnt)); 3454 mutex_exit(&buf->b_evict_lock); 3455 return (referenced); 3456} 3457#endif 3458 3459static void 3460arc_write_ready(zio_t *zio) 3461{ 3462 arc_write_callback_t *callback = zio->io_private; 3463 arc_buf_t *buf = callback->awcb_buf; 3464 arc_buf_hdr_t *hdr = buf->b_hdr; 3465 3466 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 3467 callback->awcb_ready(zio, buf, callback->awcb_private); 3468 3469 /* 3470 * If the IO is already in progress, then this is a re-write 3471 * attempt, so we need to thaw and re-compute the cksum. 3472 * It is the responsibility of the callback to handle the 3473 * accounting for any re-write attempt. 
3474 */ 3475 if (HDR_IO_IN_PROGRESS(hdr)) { 3476 mutex_enter(&hdr->b_freeze_lock); 3477 if (hdr->b_freeze_cksum != NULL) { 3478 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 3479 hdr->b_freeze_cksum = NULL; 3480 } 3481 mutex_exit(&hdr->b_freeze_lock); 3482 } 3483 arc_cksum_compute(buf, B_FALSE); 3484 hdr->b_flags |= ARC_IO_IN_PROGRESS; 3485} 3486 3487static void 3488arc_write_done(zio_t *zio) 3489{ 3490 arc_write_callback_t *callback = zio->io_private; 3491 arc_buf_t *buf = callback->awcb_buf; 3492 arc_buf_hdr_t *hdr = buf->b_hdr; 3493 3494 ASSERT(hdr->b_acb == NULL); 3495 3496 if (zio->io_error == 0) { 3497 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 3498 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); 3499 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 3500 } else { 3501 ASSERT(BUF_EMPTY(hdr)); 3502 } 3503 3504 /* 3505 * If the block to be written was all-zero, we may have 3506 * compressed it away. In this case no write was performed 3507 * so there will be no dva/birth/checksum. The buffer must 3508 * therefore remain anonymous (and uncached). 3509 */ 3510 if (!BUF_EMPTY(hdr)) { 3511 arc_buf_hdr_t *exists; 3512 kmutex_t *hash_lock; 3513 3514 ASSERT(zio->io_error == 0); 3515 3516 arc_cksum_verify(buf); 3517 3518 exists = buf_hash_insert(hdr, &hash_lock); 3519 if (exists) { 3520 /* 3521 * This can only happen if we overwrite for 3522 * sync-to-convergence, because we remove 3523 * buffers from the hash table when we arc_free(). 3524 */ 3525 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 3526 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 3527 panic("bad overwrite, hdr=%p exists=%p", 3528 (void *)hdr, (void *)exists); 3529 ASSERT(refcount_is_zero(&exists->b_refcnt)); 3530 arc_change_state(arc_anon, exists, hash_lock); 3531 mutex_exit(hash_lock); 3532 arc_hdr_destroy(exists); 3533 exists = buf_hash_insert(hdr, &hash_lock); 3534 ASSERT3P(exists, ==, NULL); 3535 } else { 3536 /* Dedup */ 3537 ASSERT(hdr->b_datacnt == 1); 3538 ASSERT(hdr->b_state == arc_anon); 3539 ASSERT(BP_GET_DEDUP(zio->io_bp)); 3540 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 3541 } 3542 } 3543 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3544 /* if it's not anon, we are doing a scrub */ 3545 if (!exists && hdr->b_state == arc_anon) 3546 arc_access(hdr, hash_lock); 3547 mutex_exit(hash_lock); 3548 } else { 3549 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3550 } 3551 3552 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3553 callback->awcb_done(zio, buf, callback->awcb_private); 3554 3555 kmem_free(callback, sizeof (arc_write_callback_t)); 3556} 3557 3558zio_t * 3559arc_write(zio_t *pio, spa_t *spa, uint64_t txg, 3560 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp, 3561 arc_done_func_t *ready, arc_done_func_t *done, void *private, 3562 int priority, int zio_flags, const zbookmark_t *zb) 3563{ 3564 arc_buf_hdr_t *hdr = buf->b_hdr; 3565 arc_write_callback_t *callback; 3566 zio_t *zio; 3567 3568 ASSERT(ready != NULL); 3569 ASSERT(done != NULL); 3570 ASSERT(!HDR_IO_ERROR(hdr)); 3571 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 3572 ASSERT(hdr->b_acb == NULL); 3573 if (l2arc) 3574 hdr->b_flags |= ARC_L2CACHE; 3575 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3576 callback->awcb_ready = ready; 3577 callback->awcb_done = done; 3578 callback->awcb_private = private; 3579 callback->awcb_buf = buf; 3580 3581 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp, 3582 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb); 3583 3584 return (zio); 3585} 3586 3587static int 
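/*
 * Throttle incoming dirty data when system memory is scarce.  Returns
 * ERESTART or EAGAIN to tell the caller to back off and retry later, or
 * 0 if the write may proceed.
 */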
3588arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg) 3589{ 3590#ifdef _KERNEL 3591 uint64_t available_memory = 3592 ptoa((uintmax_t)cnt.v_free_count + cnt.v_cache_count); 3593 static uint64_t page_load = 0; 3594 static uint64_t last_txg = 0; 3595 3596#ifdef sun 3597#if defined(__i386) 3598 available_memory = 3599 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 3600#endif 3601#endif /* sun */ 3602 if (available_memory >= zfs_write_limit_max) 3603 return (0); 3604 3605 if (txg > last_txg) { 3606 last_txg = txg; 3607 page_load = 0; 3608 } 3609 /* 3610 * If we are in pageout, we know that memory is already tight, 3611 * the arc is already going to be evicting, so we just want to 3612 * continue to let page writes occur as quickly as possible. 3613 */ 3614 if (curproc == pageproc) { 3615 if (page_load > available_memory / 4) 3616 return (ERESTART); 3617 /* Note: reserve is inflated, so we deflate */ 3618 page_load += reserve / 8; 3619 return (0); 3620 } else if (page_load > 0 && arc_reclaim_needed()) { 3621 /* memory is low, delay before restarting */ 3622 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3623 return (EAGAIN); 3624 } 3625 page_load = 0; 3626 3627 if (arc_size > arc_c_min) { 3628 uint64_t evictable_memory = 3629 arc_mru->arcs_lsize[ARC_BUFC_DATA] + 3630 arc_mru->arcs_lsize[ARC_BUFC_METADATA] + 3631 arc_mfu->arcs_lsize[ARC_BUFC_DATA] + 3632 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; 3633 available_memory += MIN(evictable_memory, arc_size - arc_c_min); 3634 } 3635 3636 if (inflight_data > available_memory / 4) { 3637 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3638 return (ERESTART); 3639 } 3640#endif 3641 return (0); 3642} 3643 3644void 3645arc_tempreserve_clear(uint64_t reserve) 3646{ 3647 atomic_add_64(&arc_tempreserve, -reserve); 3648 ASSERT((int64_t)arc_tempreserve >= 0); 3649} 3650 3651int 3652arc_tempreserve_space(uint64_t reserve, uint64_t txg) 3653{ 3654 int error; 3655 uint64_t anon_size; 3656 3657#ifdef ZFS_DEBUG 3658 /* 3659 * Once in a while, fail for no reason. Everything should cope. 3660 */ 3661 if (spa_get_random(10000) == 0) { 3662 dprintf("forcing random failure\n"); 3663 return (ERESTART); 3664 } 3665#endif 3666 if (reserve > arc_c/4 && !arc_no_grow) 3667 arc_c = MIN(arc_c_max, reserve * 4); 3668 if (reserve > arc_c) 3669 return (ENOMEM); 3670 3671 /* 3672 * Don't count loaned bufs as in flight dirty data to prevent long 3673 * network delays from blocking transactions that are ready to be 3674 * assigned to a txg. 3675 */ 3676 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0); 3677 3678 /* 3679 * Writes will, almost always, require additional memory allocations 3680 * in order to compress/encrypt/etc the data. We therefor need to 3681 * make sure that there is sufficient available memory for this. 3682 */ 3683 if (error = arc_memory_throttle(reserve, anon_size, txg)) 3684 return (error); 3685 3686 /* 3687 * Throttle writes when the amount of dirty data in the cache 3688 * gets too large. We try to keep the cache less than half full 3689 * of dirty blocks so that our sync times don't grow too large. 3690 * Note: if two requests come in concurrently, we might let them 3691 * both succeed, when one of them should fail. Not a huge deal. 
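 * (Concretely, the reservation below is only failed when reserve +
 * arc_tempreserve + anon_size exceeds half of arc_c and the anonymous
 * data alone exceeds a quarter of arc_c.)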
3692 */ 3693 3694 if (reserve + arc_tempreserve + anon_size > arc_c / 2 && 3695 anon_size > arc_c / 4) { 3696 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3697 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3698 arc_tempreserve>>10, 3699 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3700 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3701 reserve>>10, arc_c>>10); 3702 return (ERESTART); 3703 } 3704 atomic_add_64(&arc_tempreserve, reserve); 3705 return (0); 3706} 3707 3708static kmutex_t arc_lowmem_lock; 3709#ifdef _KERNEL 3710static eventhandler_tag arc_event_lowmem = NULL; 3711 3712static void 3713arc_lowmem(void *arg __unused, int howto __unused) 3714{ 3715 3716 /* Serialize access via arc_lowmem_lock. */ 3717 mutex_enter(&arc_lowmem_lock); 3718 mutex_enter(&arc_reclaim_thr_lock); 3719 needfree = 1; 3720 cv_signal(&arc_reclaim_thr_cv); 3721 3722 /* 3723 * It is unsafe to block here in arbitrary threads, because we can come 3724 * here from ARC itself and may hold ARC locks and thus risk a deadlock 3725 * with ARC reclaim thread. 3726 */ 3727 if (curproc == pageproc) { 3728 while (needfree) 3729 msleep(&needfree, &arc_reclaim_thr_lock, 0, "zfs:lowmem", 0); 3730 } 3731 mutex_exit(&arc_reclaim_thr_lock); 3732 mutex_exit(&arc_lowmem_lock); 3733} 3734#endif 3735 3736void 3737arc_init(void) 3738{ 3739 int i, prefetch_tunable_set = 0; 3740 3741 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3742 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3743 mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL); 3744 3745 /* Convert seconds to clock ticks */ 3746 arc_min_prefetch_lifespan = 1 * hz; 3747 3748 /* Start out with 1/8 of all memory */ 3749 arc_c = kmem_size() / 8; 3750 3751#ifdef sun 3752#ifdef _KERNEL 3753 /* 3754 * On architectures where the physical memory can be larger 3755 * than the addressable space (intel in 32-bit mode), we may 3756 * need to limit the cache to 1/8 of VM size. 3757 */ 3758 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3759#endif 3760#endif /* sun */ 3761 /* set min cache to 1/32 of all memory, or 16MB, whichever is more */ 3762 arc_c_min = MAX(arc_c / 4, 64<<18); 3763 /* set max to 1/2 of all memory, or all but 1GB, whichever is more */ 3764 if (arc_c * 8 >= 1<<30) 3765 arc_c_max = (arc_c * 8) - (1<<30); 3766 else 3767 arc_c_max = arc_c_min; 3768 arc_c_max = MAX(arc_c * 5, arc_c_max); 3769 3770#ifdef _KERNEL 3771 /* 3772 * Allow the tunables to override our calculations if they are 3773 * reasonable (ie. 
over 16MB) 3774 */ 3775 if (zfs_arc_max > 64<<18 && zfs_arc_max < kmem_size()) 3776 arc_c_max = zfs_arc_max; 3777 if (zfs_arc_min > 64<<18 && zfs_arc_min <= arc_c_max) 3778 arc_c_min = zfs_arc_min; 3779#endif 3780 3781 arc_c = arc_c_max; 3782 arc_p = (arc_c >> 1); 3783 3784 /* limit meta-data to 1/4 of the arc capacity */ 3785 arc_meta_limit = arc_c_max / 4; 3786 3787 /* Allow the tunable to override if it is reasonable */ 3788 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 3789 arc_meta_limit = zfs_arc_meta_limit; 3790 3791 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 3792 arc_c_min = arc_meta_limit / 2; 3793 3794 if (zfs_arc_grow_retry > 0) 3795 arc_grow_retry = zfs_arc_grow_retry; 3796 3797 if (zfs_arc_shrink_shift > 0) 3798 arc_shrink_shift = zfs_arc_shrink_shift; 3799 3800 if (zfs_arc_p_min_shift > 0) 3801 arc_p_min_shift = zfs_arc_p_min_shift; 3802 3803 /* if kmem_flags are set, lets try to use less memory */ 3804 if (kmem_debugging()) 3805 arc_c = arc_c / 2; 3806 if (arc_c < arc_c_min) 3807 arc_c = arc_c_min; 3808 3809 zfs_arc_min = arc_c_min; 3810 zfs_arc_max = arc_c_max; 3811 3812 arc_anon = &ARC_anon; 3813 arc_mru = &ARC_mru; 3814 arc_mru_ghost = &ARC_mru_ghost; 3815 arc_mfu = &ARC_mfu; 3816 arc_mfu_ghost = &ARC_mfu_ghost; 3817 arc_l2c_only = &ARC_l2c_only; 3818 arc_size = 0; 3819 3820 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) { 3821 mutex_init(&arc_anon->arcs_locks[i].arcs_lock, 3822 NULL, MUTEX_DEFAULT, NULL); 3823 mutex_init(&arc_mru->arcs_locks[i].arcs_lock, 3824 NULL, MUTEX_DEFAULT, NULL); 3825 mutex_init(&arc_mru_ghost->arcs_locks[i].arcs_lock, 3826 NULL, MUTEX_DEFAULT, NULL); 3827 mutex_init(&arc_mfu->arcs_locks[i].arcs_lock, 3828 NULL, MUTEX_DEFAULT, NULL); 3829 mutex_init(&arc_mfu_ghost->arcs_locks[i].arcs_lock, 3830 NULL, MUTEX_DEFAULT, NULL); 3831 mutex_init(&arc_l2c_only->arcs_locks[i].arcs_lock, 3832 NULL, MUTEX_DEFAULT, NULL); 3833 3834 list_create(&arc_mru->arcs_lists[i], 3835 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3836 list_create(&arc_mru_ghost->arcs_lists[i], 3837 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3838 list_create(&arc_mfu->arcs_lists[i], 3839 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3840 list_create(&arc_mfu_ghost->arcs_lists[i], 3841 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3842 list_create(&arc_mfu_ghost->arcs_lists[i], 3843 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3844 list_create(&arc_l2c_only->arcs_lists[i], 3845 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3846 } 3847 3848 buf_init(); 3849 3850 arc_thread_exit = 0; 3851 arc_eviction_list = NULL; 3852 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 3853 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3854 3855 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 3856 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 3857 3858 if (arc_ksp != NULL) { 3859 arc_ksp->ks_data = &arc_stats; 3860 kstat_install(arc_ksp); 3861 } 3862 3863 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3864 TS_RUN, minclsyspri); 3865 3866#ifdef _KERNEL 3867 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL, 3868 EVENTHANDLER_PRI_FIRST); 3869#endif 3870 3871 arc_dead = FALSE; 3872 arc_warm = B_FALSE; 3873 3874 if (zfs_write_limit_max == 0) 3875 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift; 3876 else 3877 zfs_write_limit_shift = 0; 3878 mutex_init(&zfs_write_limit_lock, NULL, 
MUTEX_DEFAULT, NULL); 3879 3880#ifdef _KERNEL 3881 if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable)) 3882 prefetch_tunable_set = 1; 3883 3884#ifdef __i386__ 3885 if (prefetch_tunable_set == 0) { 3886 printf("ZFS NOTICE: Prefetch is disabled by default on i386 " 3887 "-- to enable,\n"); 3888 printf(" add \"vfs.zfs.prefetch_disable=0\" " 3889 "to /boot/loader.conf.\n"); 3890 zfs_prefetch_disable = 1; 3891 } 3892#else 3893 if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) && 3894 prefetch_tunable_set == 0) { 3895 printf("ZFS NOTICE: Prefetch is disabled by default if less " 3896 "than 4GB of RAM is present;\n" 3897 " to enable, add \"vfs.zfs.prefetch_disable=0\" " 3898 "to /boot/loader.conf.\n"); 3899 zfs_prefetch_disable = 1; 3900 } 3901#endif 3902 /* Warn about ZFS memory and address space requirements. */ 3903 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) { 3904 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; " 3905 "expect unstable behavior.\n"); 3906 } 3907 if (kmem_size() < 512 * (1 << 20)) { 3908 printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; " 3909 "expect unstable behavior.\n"); 3910 printf(" Consider tuning vm.kmem_size and " 3911 "vm.kmem_size_max\n"); 3912 printf(" in /boot/loader.conf.\n"); 3913 } 3914#endif 3915} 3916 3917void 3918arc_fini(void) 3919{ 3920 int i; 3921 3922 mutex_enter(&arc_reclaim_thr_lock); 3923 arc_thread_exit = 1; 3924 cv_signal(&arc_reclaim_thr_cv); 3925 while (arc_thread_exit != 0) 3926 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3927 mutex_exit(&arc_reclaim_thr_lock); 3928 3929 arc_flush(NULL); 3930 3931 arc_dead = TRUE; 3932 3933 if (arc_ksp != NULL) { 3934 kstat_delete(arc_ksp); 3935 arc_ksp = NULL; 3936 } 3937 3938 mutex_destroy(&arc_eviction_mtx); 3939 mutex_destroy(&arc_reclaim_thr_lock); 3940 cv_destroy(&arc_reclaim_thr_cv); 3941 3942 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) { 3943 list_destroy(&arc_mru->arcs_lists[i]); 3944 list_destroy(&arc_mru_ghost->arcs_lists[i]); 3945 list_destroy(&arc_mfu->arcs_lists[i]); 3946 list_destroy(&arc_mfu_ghost->arcs_lists[i]); 3947 list_destroy(&arc_l2c_only->arcs_lists[i]); 3948 3949 mutex_destroy(&arc_anon->arcs_locks[i].arcs_lock); 3950 mutex_destroy(&arc_mru->arcs_locks[i].arcs_lock); 3951 mutex_destroy(&arc_mru_ghost->arcs_locks[i].arcs_lock); 3952 mutex_destroy(&arc_mfu->arcs_locks[i].arcs_lock); 3953 mutex_destroy(&arc_mfu_ghost->arcs_locks[i].arcs_lock); 3954 mutex_destroy(&arc_l2c_only->arcs_locks[i].arcs_lock); 3955 } 3956 3957 mutex_destroy(&zfs_write_limit_lock); 3958 3959 buf_fini(); 3960 3961 ASSERT(arc_loaned_bytes == 0); 3962 3963 mutex_destroy(&arc_lowmem_lock); 3964#ifdef _KERNEL 3965 if (arc_event_lowmem != NULL) 3966 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem); 3967#endif 3968} 3969 3970/* 3971 * Level 2 ARC 3972 * 3973 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 3974 * It uses dedicated storage devices to hold cached data, which are populated 3975 * using large infrequent writes. The main role of this cache is to boost 3976 * the performance of random read workloads. The intended L2ARC devices 3977 * include short-stroked disks, solid state disks, and other media with 3978 * substantially faster read latency than disk. 
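 *
 * The diagram below sketches how data flows between the ARC, the L2ARC
 * devices and the pool's primary storage: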
3979 * 3980 * +-----------------------+ 3981 * | ARC | 3982 * +-----------------------+ 3983 * | ^ ^ 3984 * | | | 3985 * l2arc_feed_thread() arc_read() 3986 * | | | 3987 * | l2arc read | 3988 * V | | 3989 * +---------------+ | 3990 * | L2ARC | | 3991 * +---------------+ | 3992 * | ^ | 3993 * l2arc_write() | | 3994 * | | | 3995 * V | | 3996 * +-------+ +-------+ 3997 * | vdev | | vdev | 3998 * | cache | | cache | 3999 * +-------+ +-------+ 4000 * +=========+ .-----. 4001 * : L2ARC : |-_____-| 4002 * : devices : | Disks | 4003 * +=========+ `-_____-' 4004 * 4005 * Read requests are satisfied from the following sources, in order: 4006 * 4007 * 1) ARC 4008 * 2) vdev cache of L2ARC devices 4009 * 3) L2ARC devices 4010 * 4) vdev cache of disks 4011 * 5) disks 4012 * 4013 * Some L2ARC device types exhibit extremely slow write performance. 4014 * To accommodate for this there are some significant differences between 4015 * the L2ARC and traditional cache design: 4016 * 4017 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 4018 * the ARC behave as usual, freeing buffers and placing headers on ghost 4019 * lists. The ARC does not send buffers to the L2ARC during eviction as 4020 * this would add inflated write latencies for all ARC memory pressure. 4021 * 4022 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 4023 * It does this by periodically scanning buffers from the eviction-end of 4024 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 4025 * not already there. It scans until a headroom of buffers is satisfied, 4026 * which itself is a buffer for ARC eviction. The thread that does this is 4027 * l2arc_feed_thread(), illustrated below; example sizes are included to 4028 * provide a better sense of ratio than this diagram: 4029 * 4030 * head --> tail 4031 * +---------------------+----------+ 4032 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 4033 * +---------------------+----------+ | o L2ARC eligible 4034 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 4035 * +---------------------+----------+ | 4036 * 15.9 Gbytes ^ 32 Mbytes | 4037 * headroom | 4038 * l2arc_feed_thread() 4039 * | 4040 * l2arc write hand <--[oooo]--' 4041 * | 8 Mbyte 4042 * | write max 4043 * V 4044 * +==============================+ 4045 * L2ARC dev |####|#|###|###| |####| ... | 4046 * +==============================+ 4047 * 32 Gbytes 4048 * 4049 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 4050 * evicted, then the L2ARC has cached a buffer much sooner than it probably 4051 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 4052 * safe to say that this is an uncommon case, since buffers at the end of 4053 * the ARC lists have moved there due to inactivity. 4054 * 4055 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 4056 * then the L2ARC simply misses copying some buffers. This serves as a 4057 * pressure valve to prevent heavy read workloads from both stalling the ARC 4058 * with waits and clogging the L2ARC with writes. This also helps prevent 4059 * the potential for the L2ARC to churn if it attempts to cache content too 4060 * quickly, such as during backups of the entire pool. 4061 * 4062 * 5. After system boot and before the ARC has filled main memory, there are 4063 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 4064 * lists can remain mostly static. 
Instead of searching from tail of these 4065 * lists as pictured, the l2arc_feed_thread() will search from the list heads 4066 * for eligible buffers, greatly increasing its chance of finding them. 4067 * 4068 * The L2ARC device write speed is also boosted during this time so that 4069 * the L2ARC warms up faster. Since there have been no ARC evictions yet, 4070 * there are no L2ARC reads, and no fear of degrading read performance 4071 * through increased writes. 4072 * 4073 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 4074 * the vdev queue can aggregate them into larger and fewer writes. Each 4075 * device is written to in a rotor fashion, sweeping writes through 4076 * available space then repeating. 4077 * 4078 * 7. The L2ARC does not store dirty content. It never needs to flush 4079 * write buffers back to disk based storage. 4080 * 4081 * 8. If an ARC buffer is written (and dirtied) which also exists in the 4082 * L2ARC, the now stale L2ARC buffer is immediately dropped. 4083 * 4084 * The performance of the L2ARC can be tweaked by a number of tunables, which 4085 * may be necessary for different workloads: 4086 * 4087 * l2arc_write_max max write bytes per interval 4088 * l2arc_write_boost extra write bytes during device warmup 4089 * l2arc_noprefetch skip caching prefetched buffers 4090 * l2arc_headroom number of max device writes to precache 4091 * l2arc_feed_secs seconds between L2ARC writing 4092 * 4093 * Tunables may be removed or added as future performance improvements are 4094 * integrated, and also may become zpool properties. 4095 * 4096 * There are three key functions that control how the L2ARC warms up: 4097 * 4098 * l2arc_write_eligible() check if a buffer is eligible to cache 4099 * l2arc_write_size() calculate how much to write 4100 * l2arc_write_interval() calculate sleep delay between writes 4101 * 4102 * These three functions determine what to write, how much, and how quickly 4103 * to send writes. 4104 */ 4105 4106static boolean_t 4107l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab) 4108{ 4109 /* 4110 * A buffer is *not* eligible for the L2ARC if it: 4111 * 1. belongs to a different spa. 4112 * 2. is already cached on the L2ARC. 4113 * 3. has an I/O in progress (it may be an incomplete read). 4114 * 4. is flagged not eligible (zfs property). 4115 */ 4116 if (ab->b_spa != spa_guid) { 4117 ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch); 4118 return (B_FALSE); 4119 } 4120 if (ab->b_l2hdr != NULL) { 4121 ARCSTAT_BUMP(arcstat_l2_write_in_l2); 4122 return (B_FALSE); 4123 } 4124 if (HDR_IO_IN_PROGRESS(ab)) { 4125 ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress); 4126 return (B_FALSE); 4127 } 4128 if (!HDR_L2CACHE(ab)) { 4129 ARCSTAT_BUMP(arcstat_l2_write_not_cacheable); 4130 return (B_FALSE); 4131 } 4132 4133 return (B_TRUE); 4134} 4135 4136static uint64_t 4137l2arc_write_size(l2arc_dev_t *dev) 4138{ 4139 uint64_t size; 4140 4141 size = dev->l2ad_write; 4142 4143 if (arc_warm == B_FALSE) 4144 size += dev->l2ad_boost; 4145 4146 return (size); 4147 4148} 4149 4150static clock_t 4151l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) 4152{ 4153 clock_t interval, next, now; 4154 4155 /* 4156 * If the ARC lists are busy, increase our write rate; if the 4157 * lists are stale, idle back. This is achieved by checking 4158 * how much we previously wrote - if it was more than half of 4159 * what we wanted, schedule the next write much sooner. 
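 *
 * As a worked example, assuming the usual defaults of
 * l2arc_feed_secs = 1, l2arc_feed_min_ms = 200 and hz = 1000: a pass
 * that wrote more than half of its target is rescheduled after
 * (1000 * 200) / 1000 = 200 ticks, while a pass that found little to
 * write sleeps the full 1000 ticks before trying again.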
4160 */ 4161 if (l2arc_feed_again && wrote > (wanted / 2)) 4162 interval = (hz * l2arc_feed_min_ms) / 1000; 4163 else 4164 interval = hz * l2arc_feed_secs; 4165 4166 now = ddi_get_lbolt(); 4167 next = MAX(now, MIN(now + interval, began + interval)); 4168 4169 return (next); 4170} 4171 4172static void 4173l2arc_hdr_stat_add(void) 4174{ 4175 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 4176 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 4177} 4178 4179static void 4180l2arc_hdr_stat_remove(void) 4181{ 4182 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 4183 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 4184} 4185 4186/* 4187 * Cycle through L2ARC devices. This is how L2ARC load balances. 4188 * If a device is returned, this also returns holding the spa config lock. 4189 */ 4190static l2arc_dev_t * 4191l2arc_dev_get_next(void) 4192{ 4193 l2arc_dev_t *first, *next = NULL; 4194 4195 /* 4196 * Lock out the removal of spas (spa_namespace_lock), then removal 4197 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 4198 * both locks will be dropped and a spa config lock held instead. 4199 */ 4200 mutex_enter(&spa_namespace_lock); 4201 mutex_enter(&l2arc_dev_mtx); 4202 4203 /* if there are no vdevs, there is nothing to do */ 4204 if (l2arc_ndev == 0) 4205 goto out; 4206 4207 first = NULL; 4208 next = l2arc_dev_last; 4209 do { 4210 /* loop around the list looking for a non-faulted vdev */ 4211 if (next == NULL) { 4212 next = list_head(l2arc_dev_list); 4213 } else { 4214 next = list_next(l2arc_dev_list, next); 4215 if (next == NULL) 4216 next = list_head(l2arc_dev_list); 4217 } 4218 4219 /* if we have come back to the start, bail out */ 4220 if (first == NULL) 4221 first = next; 4222 else if (next == first) 4223 break; 4224 4225 } while (vdev_is_dead(next->l2ad_vdev)); 4226 4227 /* if we were unable to find any usable vdevs, return NULL */ 4228 if (vdev_is_dead(next->l2ad_vdev)) 4229 next = NULL; 4230 4231 l2arc_dev_last = next; 4232 4233out: 4234 mutex_exit(&l2arc_dev_mtx); 4235 4236 /* 4237 * Grab the config lock to prevent the 'next' device from being 4238 * removed while we are writing to it. 4239 */ 4240 if (next != NULL) 4241 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 4242 mutex_exit(&spa_namespace_lock); 4243 4244 return (next); 4245} 4246 4247/* 4248 * Free buffers that were tagged for destruction. 4249 */ 4250static void 4251l2arc_do_free_on_write() 4252{ 4253 list_t *buflist; 4254 l2arc_data_free_t *df, *df_prev; 4255 4256 mutex_enter(&l2arc_free_on_write_mtx); 4257 buflist = l2arc_free_on_write; 4258 4259 for (df = list_tail(buflist); df; df = df_prev) { 4260 df_prev = list_prev(buflist, df); 4261 ASSERT(df->l2df_data != NULL); 4262 ASSERT(df->l2df_func != NULL); 4263 df->l2df_func(df->l2df_data, df->l2df_size); 4264 list_remove(buflist, df); 4265 kmem_free(df, sizeof (l2arc_data_free_t)); 4266 } 4267 4268 mutex_exit(&l2arc_free_on_write_mtx); 4269} 4270 4271/* 4272 * A write to a cache device has completed. Update all headers to allow 4273 * reads from these buffers to begin. 
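 *
 * The zio's private data is the l2arc_write_callback_t that
 * l2arc_write_buffers() hangs off the root write zio, roughly:
 *
 *	cb->l2wcb_dev  = dev;
 *	cb->l2wcb_head = head;
 *	pio = zio_root(spa, l2arc_write_done, cb, ZIO_FLAG_CANFAIL);
 *
 * which lets this function find both the device and the dummy write
 * head (and hence the span of just-written buffers) without searching.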
4274 */ 4275static void 4276l2arc_write_done(zio_t *zio) 4277{ 4278 l2arc_write_callback_t *cb; 4279 l2arc_dev_t *dev; 4280 list_t *buflist; 4281 arc_buf_hdr_t *head, *ab, *ab_prev; 4282 l2arc_buf_hdr_t *abl2; 4283 kmutex_t *hash_lock; 4284 4285 cb = zio->io_private; 4286 ASSERT(cb != NULL); 4287 dev = cb->l2wcb_dev; 4288 ASSERT(dev != NULL); 4289 head = cb->l2wcb_head; 4290 ASSERT(head != NULL); 4291 buflist = dev->l2ad_buflist; 4292 ASSERT(buflist != NULL); 4293 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 4294 l2arc_write_callback_t *, cb); 4295 4296 if (zio->io_error != 0) 4297 ARCSTAT_BUMP(arcstat_l2_writes_error); 4298 4299 mutex_enter(&l2arc_buflist_mtx); 4300 4301 /* 4302 * All writes completed, or an error was hit. 4303 */ 4304 for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 4305 ab_prev = list_prev(buflist, ab); 4306 4307 hash_lock = HDR_LOCK(ab); 4308 if (!mutex_tryenter(hash_lock)) { 4309 /* 4310 * This buffer misses out. It may be in a stage 4311 * of eviction. Its ARC_L2_WRITING flag will be 4312 * left set, denying reads to this buffer. 4313 */ 4314 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 4315 continue; 4316 } 4317 4318 if (zio->io_error != 0) { 4319 /* 4320 * Error - drop L2ARC entry. 4321 */ 4322 list_remove(buflist, ab); 4323 abl2 = ab->b_l2hdr; 4324 ab->b_l2hdr = NULL; 4325 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4326 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4327 } 4328 4329 /* 4330 * Allow ARC to begin reads to this L2ARC entry. 4331 */ 4332 ab->b_flags &= ~ARC_L2_WRITING; 4333 4334 mutex_exit(hash_lock); 4335 } 4336 4337 atomic_inc_64(&l2arc_writes_done); 4338 list_remove(buflist, head); 4339 kmem_cache_free(hdr_cache, head); 4340 mutex_exit(&l2arc_buflist_mtx); 4341 4342 l2arc_do_free_on_write(); 4343 4344 kmem_free(cb, sizeof (l2arc_write_callback_t)); 4345} 4346 4347/* 4348 * A read to a cache device completed. Validate buffer contents before 4349 * handing over to the regular ARC routines. 4350 */ 4351static void 4352l2arc_read_done(zio_t *zio) 4353{ 4354 l2arc_read_callback_t *cb; 4355 arc_buf_hdr_t *hdr; 4356 arc_buf_t *buf; 4357 kmutex_t *hash_lock; 4358 int equal; 4359 4360 ASSERT(zio->io_vd != NULL); 4361 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 4362 4363 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 4364 4365 cb = zio->io_private; 4366 ASSERT(cb != NULL); 4367 buf = cb->l2rcb_buf; 4368 ASSERT(buf != NULL); 4369 4370 hash_lock = HDR_LOCK(buf->b_hdr); 4371 mutex_enter(hash_lock); 4372 hdr = buf->b_hdr; 4373 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 4374 4375 /* 4376 * Check this survived the L2ARC journey. 4377 */ 4378 equal = arc_cksum_equal(buf); 4379 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 4380 mutex_exit(hash_lock); 4381 zio->io_private = buf; 4382 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 4383 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 4384 arc_read_done(zio); 4385 } else { 4386 mutex_exit(hash_lock); 4387 /* 4388 * Buffer didn't survive caching. Increment stats and 4389 * reissue to the original storage device. 4390 */ 4391 if (zio->io_error != 0) { 4392 ARCSTAT_BUMP(arcstat_l2_io_error); 4393 } else { 4394 zio->io_error = EIO; 4395 } 4396 if (!equal) 4397 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 4398 4399 /* 4400 * If there's no waiter, issue an async i/o to the primary 4401 * storage now. If there *is* a waiter, the caller must 4402 * issue the i/o in a context where it's OK to block. 
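 *
 * In short: the L2ARC copy is handed to arc_read_done() only if its
 * checksum still matches, the read zio completed without error, and
 * the header was not marked evicted while the read was in flight; any
 * other outcome falls back to the original copy on the pool devices.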
4403 */ 4404 if (zio->io_waiter == NULL) { 4405 zio_t *pio = zio_unique_parent(zio); 4406 4407 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 4408 4409 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp, 4410 buf->b_data, zio->io_size, arc_read_done, buf, 4411 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 4412 } 4413 } 4414 4415 kmem_free(cb, sizeof (l2arc_read_callback_t)); 4416} 4417 4418/* 4419 * This is the list priority from which the L2ARC will search for pages to 4420 * cache. This is used within loops (0..3) to cycle through lists in the 4421 * desired order. This order can have a significant effect on cache 4422 * performance. 4423 * 4424 * Currently the metadata lists are hit first, MFU then MRU, followed by 4425 * the data lists. This function returns a locked list, and also returns 4426 * the lock pointer. 4427 */ 4428static list_t * 4429l2arc_list_locked(int list_num, kmutex_t **lock) 4430{ 4431 list_t *list; 4432 int idx; 4433 4434 ASSERT(list_num >= 0 && list_num < 2 * ARC_BUFC_NUMLISTS); 4435 4436 if (list_num < ARC_BUFC_NUMMETADATALISTS) { 4437 idx = list_num; 4438 list = &arc_mfu->arcs_lists[idx]; 4439 *lock = ARCS_LOCK(arc_mfu, idx); 4440 } else if (list_num < ARC_BUFC_NUMMETADATALISTS * 2) { 4441 idx = list_num - ARC_BUFC_NUMMETADATALISTS; 4442 list = &arc_mru->arcs_lists[idx]; 4443 *lock = ARCS_LOCK(arc_mru, idx); 4444 } else if (list_num < (ARC_BUFC_NUMMETADATALISTS * 2 + 4445 ARC_BUFC_NUMDATALISTS)) { 4446 idx = list_num - ARC_BUFC_NUMMETADATALISTS; 4447 list = &arc_mfu->arcs_lists[idx]; 4448 *lock = ARCS_LOCK(arc_mfu, idx); 4449 } else { 4450 idx = list_num - ARC_BUFC_NUMLISTS; 4451 list = &arc_mru->arcs_lists[idx]; 4452 *lock = ARCS_LOCK(arc_mru, idx); 4453 } 4454 4455 ASSERT(!(MUTEX_HELD(*lock))); 4456 mutex_enter(*lock); 4457 return (list); 4458} 4459 4460/* 4461 * Evict buffers from the device write hand to the distance specified in 4462 * bytes. This distance may span populated buffers, it may span nothing. 4463 * This is clearing a region on the L2ARC device ready for writing. 4464 * If the 'all' boolean is set, every buffer is evicted. 4465 */ 4466static void 4467l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 4468{ 4469 list_t *buflist; 4470 l2arc_buf_hdr_t *abl2; 4471 arc_buf_hdr_t *ab, *ab_prev; 4472 kmutex_t *hash_lock; 4473 uint64_t taddr; 4474 4475 buflist = dev->l2ad_buflist; 4476 4477 if (buflist == NULL) 4478 return; 4479 4480 if (!all && dev->l2ad_first) { 4481 /* 4482 * This is the first sweep through the device. There is 4483 * nothing to evict. 4484 */ 4485 return; 4486 } 4487 4488 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 4489 /* 4490 * When nearing the end of the device, evict to the end 4491 * before the device write hand jumps to the start. 4492 */ 4493 taddr = dev->l2ad_end; 4494 } else { 4495 taddr = dev->l2ad_hand + distance; 4496 } 4497 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 4498 uint64_t, taddr, boolean_t, all); 4499 4500top: 4501 mutex_enter(&l2arc_buflist_mtx); 4502 for (ab = list_tail(buflist); ab; ab = ab_prev) { 4503 ab_prev = list_prev(buflist, ab); 4504 4505 hash_lock = HDR_LOCK(ab); 4506 if (!mutex_tryenter(hash_lock)) { 4507 /* 4508 * Missed the hash lock. Retry. 4509 */ 4510 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 4511 mutex_exit(&l2arc_buflist_mtx); 4512 mutex_enter(hash_lock); 4513 mutex_exit(hash_lock); 4514 goto top; 4515 } 4516 4517 if (HDR_L2_WRITE_HEAD(ab)) { 4518 /* 4519 * We hit a write head node. Leave it for 4520 * l2arc_write_done(). 
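 *
 * (The write head is the dummy, data-less header that
 * l2arc_write_buffers() inserts at the front of the buflist to mark
 * where an in-flight write begins.)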
4521 */ 4522 list_remove(buflist, ab); 4523 mutex_exit(hash_lock); 4524 continue; 4525 } 4526 4527 if (!all && ab->b_l2hdr != NULL && 4528 (ab->b_l2hdr->b_daddr > taddr || 4529 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 4530 /* 4531 * We've evicted to the target address, 4532 * or the end of the device. 4533 */ 4534 mutex_exit(hash_lock); 4535 break; 4536 } 4537 4538 if (HDR_FREE_IN_PROGRESS(ab)) { 4539 /* 4540 * Already on the path to destruction. 4541 */ 4542 mutex_exit(hash_lock); 4543 continue; 4544 } 4545 4546 if (ab->b_state == arc_l2c_only) { 4547 ASSERT(!HDR_L2_READING(ab)); 4548 /* 4549 * This doesn't exist in the ARC. Destroy. 4550 * arc_hdr_destroy() will call list_remove() 4551 * and decrement arcstat_l2_size. 4552 */ 4553 arc_change_state(arc_anon, ab, hash_lock); 4554 arc_hdr_destroy(ab); 4555 } else { 4556 /* 4557 * Invalidate issued or about to be issued 4558 * reads, since we may be about to write 4559 * over this location. 4560 */ 4561 if (HDR_L2_READING(ab)) { 4562 ARCSTAT_BUMP(arcstat_l2_evict_reading); 4563 ab->b_flags |= ARC_L2_EVICTED; 4564 } 4565 4566 /* 4567 * Tell ARC this no longer exists in L2ARC. 4568 */ 4569 if (ab->b_l2hdr != NULL) { 4570 abl2 = ab->b_l2hdr; 4571 ab->b_l2hdr = NULL; 4572 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4573 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4574 } 4575 list_remove(buflist, ab); 4576 4577 /* 4578 * This may have been leftover after a 4579 * failed write. 4580 */ 4581 ab->b_flags &= ~ARC_L2_WRITING; 4582 } 4583 mutex_exit(hash_lock); 4584 } 4585 mutex_exit(&l2arc_buflist_mtx); 4586 4587 vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0); 4588 dev->l2ad_evict = taddr; 4589} 4590 4591/* 4592 * Find and write ARC buffers to the L2ARC device. 4593 * 4594 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 4595 * for reading until they have completed writing. 4596 */ 4597static uint64_t 4598l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 4599{ 4600 arc_buf_hdr_t *ab, *ab_prev, *head; 4601 l2arc_buf_hdr_t *hdrl2; 4602 list_t *list; 4603 uint64_t passed_sz, write_sz, buf_sz, headroom; 4604 void *buf_data; 4605 kmutex_t *hash_lock, *list_lock; 4606 boolean_t have_lock, full; 4607 l2arc_write_callback_t *cb; 4608 zio_t *pio, *wzio; 4609 uint64_t guid = spa_load_guid(spa); 4610 int try; 4611 4612 ASSERT(dev->l2ad_vdev != NULL); 4613 4614 pio = NULL; 4615 write_sz = 0; 4616 full = B_FALSE; 4617 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 4618 head->b_flags |= ARC_L2_WRITE_HEAD; 4619 4620 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter); 4621 /* 4622 * Copy buffers for L2ARC writing. 4623 */ 4624 mutex_enter(&l2arc_buflist_mtx); 4625 for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) { 4626 list = l2arc_list_locked(try, &list_lock); 4627 passed_sz = 0; 4628 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter); 4629 4630 /* 4631 * L2ARC fast warmup. 4632 * 4633 * Until the ARC is warm and starts to evict, read from the 4634 * head of the ARC lists rather than the tail. 
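 *
 * For a sense of scale, assuming the common defaults of
 * l2arc_write_max = 8MB, l2arc_write_boost = 8MB and l2arc_headroom = 2:
 * a cold device is fed up to 8 + 8 = 16MB per pass, and up to
 * 2 * 16MB = 32MB worth of list is scanned before the search is
 * abandoned.  Once arc_warm is set the boost is dropped and scanning
 * reverts to the eviction end (tail) of the lists.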
4635 */ 4636 headroom = target_sz * l2arc_headroom; 4637 if (arc_warm == B_FALSE) 4638 ab = list_head(list); 4639 else 4640 ab = list_tail(list); 4641 if (ab == NULL) 4642 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter); 4643 4644 for (; ab; ab = ab_prev) { 4645 if (arc_warm == B_FALSE) 4646 ab_prev = list_next(list, ab); 4647 else 4648 ab_prev = list_prev(list, ab); 4649 ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size); 4650 4651 hash_lock = HDR_LOCK(ab); 4652 have_lock = MUTEX_HELD(hash_lock); 4653 if (!have_lock && !mutex_tryenter(hash_lock)) { 4654 ARCSTAT_BUMP(arcstat_l2_write_trylock_fail); 4655 /* 4656 * Skip this buffer rather than waiting. 4657 */ 4658 continue; 4659 } 4660 4661 passed_sz += ab->b_size; 4662 if (passed_sz > headroom) { 4663 /* 4664 * Searched too far. 4665 */ 4666 mutex_exit(hash_lock); 4667 ARCSTAT_BUMP(arcstat_l2_write_passed_headroom); 4668 break; 4669 } 4670 4671 if (!l2arc_write_eligible(guid, ab)) { 4672 mutex_exit(hash_lock); 4673 continue; 4674 } 4675 4676 if ((write_sz + ab->b_size) > target_sz) { 4677 full = B_TRUE; 4678 mutex_exit(hash_lock); 4679 ARCSTAT_BUMP(arcstat_l2_write_full); 4680 break; 4681 } 4682 4683 if (pio == NULL) { 4684 /* 4685 * Insert a dummy header on the buflist so 4686 * l2arc_write_done() can find where the 4687 * write buffers begin without searching. 4688 */ 4689 list_insert_head(dev->l2ad_buflist, head); 4690 4691 cb = kmem_alloc( 4692 sizeof (l2arc_write_callback_t), KM_SLEEP); 4693 cb->l2wcb_dev = dev; 4694 cb->l2wcb_head = head; 4695 pio = zio_root(spa, l2arc_write_done, cb, 4696 ZIO_FLAG_CANFAIL); 4697 ARCSTAT_BUMP(arcstat_l2_write_pios); 4698 } 4699 4700 /* 4701 * Create and add a new L2ARC header. 4702 */ 4703 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 4704 hdrl2->b_dev = dev; 4705 hdrl2->b_daddr = dev->l2ad_hand; 4706 4707 ab->b_flags |= ARC_L2_WRITING; 4708 ab->b_l2hdr = hdrl2; 4709 list_insert_head(dev->l2ad_buflist, ab); 4710 buf_data = ab->b_buf->b_data; 4711 buf_sz = ab->b_size; 4712 4713 /* 4714 * Compute and store the buffer cksum before 4715 * writing. On debug the cksum is verified first. 4716 */ 4717 arc_cksum_verify(ab->b_buf); 4718 arc_cksum_compute(ab->b_buf, B_TRUE); 4719 4720 mutex_exit(hash_lock); 4721 4722 wzio = zio_write_phys(pio, dev->l2ad_vdev, 4723 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 4724 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 4725 ZIO_FLAG_CANFAIL, B_FALSE); 4726 4727 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 4728 zio_t *, wzio); 4729 (void) zio_nowait(wzio); 4730 4731 /* 4732 * Keep the clock hand suitably device-aligned. 4733 */ 4734 buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); 4735 4736 write_sz += buf_sz; 4737 dev->l2ad_hand += buf_sz; 4738 } 4739 4740 mutex_exit(list_lock); 4741 4742 if (full == B_TRUE) 4743 break; 4744 } 4745 mutex_exit(&l2arc_buflist_mtx); 4746 4747 if (pio == NULL) {
over 16MB) 3848 */ 3849 if (zfs_arc_max > 64<<18 && zfs_arc_max < kmem_size()) 3850 arc_c_max = zfs_arc_max; 3851 if (zfs_arc_min > 64<<18 && zfs_arc_min <= arc_c_max) 3852 arc_c_min = zfs_arc_min; 3853#endif 3854 3855 arc_c = arc_c_max; 3856 arc_p = (arc_c >> 1); 3857 3858 /* limit meta-data to 1/4 of the arc capacity */ 3859 arc_meta_limit = arc_c_max / 4; 3860 3861 /* Allow the tunable to override if it is reasonable */ 3862 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 3863 arc_meta_limit = zfs_arc_meta_limit; 3864 3865 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 3866 arc_c_min = arc_meta_limit / 2; 3867 3868 if (zfs_arc_grow_retry > 0) 3869 arc_grow_retry = zfs_arc_grow_retry; 3870 3871 if (zfs_arc_shrink_shift > 0) 3872 arc_shrink_shift = zfs_arc_shrink_shift; 3873 3874 if (zfs_arc_p_min_shift > 0) 3875 arc_p_min_shift = zfs_arc_p_min_shift; 3876 3877 /* if kmem_flags are set, lets try to use less memory */ 3878 if (kmem_debugging()) 3879 arc_c = arc_c / 2; 3880 if (arc_c < arc_c_min) 3881 arc_c = arc_c_min; 3882 3883 zfs_arc_min = arc_c_min; 3884 zfs_arc_max = arc_c_max; 3885 3886 arc_anon = &ARC_anon; 3887 arc_mru = &ARC_mru; 3888 arc_mru_ghost = &ARC_mru_ghost; 3889 arc_mfu = &ARC_mfu; 3890 arc_mfu_ghost = &ARC_mfu_ghost; 3891 arc_l2c_only = &ARC_l2c_only; 3892 arc_size = 0; 3893 3894 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) { 3895 mutex_init(&arc_anon->arcs_locks[i].arcs_lock, 3896 NULL, MUTEX_DEFAULT, NULL); 3897 mutex_init(&arc_mru->arcs_locks[i].arcs_lock, 3898 NULL, MUTEX_DEFAULT, NULL); 3899 mutex_init(&arc_mru_ghost->arcs_locks[i].arcs_lock, 3900 NULL, MUTEX_DEFAULT, NULL); 3901 mutex_init(&arc_mfu->arcs_locks[i].arcs_lock, 3902 NULL, MUTEX_DEFAULT, NULL); 3903 mutex_init(&arc_mfu_ghost->arcs_locks[i].arcs_lock, 3904 NULL, MUTEX_DEFAULT, NULL); 3905 mutex_init(&arc_l2c_only->arcs_locks[i].arcs_lock, 3906 NULL, MUTEX_DEFAULT, NULL); 3907 3908 list_create(&arc_mru->arcs_lists[i], 3909 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3910 list_create(&arc_mru_ghost->arcs_lists[i], 3911 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3912 list_create(&arc_mfu->arcs_lists[i], 3913 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3914 list_create(&arc_mfu_ghost->arcs_lists[i], 3915 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3916 list_create(&arc_mfu_ghost->arcs_lists[i], 3917 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3918 list_create(&arc_l2c_only->arcs_lists[i], 3919 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3920 } 3921 3922 buf_init(); 3923 3924 arc_thread_exit = 0; 3925 arc_eviction_list = NULL; 3926 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 3927 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3928 3929 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 3930 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 3931 3932 if (arc_ksp != NULL) { 3933 arc_ksp->ks_data = &arc_stats; 3934 kstat_install(arc_ksp); 3935 } 3936 3937 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3938 TS_RUN, minclsyspri); 3939 3940#ifdef _KERNEL 3941 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL, 3942 EVENTHANDLER_PRI_FIRST); 3943#endif 3944 3945 arc_dead = FALSE; 3946 arc_warm = B_FALSE; 3947 3948 if (zfs_write_limit_max == 0) 3949 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift; 3950 else 3951 zfs_write_limit_shift = 0; 3952 mutex_init(&zfs_write_limit_lock, NULL, 
MUTEX_DEFAULT, NULL); 3953 3954#ifdef _KERNEL 3955 if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable)) 3956 prefetch_tunable_set = 1; 3957 3958#ifdef __i386__ 3959 if (prefetch_tunable_set == 0) { 3960 printf("ZFS NOTICE: Prefetch is disabled by default on i386 " 3961 "-- to enable,\n"); 3962 printf(" add \"vfs.zfs.prefetch_disable=0\" " 3963 "to /boot/loader.conf.\n"); 3964 zfs_prefetch_disable = 1; 3965 } 3966#else 3967 if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) && 3968 prefetch_tunable_set == 0) { 3969 printf("ZFS NOTICE: Prefetch is disabled by default if less " 3970 "than 4GB of RAM is present;\n" 3971 " to enable, add \"vfs.zfs.prefetch_disable=0\" " 3972 "to /boot/loader.conf.\n"); 3973 zfs_prefetch_disable = 1; 3974 } 3975#endif 3976 /* Warn about ZFS memory and address space requirements. */ 3977 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) { 3978 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; " 3979 "expect unstable behavior.\n"); 3980 } 3981 if (kmem_size() < 512 * (1 << 20)) { 3982 printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; " 3983 "expect unstable behavior.\n"); 3984 printf(" Consider tuning vm.kmem_size and " 3985 "vm.kmem_size_max\n"); 3986 printf(" in /boot/loader.conf.\n"); 3987 } 3988#endif 3989} 3990 3991void 3992arc_fini(void) 3993{ 3994 int i; 3995 3996 mutex_enter(&arc_reclaim_thr_lock); 3997 arc_thread_exit = 1; 3998 cv_signal(&arc_reclaim_thr_cv); 3999 while (arc_thread_exit != 0) 4000 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 4001 mutex_exit(&arc_reclaim_thr_lock); 4002 4003 arc_flush(NULL); 4004 4005 arc_dead = TRUE; 4006 4007 if (arc_ksp != NULL) { 4008 kstat_delete(arc_ksp); 4009 arc_ksp = NULL; 4010 } 4011 4012 mutex_destroy(&arc_eviction_mtx); 4013 mutex_destroy(&arc_reclaim_thr_lock); 4014 cv_destroy(&arc_reclaim_thr_cv); 4015 4016 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) { 4017 list_destroy(&arc_mru->arcs_lists[i]); 4018 list_destroy(&arc_mru_ghost->arcs_lists[i]); 4019 list_destroy(&arc_mfu->arcs_lists[i]); 4020 list_destroy(&arc_mfu_ghost->arcs_lists[i]); 4021 list_destroy(&arc_l2c_only->arcs_lists[i]); 4022 4023 mutex_destroy(&arc_anon->arcs_locks[i].arcs_lock); 4024 mutex_destroy(&arc_mru->arcs_locks[i].arcs_lock); 4025 mutex_destroy(&arc_mru_ghost->arcs_locks[i].arcs_lock); 4026 mutex_destroy(&arc_mfu->arcs_locks[i].arcs_lock); 4027 mutex_destroy(&arc_mfu_ghost->arcs_locks[i].arcs_lock); 4028 mutex_destroy(&arc_l2c_only->arcs_locks[i].arcs_lock); 4029 } 4030 4031 mutex_destroy(&zfs_write_limit_lock); 4032 4033 buf_fini(); 4034 4035 ASSERT(arc_loaned_bytes == 0); 4036 4037 mutex_destroy(&arc_lowmem_lock); 4038#ifdef _KERNEL 4039 if (arc_event_lowmem != NULL) 4040 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem); 4041#endif 4042} 4043 4044/* 4045 * Level 2 ARC 4046 * 4047 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 4048 * It uses dedicated storage devices to hold cached data, which are populated 4049 * using large infrequent writes. The main role of this cache is to boost 4050 * the performance of random read workloads. The intended L2ARC devices 4051 * include short-stroked disks, solid state disks, and other media with 4052 * substantially faster read latency than disk. 
4053 * 4054 * +-----------------------+ 4055 * | ARC | 4056 * +-----------------------+ 4057 * | ^ ^ 4058 * | | | 4059 * l2arc_feed_thread() arc_read() 4060 * | | | 4061 * | l2arc read | 4062 * V | | 4063 * +---------------+ | 4064 * | L2ARC | | 4065 * +---------------+ | 4066 * | ^ | 4067 * l2arc_write() | | 4068 * | | | 4069 * V | | 4070 * +-------+ +-------+ 4071 * | vdev | | vdev | 4072 * | cache | | cache | 4073 * +-------+ +-------+ 4074 * +=========+ .-----. 4075 * : L2ARC : |-_____-| 4076 * : devices : | Disks | 4077 * +=========+ `-_____-' 4078 * 4079 * Read requests are satisfied from the following sources, in order: 4080 * 4081 * 1) ARC 4082 * 2) vdev cache of L2ARC devices 4083 * 3) L2ARC devices 4084 * 4) vdev cache of disks 4085 * 5) disks 4086 * 4087 * Some L2ARC device types exhibit extremely slow write performance. 4088 * To accommodate for this there are some significant differences between 4089 * the L2ARC and traditional cache design: 4090 * 4091 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 4092 * the ARC behave as usual, freeing buffers and placing headers on ghost 4093 * lists. The ARC does not send buffers to the L2ARC during eviction as 4094 * this would add inflated write latencies for all ARC memory pressure. 4095 * 4096 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 4097 * It does this by periodically scanning buffers from the eviction-end of 4098 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 4099 * not already there. It scans until a headroom of buffers is satisfied, 4100 * which itself is a buffer for ARC eviction. The thread that does this is 4101 * l2arc_feed_thread(), illustrated below; example sizes are included to 4102 * provide a better sense of ratio than this diagram: 4103 * 4104 * head --> tail 4105 * +---------------------+----------+ 4106 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 4107 * +---------------------+----------+ | o L2ARC eligible 4108 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 4109 * +---------------------+----------+ | 4110 * 15.9 Gbytes ^ 32 Mbytes | 4111 * headroom | 4112 * l2arc_feed_thread() 4113 * | 4114 * l2arc write hand <--[oooo]--' 4115 * | 8 Mbyte 4116 * | write max 4117 * V 4118 * +==============================+ 4119 * L2ARC dev |####|#|###|###| |####| ... | 4120 * +==============================+ 4121 * 32 Gbytes 4122 * 4123 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 4124 * evicted, then the L2ARC has cached a buffer much sooner than it probably 4125 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 4126 * safe to say that this is an uncommon case, since buffers at the end of 4127 * the ARC lists have moved there due to inactivity. 4128 * 4129 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 4130 * then the L2ARC simply misses copying some buffers. This serves as a 4131 * pressure valve to prevent heavy read workloads from both stalling the ARC 4132 * with waits and clogging the L2ARC with writes. This also helps prevent 4133 * the potential for the L2ARC to churn if it attempts to cache content too 4134 * quickly, such as during backups of the entire pool. 4135 * 4136 * 5. After system boot and before the ARC has filled main memory, there are 4137 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 4138 * lists can remain mostly static. 
Instead of searching from tail of these 4139 * lists as pictured, the l2arc_feed_thread() will search from the list heads 4140 * for eligible buffers, greatly increasing its chance of finding them. 4141 * 4142 * The L2ARC device write speed is also boosted during this time so that 4143 * the L2ARC warms up faster. Since there have been no ARC evictions yet, 4144 * there are no L2ARC reads, and no fear of degrading read performance 4145 * through increased writes. 4146 * 4147 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 4148 * the vdev queue can aggregate them into larger and fewer writes. Each 4149 * device is written to in a rotor fashion, sweeping writes through 4150 * available space then repeating. 4151 * 4152 * 7. The L2ARC does not store dirty content. It never needs to flush 4153 * write buffers back to disk based storage. 4154 * 4155 * 8. If an ARC buffer is written (and dirtied) which also exists in the 4156 * L2ARC, the now stale L2ARC buffer is immediately dropped. 4157 * 4158 * The performance of the L2ARC can be tweaked by a number of tunables, which 4159 * may be necessary for different workloads: 4160 * 4161 * l2arc_write_max max write bytes per interval 4162 * l2arc_write_boost extra write bytes during device warmup 4163 * l2arc_noprefetch skip caching prefetched buffers 4164 * l2arc_headroom number of max device writes to precache 4165 * l2arc_feed_secs seconds between L2ARC writing 4166 * 4167 * Tunables may be removed or added as future performance improvements are 4168 * integrated, and also may become zpool properties. 4169 * 4170 * There are three key functions that control how the L2ARC warms up: 4171 * 4172 * l2arc_write_eligible() check if a buffer is eligible to cache 4173 * l2arc_write_size() calculate how much to write 4174 * l2arc_write_interval() calculate sleep delay between writes 4175 * 4176 * These three functions determine what to write, how much, and how quickly 4177 * to send writes. 4178 */ 4179 4180static boolean_t 4181l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab) 4182{ 4183 /* 4184 * A buffer is *not* eligible for the L2ARC if it: 4185 * 1. belongs to a different spa. 4186 * 2. is already cached on the L2ARC. 4187 * 3. has an I/O in progress (it may be an incomplete read). 4188 * 4. is flagged not eligible (zfs property). 4189 */ 4190 if (ab->b_spa != spa_guid) { 4191 ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch); 4192 return (B_FALSE); 4193 } 4194 if (ab->b_l2hdr != NULL) { 4195 ARCSTAT_BUMP(arcstat_l2_write_in_l2); 4196 return (B_FALSE); 4197 } 4198 if (HDR_IO_IN_PROGRESS(ab)) { 4199 ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress); 4200 return (B_FALSE); 4201 } 4202 if (!HDR_L2CACHE(ab)) { 4203 ARCSTAT_BUMP(arcstat_l2_write_not_cacheable); 4204 return (B_FALSE); 4205 } 4206 4207 return (B_TRUE); 4208} 4209 4210static uint64_t 4211l2arc_write_size(l2arc_dev_t *dev) 4212{ 4213 uint64_t size; 4214 4215 size = dev->l2ad_write; 4216 4217 if (arc_warm == B_FALSE) 4218 size += dev->l2ad_boost; 4219 4220 return (size); 4221 4222} 4223 4224static clock_t 4225l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) 4226{ 4227 clock_t interval, next, now; 4228 4229 /* 4230 * If the ARC lists are busy, increase our write rate; if the 4231 * lists are stale, idle back. This is achieved by checking 4232 * how much we previously wrote - if it was more than half of 4233 * what we wanted, schedule the next write much sooner. 
4234 */ 4235 if (l2arc_feed_again && wrote > (wanted / 2)) 4236 interval = (hz * l2arc_feed_min_ms) / 1000; 4237 else 4238 interval = hz * l2arc_feed_secs; 4239 4240 now = ddi_get_lbolt(); 4241 next = MAX(now, MIN(now + interval, began + interval)); 4242 4243 return (next); 4244} 4245 4246static void 4247l2arc_hdr_stat_add(void) 4248{ 4249 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 4250 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 4251} 4252 4253static void 4254l2arc_hdr_stat_remove(void) 4255{ 4256 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 4257 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 4258} 4259 4260/* 4261 * Cycle through L2ARC devices. This is how L2ARC load balances. 4262 * If a device is returned, this also returns holding the spa config lock. 4263 */ 4264static l2arc_dev_t * 4265l2arc_dev_get_next(void) 4266{ 4267 l2arc_dev_t *first, *next = NULL; 4268 4269 /* 4270 * Lock out the removal of spas (spa_namespace_lock), then removal 4271 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 4272 * both locks will be dropped and a spa config lock held instead. 4273 */ 4274 mutex_enter(&spa_namespace_lock); 4275 mutex_enter(&l2arc_dev_mtx); 4276 4277 /* if there are no vdevs, there is nothing to do */ 4278 if (l2arc_ndev == 0) 4279 goto out; 4280 4281 first = NULL; 4282 next = l2arc_dev_last; 4283 do { 4284 /* loop around the list looking for a non-faulted vdev */ 4285 if (next == NULL) { 4286 next = list_head(l2arc_dev_list); 4287 } else { 4288 next = list_next(l2arc_dev_list, next); 4289 if (next == NULL) 4290 next = list_head(l2arc_dev_list); 4291 } 4292 4293 /* if we have come back to the start, bail out */ 4294 if (first == NULL) 4295 first = next; 4296 else if (next == first) 4297 break; 4298 4299 } while (vdev_is_dead(next->l2ad_vdev)); 4300 4301 /* if we were unable to find any usable vdevs, return NULL */ 4302 if (vdev_is_dead(next->l2ad_vdev)) 4303 next = NULL; 4304 4305 l2arc_dev_last = next; 4306 4307out: 4308 mutex_exit(&l2arc_dev_mtx); 4309 4310 /* 4311 * Grab the config lock to prevent the 'next' device from being 4312 * removed while we are writing to it. 4313 */ 4314 if (next != NULL) 4315 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 4316 mutex_exit(&spa_namespace_lock); 4317 4318 return (next); 4319} 4320 4321/* 4322 * Free buffers that were tagged for destruction. 4323 */ 4324static void 4325l2arc_do_free_on_write() 4326{ 4327 list_t *buflist; 4328 l2arc_data_free_t *df, *df_prev; 4329 4330 mutex_enter(&l2arc_free_on_write_mtx); 4331 buflist = l2arc_free_on_write; 4332 4333 for (df = list_tail(buflist); df; df = df_prev) { 4334 df_prev = list_prev(buflist, df); 4335 ASSERT(df->l2df_data != NULL); 4336 ASSERT(df->l2df_func != NULL); 4337 df->l2df_func(df->l2df_data, df->l2df_size); 4338 list_remove(buflist, df); 4339 kmem_free(df, sizeof (l2arc_data_free_t)); 4340 } 4341 4342 mutex_exit(&l2arc_free_on_write_mtx); 4343} 4344 4345/* 4346 * A write to a cache device has completed. Update all headers to allow 4347 * reads from these buffers to begin. 
4348 */ 4349static void 4350l2arc_write_done(zio_t *zio) 4351{ 4352 l2arc_write_callback_t *cb; 4353 l2arc_dev_t *dev; 4354 list_t *buflist; 4355 arc_buf_hdr_t *head, *ab, *ab_prev; 4356 l2arc_buf_hdr_t *abl2; 4357 kmutex_t *hash_lock; 4358 4359 cb = zio->io_private; 4360 ASSERT(cb != NULL); 4361 dev = cb->l2wcb_dev; 4362 ASSERT(dev != NULL); 4363 head = cb->l2wcb_head; 4364 ASSERT(head != NULL); 4365 buflist = dev->l2ad_buflist; 4366 ASSERT(buflist != NULL); 4367 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 4368 l2arc_write_callback_t *, cb); 4369 4370 if (zio->io_error != 0) 4371 ARCSTAT_BUMP(arcstat_l2_writes_error); 4372 4373 mutex_enter(&l2arc_buflist_mtx); 4374 4375 /* 4376 * All writes completed, or an error was hit. 4377 */ 4378 for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 4379 ab_prev = list_prev(buflist, ab); 4380 4381 hash_lock = HDR_LOCK(ab); 4382 if (!mutex_tryenter(hash_lock)) { 4383 /* 4384 * This buffer misses out. It may be in a stage 4385 * of eviction. Its ARC_L2_WRITING flag will be 4386 * left set, denying reads to this buffer. 4387 */ 4388 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 4389 continue; 4390 } 4391 4392 if (zio->io_error != 0) { 4393 /* 4394 * Error - drop L2ARC entry. 4395 */ 4396 list_remove(buflist, ab); 4397 abl2 = ab->b_l2hdr; 4398 ab->b_l2hdr = NULL; 4399 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4400 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4401 } 4402 4403 /* 4404 * Allow ARC to begin reads to this L2ARC entry. 4405 */ 4406 ab->b_flags &= ~ARC_L2_WRITING; 4407 4408 mutex_exit(hash_lock); 4409 } 4410 4411 atomic_inc_64(&l2arc_writes_done); 4412 list_remove(buflist, head); 4413 kmem_cache_free(hdr_cache, head); 4414 mutex_exit(&l2arc_buflist_mtx); 4415 4416 l2arc_do_free_on_write(); 4417 4418 kmem_free(cb, sizeof (l2arc_write_callback_t)); 4419} 4420 4421/* 4422 * A read to a cache device completed. Validate buffer contents before 4423 * handing over to the regular ARC routines. 4424 */ 4425static void 4426l2arc_read_done(zio_t *zio) 4427{ 4428 l2arc_read_callback_t *cb; 4429 arc_buf_hdr_t *hdr; 4430 arc_buf_t *buf; 4431 kmutex_t *hash_lock; 4432 int equal; 4433 4434 ASSERT(zio->io_vd != NULL); 4435 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 4436 4437 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 4438 4439 cb = zio->io_private; 4440 ASSERT(cb != NULL); 4441 buf = cb->l2rcb_buf; 4442 ASSERT(buf != NULL); 4443 4444 hash_lock = HDR_LOCK(buf->b_hdr); 4445 mutex_enter(hash_lock); 4446 hdr = buf->b_hdr; 4447 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 4448 4449 /* 4450 * Check this survived the L2ARC journey. 4451 */ 4452 equal = arc_cksum_equal(buf); 4453 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 4454 mutex_exit(hash_lock); 4455 zio->io_private = buf; 4456 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 4457 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 4458 arc_read_done(zio); 4459 } else { 4460 mutex_exit(hash_lock); 4461 /* 4462 * Buffer didn't survive caching. Increment stats and 4463 * reissue to the original storage device. 4464 */ 4465 if (zio->io_error != 0) { 4466 ARCSTAT_BUMP(arcstat_l2_io_error); 4467 } else { 4468 zio->io_error = EIO; 4469 } 4470 if (!equal) 4471 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 4472 4473 /* 4474 * If there's no waiter, issue an async i/o to the primary 4475 * storage now. If there *is* a waiter, the caller must 4476 * issue the i/o in a context where it's OK to block. 
4477 */ 4478 if (zio->io_waiter == NULL) { 4479 zio_t *pio = zio_unique_parent(zio); 4480 4481 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 4482 4483 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp, 4484 buf->b_data, zio->io_size, arc_read_done, buf, 4485 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 4486 } 4487 } 4488 4489 kmem_free(cb, sizeof (l2arc_read_callback_t)); 4490} 4491 4492/* 4493 * This is the list priority from which the L2ARC will search for pages to 4494 * cache. This is used within loops (0..3) to cycle through lists in the 4495 * desired order. This order can have a significant effect on cache 4496 * performance. 4497 * 4498 * Currently the metadata lists are hit first, MFU then MRU, followed by 4499 * the data lists. This function returns a locked list, and also returns 4500 * the lock pointer. 4501 */ 4502static list_t * 4503l2arc_list_locked(int list_num, kmutex_t **lock) 4504{ 4505 list_t *list; 4506 int idx; 4507 4508 ASSERT(list_num >= 0 && list_num < 2 * ARC_BUFC_NUMLISTS); 4509 4510 if (list_num < ARC_BUFC_NUMMETADATALISTS) { 4511 idx = list_num; 4512 list = &arc_mfu->arcs_lists[idx]; 4513 *lock = ARCS_LOCK(arc_mfu, idx); 4514 } else if (list_num < ARC_BUFC_NUMMETADATALISTS * 2) { 4515 idx = list_num - ARC_BUFC_NUMMETADATALISTS; 4516 list = &arc_mru->arcs_lists[idx]; 4517 *lock = ARCS_LOCK(arc_mru, idx); 4518 } else if (list_num < (ARC_BUFC_NUMMETADATALISTS * 2 + 4519 ARC_BUFC_NUMDATALISTS)) { 4520 idx = list_num - ARC_BUFC_NUMMETADATALISTS; 4521 list = &arc_mfu->arcs_lists[idx]; 4522 *lock = ARCS_LOCK(arc_mfu, idx); 4523 } else { 4524 idx = list_num - ARC_BUFC_NUMLISTS; 4525 list = &arc_mru->arcs_lists[idx]; 4526 *lock = ARCS_LOCK(arc_mru, idx); 4527 } 4528 4529 ASSERT(!(MUTEX_HELD(*lock))); 4530 mutex_enter(*lock); 4531 return (list); 4532} 4533 4534/* 4535 * Evict buffers from the device write hand to the distance specified in 4536 * bytes. This distance may span populated buffers, it may span nothing. 4537 * This is clearing a region on the L2ARC device ready for writing. 4538 * If the 'all' boolean is set, every buffer is evicted. 4539 */ 4540static void 4541l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 4542{ 4543 list_t *buflist; 4544 l2arc_buf_hdr_t *abl2; 4545 arc_buf_hdr_t *ab, *ab_prev; 4546 kmutex_t *hash_lock; 4547 uint64_t taddr; 4548 4549 buflist = dev->l2ad_buflist; 4550 4551 if (buflist == NULL) 4552 return; 4553 4554 if (!all && dev->l2ad_first) { 4555 /* 4556 * This is the first sweep through the device. There is 4557 * nothing to evict. 4558 */ 4559 return; 4560 } 4561 4562 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 4563 /* 4564 * When nearing the end of the device, evict to the end 4565 * before the device write hand jumps to the start. 4566 */ 4567 taddr = dev->l2ad_end; 4568 } else { 4569 taddr = dev->l2ad_hand + distance; 4570 } 4571 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 4572 uint64_t, taddr, boolean_t, all); 4573 4574top: 4575 mutex_enter(&l2arc_buflist_mtx); 4576 for (ab = list_tail(buflist); ab; ab = ab_prev) { 4577 ab_prev = list_prev(buflist, ab); 4578 4579 hash_lock = HDR_LOCK(ab); 4580 if (!mutex_tryenter(hash_lock)) { 4581 /* 4582 * Missed the hash lock. Retry. 4583 */ 4584 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 4585 mutex_exit(&l2arc_buflist_mtx); 4586 mutex_enter(hash_lock); 4587 mutex_exit(hash_lock); 4588 goto top; 4589 } 4590 4591 if (HDR_L2_WRITE_HEAD(ab)) { 4592 /* 4593 * We hit a write head node. Leave it for 4594 * l2arc_write_done(). 
4595 */ 4596 list_remove(buflist, ab); 4597 mutex_exit(hash_lock); 4598 continue; 4599 } 4600 4601 if (!all && ab->b_l2hdr != NULL && 4602 (ab->b_l2hdr->b_daddr > taddr || 4603 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 4604 /* 4605 * We've evicted to the target address, 4606 * or the end of the device. 4607 */ 4608 mutex_exit(hash_lock); 4609 break; 4610 } 4611 4612 if (HDR_FREE_IN_PROGRESS(ab)) { 4613 /* 4614 * Already on the path to destruction. 4615 */ 4616 mutex_exit(hash_lock); 4617 continue; 4618 } 4619 4620 if (ab->b_state == arc_l2c_only) { 4621 ASSERT(!HDR_L2_READING(ab)); 4622 /* 4623 * This doesn't exist in the ARC. Destroy. 4624 * arc_hdr_destroy() will call list_remove() 4625 * and decrement arcstat_l2_size. 4626 */ 4627 arc_change_state(arc_anon, ab, hash_lock); 4628 arc_hdr_destroy(ab); 4629 } else { 4630 /* 4631 * Invalidate issued or about to be issued 4632 * reads, since we may be about to write 4633 * over this location. 4634 */ 4635 if (HDR_L2_READING(ab)) { 4636 ARCSTAT_BUMP(arcstat_l2_evict_reading); 4637 ab->b_flags |= ARC_L2_EVICTED; 4638 } 4639 4640 /* 4641 * Tell ARC this no longer exists in L2ARC. 4642 */ 4643 if (ab->b_l2hdr != NULL) { 4644 abl2 = ab->b_l2hdr; 4645 ab->b_l2hdr = NULL; 4646 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4647 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4648 } 4649 list_remove(buflist, ab); 4650 4651 /* 4652 * This may have been leftover after a 4653 * failed write. 4654 */ 4655 ab->b_flags &= ~ARC_L2_WRITING; 4656 } 4657 mutex_exit(hash_lock); 4658 } 4659 mutex_exit(&l2arc_buflist_mtx); 4660 4661 vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0); 4662 dev->l2ad_evict = taddr; 4663} 4664 4665/* 4666 * Find and write ARC buffers to the L2ARC device. 4667 * 4668 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 4669 * for reading until they have completed writing. 4670 */ 4671static uint64_t 4672l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 4673{ 4674 arc_buf_hdr_t *ab, *ab_prev, *head; 4675 l2arc_buf_hdr_t *hdrl2; 4676 list_t *list; 4677 uint64_t passed_sz, write_sz, buf_sz, headroom; 4678 void *buf_data; 4679 kmutex_t *hash_lock, *list_lock; 4680 boolean_t have_lock, full; 4681 l2arc_write_callback_t *cb; 4682 zio_t *pio, *wzio; 4683 uint64_t guid = spa_load_guid(spa); 4684 int try; 4685 4686 ASSERT(dev->l2ad_vdev != NULL); 4687 4688 pio = NULL; 4689 write_sz = 0; 4690 full = B_FALSE; 4691 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 4692 head->b_flags |= ARC_L2_WRITE_HEAD; 4693 4694 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter); 4695 /* 4696 * Copy buffers for L2ARC writing. 4697 */ 4698 mutex_enter(&l2arc_buflist_mtx); 4699 for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) { 4700 list = l2arc_list_locked(try, &list_lock); 4701 passed_sz = 0; 4702 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter); 4703 4704 /* 4705 * L2ARC fast warmup. 4706 * 4707 * Until the ARC is warm and starts to evict, read from the 4708 * head of the ARC lists rather than the tail. 
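		 *
		 * The headroom computed below caps how many bytes of each
		 * list are scanned in a single pass before moving on to
		 * the next list.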
4709 */ 4710 headroom = target_sz * l2arc_headroom; 4711 if (arc_warm == B_FALSE) 4712 ab = list_head(list); 4713 else 4714 ab = list_tail(list); 4715 if (ab == NULL) 4716 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter); 4717 4718 for (; ab; ab = ab_prev) { 4719 if (arc_warm == B_FALSE) 4720 ab_prev = list_next(list, ab); 4721 else 4722 ab_prev = list_prev(list, ab); 4723 ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size); 4724 4725 hash_lock = HDR_LOCK(ab); 4726 have_lock = MUTEX_HELD(hash_lock); 4727 if (!have_lock && !mutex_tryenter(hash_lock)) { 4728 ARCSTAT_BUMP(arcstat_l2_write_trylock_fail); 4729 /* 4730 * Skip this buffer rather than waiting. 4731 */ 4732 continue; 4733 } 4734 4735 passed_sz += ab->b_size; 4736 if (passed_sz > headroom) { 4737 /* 4738 * Searched too far. 4739 */ 4740 mutex_exit(hash_lock); 4741 ARCSTAT_BUMP(arcstat_l2_write_passed_headroom); 4742 break; 4743 } 4744 4745 if (!l2arc_write_eligible(guid, ab)) { 4746 mutex_exit(hash_lock); 4747 continue; 4748 } 4749 4750 if ((write_sz + ab->b_size) > target_sz) { 4751 full = B_TRUE; 4752 mutex_exit(hash_lock); 4753 ARCSTAT_BUMP(arcstat_l2_write_full); 4754 break; 4755 } 4756 4757 if (pio == NULL) { 4758 /* 4759 * Insert a dummy header on the buflist so 4760 * l2arc_write_done() can find where the 4761 * write buffers begin without searching. 4762 */ 4763 list_insert_head(dev->l2ad_buflist, head); 4764 4765 cb = kmem_alloc( 4766 sizeof (l2arc_write_callback_t), KM_SLEEP); 4767 cb->l2wcb_dev = dev; 4768 cb->l2wcb_head = head; 4769 pio = zio_root(spa, l2arc_write_done, cb, 4770 ZIO_FLAG_CANFAIL); 4771 ARCSTAT_BUMP(arcstat_l2_write_pios); 4772 } 4773 4774 /* 4775 * Create and add a new L2ARC header. 4776 */ 4777 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 4778 hdrl2->b_dev = dev; 4779 hdrl2->b_daddr = dev->l2ad_hand; 4780 4781 ab->b_flags |= ARC_L2_WRITING; 4782 ab->b_l2hdr = hdrl2; 4783 list_insert_head(dev->l2ad_buflist, ab); 4784 buf_data = ab->b_buf->b_data; 4785 buf_sz = ab->b_size; 4786 4787 /* 4788 * Compute and store the buffer cksum before 4789 * writing. On debug the cksum is verified first. 4790 */ 4791 arc_cksum_verify(ab->b_buf); 4792 arc_cksum_compute(ab->b_buf, B_TRUE); 4793 4794 mutex_exit(hash_lock); 4795 4796 wzio = zio_write_phys(pio, dev->l2ad_vdev, 4797 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 4798 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 4799 ZIO_FLAG_CANFAIL, B_FALSE); 4800 4801 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 4802 zio_t *, wzio); 4803 (void) zio_nowait(wzio); 4804 4805 /* 4806 * Keep the clock hand suitably device-aligned. 4807 */ 4808 buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); 4809 4810 write_sz += buf_sz; 4811 dev->l2ad_hand += buf_sz; 4812 } 4813 4814 mutex_exit(list_lock); 4815 4816 if (full == B_TRUE) 4817 break; 4818 } 4819 mutex_exit(&l2arc_buflist_mtx); 4820 4821 if (pio == NULL) {
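		/*
		 * pio is created lazily when the first eligible buffer is
		 * found, so a NULL pio here means nothing was written and
		 * the dummy write head was never inserted on the buflist;
		 * it is simply freed.
		 */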
4822		ASSERT0(write_sz);
| 4823 kmem_cache_free(hdr_cache, head); 4824 return (0); 4825 } 4826 4827 ASSERT3U(write_sz, <=, target_sz); 4828 ARCSTAT_BUMP(arcstat_l2_writes_sent); 4829 ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz); 4830 ARCSTAT_INCR(arcstat_l2_size, write_sz); 4831 vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0); 4832 4833 /* 4834 * Bump device hand to the device start if it is approaching the end. 4835 * l2arc_evict() will already have evicted ahead for this case. 4836 */ 4837 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { 4838 vdev_space_update(dev->l2ad_vdev, 4839 dev->l2ad_end - dev->l2ad_hand, 0, 0); 4840 dev->l2ad_hand = dev->l2ad_start; 4841 dev->l2ad_evict = dev->l2ad_start; 4842 dev->l2ad_first = B_FALSE; 4843 } 4844 4845 dev->l2ad_writing = B_TRUE; 4846 (void) zio_wait(pio); 4847 dev->l2ad_writing = B_FALSE; 4848 4849 return (write_sz); 4850} 4851 4852/* 4853 * This thread feeds the L2ARC at regular intervals. This is the beating 4854 * heart of the L2ARC. 4855 */ 4856static void 4857l2arc_feed_thread(void *dummy __unused) 4858{ 4859 callb_cpr_t cpr; 4860 l2arc_dev_t *dev; 4861 spa_t *spa; 4862 uint64_t size, wrote; 4863 clock_t begin, next = ddi_get_lbolt(); 4864 4865 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 4866 4867 mutex_enter(&l2arc_feed_thr_lock); 4868 4869 while (l2arc_thread_exit == 0) { 4870 CALLB_CPR_SAFE_BEGIN(&cpr); 4871 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 4872 next - ddi_get_lbolt()); 4873 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 4874 next = ddi_get_lbolt() + hz; 4875 4876 /* 4877 * Quick check for L2ARC devices. 4878 */ 4879 mutex_enter(&l2arc_dev_mtx); 4880 if (l2arc_ndev == 0) { 4881 mutex_exit(&l2arc_dev_mtx); 4882 continue; 4883 } 4884 mutex_exit(&l2arc_dev_mtx); 4885 begin = ddi_get_lbolt(); 4886 4887 /* 4888 * This selects the next l2arc device to write to, and in 4889 * doing so the next spa to feed from: dev->l2ad_spa. This 4890 * will return NULL if there are now no l2arc devices or if 4891 * they are all faulted. 4892 * 4893 * If a device is returned, its spa's config lock is also 4894 * held to prevent device removal. l2arc_dev_get_next() 4895 * will grab and release l2arc_dev_mtx. 4896 */ 4897 if ((dev = l2arc_dev_get_next()) == NULL) 4898 continue; 4899 4900 spa = dev->l2ad_spa; 4901 ASSERT(spa != NULL); 4902 4903 /* 4904 * If the pool is read-only then force the feed thread to 4905 * sleep a little longer. 4906 */ 4907 if (!spa_writeable(spa)) { 4908 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz; 4909 spa_config_exit(spa, SCL_L2ARC, dev); 4910 continue; 4911 } 4912 4913 /* 4914 * Avoid contributing to memory pressure. 4915 */ 4916 if (arc_reclaim_needed()) { 4917 ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 4918 spa_config_exit(spa, SCL_L2ARC, dev); 4919 continue; 4920 } 4921 4922 ARCSTAT_BUMP(arcstat_l2_feeds); 4923 4924 size = l2arc_write_size(dev); 4925 4926 /* 4927 * Evict L2ARC buffers that will be overwritten. 4928 */ 4929 l2arc_evict(dev, size, B_FALSE); 4930 4931 /* 4932 * Write ARC buffers. 4933 */ 4934 wrote = l2arc_write_buffers(spa, dev, size); 4935 4936 /* 4937 * Calculate interval between writes. 
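		 * (The next interval is derived from the elapsed time and
		 * how much was actually written relative to the requested
		 * size; see l2arc_write_interval().)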
4938 */ 4939 next = l2arc_write_interval(begin, size, wrote); 4940 spa_config_exit(spa, SCL_L2ARC, dev); 4941 } 4942 4943 l2arc_thread_exit = 0; 4944 cv_broadcast(&l2arc_feed_thr_cv); 4945 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 4946 thread_exit(); 4947} 4948 4949boolean_t 4950l2arc_vdev_present(vdev_t *vd) 4951{ 4952 l2arc_dev_t *dev; 4953 4954 mutex_enter(&l2arc_dev_mtx); 4955 for (dev = list_head(l2arc_dev_list); dev != NULL; 4956 dev = list_next(l2arc_dev_list, dev)) { 4957 if (dev->l2ad_vdev == vd) 4958 break; 4959 } 4960 mutex_exit(&l2arc_dev_mtx); 4961 4962 return (dev != NULL); 4963} 4964 4965/* 4966 * Add a vdev for use by the L2ARC. By this point the spa has already 4967 * validated the vdev and opened it. 4968 */ 4969void 4970l2arc_add_vdev(spa_t *spa, vdev_t *vd) 4971{ 4972 l2arc_dev_t *adddev; 4973 4974 ASSERT(!l2arc_vdev_present(vd)); 4975 4976 /* 4977 * Create a new l2arc device entry. 4978 */ 4979 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 4980 adddev->l2ad_spa = spa; 4981 adddev->l2ad_vdev = vd; 4982 adddev->l2ad_write = l2arc_write_max; 4983 adddev->l2ad_boost = l2arc_write_boost; 4984 adddev->l2ad_start = VDEV_LABEL_START_SIZE; 4985 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); 4986 adddev->l2ad_hand = adddev->l2ad_start; 4987 adddev->l2ad_evict = adddev->l2ad_start; 4988 adddev->l2ad_first = B_TRUE; 4989 adddev->l2ad_writing = B_FALSE; 4990 ASSERT3U(adddev->l2ad_write, >, 0); 4991 4992 /* 4993 * This is a list of all ARC buffers that are still valid on the 4994 * device. 4995 */ 4996 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 4997 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 4998 offsetof(arc_buf_hdr_t, b_l2node)); 4999 5000 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); 5001 5002 /* 5003 * Add device to global list 5004 */ 5005 mutex_enter(&l2arc_dev_mtx); 5006 list_insert_head(l2arc_dev_list, adddev); 5007 atomic_inc_64(&l2arc_ndev); 5008 mutex_exit(&l2arc_dev_mtx); 5009} 5010 5011/* 5012 * Remove a vdev from the L2ARC. 5013 */ 5014void 5015l2arc_remove_vdev(vdev_t *vd) 5016{ 5017 l2arc_dev_t *dev, *nextdev, *remdev = NULL; 5018 5019 /* 5020 * Find the device by vdev 5021 */ 5022 mutex_enter(&l2arc_dev_mtx); 5023 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 5024 nextdev = list_next(l2arc_dev_list, dev); 5025 if (vd == dev->l2ad_vdev) { 5026 remdev = dev; 5027 break; 5028 } 5029 } 5030 ASSERT(remdev != NULL); 5031 5032 /* 5033 * Remove device from global list 5034 */ 5035 list_remove(l2arc_dev_list, remdev); 5036 l2arc_dev_last = NULL; /* may have been invalidated */ 5037 atomic_dec_64(&l2arc_ndev); 5038 mutex_exit(&l2arc_dev_mtx); 5039 5040 /* 5041 * Clear all buflists and ARC references. L2ARC device flush. 
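	 * Passing B_TRUE for 'all' makes l2arc_evict() drop every buffer
	 * on the device, regardless of its position relative to the
	 * write hand.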
5042 */ 5043 l2arc_evict(remdev, 0, B_TRUE); 5044 list_destroy(remdev->l2ad_buflist); 5045 kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 5046 kmem_free(remdev, sizeof (l2arc_dev_t)); 5047} 5048 5049void 5050l2arc_init(void) 5051{ 5052 l2arc_thread_exit = 0; 5053 l2arc_ndev = 0; 5054 l2arc_writes_sent = 0; 5055 l2arc_writes_done = 0; 5056 5057 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 5058 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 5059 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 5060 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 5061 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 5062 5063 l2arc_dev_list = &L2ARC_dev_list; 5064 l2arc_free_on_write = &L2ARC_free_on_write; 5065 list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 5066 offsetof(l2arc_dev_t, l2ad_node)); 5067 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 5068 offsetof(l2arc_data_free_t, l2df_list_node)); 5069} 5070 5071void 5072l2arc_fini(void) 5073{ 5074 /* 5075 * This is called from dmu_fini(), which is called from spa_fini(); 5076 * Because of this, we can assume that all l2arc devices have 5077 * already been removed when the pools themselves were removed. 5078 */ 5079 5080 l2arc_do_free_on_write(); 5081 5082 mutex_destroy(&l2arc_feed_thr_lock); 5083 cv_destroy(&l2arc_feed_thr_cv); 5084 mutex_destroy(&l2arc_dev_mtx); 5085 mutex_destroy(&l2arc_buflist_mtx); 5086 mutex_destroy(&l2arc_free_on_write_mtx); 5087 5088 list_destroy(l2arc_dev_list); 5089 list_destroy(l2arc_free_on_write); 5090} 5091 5092void 5093l2arc_start(void) 5094{ 5095 if (!(spa_mode_global & FWRITE)) 5096 return; 5097 5098 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 5099 TS_RUN, minclsyspri); 5100} 5101 5102void 5103l2arc_stop(void) 5104{ 5105 if (!(spa_mode_global & FWRITE)) 5106 return; 5107 5108 mutex_enter(&l2arc_feed_thr_lock); 5109 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 5110 l2arc_thread_exit = 1; 5111 while (l2arc_thread_exit != 0) 5112 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 5113 mutex_exit(&l2arc_feed_thr_lock); 5114}