arc.c revision 209101
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache from growing unbounded at these times
 * we implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.
 * We therefore provide two types of locks: 1) the hash table lock array,
 * and 2) the arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#ifdef _KERNEL
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/sdt.h>

#include <vm/vm_pageout.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/* shift of arc_c for calculating both min and max arc_p */
static int		arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int		arc_shrink_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;
extern int zfs_prefetch_disable;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_mdcomp_disable = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;

TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
TUNABLE_INT("vfs.zfs.mdcomp_disable", &zfs_mdcomp_disable);
SYSCTL_DECL(_vfs_zfs);
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
    "Maximum ARC size");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
    "Minimum ARC size");
SYSCTL_INT(_vfs_zfs, OID_AUTO, mdcomp_disable, CTLFLAG_RDTUN,
    &zfs_mdcomp_disable, 0, "Disable metadata compression");

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, it is
 * linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
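/*
 * Roughly, the lifecycle of a demand-read buffer is: it is created
 * anonymous while being filled, acquires a DVA, and is inserted into
 * arc_mru; a subsequent access promotes it to arc_mfu (see arc_access()).
 * When its data is evicted, the now-empty header moves to the matching
 * ghost state, and a later hit on a ghost list serves as a hint that that
 * part of the cache is sized too small.  Within each state, a buffer is
 * assigned to one of the multiple lists by hashing its DVA (see
 * get_buf_info()).
 */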

#define	ARCS_LOCK_PAD		CACHE_LINE_SIZE
struct arcs_lock {
	kmutex_t	arcs_lock;
#ifdef _KERNEL
	unsigned char	pad[(ARCS_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

/*
 * must be a power of two for mask use to work
 */
#define	ARC_BUFC_NUMDATALISTS		16
#define	ARC_BUFC_NUMMETADATALISTS	16
#define	ARC_BUFC_NUMLISTS	(ARC_BUFC_NUMMETADATALISTS + ARC_BUFC_NUMDATALISTS)

typedef struct arc_state {
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	list_t	arcs_lists[ARC_BUFC_NUMLISTS];	/* list of evictable buffers */
	struct arcs_lock arcs_locks[ARC_BUFC_NUMLISTS] __aligned(CACHE_LINE_SIZE);
} arc_state_t;

#define	ARCS_LOCK(s, i)	(&((s)->arcs_locks[(i)].arcs_lock))

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_allocated;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_stolen;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_evict_l2_cached;
	kstat_named_t arcstat_evict_l2_eligible;
	kstat_named_t arcstat_evict_l2_ineligible;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_data_size;
	kstat_named_t arcstat_other_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_read_bytes;
	kstat_named_t arcstat_l2_write_bytes;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_memory_throttle_count;
	kstat_named_t arcstat_l2_write_trylock_fail;
	kstat_named_t arcstat_l2_write_passed_headroom;
	kstat_named_t arcstat_l2_write_spa_mismatch;
	kstat_named_t arcstat_l2_write_in_l2;
	kstat_named_t arcstat_l2_write_hdr_io_in_progress;
	kstat_named_t arcstat_l2_write_not_cacheable;
	kstat_named_t arcstat_l2_write_full;
	kstat_named_t arcstat_l2_write_buffer_iter;
	kstat_named_t arcstat_l2_write_pios;
	kstat_named_t arcstat_l2_write_buffer_bytes_scanned;
	kstat_named_t arcstat_l2_write_buffer_list_iter;
	kstat_named_t arcstat_l2_write_buffer_list_null_iter;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "allocated",			KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "stolen",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "data_size",			KSTAT_DATA_UINT64 },
	{ "other_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 },
	{ "l2_write_trylock_fail",	KSTAT_DATA_UINT64 },
	{ "l2_write_passed_headroom",	KSTAT_DATA_UINT64 },
	{ "l2_write_spa_mismatch",	KSTAT_DATA_UINT64 },
	{ "l2_write_in_l2",		KSTAT_DATA_UINT64 },
	{ "l2_write_io_in_progress",	KSTAT_DATA_UINT64 },
	{ "l2_write_not_cacheable",	KSTAT_DATA_UINT64 },
	{ "l2_write_full",		KSTAT_DATA_UINT64 },
	{ "l2_write_buffer_iter",	KSTAT_DATA_UINT64 },
	{ "l2_write_pios",		KSTAT_DATA_UINT64 },
	{ "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 },
	{ "l2_write_buffer_list_iter",	KSTAT_DATA_UINT64 },
	{ "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)
#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
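/*
 * For example, arc_buf_add_ref() below accounts a hit as exactly one of
 * demand_data_hits, demand_metadata_hits, prefetch_data_hits or
 * prefetch_metadata_hits via:
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 */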
kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RDTUN,
    &arc_meta_used, 0, "ARC metadata used");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RDTUN,
    &arc_meta_limit, 0, "ARC metadata limit");

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes);

static boolean_t l2arc_write_eligible(spa_t *spa, arc_buf_hdr_t *ab);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)
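/*
 * Note that a header in a ghost state describes a block whose data has
 * already been evicted: it has no arc_buf_t's attached and b_datacnt is 0
 * (see the ASSERTs in add_reference() and arc_change_state() below), so it
 * only costs header space.  A hit on a ghost list is therefore a hint that
 * the block would still have been cached had that part of the cache been
 * larger.
 */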
/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */
#define	ARC_STORED		(1 << 19)	/* has been store()d to */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_PREFETCH)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Other sizes
 */

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	CACHE_LINE_SIZE

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE);
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2		/* num of writes */
#define	L2ARC_FEED_SECS		1		/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
boolean_t l2arc_noprefetch = B_FALSE;		/* don't cache prefetch bufs */
boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */

SYSCTL_QUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
    &l2arc_write_max, 0, "max write size");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW,
    &l2arc_write_boost, 0, "extra write during warmup");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW,
    &l2arc_headroom, 0, "number of dev writes");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW,
    &l2arc_feed_secs, 0, "interval seconds");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW,
    &l2arc_feed_min_ms, 0, "min interval milliseconds");

SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW,
    &l2arc_noprefetch, 0, "don't cache prefetch bufs");
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW,
    &l2arc_feed_again, 0, "turbo warmup");
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW,
    &l2arc_norw, 0, "no reads during writes");

SYSCTL_QUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
    &ARC_anon.arcs_size, 0, "size of anonymous state");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, anon_metadata_lsize, CTLFLAG_RD,
    &ARC_anon.arcs_lsize[ARC_BUFC_METADATA], 0,
    "size of metadata in anonymous state");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, anon_data_lsize, CTLFLAG_RD,
    &ARC_anon.arcs_lsize[ARC_BUFC_DATA], 0,
    "size of data in anonymous state");

SYSCTL_QUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
    &ARC_mru.arcs_size, 0, "size of mru state");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, mru_metadata_lsize, CTLFLAG_RD,
    &ARC_mru.arcs_lsize[ARC_BUFC_METADATA], 0,
    "size of metadata in mru state");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, mru_data_lsize, CTLFLAG_RD,
    &ARC_mru.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mru state");

SYSCTL_QUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
    &ARC_mru_ghost.arcs_size, 0, "size of mru ghost state");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_lsize, CTLFLAG_RD,
    &ARC_mru_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
    "size of metadata in mru ghost state");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_lsize, CTLFLAG_RD,
    &ARC_mru_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
    "size of data in mru ghost state");

SYSCTL_QUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
    &ARC_mfu.arcs_size, 0, "size of mfu state");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, mfu_metadata_lsize, CTLFLAG_RD,
    &ARC_mfu.arcs_lsize[ARC_BUFC_METADATA], 0,
    "size of metadata in mfu state");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD,
    &ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state");

SYSCTL_QUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
    &ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD,
    &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
    "size of metadata in mfu ghost state");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_lsize, CTLFLAG_RD,
    &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
    "size of data in mfu ghost state");

SYSCTL_QUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
    &ARC_l2c_only.arcs_size, 0, "size of l2c_only state");

/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t		*l2ad_vdev;	/* vdev */
	spa_t		*l2ad_spa;	/* spa */
	uint64_t	l2ad_hand;	/* next write location */
	uint64_t	l2ad_write;	/* desired write size, bytes */
	uint64_t	l2ad_boost;	/* warmup write boost, bytes */
	uint64_t	l2ad_start;	/* first addr on device */
	uint64_t	l2ad_end;	/* last addr on device */
	uint64_t	l2ad_evict;	/* last addr eviction reached */
	boolean_t	l2ad_first;	/* first sweep through */
	boolean_t	l2ad_writing;	/* currently writing */
	list_t		*l2ad_buflist;	/* buffer list */
	list_node_t	l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	uint64_t	b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

static uint64_t
buf_hash(spa_t *spa, const dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}
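/*
 * A rough sketch of the caller contract for buf_hash_find(): on a hit the
 * hash mutex is returned held through *lockp and must be dropped by the
 * caller once it is done with the header; on a miss *lockp is NULL and no
 * lock is held.  For example:
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr;
 *
 *	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
 *	if (hdr != NULL) {
 *		(use hdr; its fields are stable while hash_lock is held)
 *		mutex_exit(hash_lock);
 *	}
 */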
/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to buf in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);

	return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	rw_init(&buf->b_lock, NULL, RW_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);

	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);
	arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
}

/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	rw_destroy(&buf->b_lock);
	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < (uint64_t)physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
}

static void
get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list, kmutex_t **lock)
{
	uint64_t buf_hashid = buf_hash(ab->b_spa, &ab->b_dva, ab->b_birth);

	if (ab->b_type == ARC_BUFC_METADATA)
		buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1);
	else {
		buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1);
		buf_hashid += ARC_BUFC_NUMMETADATALISTS;
	}

	*list = &state->arcs_lists[buf_hashid];
	*lock = ARCS_LOCK(state, buf_hashid);
}


static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{

	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
		list_t *list;
		kmutex_t *lock;

		get_buf_info(ab, ab->b_state, &list, &lock);
		ASSERT(!MUTEX_HELD(lock));
		mutex_enter(lock);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(lock);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];
		list_t *list;
		kmutex_t *lock;

		get_buf_info(ab, state, &list, &lock);
		ASSERT(!MUTEX_HELD(lock));
		mutex_enter(lock);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(list, ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(lock);
	}
	return (cnt);
}
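/*
 * Together, add_reference() and remove_reference() implement the
 * "un-evictable while referenced" rule described at the top of this file:
 * the first reference taken on a header pulls it off its state's evictable
 * list (and out of arcs_lsize), and dropping the last reference puts it
 * back at the head of that list, making it a candidate for arc_evict()
 * again.
 */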
/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;
	list_t *list;
	kmutex_t *lock;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex;
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			get_buf_info(ab, old_state, &list, &lock);
			use_mutex = !MUTEX_HELD(lock);
			if (use_mutex)
				mutex_enter(lock);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(list, ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-zero datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(lock);
		}
		if (new_state != arc_anon) {
			int use_mutex;
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			get_buf_info(ab, new_state, &list, &lock);
			use_mutex = !MUTEX_HELD(lock);
			if (use_mutex)
				mutex_enter(lock);

			list_insert_head(list, ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(lock);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, space);
		break;
	}

	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, -space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, -space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, -space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
		break;
	}

	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa;
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is evicted.  Callers
	 * must verify b_data != NULL to know if the add_ref
	 * was successful.
	 */
	rw_enter(&buf->b_lock, RW_READER);
	if (buf->b_data == NULL) {
		rw_exit(&buf->b_lock);
		return;
	}
	hdr = buf->b_hdr;
	ASSERT(hdr != NULL);
	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);
	rw_exit(&buf->b_lock);

	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

/*
 * Free the arc data buffer.  If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
    void *data, size_t size)
{
	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = data;
		df->l2df_size = size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(data, size);
	}
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf->b_hdr, zio_buf_free,
				    buf->b_data, size);
				arc_space_return(size, ARC_SPACE_DATA);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf->b_hdr,
				    zio_data_buf_free, buf->b_data, size);
				ARCSTAT_INCR(arcstat_data_size, -size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	ASSERT(!(hdr->b_flags & ARC_STORED));

	if (hdr->b_l2hdr != NULL) {
		if (!MUTEX_HELD(&l2arc_buflist_mtx)) {
			/*
			 * To prevent arc_free() and l2arc_evict() from
			 * attempting to free the same buffer at the same time,
			 * a FREE_IN_PROGRESS flag is given to arc_free() to
			 * give it priority.  l2arc_evict() can't destroy this
			 * header while we are waiting on l2arc_buflist_mtx.
			 *
			 * The hdr may be removed from l2ad_buflist before we
			 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
			 */
			mutex_enter(&l2arc_buflist_mtx);
			if (hdr->b_l2hdr != NULL) {
				list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist,
				    hdr);
			}
			mutex_exit(&l2arc_buflist_mtx);
		} else {
			list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
		}
		ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
		kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t));
		if (hdr->b_state == arc_l2c_only)
			l2arc_hdr_stat_remove();
		hdr->b_l2hdr = NULL;
	}

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			rw_enter(&buf->b_lock, RW_WRITER);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			rw_exit(&buf->b_lock);
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 *
 * This function makes a "best effort".  It skips over any buffers
 * it can't get a hash_lock on, and so may not catch all candidates.
 * It may also return without evicting as much space as requested.
 */
static void *
arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	int64_t bytes_remaining;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	list_t *evicted_list, *list, *evicted_list_start, *list_start;
	kmutex_t *lock, *evicted_lock;
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;
	static int evict_metadata_offset, evict_data_offset;
	int i, idx, offset, list_count, count;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ?
	    arc_mru_ghost : arc_mfu_ghost;

	if (type == ARC_BUFC_METADATA) {
		offset = 0;
		list_count = ARC_BUFC_NUMMETADATALISTS;
		list_start = &state->arcs_lists[0];
		evicted_list_start = &evicted_state->arcs_lists[0];
		idx = evict_metadata_offset;
	} else {
		offset = ARC_BUFC_NUMMETADATALISTS;
		list_start = &state->arcs_lists[offset];
		evicted_list_start = &evicted_state->arcs_lists[offset];
		list_count = ARC_BUFC_NUMDATALISTS;
		idx = evict_data_offset;
	}
	bytes_remaining = evicted_state->arcs_lsize[type];
	count = 0;

evict_start:
	list = &list_start[idx];
	evicted_list = &evicted_list_start[idx];
	lock = ARCS_LOCK(state, (offset + idx));
	evicted_lock = ARCS_LOCK(evicted_state, (offset + idx));

	mutex_enter(lock);
	mutex_enter(evicted_lock);

	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		bytes_remaining -= (ab->b_size * ab->b_datacnt);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (spa && ab->b_spa != spa) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    LBOLT - ab->b_arc_access < arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (!rw_tryenter(&buf->b_lock, RW_WRITER)) {
					missed += 1;
					break;
				}
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_type == type &&
					    ab->b_size == bytes &&
					    !HDR_L2_WRITING(ab)) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
					rw_exit(&buf->b_lock);
				} else {
					rw_exit(&buf->b_lock);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}

			if (ab->b_l2hdr) {
				ARCSTAT_INCR(arcstat_evict_l2_cached,
				    ab->b_size);
			} else {
				if (l2arc_write_eligible(ab->b_spa, ab)) {
					ARCSTAT_INCR(arcstat_evict_l2_eligible,
					    ab->b_size);
				} else {
					ARCSTAT_INCR(
					    arcstat_evict_l2_ineligible,
					    ab->b_size);
				}
			}

			if (ab->b_datacnt == 0) {
				arc_change_state(evicted_state, ab, hash_lock);
				ASSERT(HDR_IN_HASH_TABLE(ab));
				ab->b_flags |= ARC_IN_HASH_TABLE;
				ab->b_flags &= ~ARC_BUF_AVAILABLE;
				DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			}
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
			if (bytes_remaining > 0) {
				mutex_exit(evicted_lock);
				mutex_exit(lock);
				idx = ((idx + 1) & (list_count - 1));
				count++;
				goto evict_start;
			}
		} else {
			missed += 1;
		}
	}

	mutex_exit(evicted_lock);
	mutex_exit(lock);

	idx = ((idx + 1) & (list_count - 1));
	count++;

	if (bytes_evicted < bytes) {
		if (count < list_count)
			goto evict_start;
else 1807 dprintf("only evicted %lld bytes from %x", 1808 (longlong_t)bytes_evicted, state); 1809 } 1810 if (type == ARC_BUFC_METADATA) 1811 evict_metadata_offset = idx; 1812 else 1813 evict_data_offset = idx; 1814 1815 if (skipped) 1816 ARCSTAT_INCR(arcstat_evict_skip, skipped); 1817 1818 if (missed) 1819 ARCSTAT_INCR(arcstat_mutex_miss, missed); 1820 1821 /* 1822 * We have just evicted some date into the ghost state, make 1823 * sure we also adjust the ghost state size if necessary. 1824 */ 1825 if (arc_no_grow && 1826 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { 1827 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + 1828 arc_mru_ghost->arcs_size - arc_c; 1829 1830 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { 1831 int64_t todelete = 1832 MIN(arc_mru_ghost->arcs_lsize[type], mru_over); 1833 arc_evict_ghost(arc_mru_ghost, NULL, todelete); 1834 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { 1835 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], 1836 arc_mru_ghost->arcs_size + 1837 arc_mfu_ghost->arcs_size - arc_c); 1838 arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 1839 } 1840 } 1841 if (stolen) 1842 ARCSTAT_BUMP(arcstat_stolen); 1843 1844 return (stolen); 1845} 1846 1847/* 1848 * Remove buffers from list until we've removed the specified number of 1849 * bytes. Destroy the buffers that are removed. 1850 */ 1851static void 1852arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes) 1853{ 1854 arc_buf_hdr_t *ab, *ab_prev; 1855 list_t *list, *list_start; 1856 kmutex_t *hash_lock, *lock; 1857 uint64_t bytes_deleted = 0; 1858 uint64_t bufs_skipped = 0; 1859 static int evict_offset; 1860 int list_count, idx = evict_offset; 1861 int offset, count = 0; 1862 1863 ASSERT(GHOST_STATE(state)); 1864 1865 /* 1866 * data lists come after metadata lists 1867 */ 1868 list_start = &state->arcs_lists[ARC_BUFC_NUMMETADATALISTS]; 1869 list_count = ARC_BUFC_NUMDATALISTS; 1870 offset = ARC_BUFC_NUMMETADATALISTS; 1871 1872evict_start: 1873 list = &list_start[idx]; 1874 lock = ARCS_LOCK(state, idx + offset); 1875 1876 mutex_enter(lock); 1877 for (ab = list_tail(list); ab; ab = ab_prev) { 1878 ab_prev = list_prev(list, ab); 1879 if (spa && ab->b_spa != spa) 1880 continue; 1881 hash_lock = HDR_LOCK(ab); 1882 if (mutex_tryenter(hash_lock)) { 1883 ASSERT(!HDR_IO_IN_PROGRESS(ab)); 1884 ASSERT(ab->b_buf == NULL); 1885 ARCSTAT_BUMP(arcstat_deleted); 1886 bytes_deleted += ab->b_size; 1887 1888 if (ab->b_l2hdr != NULL) { 1889 /* 1890 * This buffer is cached on the 2nd Level ARC; 1891 * don't destroy the header. 
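 * Instead, just move it to the l2c_only state so that only the
 * L2ARC copy of the data remains accounted for.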
1892 */ 1893 arc_change_state(arc_l2c_only, ab, hash_lock); 1894 mutex_exit(hash_lock); 1895 } else { 1896 arc_change_state(arc_anon, ab, hash_lock); 1897 mutex_exit(hash_lock); 1898 arc_hdr_destroy(ab); 1899 } 1900 1901 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1902 if (bytes >= 0 && bytes_deleted >= bytes) 1903 break; 1904 } else { 1905 if (bytes < 0) { 1906 /* 1907 * we're draining the ARC, retry 1908 */ 1909 mutex_exit(lock); 1910 mutex_enter(hash_lock); 1911 mutex_exit(hash_lock); 1912 goto evict_start; 1913 } 1914 bufs_skipped += 1; 1915 } 1916 } 1917 mutex_exit(lock); 1918 idx = ((idx + 1) & (ARC_BUFC_NUMDATALISTS - 1)); 1919 count++; 1920 1921 if (count < list_count) 1922 goto evict_start; 1923 1924 evict_offset = idx; 1925 if ((uintptr_t)list > (uintptr_t)&state->arcs_lists[ARC_BUFC_NUMMETADATALISTS] && 1926 (bytes < 0 || bytes_deleted < bytes)) { 1927 list_start = &state->arcs_lists[0]; 1928 list_count = ARC_BUFC_NUMMETADATALISTS; 1929 offset = count = 0; 1930 goto evict_start; 1931 } 1932 1933 if (bufs_skipped) { 1934 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 1935 ASSERT(bytes >= 0); 1936 } 1937 1938 if (bytes_deleted < bytes) 1939 dprintf("only deleted %lld bytes from %p", 1940 (longlong_t)bytes_deleted, state); 1941} 1942 1943static void 1944arc_adjust(void) 1945{ 1946 int64_t adjustment, delta; 1947 1948 /* 1949 * Adjust MRU size 1950 */ 1951 1952 adjustment = MIN(arc_size - arc_c, 1953 arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used - arc_p); 1954 1955 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 1956 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment); 1957 (void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA); 1958 adjustment -= delta; 1959 } 1960 1961 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1962 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment); 1963 (void) arc_evict(arc_mru, NULL, delta, FALSE, 1964 ARC_BUFC_METADATA); 1965 } 1966 1967 /* 1968 * Adjust MFU size 1969 */ 1970 1971 adjustment = arc_size - arc_c; 1972 1973 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 1974 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]); 1975 (void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA); 1976 adjustment -= delta; 1977 } 1978 1979 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1980 int64_t delta = MIN(adjustment, 1981 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]); 1982 (void) arc_evict(arc_mfu, NULL, delta, FALSE, 1983 ARC_BUFC_METADATA); 1984 } 1985 1986 /* 1987 * Adjust ghost lists 1988 */ 1989 1990 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c; 1991 1992 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) { 1993 delta = MIN(arc_mru_ghost->arcs_size, adjustment); 1994 arc_evict_ghost(arc_mru_ghost, NULL, delta); 1995 } 1996 1997 adjustment = 1998 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c; 1999 2000 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) { 2001 delta = MIN(arc_mfu_ghost->arcs_size, adjustment); 2002 arc_evict_ghost(arc_mfu_ghost, NULL, delta); 2003 } 2004} 2005 2006static void 2007arc_do_user_evicts(void) 2008{ 2009 static arc_buf_t *tmp_arc_eviction_list; 2010 2011 /* 2012 * Move list over to avoid LOR 2013 */ 2014restart: 2015 mutex_enter(&arc_eviction_mtx); 2016 tmp_arc_eviction_list = arc_eviction_list; 2017 arc_eviction_list = NULL; 2018 mutex_exit(&arc_eviction_mtx); 2019 2020 while (tmp_arc_eviction_list != NULL) { 2021 arc_buf_t *buf = tmp_arc_eviction_list; 2022 
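		/*
		 * Unlink the buffer from the private list and clear its
		 * placeholder header pointer under b_lock, then invoke the
		 * user's eviction callback with no locks held.
		 */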
tmp_arc_eviction_list = buf->b_next; 2023 rw_enter(&buf->b_lock, RW_WRITER); 2024 buf->b_hdr = NULL; 2025 rw_exit(&buf->b_lock); 2026 2027 if (buf->b_efunc != NULL) 2028 VERIFY(buf->b_efunc(buf) == 0); 2029 2030 buf->b_efunc = NULL; 2031 buf->b_private = NULL; 2032 kmem_cache_free(buf_cache, buf); 2033 } 2034 2035 if (arc_eviction_list != NULL) 2036 goto restart; 2037} 2038 2039/* 2040 * Flush all *evictable* data from the cache for the given spa. 2041 * NOTE: this will not touch "active" (i.e. referenced) data. 2042 */ 2043void 2044arc_flush(spa_t *spa) 2045{ 2046 while (arc_mru->arcs_lsize[ARC_BUFC_DATA]) { 2047 (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA); 2048 if (spa) 2049 break; 2050 } 2051 while (arc_mru->arcs_lsize[ARC_BUFC_METADATA]) { 2052 (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA); 2053 if (spa) 2054 break; 2055 } 2056 while (arc_mfu->arcs_lsize[ARC_BUFC_DATA]) { 2057 (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA); 2058 if (spa) 2059 break; 2060 } 2061 while (arc_mfu->arcs_lsize[ARC_BUFC_METADATA]) { 2062 (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA); 2063 if (spa) 2064 break; 2065 } 2066 2067 arc_evict_ghost(arc_mru_ghost, spa, -1); 2068 arc_evict_ghost(arc_mfu_ghost, spa, -1); 2069 2070 mutex_enter(&arc_reclaim_thr_lock); 2071 arc_do_user_evicts(); 2072 mutex_exit(&arc_reclaim_thr_lock); 2073 ASSERT(spa || arc_eviction_list == NULL); 2074} 2075 2076void 2077arc_shrink(void) 2078{ 2079 if (arc_c > arc_c_min) { 2080 uint64_t to_free; 2081 2082#ifdef _KERNEL 2083 to_free = arc_c >> arc_shrink_shift; 2084#else 2085 to_free = arc_c >> arc_shrink_shift; 2086#endif 2087 if (arc_c > arc_c_min + to_free) 2088 atomic_add_64(&arc_c, -to_free); 2089 else 2090 arc_c = arc_c_min; 2091 2092 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 2093 if (arc_c > arc_size) 2094 arc_c = MAX(arc_size, arc_c_min); 2095 if (arc_p > arc_c) 2096 arc_p = (arc_c >> 1); 2097 ASSERT(arc_c >= arc_c_min); 2098 ASSERT((int64_t)arc_p >= 0); 2099 } 2100 2101 if (arc_size > arc_c) 2102 arc_adjust(); 2103} 2104 2105static int needfree = 0; 2106 2107static int 2108arc_reclaim_needed(void) 2109{ 2110#if 0 2111 uint64_t extra; 2112#endif 2113 2114#ifdef _KERNEL 2115 if (needfree) 2116 return (1); 2117 if (arc_size > arc_c_max) 2118 return (1); 2119 if (arc_size <= arc_c_min) 2120 return (0); 2121 2122 /* 2123 * If pages are needed or we're within 2048 pages 2124 * of needing to page need to reclaim 2125 */ 2126 if (vm_pages_needed || (vm_paging_target() > -2048)) 2127 return (1); 2128 2129#if 0 2130 /* 2131 * take 'desfree' extra pages, so we reclaim sooner, rather than later 2132 */ 2133 extra = desfree; 2134 2135 /* 2136 * check that we're out of range of the pageout scanner. It starts to 2137 * schedule paging if freemem is less than lotsfree and needfree. 2138 * lotsfree is the high-water mark for pageout, and needfree is the 2139 * number of needed free pages. We add extra pages here to make sure 2140 * the scanner doesn't start up while we're freeing memory. 2141 */ 2142 if (freemem < lotsfree + needfree + extra) 2143 return (1); 2144 2145 /* 2146 * check to make sure that swapfs has enough space so that anon 2147 * reservations can still succeed. anon_resvmem() checks that the 2148 * availrmem is greater than swapfs_minfree, and the number of reserved 2149 * swap pages. We also add a bit of extra here just to prevent 2150 * circumstances from getting really dire. 
2151 */ 2152 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 2153 return (1); 2154 2155#if defined(__i386) 2156 /* 2157 * If we're on an i386 platform, it's possible that we'll exhaust the 2158 * kernel heap space before we ever run out of available physical 2159 * memory. Most checks of the size of the heap_area compare against 2160 * tune.t_minarmem, which is the minimum available real memory that we 2161 * can have in the system. However, this is generally fixed at 25 pages 2162 * which is so low that it's useless. In this comparison, we seek to 2163 * calculate the total heap-size, and reclaim if more than 3/4ths of the 2164 * heap is allocated. (Or, in the calculation, if less than 1/4th is 2165 * free) 2166 */ 2167 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 2168 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 2169 return (1); 2170#endif 2171#else 2172 if (kmem_used() > (kmem_size() * 3) / 4) 2173 return (1); 2174#endif 2175 2176#else 2177 if (spa_get_random(100) == 0) 2178 return (1); 2179#endif 2180 return (0); 2181} 2182 2183extern kmem_cache_t *zio_buf_cache[]; 2184extern kmem_cache_t *zio_data_buf_cache[]; 2185 2186static void 2187arc_kmem_reap_now(arc_reclaim_strategy_t strat) 2188{ 2189 size_t i; 2190 kmem_cache_t *prev_cache = NULL; 2191 kmem_cache_t *prev_data_cache = NULL; 2192 2193#ifdef _KERNEL 2194 if (arc_meta_used >= arc_meta_limit) { 2195 /* 2196 * We are exceeding our meta-data cache limit. 2197 * Purge some DNLC entries to release holds on meta-data. 2198 */ 2199 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 2200 } 2201#if defined(__i386) 2202 /* 2203 * Reclaim unused memory from all kmem caches. 2204 */ 2205 kmem_reap(); 2206#endif 2207#endif 2208 2209 /* 2210 * An aggressive reclamation will shrink the cache size as well as 2211 * reap free buffers from the arc kmem caches. 2212 */ 2213 if (strat == ARC_RECLAIM_AGGR) 2214 arc_shrink(); 2215 2216 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 2217 if (zio_buf_cache[i] != prev_cache) { 2218 prev_cache = zio_buf_cache[i]; 2219 kmem_cache_reap_now(zio_buf_cache[i]); 2220 } 2221 if (zio_data_buf_cache[i] != prev_data_cache) { 2222 prev_data_cache = zio_data_buf_cache[i]; 2223 kmem_cache_reap_now(zio_data_buf_cache[i]); 2224 } 2225 } 2226 kmem_cache_reap_now(buf_cache); 2227 kmem_cache_reap_now(hdr_cache); 2228} 2229 2230static void 2231arc_reclaim_thread(void *dummy __unused) 2232{ 2233 clock_t growtime = 0; 2234 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 2235 callb_cpr_t cpr; 2236 2237 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 2238 2239 mutex_enter(&arc_reclaim_thr_lock); 2240 while (arc_thread_exit == 0) { 2241 if (arc_reclaim_needed()) { 2242 2243 if (arc_no_grow) { 2244 if (last_reclaim == ARC_RECLAIM_CONS) { 2245 last_reclaim = ARC_RECLAIM_AGGR; 2246 } else { 2247 last_reclaim = ARC_RECLAIM_CONS; 2248 } 2249 } else { 2250 arc_no_grow = TRUE; 2251 last_reclaim = ARC_RECLAIM_AGGR; 2252 membar_producer(); 2253 } 2254 2255 /* reset the growth delay for every reclaim */ 2256 growtime = LBOLT + (arc_grow_retry * hz); 2257 2258 if (needfree && last_reclaim == ARC_RECLAIM_CONS) { 2259 /* 2260 * If needfree is TRUE our vm_lowmem hook 2261 * was called and in that case we must free some 2262 * memory, so switch to aggressive mode. 
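 * An aggressive pass also shrinks the target cache size, since
 * arc_kmem_reap_now() calls arc_shrink() for ARC_RECLAIM_AGGR.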
2263 */ 2264 arc_no_grow = TRUE; 2265 last_reclaim = ARC_RECLAIM_AGGR; 2266 } 2267 arc_kmem_reap_now(last_reclaim); 2268 arc_warm = B_TRUE; 2269 2270 } else if (arc_no_grow && LBOLT >= growtime) { 2271 arc_no_grow = FALSE; 2272 } 2273 2274 if (needfree || 2275 (2 * arc_c < arc_size + 2276 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size)) 2277 arc_adjust(); 2278 2279 if (arc_eviction_list != NULL) 2280 arc_do_user_evicts(); 2281 2282 if (arc_reclaim_needed()) { 2283 needfree = 0; 2284#ifdef _KERNEL 2285 wakeup(&needfree); 2286#endif 2287 } 2288 2289 /* block until needed, or one second, whichever is shorter */ 2290 CALLB_CPR_SAFE_BEGIN(&cpr); 2291 (void) cv_timedwait(&arc_reclaim_thr_cv, 2292 &arc_reclaim_thr_lock, hz); 2293 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 2294 } 2295 2296 arc_thread_exit = 0; 2297 cv_broadcast(&arc_reclaim_thr_cv); 2298 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 2299 thread_exit(); 2300} 2301 2302/* 2303 * Adapt arc info given the number of bytes we are trying to add and 2304 * the state that we are comming from. This function is only called 2305 * when we are adding new content to the cache. 2306 */ 2307static void 2308arc_adapt(int bytes, arc_state_t *state) 2309{ 2310 int mult; 2311 uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 2312 2313 if (state == arc_l2c_only) 2314 return; 2315 2316 ASSERT(bytes > 0); 2317 /* 2318 * Adapt the target size of the MRU list: 2319 * - if we just hit in the MRU ghost list, then increase 2320 * the target size of the MRU list. 2321 * - if we just hit in the MFU ghost list, then increase 2322 * the target size of the MFU list by decreasing the 2323 * target size of the MRU list. 2324 */ 2325 if (state == arc_mru_ghost) { 2326 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 2327 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 2328 2329 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 2330 } else if (state == arc_mfu_ghost) { 2331 uint64_t delta; 2332 2333 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 2334 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 2335 2336 delta = MIN(bytes * mult, arc_p); 2337 arc_p = MAX(arc_p_min, arc_p - delta); 2338 } 2339 ASSERT((int64_t)arc_p >= 0); 2340 2341 if (arc_reclaim_needed()) { 2342 cv_signal(&arc_reclaim_thr_cv); 2343 return; 2344 } 2345 2346 if (arc_no_grow) 2347 return; 2348 2349 if (arc_c >= arc_c_max) 2350 return; 2351 2352 /* 2353 * If we're within (2 * maxblocksize) bytes of the target 2354 * cache size, increment the target cache size 2355 */ 2356 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 2357 atomic_add_64(&arc_c, (int64_t)bytes); 2358 if (arc_c > arc_c_max) 2359 arc_c = arc_c_max; 2360 else if (state == arc_anon) 2361 atomic_add_64(&arc_p, (int64_t)bytes); 2362 if (arc_p > arc_c) 2363 arc_p = arc_c; 2364 } 2365 ASSERT((int64_t)arc_p >= 0); 2366} 2367 2368/* 2369 * Check if the cache has reached its limits and eviction is required 2370 * prior to insert. 2371 */ 2372static int 2373arc_evict_needed(arc_buf_contents_t type) 2374{ 2375 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 2376 return (1); 2377 2378#if 0 2379#ifdef _KERNEL 2380 /* 2381 * If zio data pages are being allocated out of a separate heap segment, 2382 * then enforce that the size of available vmem for this area remains 2383 * above about 1/32nd free. 
2384 */ 2385 if (type == ARC_BUFC_DATA && zio_arena != NULL && 2386 vmem_size(zio_arena, VMEM_FREE) < 2387 (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 2388 return (1); 2389#endif 2390#endif 2391 2392 if (arc_reclaim_needed()) 2393 return (1); 2394 2395 return (arc_size > arc_c); 2396} 2397 2398/* 2399 * The buffer, supplied as the first argument, needs a data block. 2400 * So, if we are at cache max, determine which cache should be victimized. 2401 * We have the following cases: 2402 * 2403 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2404 * In this situation if we're out of space, but the resident size of the MFU is 2405 * under the limit, victimize the MFU cache to satisfy this insertion request. 2406 * 2407 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2408 * Here, we've used up all of the available space for the MRU, so we need to 2409 * evict from our own cache instead. Evict from the set of resident MRU 2410 * entries. 2411 * 2412 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2413 * c minus p represents the MFU space in the cache, since p is the size of the 2414 * cache that is dedicated to the MRU. In this situation there's still space on 2415 * the MFU side, so the MRU side needs to be victimized. 2416 * 2417 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2418 * MFU's resident set is consuming more space than it has been allotted. In 2419 * this situation, we must victimize our own cache, the MFU, for this insertion. 2420 */ 2421static void 2422arc_get_data_buf(arc_buf_t *buf) 2423{ 2424 arc_state_t *state = buf->b_hdr->b_state; 2425 uint64_t size = buf->b_hdr->b_size; 2426 arc_buf_contents_t type = buf->b_hdr->b_type; 2427 2428 arc_adapt(size, state); 2429 2430 /* 2431 * We have not yet reached cache maximum size, 2432 * just allocate a new buffer. 2433 */ 2434 if (!arc_evict_needed(type)) { 2435 if (type == ARC_BUFC_METADATA) { 2436 buf->b_data = zio_buf_alloc(size); 2437 arc_space_consume(size, ARC_SPACE_DATA); 2438 } else { 2439 ASSERT(type == ARC_BUFC_DATA); 2440 buf->b_data = zio_data_buf_alloc(size); 2441 ARCSTAT_INCR(arcstat_data_size, size); 2442 atomic_add_64(&arc_size, size); 2443 } 2444 goto out; 2445 } 2446 2447 /* 2448 * If we are prefetching from the mfu ghost list, this buffer 2449 * will end up on the mru list; so steal space from there. 2450 */ 2451 if (state == arc_mfu_ghost) 2452 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 2453 else if (state == arc_mru_ghost) 2454 state = arc_mru; 2455 2456 if (state == arc_mru || state == arc_anon) { 2457 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2458 state = (arc_mfu->arcs_lsize[type] >= size && 2459 arc_p > mru_used) ? arc_mfu : arc_mru; 2460 } else { 2461 /* MFU cases */ 2462 uint64_t mfu_space = arc_c - arc_p; 2463 state = (arc_mru->arcs_lsize[type] >= size && 2464 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2465 } 2466 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { 2467 if (type == ARC_BUFC_METADATA) { 2468 buf->b_data = zio_buf_alloc(size); 2469 arc_space_consume(size, ARC_SPACE_DATA); 2470 } else { 2471 ASSERT(type == ARC_BUFC_DATA); 2472 buf->b_data = zio_data_buf_alloc(size); 2473 ARCSTAT_INCR(arcstat_data_size, size); 2474 atomic_add_64(&arc_size, size); 2475 } 2476 ARCSTAT_BUMP(arcstat_recycle_miss); 2477 } 2478 ASSERT(buf->b_data != NULL); 2479out: 2480 /* 2481 * Update the state size. Note that ghost states have a 2482 * "ghost size" and so don't need to be updated. 
2483 */ 2484 if (!GHOST_STATE(buf->b_hdr->b_state)) { 2485 arc_buf_hdr_t *hdr = buf->b_hdr; 2486 2487 atomic_add_64(&hdr->b_state->arcs_size, size); 2488 if (list_link_active(&hdr->b_arc_node)) { 2489 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2490 atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2491 } 2492 /* 2493 * If we are growing the cache, and we are adding anonymous 2494 * data, and we have outgrown arc_p, update arc_p 2495 */ 2496 if (arc_size < arc_c && hdr->b_state == arc_anon && 2497 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2498 arc_p = MIN(arc_c, arc_p + size); 2499 } 2500 ARCSTAT_BUMP(arcstat_allocated); 2501} 2502 2503/* 2504 * This routine is called whenever a buffer is accessed. 2505 * NOTE: the hash lock is dropped in this function. 2506 */ 2507static void 2508arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2509{ 2510 ASSERT(MUTEX_HELD(hash_lock)); 2511 2512 if (buf->b_state == arc_anon) { 2513 /* 2514 * This buffer is not in the cache, and does not 2515 * appear in our "ghost" list. Add the new buffer 2516 * to the MRU state. 2517 */ 2518 2519 ASSERT(buf->b_arc_access == 0); 2520 buf->b_arc_access = LBOLT; 2521 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2522 arc_change_state(arc_mru, buf, hash_lock); 2523 2524 } else if (buf->b_state == arc_mru) { 2525 /* 2526 * If this buffer is here because of a prefetch, then either: 2527 * - clear the flag if this is a "referencing" read 2528 * (any subsequent access will bump this into the MFU state). 2529 * or 2530 * - move the buffer to the head of the list if this is 2531 * another prefetch (to make it less likely to be evicted). 2532 */ 2533 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2534 if (refcount_count(&buf->b_refcnt) == 0) { 2535 ASSERT(list_link_active(&buf->b_arc_node)); 2536 } else { 2537 buf->b_flags &= ~ARC_PREFETCH; 2538 ARCSTAT_BUMP(arcstat_mru_hits); 2539 } 2540 buf->b_arc_access = LBOLT; 2541 return; 2542 } 2543 2544 /* 2545 * This buffer has been "accessed" only once so far, 2546 * but it is still in the cache. Move it to the MFU 2547 * state. 2548 */ 2549 if (LBOLT > buf->b_arc_access + ARC_MINTIME) { 2550 /* 2551 * More than 125ms have passed since we 2552 * instantiated this buffer. Move it to the 2553 * most frequently used state. 2554 */ 2555 buf->b_arc_access = LBOLT; 2556 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2557 arc_change_state(arc_mfu, buf, hash_lock); 2558 } 2559 ARCSTAT_BUMP(arcstat_mru_hits); 2560 } else if (buf->b_state == arc_mru_ghost) { 2561 arc_state_t *new_state; 2562 /* 2563 * This buffer has been "accessed" recently, but 2564 * was evicted from the cache. Move it to the 2565 * MFU state. 2566 */ 2567 2568 if (buf->b_flags & ARC_PREFETCH) { 2569 new_state = arc_mru; 2570 if (refcount_count(&buf->b_refcnt) > 0) 2571 buf->b_flags &= ~ARC_PREFETCH; 2572 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2573 } else { 2574 new_state = arc_mfu; 2575 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2576 } 2577 2578 buf->b_arc_access = LBOLT; 2579 arc_change_state(new_state, buf, hash_lock); 2580 2581 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2582 } else if (buf->b_state == arc_mfu) { 2583 /* 2584 * This buffer has been accessed more than once and is 2585 * still in the cache. Keep it in the MFU state. 2586 * 2587 * NOTE: an add_reference() that occurred when we did 2588 * the arc_read() will have kicked this off the list. 2589 * If it was a prefetch, we will explicitly move it to 2590 * the head of the list now. 
2591 */ 2592 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2593 ASSERT(refcount_count(&buf->b_refcnt) == 0); 2594 ASSERT(list_link_active(&buf->b_arc_node)); 2595 } 2596 ARCSTAT_BUMP(arcstat_mfu_hits); 2597 buf->b_arc_access = LBOLT; 2598 } else if (buf->b_state == arc_mfu_ghost) { 2599 arc_state_t *new_state = arc_mfu; 2600 /* 2601 * This buffer has been accessed more than once but has 2602 * been evicted from the cache. Move it back to the 2603 * MFU state. 2604 */ 2605 2606 if (buf->b_flags & ARC_PREFETCH) { 2607 /* 2608 * This is a prefetch access... 2609 * move this block back to the MRU state. 2610 */ 2611 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 2612 new_state = arc_mru; 2613 } 2614 2615 buf->b_arc_access = LBOLT; 2616 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2617 arc_change_state(new_state, buf, hash_lock); 2618 2619 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2620 } else if (buf->b_state == arc_l2c_only) { 2621 /* 2622 * This buffer is on the 2nd Level ARC. 2623 */ 2624 2625 buf->b_arc_access = LBOLT; 2626 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2627 arc_change_state(arc_mfu, buf, hash_lock); 2628 } else { 2629 ASSERT(!"invalid arc state"); 2630 } 2631} 2632 2633/* a generic arc_done_func_t which you can use */ 2634/* ARGSUSED */ 2635void 2636arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2637{ 2638 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2639 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2640} 2641 2642/* a generic arc_done_func_t */ 2643void 2644arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2645{ 2646 arc_buf_t **bufp = arg; 2647 if (zio && zio->io_error) { 2648 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2649 *bufp = NULL; 2650 } else { 2651 *bufp = buf; 2652 } 2653} 2654 2655static void 2656arc_read_done(zio_t *zio) 2657{ 2658 arc_buf_hdr_t *hdr, *found; 2659 arc_buf_t *buf; 2660 arc_buf_t *abuf; /* buffer we're assigning to callback */ 2661 kmutex_t *hash_lock; 2662 arc_callback_t *callback_list, *acb; 2663 int freeable = FALSE; 2664 2665 buf = zio->io_private; 2666 hdr = buf->b_hdr; 2667 2668 /* 2669 * The hdr was inserted into hash-table and removed from lists 2670 * prior to starting I/O. We should find this header, since 2671 * it's in the hash table, and it should be legit since it's 2672 * not possible to evict it during the I/O. The only possible 2673 * reason for it not to be found is if we were freed during the 2674 * read. 2675 */ 2676 found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 2677 &hash_lock); 2678 2679 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 2680 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 2681 (found == hdr && HDR_L2_READING(hdr))); 2682 2683 hdr->b_flags &= ~ARC_L2_EVICTED; 2684 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 2685 hdr->b_flags &= ~ARC_L2CACHE; 2686 2687 /* byteswap if necessary */ 2688 callback_list = hdr->b_acb; 2689 ASSERT(callback_list != NULL); 2690 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) { 2691 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? 
2692 byteswap_uint64_array : 2693 dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap; 2694 func(buf->b_data, hdr->b_size); 2695 } 2696 2697 arc_cksum_compute(buf, B_FALSE); 2698 2699 /* create copies of the data buffer for the callers */ 2700 abuf = buf; 2701 for (acb = callback_list; acb; acb = acb->acb_next) { 2702 if (acb->acb_done) { 2703 if (abuf == NULL) 2704 abuf = arc_buf_clone(buf); 2705 acb->acb_buf = abuf; 2706 abuf = NULL; 2707 } 2708 } 2709 hdr->b_acb = NULL; 2710 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2711 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 2712 if (abuf == buf) 2713 hdr->b_flags |= ARC_BUF_AVAILABLE; 2714 2715 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2716 2717 if (zio->io_error != 0) { 2718 hdr->b_flags |= ARC_IO_ERROR; 2719 if (hdr->b_state != arc_anon) 2720 arc_change_state(arc_anon, hdr, hash_lock); 2721 if (HDR_IN_HASH_TABLE(hdr)) 2722 buf_hash_remove(hdr); 2723 freeable = refcount_is_zero(&hdr->b_refcnt); 2724 } 2725 2726 /* 2727 * Broadcast before we drop the hash_lock to avoid the possibility 2728 * that the hdr (and hence the cv) might be freed before we get to 2729 * the cv_broadcast(). 2730 */ 2731 cv_broadcast(&hdr->b_cv); 2732 2733 if (hash_lock) { 2734 /* 2735 * Only call arc_access on anonymous buffers. This is because 2736 * if we've issued an I/O for an evicted buffer, we've already 2737 * called arc_access (to prevent any simultaneous readers from 2738 * getting confused). 2739 */ 2740 if (zio->io_error == 0 && hdr->b_state == arc_anon) 2741 arc_access(hdr, hash_lock); 2742 mutex_exit(hash_lock); 2743 } else { 2744 /* 2745 * This block was freed while we waited for the read to 2746 * complete. It has been removed from the hash table and 2747 * moved to the anonymous state (so that it won't show up 2748 * in the cache). 2749 */ 2750 ASSERT3P(hdr->b_state, ==, arc_anon); 2751 freeable = refcount_is_zero(&hdr->b_refcnt); 2752 } 2753 2754 /* execute each callback and free its structure */ 2755 while ((acb = callback_list) != NULL) { 2756 if (acb->acb_done) 2757 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2758 2759 if (acb->acb_zio_dummy != NULL) { 2760 acb->acb_zio_dummy->io_error = zio->io_error; 2761 zio_nowait(acb->acb_zio_dummy); 2762 } 2763 2764 callback_list = acb->acb_next; 2765 kmem_free(acb, sizeof (arc_callback_t)); 2766 } 2767 2768 if (freeable) 2769 arc_hdr_destroy(hdr); 2770} 2771 2772/* 2773 * "Read" the block block at the specified DVA (in bp) via the 2774 * cache. If the block is found in the cache, invoke the provided 2775 * callback immediately and return. Note that the `zio' parameter 2776 * in the callback will be NULL in this case, since no IO was 2777 * required. If the block is not in the cache pass the read request 2778 * on to the spa with a substitute callback function, so that the 2779 * requested block will be added to the cache. 2780 * 2781 * If a read request arrives for a block that has a read in-progress, 2782 * either wait for the in-progress read to complete (and return the 2783 * results); or, if this is a read with a "done" func, add a record 2784 * to the read to invoke the "done" func when the read completes, 2785 * and return; or just return. 2786 * 2787 * arc_read_done() will invoke all the requested "done" functions 2788 * for readers of this block. 2789 * 2790 * Normal callers should use arc_read and pass the arc buffer and offset 2791 * for the bp. But if you know you don't need locking, you can use 2792 * arc_read_bp. 
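 *
 * As an illustrative sketch only (not code from this file), a caller
 * doing a synchronous, possibly-cached read might do something like
 * the following, where spa, bp, pbuf and zb come from the caller:
 *
 *	uint32_t aflags = ARC_WAIT;
 *	arc_buf_t *abuf = NULL;
 *
 *	(void) arc_read(NULL, spa, bp, pbuf, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *	if (abuf != NULL) {
 *		... use abuf->b_data ...
 *		(void) arc_buf_remove_ref(abuf, &abuf);
 *	}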
2793 */ 2794int 2795arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_buf_t *pbuf, 2796 arc_done_func_t *done, void *private, int priority, int zio_flags, 2797 uint32_t *arc_flags, const zbookmark_t *zb) 2798{ 2799 int err; 2800 2801 ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt)); 2802 ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size); 2803 rw_enter(&pbuf->b_lock, RW_READER); 2804 2805 err = arc_read_nolock(pio, spa, bp, done, private, priority, 2806 zio_flags, arc_flags, zb); 2807 rw_exit(&pbuf->b_lock); 2808 return (err); 2809} 2810 2811int 2812arc_read_nolock(zio_t *pio, spa_t *spa, blkptr_t *bp, 2813 arc_done_func_t *done, void *private, int priority, int zio_flags, 2814 uint32_t *arc_flags, const zbookmark_t *zb) 2815{ 2816 arc_buf_hdr_t *hdr; 2817 arc_buf_t *buf; 2818 kmutex_t *hash_lock; 2819 zio_t *rzio; 2820 2821top: 2822 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2823 if (hdr && hdr->b_datacnt > 0) { 2824 2825 *arc_flags |= ARC_CACHED; 2826 2827 if (HDR_IO_IN_PROGRESS(hdr)) { 2828 2829 if (*arc_flags & ARC_WAIT) { 2830 cv_wait(&hdr->b_cv, hash_lock); 2831 mutex_exit(hash_lock); 2832 goto top; 2833 } 2834 ASSERT(*arc_flags & ARC_NOWAIT); 2835 2836 if (done) { 2837 arc_callback_t *acb = NULL; 2838 2839 acb = kmem_zalloc(sizeof (arc_callback_t), 2840 KM_SLEEP); 2841 acb->acb_done = done; 2842 acb->acb_private = private; 2843 if (pio != NULL) 2844 acb->acb_zio_dummy = zio_null(pio, 2845 spa, NULL, NULL, zio_flags); 2846 2847 ASSERT(acb->acb_done != NULL); 2848 acb->acb_next = hdr->b_acb; 2849 hdr->b_acb = acb; 2850 add_reference(hdr, hash_lock, private); 2851 mutex_exit(hash_lock); 2852 return (0); 2853 } 2854 mutex_exit(hash_lock); 2855 return (0); 2856 } 2857 2858 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2859 2860 if (done) { 2861 add_reference(hdr, hash_lock, private); 2862 /* 2863 * If this block is already in use, create a new 2864 * copy of the data so that we will be guaranteed 2865 * that arc_release() will always succeed. 
2866 */ 2867 buf = hdr->b_buf; 2868 ASSERT(buf); 2869 ASSERT(buf->b_data); 2870 if (HDR_BUF_AVAILABLE(hdr)) { 2871 ASSERT(buf->b_efunc == NULL); 2872 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2873 } else { 2874 buf = arc_buf_clone(buf); 2875 } 2876 } else if (*arc_flags & ARC_PREFETCH && 2877 refcount_count(&hdr->b_refcnt) == 0) { 2878 hdr->b_flags |= ARC_PREFETCH; 2879 } 2880 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 2881 arc_access(hdr, hash_lock); 2882 if (*arc_flags & ARC_L2CACHE) 2883 hdr->b_flags |= ARC_L2CACHE; 2884 mutex_exit(hash_lock); 2885 ARCSTAT_BUMP(arcstat_hits); 2886 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2887 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2888 data, metadata, hits); 2889 2890 if (done) 2891 done(NULL, buf, private); 2892 } else { 2893 uint64_t size = BP_GET_LSIZE(bp); 2894 arc_callback_t *acb; 2895 vdev_t *vd = NULL; 2896 uint64_t addr; 2897 boolean_t devw = B_FALSE; 2898 2899 if (hdr == NULL) { 2900 /* this block is not in the cache */ 2901 arc_buf_hdr_t *exists; 2902 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 2903 buf = arc_buf_alloc(spa, size, private, type); 2904 hdr = buf->b_hdr; 2905 hdr->b_dva = *BP_IDENTITY(bp); 2906 hdr->b_birth = bp->blk_birth; 2907 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2908 exists = buf_hash_insert(hdr, &hash_lock); 2909 if (exists) { 2910 /* somebody beat us to the hash insert */ 2911 mutex_exit(hash_lock); 2912 bzero(&hdr->b_dva, sizeof (dva_t)); 2913 hdr->b_birth = 0; 2914 hdr->b_cksum0 = 0; 2915 (void) arc_buf_remove_ref(buf, private); 2916 goto top; /* restart the IO request */ 2917 } 2918 /* if this is a prefetch, we don't have a reference */ 2919 if (*arc_flags & ARC_PREFETCH) { 2920 (void) remove_reference(hdr, hash_lock, 2921 private); 2922 hdr->b_flags |= ARC_PREFETCH; 2923 } 2924 if (*arc_flags & ARC_L2CACHE) 2925 hdr->b_flags |= ARC_L2CACHE; 2926 if (BP_GET_LEVEL(bp) > 0) 2927 hdr->b_flags |= ARC_INDIRECT; 2928 } else { 2929 /* this block is in the ghost cache */ 2930 ASSERT(GHOST_STATE(hdr->b_state)); 2931 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2932 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 2933 ASSERT(hdr->b_buf == NULL); 2934 2935 /* if this is a prefetch, we don't have a reference */ 2936 if (*arc_flags & ARC_PREFETCH) 2937 hdr->b_flags |= ARC_PREFETCH; 2938 else 2939 add_reference(hdr, hash_lock, private); 2940 if (*arc_flags & ARC_L2CACHE) 2941 hdr->b_flags |= ARC_L2CACHE; 2942 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 2943 buf->b_hdr = hdr; 2944 buf->b_data = NULL; 2945 buf->b_efunc = NULL; 2946 buf->b_private = NULL; 2947 buf->b_next = NULL; 2948 hdr->b_buf = buf; 2949 arc_get_data_buf(buf); 2950 ASSERT(hdr->b_datacnt == 0); 2951 hdr->b_datacnt = 1; 2952 2953 } 2954 2955 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2956 acb->acb_done = done; 2957 acb->acb_private = private; 2958 2959 ASSERT(hdr->b_acb == NULL); 2960 hdr->b_acb = acb; 2961 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2962 2963 /* 2964 * If the buffer has been evicted, migrate it to a present state 2965 * before issuing the I/O. Once we drop the hash-table lock, 2966 * the header will be marked as I/O in progress and have an 2967 * attached buffer. At this point, anybody who finds this 2968 * buffer ought to notice that it's legit but has a pending I/O. 
2969 */ 2970 2971 if (GHOST_STATE(hdr->b_state)) 2972 arc_access(hdr, hash_lock); 2973 2974 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL && 2975 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { 2976 devw = hdr->b_l2hdr->b_dev->l2ad_writing; 2977 addr = hdr->b_l2hdr->b_daddr; 2978 /* 2979 * Lock out device removal. 2980 */ 2981 if (vdev_is_dead(vd) || 2982 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 2983 vd = NULL; 2984 } 2985 2986 mutex_exit(hash_lock); 2987 2988 ASSERT3U(hdr->b_size, ==, size); 2989 DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 2990 zbookmark_t *, zb); 2991 ARCSTAT_BUMP(arcstat_misses); 2992 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2993 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2994 data, metadata, misses); 2995 2996 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 2997 /* 2998 * Read from the L2ARC if the following are true: 2999 * 1. The L2ARC vdev was previously cached. 3000 * 2. This buffer still has L2ARC metadata. 3001 * 3. This buffer isn't currently writing to the L2ARC. 3002 * 4. The L2ARC entry wasn't evicted, which may 3003 * also have invalidated the vdev. 3004 * 5. This isn't prefetch and l2arc_noprefetch is set. 3005 */ 3006 if (hdr->b_l2hdr != NULL && 3007 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 3008 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 3009 l2arc_read_callback_t *cb; 3010 3011 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 3012 ARCSTAT_BUMP(arcstat_l2_hits); 3013 3014 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 3015 KM_SLEEP); 3016 cb->l2rcb_buf = buf; 3017 cb->l2rcb_spa = spa; 3018 cb->l2rcb_bp = *bp; 3019 cb->l2rcb_zb = *zb; 3020 cb->l2rcb_flags = zio_flags; 3021 3022 /* 3023 * l2arc read. The SCL_L2ARC lock will be 3024 * released by l2arc_read_done(). 3025 */ 3026 rzio = zio_read_phys(pio, vd, addr, size, 3027 buf->b_data, ZIO_CHECKSUM_OFF, 3028 l2arc_read_done, cb, priority, zio_flags | 3029 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | 3030 ZIO_FLAG_DONT_PROPAGATE | 3031 ZIO_FLAG_DONT_RETRY, B_FALSE); 3032 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 3033 zio_t *, rzio); 3034 ARCSTAT_INCR(arcstat_l2_read_bytes, size); 3035 3036 if (*arc_flags & ARC_NOWAIT) { 3037 zio_nowait(rzio); 3038 return (0); 3039 } 3040 3041 ASSERT(*arc_flags & ARC_WAIT); 3042 if (zio_wait(rzio) == 0) 3043 return (0); 3044 3045 /* l2arc read error; goto zio_read() */ 3046 } else { 3047 DTRACE_PROBE1(l2arc__miss, 3048 arc_buf_hdr_t *, hdr); 3049 ARCSTAT_BUMP(arcstat_l2_misses); 3050 if (HDR_L2_WRITING(hdr)) 3051 ARCSTAT_BUMP(arcstat_l2_rw_clash); 3052 spa_config_exit(spa, SCL_L2ARC, vd); 3053 } 3054 } else { 3055 if (vd != NULL) 3056 spa_config_exit(spa, SCL_L2ARC, vd); 3057 if (l2arc_ndev != 0) { 3058 DTRACE_PROBE1(l2arc__miss, 3059 arc_buf_hdr_t *, hdr); 3060 ARCSTAT_BUMP(arcstat_l2_misses); 3061 } 3062 } 3063 3064 rzio = zio_read(pio, spa, bp, buf->b_data, size, 3065 arc_read_done, buf, priority, zio_flags, zb); 3066 3067 if (*arc_flags & ARC_WAIT) 3068 return (zio_wait(rzio)); 3069 3070 ASSERT(*arc_flags & ARC_NOWAIT); 3071 zio_nowait(rzio); 3072 } 3073 return (0); 3074} 3075 3076/* 3077 * arc_read() variant to support pool traversal. If the block is already 3078 * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 3079 * The idea is that we don't want pool traversal filling up memory, but 3080 * if the ARC already has the data anyway, we shouldn't pay for the I/O. 
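 * Returns 0 and copies the data into the caller's buffer on a hit;
 * returns ENOENT if the block is not resident, leaving the actual
 * read to the caller.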
3081 */ 3082int 3083arc_tryread(spa_t *spa, blkptr_t *bp, void *data) 3084{ 3085 arc_buf_hdr_t *hdr; 3086 kmutex_t *hash_mtx; 3087 int rc = 0; 3088 3089 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 3090 3091 if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { 3092 arc_buf_t *buf = hdr->b_buf; 3093 3094 ASSERT(buf); 3095 while (buf->b_data == NULL) { 3096 buf = buf->b_next; 3097 ASSERT(buf); 3098 } 3099 bcopy(buf->b_data, data, hdr->b_size); 3100 } else { 3101 rc = ENOENT; 3102 } 3103 3104 if (hash_mtx) 3105 mutex_exit(hash_mtx); 3106 3107 return (rc); 3108} 3109 3110void 3111arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 3112{ 3113 ASSERT(buf->b_hdr != NULL); 3114 ASSERT(buf->b_hdr->b_state != arc_anon); 3115 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 3116 buf->b_efunc = func; 3117 buf->b_private = private; 3118} 3119 3120/* 3121 * This is used by the DMU to let the ARC know that a buffer is 3122 * being evicted, so the ARC should clean up. If this arc buf 3123 * is not yet in the evicted state, it will be put there. 3124 */ 3125int 3126arc_buf_evict(arc_buf_t *buf) 3127{ 3128 arc_buf_hdr_t *hdr; 3129 kmutex_t *hash_lock; 3130 arc_buf_t **bufp; 3131 list_t *list, *evicted_list; 3132 kmutex_t *lock, *evicted_lock; 3133 3134 rw_enter(&buf->b_lock, RW_WRITER); 3135 hdr = buf->b_hdr; 3136 if (hdr == NULL) { 3137 /* 3138 * We are in arc_do_user_evicts(). 3139 */ 3140 ASSERT(buf->b_data == NULL); 3141 rw_exit(&buf->b_lock); 3142 return (0); 3143 } else if (buf->b_data == NULL) { 3144 arc_buf_t copy = *buf; /* structure assignment */ 3145 /* 3146 * We are on the eviction list; process this buffer now 3147 * but let arc_do_user_evicts() do the reaping. 3148 */ 3149 buf->b_efunc = NULL; 3150 rw_exit(&buf->b_lock); 3151 VERIFY(copy.b_efunc(©) == 0); 3152 return (1); 3153 } 3154 hash_lock = HDR_LOCK(hdr); 3155 mutex_enter(hash_lock); 3156 3157 ASSERT(buf->b_hdr == hdr); 3158 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 3159 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 3160 3161 /* 3162 * Pull this buffer off of the hdr 3163 */ 3164 bufp = &hdr->b_buf; 3165 while (*bufp != buf) 3166 bufp = &(*bufp)->b_next; 3167 *bufp = buf->b_next; 3168 3169 ASSERT(buf->b_data != NULL); 3170 arc_buf_destroy(buf, FALSE, FALSE); 3171 3172 if (hdr->b_datacnt == 0) { 3173 arc_state_t *old_state = hdr->b_state; 3174 arc_state_t *evicted_state; 3175 3176 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 3177 3178 evicted_state = 3179 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 3180 3181 get_buf_info(hdr, old_state, &list, &lock); 3182 get_buf_info(hdr, evicted_state, &evicted_list, &evicted_lock); 3183 mutex_enter(lock); 3184 mutex_enter(evicted_lock); 3185 3186 arc_change_state(evicted_state, hdr, hash_lock); 3187 ASSERT(HDR_IN_HASH_TABLE(hdr)); 3188 hdr->b_flags |= ARC_IN_HASH_TABLE; 3189 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 3190 3191 mutex_exit(evicted_lock); 3192 mutex_exit(lock); 3193 } 3194 mutex_exit(hash_lock); 3195 rw_exit(&buf->b_lock); 3196 3197 VERIFY(buf->b_efunc(buf) == 0); 3198 buf->b_efunc = NULL; 3199 buf->b_private = NULL; 3200 buf->b_hdr = NULL; 3201 kmem_cache_free(buf_cache, buf); 3202 return (1); 3203} 3204 3205/* 3206 * Release this buffer from the cache. This must be done 3207 * after a read and prior to modifying the buffer contents. 3208 * If the buffer has more than one reference, we must make 3209 * a new hdr for the buffer. 
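 * The released buf is moved to a fresh anonymous hdr, while the
 * remaining bufs (and the cached identity) stay with the old hdr.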
3210 */ 3211void 3212arc_release(arc_buf_t *buf, void *tag) 3213{ 3214 arc_buf_hdr_t *hdr; 3215 kmutex_t *hash_lock; 3216 l2arc_buf_hdr_t *l2hdr; 3217 uint64_t buf_size; 3218 boolean_t released = B_FALSE; 3219 3220 rw_enter(&buf->b_lock, RW_WRITER); 3221 hdr = buf->b_hdr; 3222 3223 /* this buffer is not on any list */ 3224 ASSERT(refcount_count(&hdr->b_refcnt) > 0); 3225 ASSERT(!(hdr->b_flags & ARC_STORED)); 3226 3227 if (hdr->b_state == arc_anon) { 3228 /* this buffer is already released */ 3229 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); 3230 ASSERT(BUF_EMPTY(hdr)); 3231 ASSERT(buf->b_efunc == NULL); 3232 arc_buf_thaw(buf); 3233 rw_exit(&buf->b_lock); 3234 released = B_TRUE; 3235 } else { 3236 hash_lock = HDR_LOCK(hdr); 3237 mutex_enter(hash_lock); 3238 } 3239 3240 l2hdr = hdr->b_l2hdr; 3241 if (l2hdr) { 3242 mutex_enter(&l2arc_buflist_mtx); 3243 hdr->b_l2hdr = NULL; 3244 buf_size = hdr->b_size; 3245 } 3246 3247 if (released) 3248 goto out; 3249 3250 /* 3251 * Do we have more than one buf? 3252 */ 3253 if (hdr->b_datacnt > 1) { 3254 arc_buf_hdr_t *nhdr; 3255 arc_buf_t **bufp; 3256 uint64_t blksz = hdr->b_size; 3257 spa_t *spa = hdr->b_spa; 3258 arc_buf_contents_t type = hdr->b_type; 3259 uint32_t flags = hdr->b_flags; 3260 3261 ASSERT(hdr->b_buf != buf || buf->b_next != NULL); 3262 /* 3263 * Pull the data off of this buf and attach it to 3264 * a new anonymous buf. 3265 */ 3266 (void) remove_reference(hdr, hash_lock, tag); 3267 bufp = &hdr->b_buf; 3268 while (*bufp != buf) 3269 bufp = &(*bufp)->b_next; 3270 *bufp = (*bufp)->b_next; 3271 buf->b_next = NULL; 3272 3273 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 3274 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 3275 if (refcount_is_zero(&hdr->b_refcnt)) { 3276 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 3277 ASSERT3U(*size, >=, hdr->b_size); 3278 atomic_add_64(size, -hdr->b_size); 3279 } 3280 hdr->b_datacnt -= 1; 3281 arc_cksum_verify(buf); 3282 3283 mutex_exit(hash_lock); 3284 3285 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 3286 nhdr->b_size = blksz; 3287 nhdr->b_spa = spa; 3288 nhdr->b_type = type; 3289 nhdr->b_buf = buf; 3290 nhdr->b_state = arc_anon; 3291 nhdr->b_arc_access = 0; 3292 nhdr->b_flags = flags & ARC_L2_WRITING; 3293 nhdr->b_l2hdr = NULL; 3294 nhdr->b_datacnt = 1; 3295 nhdr->b_freeze_cksum = NULL; 3296 (void) refcount_add(&nhdr->b_refcnt, tag); 3297 buf->b_hdr = nhdr; 3298 rw_exit(&buf->b_lock); 3299 atomic_add_64(&arc_anon->arcs_size, blksz); 3300 } else { 3301 rw_exit(&buf->b_lock); 3302 ASSERT(refcount_count(&hdr->b_refcnt) == 1); 3303 ASSERT(!list_link_active(&hdr->b_arc_node)); 3304 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3305 arc_change_state(arc_anon, hdr, hash_lock); 3306 hdr->b_arc_access = 0; 3307 mutex_exit(hash_lock); 3308 3309 bzero(&hdr->b_dva, sizeof (dva_t)); 3310 hdr->b_birth = 0; 3311 hdr->b_cksum0 = 0; 3312 arc_buf_thaw(buf); 3313 } 3314 buf->b_efunc = NULL; 3315 buf->b_private = NULL; 3316 3317out: 3318 if (l2hdr) { 3319 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 3320 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 3321 ARCSTAT_INCR(arcstat_l2_size, -buf_size); 3322 mutex_exit(&l2arc_buflist_mtx); 3323 } 3324} 3325 3326int 3327arc_released(arc_buf_t *buf) 3328{ 3329 int released; 3330 3331 rw_enter(&buf->b_lock, RW_READER); 3332 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 3333 rw_exit(&buf->b_lock); 3334 return (released); 3335} 3336 3337int 3338arc_has_callback(arc_buf_t *buf) 3339{ 3340 int callback; 3341 3342 rw_enter(&buf->b_lock, RW_READER); 
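	/* a non-NULL b_efunc means the DMU registered an eviction callback */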
3343 callback = (buf->b_efunc != NULL); 3344 rw_exit(&buf->b_lock); 3345 return (callback); 3346} 3347 3348#ifdef ZFS_DEBUG 3349int 3350arc_referenced(arc_buf_t *buf) 3351{ 3352 int referenced; 3353 3354 rw_enter(&buf->b_lock, RW_READER); 3355 referenced = (refcount_count(&buf->b_hdr->b_refcnt)); 3356 rw_exit(&buf->b_lock); 3357 return (referenced); 3358} 3359#endif 3360 3361static void 3362arc_write_ready(zio_t *zio) 3363{ 3364 arc_write_callback_t *callback = zio->io_private; 3365 arc_buf_t *buf = callback->awcb_buf; 3366 arc_buf_hdr_t *hdr = buf->b_hdr; 3367 3368 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 3369 callback->awcb_ready(zio, buf, callback->awcb_private); 3370 3371 /* 3372 * If the IO is already in progress, then this is a re-write 3373 * attempt, so we need to thaw and re-compute the cksum. 3374 * It is the responsibility of the callback to handle the 3375 * accounting for any re-write attempt. 3376 */ 3377 if (HDR_IO_IN_PROGRESS(hdr)) { 3378 mutex_enter(&hdr->b_freeze_lock); 3379 if (hdr->b_freeze_cksum != NULL) { 3380 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 3381 hdr->b_freeze_cksum = NULL; 3382 } 3383 mutex_exit(&hdr->b_freeze_lock); 3384 } 3385 arc_cksum_compute(buf, B_FALSE); 3386 hdr->b_flags |= ARC_IO_IN_PROGRESS; 3387} 3388 3389static void 3390arc_write_done(zio_t *zio) 3391{ 3392 arc_write_callback_t *callback = zio->io_private; 3393 arc_buf_t *buf = callback->awcb_buf; 3394 arc_buf_hdr_t *hdr = buf->b_hdr; 3395 3396 hdr->b_acb = NULL; 3397 3398 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 3399 hdr->b_birth = zio->io_bp->blk_birth; 3400 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 3401 /* 3402 * If the block to be written was all-zero, we may have 3403 * compressed it away. In this case no write was performed 3404 * so there will be no dva/birth-date/checksum. The buffer 3405 * must therefor remain anonymous (and uncached). 3406 */ 3407 if (!BUF_EMPTY(hdr)) { 3408 arc_buf_hdr_t *exists; 3409 kmutex_t *hash_lock; 3410 3411 arc_cksum_verify(buf); 3412 3413 exists = buf_hash_insert(hdr, &hash_lock); 3414 if (exists) { 3415 /* 3416 * This can only happen if we overwrite for 3417 * sync-to-convergence, because we remove 3418 * buffers from the hash table when we arc_free(). 3419 */ 3420 ASSERT(zio->io_flags & ZIO_FLAG_IO_REWRITE); 3421 ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), 3422 BP_IDENTITY(zio->io_bp))); 3423 ASSERT3U(zio->io_bp_orig.blk_birth, ==, 3424 zio->io_bp->blk_birth); 3425 3426 ASSERT(refcount_is_zero(&exists->b_refcnt)); 3427 arc_change_state(arc_anon, exists, hash_lock); 3428 mutex_exit(hash_lock); 3429 arc_hdr_destroy(exists); 3430 exists = buf_hash_insert(hdr, &hash_lock); 3431 ASSERT3P(exists, ==, NULL); 3432 } 3433 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3434 /* if it's not anon, we are doing a scrub */ 3435 if (hdr->b_state == arc_anon) 3436 arc_access(hdr, hash_lock); 3437 mutex_exit(hash_lock); 3438 } else if (callback->awcb_done == NULL) { 3439 int destroy_hdr; 3440 /* 3441 * This is an anonymous buffer with no user callback, 3442 * destroy it if there are no active references. 
3443 */ 3444 mutex_enter(&arc_eviction_mtx); 3445 destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 3446 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3447 mutex_exit(&arc_eviction_mtx); 3448 if (destroy_hdr) 3449 arc_hdr_destroy(hdr); 3450 } else { 3451 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3452 } 3453 hdr->b_flags &= ~ARC_STORED; 3454 3455 if (callback->awcb_done) { 3456 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3457 callback->awcb_done(zio, buf, callback->awcb_private); 3458 } 3459 3460 kmem_free(callback, sizeof (arc_write_callback_t)); 3461} 3462 3463static void 3464write_policy(spa_t *spa, const writeprops_t *wp, zio_prop_t *zp) 3465{ 3466 boolean_t ismd = (wp->wp_level > 0 || dmu_ot[wp->wp_type].ot_metadata); 3467 3468 /* Determine checksum setting */ 3469 if (ismd) { 3470 /* 3471 * Metadata always gets checksummed. If the data 3472 * checksum is multi-bit correctable, and it's not a 3473 * ZBT-style checksum, then it's suitable for metadata 3474 * as well. Otherwise, the metadata checksum defaults 3475 * to fletcher4. 3476 */ 3477 if (zio_checksum_table[wp->wp_oschecksum].ci_correctable && 3478 !zio_checksum_table[wp->wp_oschecksum].ci_zbt) 3479 zp->zp_checksum = wp->wp_oschecksum; 3480 else 3481 zp->zp_checksum = ZIO_CHECKSUM_FLETCHER_4; 3482 } else { 3483 zp->zp_checksum = zio_checksum_select(wp->wp_dnchecksum, 3484 wp->wp_oschecksum); 3485 } 3486 3487 /* Determine compression setting */ 3488 if (ismd) { 3489 /* 3490 * XXX -- we should design a compression algorithm 3491 * that specializes in arrays of bps. 3492 */ 3493 zp->zp_compress = zfs_mdcomp_disable ? ZIO_COMPRESS_EMPTY : 3494 ZIO_COMPRESS_LZJB; 3495 } else { 3496 zp->zp_compress = zio_compress_select(wp->wp_dncompress, 3497 wp->wp_oscompress); 3498 } 3499 3500 zp->zp_type = wp->wp_type; 3501 zp->zp_level = wp->wp_level; 3502 zp->zp_ndvas = MIN(wp->wp_copies + ismd, spa_max_replication(spa)); 3503} 3504 3505zio_t * 3506arc_write(zio_t *pio, spa_t *spa, const writeprops_t *wp, 3507 boolean_t l2arc, uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 3508 arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, 3509 int zio_flags, const zbookmark_t *zb) 3510{ 3511 arc_buf_hdr_t *hdr = buf->b_hdr; 3512 arc_write_callback_t *callback; 3513 zio_t *zio; 3514 zio_prop_t zp; 3515 3516 ASSERT(ready != NULL); 3517 ASSERT(!HDR_IO_ERROR(hdr)); 3518 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 3519 ASSERT(hdr->b_acb == 0); 3520 if (l2arc) 3521 hdr->b_flags |= ARC_L2CACHE; 3522 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3523 callback->awcb_ready = ready; 3524 callback->awcb_done = done; 3525 callback->awcb_private = private; 3526 callback->awcb_buf = buf; 3527 3528 write_policy(spa, wp, &zp); 3529 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, &zp, 3530 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb); 3531 3532 return (zio); 3533} 3534 3535int 3536arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 3537 zio_done_func_t *done, void *private, uint32_t arc_flags) 3538{ 3539 arc_buf_hdr_t *ab; 3540 kmutex_t *hash_lock; 3541 zio_t *zio; 3542 3543 /* 3544 * If this buffer is in the cache, release it, so it 3545 * can be re-used. 3546 */ 3547 ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 3548 if (ab != NULL) { 3549 /* 3550 * The checksum of blocks to free is not always 3551 * preserved (eg. on the deadlist). However, if it is 3552 * nonzero, it should match what we have in the cache. 
3553 */ 3554 ASSERT(bp->blk_cksum.zc_word[0] == 0 || 3555 bp->blk_cksum.zc_word[0] == ab->b_cksum0 || 3556 bp->blk_fill == BLK_FILL_ALREADY_FREED); 3557 3558 if (ab->b_state != arc_anon) 3559 arc_change_state(arc_anon, ab, hash_lock); 3560 if (HDR_IO_IN_PROGRESS(ab)) { 3561 /* 3562 * This should only happen when we prefetch. 3563 */ 3564 ASSERT(ab->b_flags & ARC_PREFETCH); 3565 ASSERT3U(ab->b_datacnt, ==, 1); 3566 ab->b_flags |= ARC_FREED_IN_READ; 3567 if (HDR_IN_HASH_TABLE(ab)) 3568 buf_hash_remove(ab); 3569 ab->b_arc_access = 0; 3570 bzero(&ab->b_dva, sizeof (dva_t)); 3571 ab->b_birth = 0; 3572 ab->b_cksum0 = 0; 3573 ab->b_buf->b_efunc = NULL; 3574 ab->b_buf->b_private = NULL; 3575 mutex_exit(hash_lock); 3576 } else if (refcount_is_zero(&ab->b_refcnt)) { 3577 ab->b_flags |= ARC_FREE_IN_PROGRESS; 3578 mutex_exit(hash_lock); 3579 arc_hdr_destroy(ab); 3580 ARCSTAT_BUMP(arcstat_deleted); 3581 } else { 3582 /* 3583 * We still have an active reference on this 3584 * buffer. This can happen, e.g., from 3585 * dbuf_unoverride(). 3586 */ 3587 ASSERT(!HDR_IN_HASH_TABLE(ab)); 3588 ab->b_arc_access = 0; 3589 bzero(&ab->b_dva, sizeof (dva_t)); 3590 ab->b_birth = 0; 3591 ab->b_cksum0 = 0; 3592 ab->b_buf->b_efunc = NULL; 3593 ab->b_buf->b_private = NULL; 3594 mutex_exit(hash_lock); 3595 } 3596 } 3597 3598 zio = zio_free(pio, spa, txg, bp, done, private, ZIO_FLAG_MUSTSUCCEED); 3599 3600 if (arc_flags & ARC_WAIT) 3601 return (zio_wait(zio)); 3602 3603 ASSERT(arc_flags & ARC_NOWAIT); 3604 zio_nowait(zio); 3605 3606 return (0); 3607} 3608 3609static int 3610arc_memory_throttle(uint64_t reserve, uint64_t txg) 3611{ 3612#ifdef _KERNEL 3613 uint64_t inflight_data = arc_anon->arcs_size; 3614 uint64_t available_memory = ptoa((uintmax_t)cnt.v_free_count); 3615 static uint64_t page_load = 0; 3616 static uint64_t last_txg = 0; 3617 3618#if 0 3619#if defined(__i386) 3620 available_memory = 3621 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 3622#endif 3623#endif 3624 if (available_memory >= zfs_write_limit_max) 3625 return (0); 3626 3627 if (txg > last_txg) { 3628 last_txg = txg; 3629 page_load = 0; 3630 } 3631 /* 3632 * If we are in pageout, we know that memory is already tight, 3633 * the arc is already going to be evicting, so we just want to 3634 * continue to let page writes occur as quickly as possible. 
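 * We still push back (ERESTART) once the page_load accumulated for
 * this txg exceeds a quarter of the available memory.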
3635 */ 3636 if (curproc == pageproc) { 3637 if (page_load > available_memory / 4) 3638 return (ERESTART); 3639 /* Note: reserve is inflated, so we deflate */ 3640 page_load += reserve / 8; 3641 return (0); 3642 } else if (page_load > 0 && arc_reclaim_needed()) { 3643 /* memory is low, delay before restarting */ 3644 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3645 return (EAGAIN); 3646 } 3647 page_load = 0; 3648 3649 if (arc_size > arc_c_min) { 3650 uint64_t evictable_memory = 3651 arc_mru->arcs_lsize[ARC_BUFC_DATA] + 3652 arc_mru->arcs_lsize[ARC_BUFC_METADATA] + 3653 arc_mfu->arcs_lsize[ARC_BUFC_DATA] + 3654 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; 3655 available_memory += MIN(evictable_memory, arc_size - arc_c_min); 3656 } 3657 3658 if (inflight_data > available_memory / 4) { 3659 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3660 return (ERESTART); 3661 } 3662#endif 3663 return (0); 3664} 3665 3666void 3667arc_tempreserve_clear(uint64_t reserve) 3668{ 3669 atomic_add_64(&arc_tempreserve, -reserve); 3670 ASSERT((int64_t)arc_tempreserve >= 0); 3671} 3672 3673int 3674arc_tempreserve_space(uint64_t reserve, uint64_t txg) 3675{ 3676 int error; 3677 3678#ifdef ZFS_DEBUG 3679 /* 3680 * Once in a while, fail for no reason. Everything should cope. 3681 */ 3682 if (spa_get_random(10000) == 0) { 3683 dprintf("forcing random failure\n"); 3684 return (ERESTART); 3685 } 3686#endif 3687 if (reserve > arc_c/4 && !arc_no_grow) 3688 arc_c = MIN(arc_c_max, reserve * 4); 3689 if (reserve > arc_c) 3690 return (ENOMEM); 3691 3692 /* 3693 * Writes will, almost always, require additional memory allocations 3694 * in order to compress/encrypt/etc the data. We therefor need to 3695 * make sure that there is sufficient available memory for this. 3696 */ 3697 if (error = arc_memory_throttle(reserve, txg)) 3698 return (error); 3699 3700 /* 3701 * Throttle writes when the amount of dirty data in the cache 3702 * gets too large. We try to keep the cache less than half full 3703 * of dirty blocks so that our sync times don't grow too large. 3704 * Note: if two requests come in concurrently, we might let them 3705 * both succeed, when one of them should fail. Not a huge deal. 3706 */ 3707 if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && 3708 arc_anon->arcs_size > arc_c / 4) { 3709 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3710 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3711 arc_tempreserve>>10, 3712 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3713 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3714 reserve>>10, arc_c>>10); 3715 return (ERESTART); 3716 } 3717 atomic_add_64(&arc_tempreserve, reserve); 3718 return (0); 3719} 3720 3721static kmutex_t arc_lowmem_lock; 3722#ifdef _KERNEL 3723static eventhandler_tag arc_event_lowmem = NULL; 3724 3725static void 3726arc_lowmem(void *arg __unused, int howto __unused) 3727{ 3728 3729 /* Serialize access via arc_lowmem_lock. 
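 * Wake the reclaim thread and block here until it clears needfree.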
*/ 3730 mutex_enter(&arc_lowmem_lock); 3731 needfree = 1; 3732 cv_signal(&arc_reclaim_thr_cv); 3733 while (needfree) 3734 tsleep(&needfree, 0, "zfs:lowmem", hz / 5); 3735 mutex_exit(&arc_lowmem_lock); 3736} 3737#endif 3738 3739void 3740arc_init(void) 3741{ 3742 int prefetch_tunable_set = 0; 3743 int i; 3744 3745 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3746 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3747 mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL); 3748 3749 /* Convert seconds to clock ticks */ 3750 arc_min_prefetch_lifespan = 1 * hz; 3751 3752 /* Start out with 1/8 of all memory */ 3753 arc_c = kmem_size() / 8; 3754#if 0 3755#ifdef _KERNEL 3756 /* 3757 * On architectures where the physical memory can be larger 3758 * than the addressable space (intel in 32-bit mode), we may 3759 * need to limit the cache to 1/8 of VM size. 3760 */ 3761 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3762#endif 3763#endif 3764 /* set min cache to 1/32 of all memory, or 16MB, whichever is more */ 3765 arc_c_min = MAX(arc_c / 4, 64<<18); 3766 /* set max to 5/8 of all memory, or all but 1GB, whichever is more */ 3767 if (arc_c * 8 >= 1<<30) 3768 arc_c_max = (arc_c * 8) - (1<<30); 3769 else 3770 arc_c_max = arc_c_min; 3771 arc_c_max = MAX(arc_c * 5, arc_c_max); 3772#ifdef _KERNEL 3773 /* 3774 * Allow the tunables to override our calculations if they are 3775 * reasonable (i.e. over 16MB) 3776 */ 3777 if (zfs_arc_max >= 64<<18 && zfs_arc_max < kmem_size()) 3778 arc_c_max = zfs_arc_max; 3779 if (zfs_arc_min >= 64<<18 && zfs_arc_min <= arc_c_max) 3780 arc_c_min = zfs_arc_min; 3781#endif 3782 arc_c = arc_c_max; 3783 arc_p = (arc_c >> 1); 3784 3785 /* limit meta-data to 1/4 of the arc capacity */ 3786 arc_meta_limit = arc_c_max / 4; 3787 3788 /* Allow the tunable to override if it is reasonable */ 3789 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 3790 arc_meta_limit = zfs_arc_meta_limit; 3791 3792 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 3793 arc_c_min = arc_meta_limit / 2; 3794 3795 if (zfs_arc_grow_retry > 0) 3796 arc_grow_retry = zfs_arc_grow_retry; 3797 3798 if (zfs_arc_shrink_shift > 0) 3799 arc_shrink_shift = zfs_arc_shrink_shift; 3800 3801 if (zfs_arc_p_min_shift > 0) 3802 arc_p_min_shift = zfs_arc_p_min_shift; 3803 3804 /* if kmem_flags are set, let's try to use less memory */ 3805 if (kmem_debugging()) 3806 arc_c = arc_c / 2; 3807 if (arc_c < arc_c_min) 3808 arc_c = arc_c_min; 3809 3810 zfs_arc_min = arc_c_min; 3811 zfs_arc_max = arc_c_max; 3812 3813 arc_anon = &ARC_anon; 3814 arc_mru = &ARC_mru; 3815 arc_mru_ghost = &ARC_mru_ghost; 3816 arc_mfu = &ARC_mfu; 3817 arc_mfu_ghost = &ARC_mfu_ghost; 3818 arc_l2c_only = &ARC_l2c_only; 3819 arc_size = 0; 3820 3821 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) { 3822 mutex_init(&arc_anon->arcs_locks[i].arcs_lock, 3823 NULL, MUTEX_DEFAULT, NULL); 3824 mutex_init(&arc_mru->arcs_locks[i].arcs_lock, 3825 NULL, MUTEX_DEFAULT, NULL); 3826 mutex_init(&arc_mru_ghost->arcs_locks[i].arcs_lock, 3827 NULL, MUTEX_DEFAULT, NULL); 3828 mutex_init(&arc_mfu->arcs_locks[i].arcs_lock, 3829 NULL, MUTEX_DEFAULT, NULL); 3830 mutex_init(&arc_mfu_ghost->arcs_locks[i].arcs_lock, 3831 NULL, MUTEX_DEFAULT, NULL); 3832 mutex_init(&arc_l2c_only->arcs_locks[i].arcs_lock, 3833 NULL, MUTEX_DEFAULT, NULL); 3834 3835 list_create(&arc_mru->arcs_lists[i], 3836 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3837 list_create(&arc_mru_ghost->arcs_lists[i], 3838 sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_arc_node)); 3839 list_create(&arc_mfu->arcs_lists[i], 3840 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3841 list_create(&arc_mfu_ghost->arcs_lists[i], 3842 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3845 list_create(&arc_l2c_only->arcs_lists[i], 3846 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3847 } 3848 3849 buf_init(); 3850 3851 arc_thread_exit = 0; 3852 arc_eviction_list = NULL; 3853 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 3854 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3855 3856 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 3857 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 3858 3859 if (arc_ksp != NULL) { 3860 arc_ksp->ks_data = &arc_stats; 3861 kstat_install(arc_ksp); 3862 } 3863 3864 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3865 TS_RUN, minclsyspri); 3866 3867#ifdef _KERNEL 3868 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL, 3869 EVENTHANDLER_PRI_FIRST); 3870#endif 3871 3872 arc_dead = FALSE; 3873 arc_warm = B_FALSE; 3874 3875 if (zfs_write_limit_max == 0) 3876 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift; 3877 else 3878 zfs_write_limit_shift = 0; 3879 mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL); 3880 3881#ifdef _KERNEL 3882 if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable)) 3883 prefetch_tunable_set = 1; 3884 3885#ifdef __i386__ 3886 if (prefetch_tunable_set == 0) { 3887 printf("ZFS NOTICE: Prefetch is disabled by default on i386 " 3888 "-- to enable,\n"); 3889 printf(" add \"vfs.zfs.prefetch_disable=0\" " 3890 "to /boot/loader.conf.\n"); 3891 zfs_prefetch_disable=1; 3892 } 3893#else 3894 if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) && 3895 prefetch_tunable_set == 0) { 3896 printf("ZFS NOTICE: Prefetch is disabled by default if less " 3897 "than 4GB of RAM is present;\n" 3898 " to enable, add \"vfs.zfs.prefetch_disable=0\" " 3899 "to /boot/loader.conf.\n"); 3900 zfs_prefetch_disable=1; 3901 } 3902#endif 3903 /* Warn about ZFS memory and address space requirements.
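 *
 * The messages below refer to tunables set in /boot/loader.conf.  As
 * an illustration only (the names are the ones printed below, the
 * values are an example and not a recommendation):
 *
 *	vm.kmem_size="1024M"
 *	vm.kmem_size_max="1024M"
 *	vfs.zfs.prefetch_disable="0"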
*/ 3904 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) { 3905 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; " 3906 "expect unstable behavior.\n"); 3907 } 3908 if (kmem_size() < 512 * (1 << 20)) { 3909 printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; " 3910 "expect unstable behavior.\n"); 3911 printf(" Consider tuning vm.kmem_size and " 3912 "vm.kmem_size_max\n"); 3913 printf(" in /boot/loader.conf.\n"); 3914 } 3915#endif 3916} 3917 3918void 3919arc_fini(void) 3920{ 3921 int i; 3922 3923 mutex_enter(&arc_reclaim_thr_lock); 3924 arc_thread_exit = 1; 3925 cv_signal(&arc_reclaim_thr_cv); 3926 while (arc_thread_exit != 0) 3927 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3928 mutex_exit(&arc_reclaim_thr_lock); 3929 3930 arc_flush(NULL); 3931 3932 arc_dead = TRUE; 3933 3934 if (arc_ksp != NULL) { 3935 kstat_delete(arc_ksp); 3936 arc_ksp = NULL; 3937 } 3938 3939 mutex_destroy(&arc_eviction_mtx); 3940 mutex_destroy(&arc_reclaim_thr_lock); 3941 cv_destroy(&arc_reclaim_thr_cv); 3942 3943 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) { 3944 list_destroy(&arc_mru->arcs_lists[i]); 3945 list_destroy(&arc_mru_ghost->arcs_lists[i]); 3946 list_destroy(&arc_mfu->arcs_lists[i]); 3947 list_destroy(&arc_mfu_ghost->arcs_lists[i]); 3948 list_destroy(&arc_l2c_only->arcs_lists[i]); 3949 3950 mutex_destroy(&arc_anon->arcs_locks[i].arcs_lock); 3951 mutex_destroy(&arc_mru->arcs_locks[i].arcs_lock); 3952 mutex_destroy(&arc_mru_ghost->arcs_locks[i].arcs_lock); 3953 mutex_destroy(&arc_mfu->arcs_locks[i].arcs_lock); 3954 mutex_destroy(&arc_mfu_ghost->arcs_locks[i].arcs_lock); 3955 mutex_destroy(&arc_l2c_only->arcs_locks[i].arcs_lock); 3956 } 3957 3958 mutex_destroy(&zfs_write_limit_lock); 3959 3960 buf_fini(); 3961 3962 mutex_destroy(&arc_lowmem_lock); 3963#ifdef _KERNEL 3964 if (arc_event_lowmem != NULL) 3965 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem); 3966#endif 3967} 3968 3969/* 3970 * Level 2 ARC 3971 * 3972 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 3973 * It uses dedicated storage devices to hold cached data, which are populated 3974 * using large infrequent writes. The main role of this cache is to boost 3975 * the performance of random read workloads. The intended L2ARC devices 3976 * include short-stroked disks, solid state disks, and other media with 3977 * substantially faster read latency than disk. 3978 * 3979 * +-----------------------+ 3980 * | ARC | 3981 * +-----------------------+ 3982 * | ^ ^ 3983 * | | | 3984 * l2arc_feed_thread() arc_read() 3985 * | | | 3986 * | l2arc read | 3987 * V | | 3988 * +---------------+ | 3989 * | L2ARC | | 3990 * +---------------+ | 3991 * | ^ | 3992 * l2arc_write() | | 3993 * | | | 3994 * V | | 3995 * +-------+ +-------+ 3996 * | vdev | | vdev | 3997 * | cache | | cache | 3998 * +-------+ +-------+ 3999 * +=========+ .-----. 4000 * : L2ARC : |-_____-| 4001 * : devices : | Disks | 4002 * +=========+ `-_____-' 4003 * 4004 * Read requests are satisfied from the following sources, in order: 4005 * 4006 * 1) ARC 4007 * 2) vdev cache of L2ARC devices 4008 * 3) L2ARC devices 4009 * 4) vdev cache of disks 4010 * 5) disks 4011 * 4012 * Some L2ARC device types exhibit extremely slow write performance. 4013 * To accommodate for this there are some significant differences between 4014 * the L2ARC and traditional cache design: 4015 * 4016 * 1. There is no eviction path from the ARC to the L2ARC. 
Evictions from 4017 * the ARC behave as usual, freeing buffers and placing headers on ghost 4018 * lists. The ARC does not send buffers to the L2ARC during eviction as 4019 * this would add inflated write latencies for all ARC memory pressure. 4020 * 4021 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 4022 * It does this by periodically scanning buffers from the eviction-end of 4023 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 4024 * not already there. It scans until a headroom of buffers is satisfied, 4025 * which itself is a buffer for ARC eviction. The thread that does this is 4026 * l2arc_feed_thread(), illustrated below; example sizes are included to 4027 * provide a better sense of ratio than this diagram: 4028 * 4029 * head --> tail 4030 * +---------------------+----------+ 4031 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 4032 * +---------------------+----------+ | o L2ARC eligible 4033 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 4034 * +---------------------+----------+ | 4035 * 15.9 Gbytes ^ 32 Mbytes | 4036 * headroom | 4037 * l2arc_feed_thread() 4038 * | 4039 * l2arc write hand <--[oooo]--' 4040 * | 8 Mbyte 4041 * | write max 4042 * V 4043 * +==============================+ 4044 * L2ARC dev |####|#|###|###| |####| ... | 4045 * +==============================+ 4046 * 32 Gbytes 4047 * 4048 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 4049 * evicted, then the L2ARC has cached a buffer much sooner than it probably 4050 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 4051 * safe to say that this is an uncommon case, since buffers at the end of 4052 * the ARC lists have moved there due to inactivity. 4053 * 4054 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 4055 * then the L2ARC simply misses copying some buffers. This serves as a 4056 * pressure valve to prevent heavy read workloads from both stalling the ARC 4057 * with waits and clogging the L2ARC with writes. This also helps prevent 4058 * the potential for the L2ARC to churn if it attempts to cache content too 4059 * quickly, such as during backups of the entire pool. 4060 * 4061 * 5. After system boot and before the ARC has filled main memory, there are 4062 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 4063 * lists can remain mostly static. Instead of searching from tail of these 4064 * lists as pictured, the l2arc_feed_thread() will search from the list heads 4065 * for eligible buffers, greatly increasing its chance of finding them. 4066 * 4067 * The L2ARC device write speed is also boosted during this time so that 4068 * the L2ARC warms up faster. Since there have been no ARC evictions yet, 4069 * there are no L2ARC reads, and no fear of degrading read performance 4070 * through increased writes. 4071 * 4072 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 4073 * the vdev queue can aggregate them into larger and fewer writes. Each 4074 * device is written to in a rotor fashion, sweeping writes through 4075 * available space then repeating. 4076 * 4077 * 7. The L2ARC does not store dirty content. It never needs to flush 4078 * write buffers back to disk based storage. 4079 * 4080 * 8. If an ARC buffer is written (and dirtied) which also exists in the 4081 * L2ARC, the now stale L2ARC buffer is immediately dropped. 
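 *
 * A simplified sketch of one pass of the feed cycle described above
 * (the real loop is in l2arc_feed_thread() below):
 *
 *	size = l2arc_write_size(dev);		l2arc_write_max, plus
 *						l2arc_write_boost while cold
 *	l2arc_evict(dev, size, B_FALSE);	clear space ahead of the hand
 *	wrote = l2arc_write_buffers(spa, dev, size);
 *	next = l2arc_write_interval(begin, size, wrote);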
4082 * 4083 * The performance of the L2ARC can be tweaked by a number of tunables, which 4084 * may be necessary for different workloads: 4085 * 4086 * l2arc_write_max max write bytes per interval 4087 * l2arc_write_boost extra write bytes during device warmup 4088 * l2arc_noprefetch skip caching prefetched buffers 4089 * l2arc_headroom number of max device writes to precache 4090 * l2arc_feed_secs seconds between L2ARC writing 4091 * 4092 * Tunables may be removed or added as future performance improvements are 4093 * integrated, and also may become zpool properties. 4094 * 4095 * There are three key functions that control how the L2ARC warms up: 4096 * 4097 * l2arc_write_eligible() check if a buffer is eligible to cache 4098 * l2arc_write_size() calculate how much to write 4099 * l2arc_write_interval() calculate sleep delay between writes 4100 * 4101 * These three functions determine what to write, how much, and how quickly 4102 * to send writes. 4103 */ 4104 4105static boolean_t 4106l2arc_write_eligible(spa_t *spa, arc_buf_hdr_t *ab) 4107{ 4108 /* 4109 * A buffer is *not* eligible for the L2ARC if it: 4110 * 1. belongs to a different spa. 4111 * 2. is already cached on the L2ARC. 4112 * 3. has an I/O in progress (it may be an incomplete read). 4113 * 4. is flagged not eligible (zfs property). 4114 */ 4115 if (ab->b_spa != spa) { 4116 ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch); 4117 return (B_FALSE); 4118 } 4119 if (ab->b_l2hdr != NULL) { 4120 ARCSTAT_BUMP(arcstat_l2_write_in_l2); 4121 return (B_FALSE); 4122 } 4123 if (HDR_IO_IN_PROGRESS(ab)) { 4124 ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress); 4125 return (B_FALSE); 4126 } 4127 if (!HDR_L2CACHE(ab)) { 4128 ARCSTAT_BUMP(arcstat_l2_write_not_cacheable); 4129 return (B_FALSE); 4130 } 4131 4132 return (B_TRUE); 4133} 4134 4135static uint64_t 4136l2arc_write_size(l2arc_dev_t *dev) 4137{ 4138 uint64_t size; 4139 4140 size = dev->l2ad_write; 4141 4142 if (arc_warm == B_FALSE) 4143 size += dev->l2ad_boost; 4144 4145 return (size); 4146 4147} 4148 4149static clock_t 4150l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) 4151{ 4152 clock_t interval, next; 4153 4154 /* 4155 * If the ARC lists are busy, increase our write rate; if the 4156 * lists are stale, idle back. This is achieved by checking 4157 * how much we previously wrote - if it was more than half of 4158 * what we wanted, schedule the next write much sooner. 4159 */ 4160 if (l2arc_feed_again && wrote > (wanted / 2)) 4161 interval = (hz * l2arc_feed_min_ms) / 1000; 4162 else 4163 interval = hz * l2arc_feed_secs; 4164 4165 next = MAX(LBOLT, MIN(LBOLT + interval, began + interval)); 4166 4167 return (next); 4168} 4169 4170static void 4171l2arc_hdr_stat_add(void) 4172{ 4173 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 4174 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 4175} 4176 4177static void 4178l2arc_hdr_stat_remove(void) 4179{ 4180 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 4181 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 4182} 4183 4184/* 4185 * Cycle through L2ARC devices. This is how L2ARC load balances. 4186 * If a device is returned, this also returns holding the spa config lock. 4187 */ 4188static l2arc_dev_t * 4189l2arc_dev_get_next(void) 4190{ 4191 l2arc_dev_t *first, *next = NULL; 4192 4193 /* 4194 * Lock out the removal of spas (spa_namespace_lock), then removal 4195 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 4196 * both locks will be dropped and a spa config lock held instead. 
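 *
 * A sketch of the resulting order, as implied by the code below (not a
 * statement of the global lock hierarchy):
 *
 *	spa_namespace_lock			taken first, dropped last
 *	    l2arc_dev_mtx			held only while picking a device
 *	spa_config_enter(spa, SCL_L2ARC, dev, RW_READER)
 *						held by the caller across the write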
4197 */ 4198 mutex_enter(&spa_namespace_lock); 4199 mutex_enter(&l2arc_dev_mtx); 4200 4201 /* if there are no vdevs, there is nothing to do */ 4202 if (l2arc_ndev == 0) 4203 goto out; 4204 4205 first = NULL; 4206 next = l2arc_dev_last; 4207 do { 4208 /* loop around the list looking for a non-faulted vdev */ 4209 if (next == NULL) { 4210 next = list_head(l2arc_dev_list); 4211 } else { 4212 next = list_next(l2arc_dev_list, next); 4213 if (next == NULL) 4214 next = list_head(l2arc_dev_list); 4215 } 4216 4217 /* if we have come back to the start, bail out */ 4218 if (first == NULL) 4219 first = next; 4220 else if (next == first) 4221 break; 4222 4223 } while (vdev_is_dead(next->l2ad_vdev)); 4224 4225 /* if we were unable to find any usable vdevs, return NULL */ 4226 if (vdev_is_dead(next->l2ad_vdev)) 4227 next = NULL; 4228 4229 l2arc_dev_last = next; 4230 4231out: 4232 mutex_exit(&l2arc_dev_mtx); 4233 4234 /* 4235 * Grab the config lock to prevent the 'next' device from being 4236 * removed while we are writing to it. 4237 */ 4238 if (next != NULL) 4239 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 4240 mutex_exit(&spa_namespace_lock); 4241 4242 return (next); 4243} 4244 4245/* 4246 * Free buffers that were tagged for destruction. 4247 */ 4248static void 4249l2arc_do_free_on_write() 4250{ 4251 list_t *buflist; 4252 l2arc_data_free_t *df, *df_prev; 4253 4254 mutex_enter(&l2arc_free_on_write_mtx); 4255 buflist = l2arc_free_on_write; 4256 4257 for (df = list_tail(buflist); df; df = df_prev) { 4258 df_prev = list_prev(buflist, df); 4259 ASSERT(df->l2df_data != NULL); 4260 ASSERT(df->l2df_func != NULL); 4261 df->l2df_func(df->l2df_data, df->l2df_size); 4262 list_remove(buflist, df); 4263 kmem_free(df, sizeof (l2arc_data_free_t)); 4264 } 4265 4266 mutex_exit(&l2arc_free_on_write_mtx); 4267} 4268 4269/* 4270 * A write to a cache device has completed. Update all headers to allow 4271 * reads from these buffers to begin. 4272 */ 4273static void 4274l2arc_write_done(zio_t *zio) 4275{ 4276 l2arc_write_callback_t *cb; 4277 l2arc_dev_t *dev; 4278 list_t *buflist; 4279 arc_buf_hdr_t *head, *ab, *ab_prev; 4280 l2arc_buf_hdr_t *abl2; 4281 kmutex_t *hash_lock; 4282 4283 cb = zio->io_private; 4284 ASSERT(cb != NULL); 4285 dev = cb->l2wcb_dev; 4286 ASSERT(dev != NULL); 4287 head = cb->l2wcb_head; 4288 ASSERT(head != NULL); 4289 buflist = dev->l2ad_buflist; 4290 ASSERT(buflist != NULL); 4291 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 4292 l2arc_write_callback_t *, cb); 4293 4294 if (zio->io_error != 0) 4295 ARCSTAT_BUMP(arcstat_l2_writes_error); 4296 4297 mutex_enter(&l2arc_buflist_mtx); 4298 4299 /* 4300 * All writes completed, or an error was hit. 4301 */ 4302 for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 4303 ab_prev = list_prev(buflist, ab); 4304 4305 hash_lock = HDR_LOCK(ab); 4306 if (!mutex_tryenter(hash_lock)) { 4307 /* 4308 * This buffer misses out. It may be in a stage 4309 * of eviction. Its ARC_L2_WRITING flag will be 4310 * left set, denying reads to this buffer. 4311 */ 4312 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 4313 continue; 4314 } 4315 4316 if (zio->io_error != 0) { 4317 /* 4318 * Error - drop L2ARC entry. 4319 */ 4320 list_remove(buflist, ab); 4321 abl2 = ab->b_l2hdr; 4322 ab->b_l2hdr = NULL; 4323 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4324 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4325 } 4326 4327 /* 4328 * Allow ARC to begin reads to this L2ARC entry. 
4329 */ 4330 ab->b_flags &= ~ARC_L2_WRITING; 4331 4332 mutex_exit(hash_lock); 4333 } 4334 4335 atomic_inc_64(&l2arc_writes_done); 4336 list_remove(buflist, head); 4337 kmem_cache_free(hdr_cache, head); 4338 mutex_exit(&l2arc_buflist_mtx); 4339 4340 l2arc_do_free_on_write(); 4341 4342 kmem_free(cb, sizeof (l2arc_write_callback_t)); 4343} 4344 4345/* 4346 * A read to a cache device completed. Validate buffer contents before 4347 * handing over to the regular ARC routines. 4348 */ 4349static void 4350l2arc_read_done(zio_t *zio) 4351{ 4352 l2arc_read_callback_t *cb; 4353 arc_buf_hdr_t *hdr; 4354 arc_buf_t *buf; 4355 kmutex_t *hash_lock; 4356 int equal; 4357 4358 ASSERT(zio->io_vd != NULL); 4359 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 4360 4361 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 4362 4363 cb = zio->io_private; 4364 ASSERT(cb != NULL); 4365 buf = cb->l2rcb_buf; 4366 ASSERT(buf != NULL); 4367 hdr = buf->b_hdr; 4368 ASSERT(hdr != NULL); 4369 4370 hash_lock = HDR_LOCK(hdr); 4371 mutex_enter(hash_lock); 4372 4373 /* 4374 * Check this survived the L2ARC journey. 4375 */ 4376 equal = arc_cksum_equal(buf); 4377 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 4378 mutex_exit(hash_lock); 4379 zio->io_private = buf; 4380 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 4381 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 4382 arc_read_done(zio); 4383 } else { 4384 mutex_exit(hash_lock); 4385 /* 4386 * Buffer didn't survive caching. Increment stats and 4387 * reissue to the original storage device. 4388 */ 4389 if (zio->io_error != 0) { 4390 ARCSTAT_BUMP(arcstat_l2_io_error); 4391 } else { 4392 zio->io_error = EIO; 4393 } 4394 if (!equal) 4395 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 4396 4397 /* 4398 * If there's no waiter, issue an async i/o to the primary 4399 * storage now. If there *is* a waiter, the caller must 4400 * issue the i/o in a context where it's OK to block. 4401 */ 4402 if (zio->io_waiter == NULL) 4403 zio_nowait(zio_read(zio->io_parent, 4404 cb->l2rcb_spa, &cb->l2rcb_bp, 4405 buf->b_data, zio->io_size, arc_read_done, buf, 4406 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 4407 } 4408 4409 kmem_free(cb, sizeof (l2arc_read_callback_t)); 4410} 4411 4412/* 4413 * This is the list priority from which the L2ARC will search for pages to 4414 * cache. This is used within loops (0..3) to cycle through lists in the 4415 * desired order. This order can have a significant effect on cache 4416 * performance. 4417 * 4418 * Currently the metadata lists are hit first, MFU then MRU, followed by 4419 * the data lists. This function returns a locked list, and also returns 4420 * the lock pointer. 
4421 */ 4422static list_t * 4423l2arc_list_locked(int list_num, kmutex_t **lock) 4424{ 4425 list_t *list; 4426 int idx; 4427 4428 ASSERT(list_num >= 0 && list_num < 2 * ARC_BUFC_NUMLISTS); 4429 4430 if (list_num < ARC_BUFC_NUMMETADATALISTS) { 4431 idx = list_num; 4432 list = &arc_mfu->arcs_lists[idx]; 4433 *lock = ARCS_LOCK(arc_mfu, idx); 4434 } else if (list_num < ARC_BUFC_NUMMETADATALISTS * 2) { 4435 idx = list_num - ARC_BUFC_NUMMETADATALISTS; 4436 list = &arc_mru->arcs_lists[idx]; 4437 *lock = ARCS_LOCK(arc_mru, idx); 4438 } else if (list_num < (ARC_BUFC_NUMMETADATALISTS * 2 + 4439 ARC_BUFC_NUMDATALISTS)) { 4440 idx = list_num - ARC_BUFC_NUMMETADATALISTS; 4441 list = &arc_mfu->arcs_lists[idx]; 4442 *lock = ARCS_LOCK(arc_mfu, idx); 4443 } else { 4444 idx = list_num - ARC_BUFC_NUMLISTS; 4445 list = &arc_mru->arcs_lists[idx]; 4446 *lock = ARCS_LOCK(arc_mru, idx); 4447 } 4448 4449 ASSERT(!(MUTEX_HELD(*lock))); 4450 mutex_enter(*lock); 4451 return (list); 4452} 4453 4454/* 4455 * Evict buffers from the device write hand to the distance specified in 4456 * bytes. This distance may span populated buffers, it may span nothing. 4457 * This is clearing a region on the L2ARC device ready for writing. 4458 * If the 'all' boolean is set, every buffer is evicted. 4459 */ 4460static void 4461l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 4462{ 4463 list_t *buflist; 4464 l2arc_buf_hdr_t *abl2; 4465 arc_buf_hdr_t *ab, *ab_prev; 4466 kmutex_t *hash_lock; 4467 uint64_t taddr; 4468 4469 buflist = dev->l2ad_buflist; 4470 4471 if (buflist == NULL) 4472 return; 4473 4474 if (!all && dev->l2ad_first) { 4475 /* 4476 * This is the first sweep through the device. There is 4477 * nothing to evict. 4478 */ 4479 return; 4480 } 4481 4482 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 4483 /* 4484 * When nearing the end of the device, evict to the end 4485 * before the device write hand jumps to the start. 4486 */ 4487 taddr = dev->l2ad_end; 4488 } else { 4489 taddr = dev->l2ad_hand + distance; 4490 } 4491 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 4492 uint64_t, taddr, boolean_t, all); 4493 4494top: 4495 mutex_enter(&l2arc_buflist_mtx); 4496 for (ab = list_tail(buflist); ab; ab = ab_prev) { 4497 ab_prev = list_prev(buflist, ab); 4498 4499 hash_lock = HDR_LOCK(ab); 4500 if (!mutex_tryenter(hash_lock)) { 4501 /* 4502 * Missed the hash lock. Retry. 4503 */ 4504 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 4505 mutex_exit(&l2arc_buflist_mtx); 4506 mutex_enter(hash_lock); 4507 mutex_exit(hash_lock); 4508 goto top; 4509 } 4510 4511 if (HDR_L2_WRITE_HEAD(ab)) { 4512 /* 4513 * We hit a write head node. Leave it for 4514 * l2arc_write_done(). 4515 */ 4516 list_remove(buflist, ab); 4517 mutex_exit(hash_lock); 4518 continue; 4519 } 4520 4521 if (!all && ab->b_l2hdr != NULL && 4522 (ab->b_l2hdr->b_daddr > taddr || 4523 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 4524 /* 4525 * We've evicted to the target address, 4526 * or the end of the device. 4527 */ 4528 mutex_exit(hash_lock); 4529 break; 4530 } 4531 4532 if (HDR_FREE_IN_PROGRESS(ab)) { 4533 /* 4534 * Already on the path to destruction. 4535 */ 4536 mutex_exit(hash_lock); 4537 continue; 4538 } 4539 4540 if (ab->b_state == arc_l2c_only) { 4541 ASSERT(!HDR_L2_READING(ab)); 4542 /* 4543 * This doesn't exist in the ARC. Destroy. 4544 * arc_hdr_destroy() will call list_remove() 4545 * and decrement arcstat_l2_size. 
4546 */ 4547 arc_change_state(arc_anon, ab, hash_lock); 4548 arc_hdr_destroy(ab); 4549 } else { 4550 /* 4551 * Invalidate issued or about to be issued 4552 * reads, since we may be about to write 4553 * over this location. 4554 */ 4555 if (HDR_L2_READING(ab)) { 4556 ARCSTAT_BUMP(arcstat_l2_evict_reading); 4557 ab->b_flags |= ARC_L2_EVICTED; 4558 } 4559 4560 /* 4561 * Tell ARC this no longer exists in L2ARC. 4562 */ 4563 if (ab->b_l2hdr != NULL) { 4564 abl2 = ab->b_l2hdr; 4565 ab->b_l2hdr = NULL; 4566 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4567 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4568 } 4569 list_remove(buflist, ab); 4570 4571 /* 4572 * This may have been leftover after a 4573 * failed write. 4574 */ 4575 ab->b_flags &= ~ARC_L2_WRITING; 4576 } 4577 mutex_exit(hash_lock); 4578 } 4579 mutex_exit(&l2arc_buflist_mtx); 4580 4581 spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict)); 4582 dev->l2ad_evict = taddr; 4583} 4584 4585/* 4586 * Find and write ARC buffers to the L2ARC device. 4587 * 4588 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 4589 * for reading until they have completed writing. 4590 */ 4591static uint64_t 4592l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 4593{ 4594 arc_buf_hdr_t *ab, *ab_prev, *head; 4595 l2arc_buf_hdr_t *hdrl2; 4596 list_t *list; 4597 uint64_t passed_sz, write_sz, buf_sz, headroom; 4598 void *buf_data; 4599 kmutex_t *hash_lock, *list_lock; 4600 boolean_t have_lock, full; 4601 l2arc_write_callback_t *cb; 4602 zio_t *pio, *wzio; 4603 int try; 4604 4605 ASSERT(dev->l2ad_vdev != NULL); 4606 4607 pio = NULL; 4608 write_sz = 0; 4609 full = B_FALSE; 4610 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 4611 head->b_flags |= ARC_L2_WRITE_HEAD; 4612 4613 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter); 4614 /* 4615 * Copy buffers for L2ARC writing. 4616 */ 4617 mutex_enter(&l2arc_buflist_mtx); 4618 for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) { 4619 list = l2arc_list_locked(try, &list_lock); 4620 passed_sz = 0; 4621 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter); 4622 4623 /* 4624 * L2ARC fast warmup. 4625 * 4626 * Until the ARC is warm and starts to evict, read from the 4627 * head of the ARC lists rather than the tail. 4628 */ 4629 headroom = target_sz * l2arc_headroom; 4630 if (arc_warm == B_FALSE) 4631 ab = list_head(list); 4632 else 4633 ab = list_tail(list); 4634 if (ab == NULL) 4635 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter); 4636 4637 for (; ab; ab = ab_prev) { 4638 if (arc_warm == B_FALSE) 4639 ab_prev = list_next(list, ab); 4640 else 4641 ab_prev = list_prev(list, ab); 4642 ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size); 4643 4644 hash_lock = HDR_LOCK(ab); 4645 have_lock = MUTEX_HELD(hash_lock); 4646 if (!have_lock && !mutex_tryenter(hash_lock)) { 4647 ARCSTAT_BUMP(arcstat_l2_write_trylock_fail); 4648 /* 4649 * Skip this buffer rather than waiting. 4650 */ 4651 continue; 4652 } 4653 4654 passed_sz += ab->b_size; 4655 if (passed_sz > headroom) { 4656 /* 4657 * Searched too far. 
4658 */ 4659 mutex_exit(hash_lock); 4660 ARCSTAT_BUMP(arcstat_l2_write_passed_headroom); 4661 break; 4662 } 4663 4664 if (!l2arc_write_eligible(spa, ab)) { 4665 mutex_exit(hash_lock); 4666 continue; 4667 } 4668 4669 if ((write_sz + ab->b_size) > target_sz) { 4670 full = B_TRUE; 4671 mutex_exit(hash_lock); 4672 ARCSTAT_BUMP(arcstat_l2_write_full); 4673 break; 4674 } 4675 4676 if (pio == NULL) { 4677 /* 4678 * Insert a dummy header on the buflist so 4679 * l2arc_write_done() can find where the 4680 * write buffers begin without searching. 4681 */ 4682 list_insert_head(dev->l2ad_buflist, head); 4683 4684 cb = kmem_alloc( 4685 sizeof (l2arc_write_callback_t), KM_SLEEP); 4686 cb->l2wcb_dev = dev; 4687 cb->l2wcb_head = head; 4688 pio = zio_root(spa, l2arc_write_done, cb, 4689 ZIO_FLAG_CANFAIL); 4690 ARCSTAT_BUMP(arcstat_l2_write_pios); 4691 } 4692 4693 /* 4694 * Create and add a new L2ARC header. 4695 */ 4696 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 4697 hdrl2->b_dev = dev; 4698 hdrl2->b_daddr = dev->l2ad_hand; 4699 4700 ab->b_flags |= ARC_L2_WRITING; 4701 ab->b_l2hdr = hdrl2; 4702 list_insert_head(dev->l2ad_buflist, ab); 4703 buf_data = ab->b_buf->b_data; 4704 buf_sz = ab->b_size; 4705 4706 /* 4707 * Compute and store the buffer cksum before 4708 * writing. On debug the cksum is verified first. 4709 */ 4710 arc_cksum_verify(ab->b_buf); 4711 arc_cksum_compute(ab->b_buf, B_TRUE); 4712 4713 mutex_exit(hash_lock); 4714 4715 wzio = zio_write_phys(pio, dev->l2ad_vdev, 4716 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 4717 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 4718 ZIO_FLAG_CANFAIL, B_FALSE); 4719 4720 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 4721 zio_t *, wzio); 4722 (void) zio_nowait(wzio); 4723 4724 /* 4725 * Keep the clock hand suitably device-aligned. 4726 */ 4727 buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); 4728 4729 write_sz += buf_sz; 4730 dev->l2ad_hand += buf_sz; 4731 } 4732 4733 mutex_exit(list_lock); 4734 4735 if (full == B_TRUE) 4736 break; 4737 } 4738 mutex_exit(&l2arc_buflist_mtx); 4739 4740 if (pio == NULL) { 4741 ASSERT3U(write_sz, ==, 0); 4742 kmem_cache_free(hdr_cache, head); 4743 return (0); 4744 } 4745 4746 ASSERT3U(write_sz, <=, target_sz); 4747 ARCSTAT_BUMP(arcstat_l2_writes_sent); 4748 ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz); 4749 ARCSTAT_INCR(arcstat_l2_size, write_sz); 4750 spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz); 4751 4752 /* 4753 * Bump device hand to the device start if it is approaching the end. 4754 * l2arc_evict() will already have evicted ahead for this case. 4755 */ 4756 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { 4757 spa_l2cache_space_update(dev->l2ad_vdev, 0, 4758 dev->l2ad_end - dev->l2ad_hand); 4759 dev->l2ad_hand = dev->l2ad_start; 4760 dev->l2ad_evict = dev->l2ad_start; 4761 dev->l2ad_first = B_FALSE; 4762 } 4763 4764 dev->l2ad_writing = B_TRUE; 4765 (void) zio_wait(pio); 4766 dev->l2ad_writing = B_FALSE; 4767 4768 return (write_sz); 4769} 4770 4771/* 4772 * This thread feeds the L2ARC at regular intervals. This is the beating 4773 * heart of the L2ARC. 
4774 */ 4775static void 4776l2arc_feed_thread(void *dummy __unused) 4777{ 4778 callb_cpr_t cpr; 4779 l2arc_dev_t *dev; 4780 spa_t *spa; 4781 uint64_t size, wrote; 4782 clock_t begin, next = LBOLT; 4783 4784 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 4785 4786 mutex_enter(&l2arc_feed_thr_lock); 4787 4788 while (l2arc_thread_exit == 0) { 4789 CALLB_CPR_SAFE_BEGIN(&cpr); 4790 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 4791 next - LBOLT); 4792 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 4793 next = LBOLT + hz; 4794 4795 /* 4796 * Quick check for L2ARC devices. 4797 */ 4798 mutex_enter(&l2arc_dev_mtx); 4799 if (l2arc_ndev == 0) { 4800 mutex_exit(&l2arc_dev_mtx); 4801 continue; 4802 } 4803 mutex_exit(&l2arc_dev_mtx); 4804 begin = LBOLT; 4805 4806 /* 4807 * This selects the next l2arc device to write to, and in 4808 * doing so the next spa to feed from: dev->l2ad_spa. This 4809 * will return NULL if there are now no l2arc devices or if 4810 * they are all faulted. 4811 * 4812 * If a device is returned, its spa's config lock is also 4813 * held to prevent device removal. l2arc_dev_get_next() 4814 * will grab and release l2arc_dev_mtx. 4815 */ 4816 if ((dev = l2arc_dev_get_next()) == NULL) 4817 continue; 4818 4819 spa = dev->l2ad_spa; 4820 ASSERT(spa != NULL); 4821 4822 /* 4823 * Avoid contributing to memory pressure. 4824 */ 4825 if (arc_reclaim_needed()) { 4826 ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 4827 spa_config_exit(spa, SCL_L2ARC, dev); 4828 continue; 4829 } 4830 4831 ARCSTAT_BUMP(arcstat_l2_feeds); 4832 4833 size = l2arc_write_size(dev); 4834 4835 /* 4836 * Evict L2ARC buffers that will be overwritten. 4837 */ 4838 l2arc_evict(dev, size, B_FALSE); 4839 4840 /* 4841 * Write ARC buffers. 4842 */ 4843 wrote = l2arc_write_buffers(spa, dev, size); 4844 4845 /* 4846 * Calculate interval between writes. 4847 */ 4848 next = l2arc_write_interval(begin, size, wrote); 4849 spa_config_exit(spa, SCL_L2ARC, dev); 4850 } 4851 4852 l2arc_thread_exit = 0; 4853 cv_broadcast(&l2arc_feed_thr_cv); 4854 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 4855 thread_exit(); 4856} 4857 4858boolean_t 4859l2arc_vdev_present(vdev_t *vd) 4860{ 4861 l2arc_dev_t *dev; 4862 4863 mutex_enter(&l2arc_dev_mtx); 4864 for (dev = list_head(l2arc_dev_list); dev != NULL; 4865 dev = list_next(l2arc_dev_list, dev)) { 4866 if (dev->l2ad_vdev == vd) 4867 break; 4868 } 4869 mutex_exit(&l2arc_dev_mtx); 4870 4871 return (dev != NULL); 4872} 4873 4874/* 4875 * Add a vdev for use by the L2ARC. By this point the spa has already 4876 * validated the vdev and opened it. 4877 */ 4878void 4879l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end) 4880{ 4881 l2arc_dev_t *adddev; 4882 4883 ASSERT(!l2arc_vdev_present(vd)); 4884 4885 /* 4886 * Create a new l2arc device entry. 4887 */ 4888 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 4889 adddev->l2ad_spa = spa; 4890 adddev->l2ad_vdev = vd; 4891 adddev->l2ad_write = l2arc_write_max; 4892 adddev->l2ad_boost = l2arc_write_boost; 4893 adddev->l2ad_start = start; 4894 adddev->l2ad_end = end; 4895 adddev->l2ad_hand = adddev->l2ad_start; 4896 adddev->l2ad_evict = adddev->l2ad_start; 4897 adddev->l2ad_first = B_TRUE; 4898 adddev->l2ad_writing = B_FALSE; 4899 ASSERT3U(adddev->l2ad_write, >, 0); 4900 4901 /* 4902 * This is a list of all ARC buffers that are still valid on the 4903 * device. 
4904 */ 4905 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 4906 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 4907 offsetof(arc_buf_hdr_t, b_l2node)); 4908 4909 spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0); 4910 4911 /* 4912 * Add device to global list 4913 */ 4914 mutex_enter(&l2arc_dev_mtx); 4915 list_insert_head(l2arc_dev_list, adddev); 4916 atomic_inc_64(&l2arc_ndev); 4917 mutex_exit(&l2arc_dev_mtx); 4918} 4919 4920/* 4921 * Remove a vdev from the L2ARC. 4922 */ 4923void 4924l2arc_remove_vdev(vdev_t *vd) 4925{ 4926 l2arc_dev_t *dev, *nextdev, *remdev = NULL; 4927 4928 /* 4929 * Find the device by vdev 4930 */ 4931 mutex_enter(&l2arc_dev_mtx); 4932 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 4933 nextdev = list_next(l2arc_dev_list, dev); 4934 if (vd == dev->l2ad_vdev) { 4935 remdev = dev; 4936 break; 4937 } 4938 } 4939 ASSERT(remdev != NULL); 4940 4941 /* 4942 * Remove device from global list 4943 */ 4944 list_remove(l2arc_dev_list, remdev); 4945 l2arc_dev_last = NULL; /* may have been invalidated */ 4946 atomic_dec_64(&l2arc_ndev); 4947 mutex_exit(&l2arc_dev_mtx); 4948 4949 /* 4950 * Clear all buflists and ARC references. L2ARC device flush. 4951 */ 4952 l2arc_evict(remdev, 0, B_TRUE); 4953 list_destroy(remdev->l2ad_buflist); 4954 kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 4955 kmem_free(remdev, sizeof (l2arc_dev_t)); 4956} 4957 4958void 4959l2arc_init(void) 4960{ 4961 l2arc_thread_exit = 0; 4962 l2arc_ndev = 0; 4963 l2arc_writes_sent = 0; 4964 l2arc_writes_done = 0; 4965 4966 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 4967 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 4968 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 4969 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 4970 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 4971 4972 l2arc_dev_list = &L2ARC_dev_list; 4973 l2arc_free_on_write = &L2ARC_free_on_write; 4974 list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 4975 offsetof(l2arc_dev_t, l2ad_node)); 4976 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 4977 offsetof(l2arc_data_free_t, l2df_list_node)); 4978} 4979 4980void 4981l2arc_fini(void) 4982{ 4983 /* 4984 * This is called from dmu_fini(), which is called from spa_fini(); 4985 * Because of this, we can assume that all l2arc devices have 4986 * already been removed when the pools themselves were removed. 4987 */ 4988 4989 l2arc_do_free_on_write(); 4990 4991 mutex_destroy(&l2arc_feed_thr_lock); 4992 cv_destroy(&l2arc_feed_thr_cv); 4993 mutex_destroy(&l2arc_dev_mtx); 4994 mutex_destroy(&l2arc_buflist_mtx); 4995 mutex_destroy(&l2arc_free_on_write_mtx); 4996 4997 list_destroy(l2arc_dev_list); 4998 list_destroy(l2arc_free_on_write); 4999} 5000 5001void 5002l2arc_start(void) 5003{ 5004 if (!(spa_mode & FWRITE)) 5005 return; 5006 5007 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 5008 TS_RUN, minclsyspri); 5009} 5010 5011void 5012l2arc_stop(void) 5013{ 5014 if (!(spa_mode & FWRITE)) 5015 return; 5016 5017 mutex_enter(&l2arc_feed_thr_lock); 5018 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 5019 l2arc_thread_exit = 1; 5020 while (l2arc_thread_exit != 0) 5021 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 5022 mutex_exit(&l2arc_feed_thr_lock); 5023} 5024